This commit is contained in:
timlyeee 2023-07-06 15:07:07 -07:00 committed by GitHub
commit a515eb683f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
229 changed files with 119678 additions and 3 deletions

View File

@ -33,6 +33,21 @@ set_target_properties(uv PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES ${platform_spec_path}/include/uv
)
# Pre-built audio static libraries for Android (sample-rate conversion,
# WavPack decoding, and the libnyquist decode front-end used by LabSound).
# NOTE(review): the checked-in binaries really are named "liblibwavpack.a" /
# "liblibnyquist.a" (see android/x86 in this commit), hence the doubled
# "lib" prefix in the paths below.
add_library(libsamplerate STATIC IMPORTED GLOBAL)
set_target_properties(libsamplerate PROPERTIES
IMPORTED_LOCATION ${platform_spec_path}/libsamplerate.a
)
add_library(libwavpack STATIC IMPORTED GLOBAL)
set_target_properties(libwavpack PROPERTIES
IMPORTED_LOCATION ${platform_spec_path}/liblibwavpack.a
)
add_library(libnyquist STATIC IMPORTED GLOBAL)
set_target_properties(libnyquist PROPERTIES
IMPORTED_LOCATION ${platform_spec_path}/liblibnyquist.a
)
add_library(webp STATIC IMPORTED GLOBAL)
set_target_properties(webp PROPERTIES
IMPORTED_LOCATION ${platform_spec_path}/libwebp.a
@ -142,6 +157,9 @@ list(APPEND CC_EXTERNAL_LIBS
png
webp
uv
libsamplerate
libnyquist
libwavpack
android_platform
${PhysXSDK}
)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
android/x86/liblibnyquist.a Normal file

Binary file not shown.

BIN
android/x86/liblibwavpack.a Normal file

Binary file not shown.

BIN
android/x86/libsamplerate.a Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -43,6 +43,21 @@ set_target_properties(websockets PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libwebsockets.a
)
# Pre-built audio static libraries for iOS (same trio as the android/mac
# platform files; keep the three platform lists in sync).
# NOTE(review): "liblibwavpack.a" / "liblibnyquist.a" match the checked-in
# binary names (ios/libs in this commit) — the doubled "lib" is intentional.
add_library(libsamplerate STATIC IMPORTED GLOBAL)
set_target_properties(libsamplerate PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libsamplerate.a
)
add_library(libwavpack STATIC IMPORTED GLOBAL)
set_target_properties(libwavpack PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/liblibwavpack.a
)
add_library(libnyquist STATIC IMPORTED GLOBAL)
set_target_properties(libnyquist PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/liblibnyquist.a
)
add_library(v8_monolith STATIC IMPORTED GLOBAL)
set(V8_COMPILE_FLAGS
V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=64
@ -220,6 +235,9 @@ list(APPEND CC_EXTERNAL_LIBS
jpeg
png
webp
libsamplerate
libnyquist
libwavpack
${glslang_libs_name}
${spirv-cross_libs_name}
${tbb_libs_name}

BIN
ios/libs/liblibnyquist.a Normal file

Binary file not shown.

BIN
ios/libs/liblibwavpack.a Normal file

Binary file not shown.

BIN
ios/libs/libsamplerate.a Normal file

Binary file not shown.

View File

@ -80,6 +80,20 @@ add_library(GLESv2 SHARED IMPORTED GLOBAL)
set_target_properties(GLESv2 PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libGLESv2.dylib
)
# Pre-built audio static libraries for macOS (same trio as the android/ios
# platform files; keep the three platform lists in sync).
# NOTE(review): "liblibwavpack.a" / "liblibnyquist.a" match the checked-in
# binary names (mac/libs in this commit) — the doubled "lib" is intentional.
add_library(libsamplerate STATIC IMPORTED GLOBAL)
set_target_properties(libsamplerate PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libsamplerate.a
)
add_library(libwavpack STATIC IMPORTED GLOBAL)
set_target_properties(libwavpack PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/liblibwavpack.a
)
add_library(libnyquist STATIC IMPORTED GLOBAL)
set_target_properties(libnyquist PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/liblibnyquist.a
)
set(se_libs_name)
@ -205,6 +219,9 @@ list(APPEND CC_EXTERNAL_LIBS
png
webp
curl
# Audio decode/resample static libs for the mac link line.
# NOTE(review): the order here (libsamplerate, libwavpack, libnyquist)
# differs from the android/ios additions (libsamplerate, libnyquist,
# libwavpack). Order matters to single-pass static linkers (libnyquist
# depends on the other two, so it should normally precede them) — Apple's
# linker is order-insensitive so this works on mac, but confirm the intended
# canonical order and make the three platforms consistent.
libsamplerate
libwavpack
libnyquist
${glslang_libs_name}
${spirv-cross_libs_name}
${tbb_libs_name}

BIN
mac/libs/liblibnyquist.a Normal file

Binary file not shown.

BIN
mac/libs/liblibwavpack.a Normal file

Binary file not shown.

BIN
mac/libs/libsamplerate.a Normal file

Binary file not shown.

View File

@ -76,7 +76,7 @@ if(ANDROID OR OHOS OR NX)
list(APPEND CC_EXTERNAL_LIBS
pvmp3dec
vorbisidec
# vorbisidec
)
elseif(WINDOWS)
@ -107,3 +107,20 @@ if(USE_PHYSICS_PHYSX)
${CMAKE_CURRENT_LIST_DIR}/PhysX/PxShared/include
)
endif()
# --- Audio backend (LabSound) wiring -----------------------------------------
# CC_USE_AUDIO_DEBUG: when ON, the LabSound sources are compiled directly into
# the engine (so the audio stack can be stepped through while debugging),
# in addition to exporting its include paths.
# NOTE(review): hardcoded ON — consider an option() so users can turn it off.
set(CC_USE_AUDIO_DEBUG ON)
# LabSound is not used on OpenHarmony (OHOS); skip it entirely there.
# (The original had an empty if(OHOS) branch and duplicated the include()
# in both remaining branches; this collapses them with identical effect.)
if(NOT OHOS)
    include(${CMAKE_CURRENT_LIST_DIR}/LabSound/CMakeLists.txt)
    list(APPEND CC_EXTERNAL_INCLUDES
        ${LABSOUND_INCLUDE}
    )
    if(CC_USE_AUDIO_DEBUG)
        list(APPEND CC_EXTERNAL_SOURCES
            ${LABSOUND_SOURCES}
        )
    endif()
endif()

View File

@ -0,0 +1,18 @@
# Entry point for the vendored LabSound build glue: records the LabSound root,
# sets per-platform backend defaults, then delegates to LabSound.cmake which
# fills LABSOUND_SOURCES / LABSOUND_INCLUDE.
include_guard()
set(LABSOUND_ROOT "${CMAKE_CURRENT_LIST_DIR}")
# default backend set up: on Linux both LabSound and libnyquist use PulseAudio
if (LINUX)
set(LABSOUND_PULSE ON)
set(LIBNYQUIST_PULSE ON)
endif()
# NOTE: This configuration is built for debug level 4, for debugging into the libnyquist decode procedure.
# The third-party sub-builds below are intentionally disabled; the prebuilt
# platform static libs (libsamplerate / liblibnyquist / liblibwavpack) are
# linked instead.
# include(${CMAKE_CURRENT_LIST_DIR}/third_party/libnyquist/CMakeLists.txt)
# suppress testing of libsamplerate
# include(${CMAKE_CURRENT_LIST_DIR}/third_party/libsamplerate/CMakeLists.txt)
include(${CMAKE_CURRENT_LIST_DIR}/LabSound.cmake)

View File

@ -0,0 +1,101 @@
# LabSound
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2020, The LabSound Authors. All rights reserved.
#
# Collects LabSound's sources into LABSOUND_SOURCES and its include paths into
# LABSOUND_INCLUDE for consumption by the engine build (no target is created
# here). Expects LABSOUND_ROOT to be set by the including CMakeLists.txt.

# Public API headers and core/extended/internal sources.
file(GLOB labsnd_core_h "${LABSOUND_ROOT}/include/LabSound/core/*")
file(GLOB labsnd_extended_h "${LABSOUND_ROOT}/include/LabSound/extended/*")
file(GLOB labsnd_core "${LABSOUND_ROOT}/src/core/*")
file(GLOB labsnd_extended "${LABSOUND_ROOT}/src/extended/*")
file(GLOB labsnd_int_h "${LABSOUND_ROOT}/src/internal/*")
file(GLOB labsnd_int_src "${LABSOUND_ROOT}/src/internal/src/*")

# backend selection: each platform picks exactly one audio device backend
# (miniaudio or RtAudio). These are cache options, so a user may still
# override them on the configure line.
if (IOS)
    option(LABSOUND_USE_MINIAUDIO "Use miniaudio" ON)
    option(LABSOUND_USE_RTAUDIO "Use RtAudio" OFF)
elseif (APPLE)
    option(LABSOUND_USE_MINIAUDIO "Use miniaudio" OFF)
    option(LABSOUND_USE_RTAUDIO "Use RtAudio" ON)
elseif (WIN32)
    option(LABSOUND_USE_MINIAUDIO "Use miniaudio" OFF)
    option(LABSOUND_USE_RTAUDIO "Use RtAudio" ON)
elseif (ANDROID)
    option(LABSOUND_USE_MINIAUDIO "Use miniaudio" ON)
    option(LABSOUND_USE_RTAUDIO "Use RtAudio" OFF)
elseif (LINUX)
    option(LABSOUND_USE_MINIAUDIO "Use miniaudio" OFF)
    option(LABSOUND_USE_RTAUDIO "Use RtAudio" ON)
else ()
    # For Harmony OS, we should not link this library: no backend is selected,
    # so labsnd_backend stays empty below.
    # message(FATAL, " Untested platform. Please try miniaudio and report results on the LabSound issues page")
endif()

if (LABSOUND_USE_MINIAUDIO)
    message(STATUS "Using miniaudio backend")
    # iOS needs the Objective-C++ (.mm) variant of the miniaudio device.
    if (IOS)
        set(labsnd_backend
            "${LABSOUND_ROOT}/src/backends/miniaudio/AudioDevice_Miniaudio.mm"
            "${LABSOUND_ROOT}/src/backends/miniaudio/AudioDevice_Miniaudio.h"
            "${LABSOUND_ROOT}/third_party/miniaudio/miniaudio.h"
        )
    else()
        set(labsnd_backend
            "${LABSOUND_ROOT}/src/backends/miniaudio/AudioDevice_Miniaudio.cpp"
            "${LABSOUND_ROOT}/src/backends/miniaudio/AudioDevice_Miniaudio.h"
            "${LABSOUND_ROOT}/third_party/miniaudio/miniaudio.h"
        )
    endif()
elseif (LABSOUND_USE_RTAUDIO)
    message(STATUS "Using RtAudio backend")
    set(labsnd_backend
        "${LABSOUND_ROOT}/src/backends/RtAudio/AudioDevice_RtAudio.cpp"
        "${LABSOUND_ROOT}/src/backends/RtAudio/AudioDevice_RtAudio.h"
        "${LABSOUND_ROOT}/src/backends/RtAudio/RtAudio.cpp"
        "${LABSOUND_ROOT}/src/backends/RtAudio/RtAudio.h"
    )
endif()

# FFT implementation: Apple Accelerate on Apple platforms, kissfft elsewhere.
if (APPLE)
    # NOTE(review): "FFTFrameAppleAcclerate" (sic) matches the actual upstream
    # file name — do not "fix" the spelling without renaming the file.
    set(labsnd_fft_src "${LABSOUND_ROOT}/src/backends/FFTFrameAppleAcclerate.cpp")
else()
    file(GLOB labsnd_fft_src "${LABSOUND_ROOT}/third_party/kissfft/src/*")
endif()

# TODO ooura or kissfft? benchmark and choose. Then benchmark vs FFTFrameAppleAcclerate
# (A file(GLOB ooura_src ...) that previously sat with the other globs was
# dead code — this set() always overwrote its result — and has been removed.)
set(ooura_src
    "${LABSOUND_ROOT}/third_party/ooura/src/fftsg.cpp"
    "${LABSOUND_ROOT}/third_party/ooura/fftsg.h")

# Full source list exported to the engine build.
set(LABSOUND_SOURCES
    "${LABSOUND_ROOT}/include/LabSound/LabSound.h"
    ${labsnd_core_h}
    ${labsnd_core}
    ${labsnd_extended_h}
    ${labsnd_extended}
    ${labsnd_int_h}
    ${labsnd_int_src}
    ${labsnd_backend}
    ${labsnd_fft_src}
    ${ooura_src}
)

# LABSOUND_INCLUDE: Export for cocos (quoted so paths with spaces survive).
list(APPEND LABSOUND_INCLUDE
    "${LABSOUND_ROOT}/src"
    "${LABSOUND_ROOT}/include"
    "${LABSOUND_ROOT}/third_party"
    "${LABSOUND_ROOT}/third_party/libnyquist/include"
    "${LABSOUND_ROOT}/third_party/ooura"
    "${LABSOUND_ROOT}/third_party/ooura/src"
    "${LABSOUND_ROOT}/third_party/kissfft/src"
    "${LABSOUND_ROOT}/third_party/miniaudio"
)

View File

@ -0,0 +1,83 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015, The LabSound Authors. All rights reserved.

// Umbrella header for the LabSound public API plus the top-level factory
// functions for creating audio contexts and enumerating devices.
#pragma once
#ifndef LABSOUND_H
#define LABSOUND_H

// WebAudio Public API
#include "LabSound/core/AudioDevice.h"
#include "LabSound/core/NullDeviceNode.h"
#include "LabSound/core/AudioHardwareDeviceNode.h"
#include "LabSound/core/AudioHardwareInputNode.h"
#include "LabSound/core/AnalyserNode.h"
#include "LabSound/core/AudioBasicInspectorNode.h"
#include "LabSound/core/AudioBasicProcessorNode.h"
#include "LabSound/core/AudioContext.h"
#include "LabSound/core/AudioListener.h"
#include "LabSound/core/AudioNodeInput.h"
#include "LabSound/core/AudioNodeOutput.h"
#include "LabSound/core/AudioScheduledSourceNode.h"
#include "LabSound/core/BiquadFilterNode.h"
#include "LabSound/core/ChannelMergerNode.h"
#include "LabSound/core/ChannelSplitterNode.h"
#include "LabSound/core/ConvolverNode.h"
#include "LabSound/core/DelayNode.h"
#include "LabSound/core/DynamicsCompressorNode.h"
#include "LabSound/core/GainNode.h"
#include "LabSound/core/OscillatorNode.h"
#include "LabSound/core/PannerNode.h"
#include "LabSound/core/SampledAudioNode.h"
#include "LabSound/core/StereoPannerNode.h"
#include "LabSound/core/WaveShaperNode.h"

// LabSound Extended Public API
#include "LabSound/extended/ADSRNode.h"
#include "LabSound/extended/AudioFileReader.h"
#include "LabSound/extended/BPMDelayNode.h"
#include "LabSound/extended/ClipNode.h"
#include "LabSound/extended/DiodeNode.h"
#include "LabSound/extended/FunctionNode.h"
#include "LabSound/extended/GranulationNode.h"
#include "LabSound/extended/NoiseNode.h"
//#include "LabSound/extended/PdNode.h"
#include "LabSound/extended/PeakCompNode.h"
#include "LabSound/extended/PingPongDelayNode.h"
#include "LabSound/extended/PolyBLEPNode.h"
#include "LabSound/extended/PowerMonitorNode.h"
#include "LabSound/extended/PWMNode.h"
#include "LabSound/extended/RealtimeAnalyser.h"
#include "LabSound/extended/Registry.h"
#include "LabSound/extended/RecorderNode.h"
#include "LabSound/extended/SfxrNode.h"
#include "LabSound/extended/SpatializationNode.h"
#include "LabSound/extended/SpectralMonitorNode.h"
#include "LabSound/extended/SupersawNode.h"

// Standard-library types used by the declarations below; included directly
// rather than relying on transitive includes.
#include <memory>
#include <vector>

namespace lab
{
    // Enumerates the audio devices known to the active backend.
    const std::vector<AudioDeviceInfo> MakeAudioDeviceList();

    // Indices of the backend's default output / input devices.
    const AudioDeviceIndex GetDefaultOutputAudioDeviceIndex();
    const AudioDeviceIndex GetDefaultInputAudioDeviceIndex();

    // Creates a context driven by the audio hardware, configured per the
    // supplied output / input stream configurations.
    std::unique_ptr<AudioContext> MakeRealtimeAudioContext(const AudioStreamConfig & outputConfig, const AudioStreamConfig & inputConfig);

    // Creates an offline (non-realtime) context that renders
    // recordTimeMilliseconds of audio.
    std::unique_ptr<AudioContext> MakeOfflineAudioContext(const AudioStreamConfig & offlineConfig, double recordTimeMilliseconds);

    // An offline context paired with its device; call process() to render.
    struct OfflineContext
    {
        void * device;  // opaque device handle — NOTE(review): ownership/lifetime not documented here; confirm in implementation
        std::unique_ptr<AudioContext> context;
        void process(size_t samples);
    };
    OfflineContext MakeOfflineAudioContext(const AudioStreamConfig &);

    // Creates a node that delivers hardware input; takes the render lock.
    std::shared_ptr<AudioHardwareInputNode> MakeAudioHardwareInputNode(ContextRenderLock & r);

    // Default stream configurations for the backend's default devices.
    AudioStreamConfig GetDefaultInputAudioDeviceConfiguration();
    AudioStreamConfig GetDefaultOutputAudioDeviceConfiguration();
}
#endif

View File

@ -0,0 +1,69 @@
// License: BSD 2 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.

#ifndef AnalyserNode_h
#define AnalyserNode_h

#include "LabSound/core/AudioBasicInspectorNode.h"

// Included directly (was relying on transitive includes) for std::vector
// and uint8_t used in the API below.
#include <stdint.h>
#include <vector>

namespace lab
{
class AudioSetting;

// AnalyserNode provides time-domain and frequency-domain (FFT) views of the
// audio stream passing through it.
//
// If the AnalyserNode is intended to run without its output
// being connected to an AudioDestination, the AnalyserNode must be
// registered with the AudioContext via addAutomaticPullNode.
// params:
// settings: fftSize, minDecibels, maxDecibels, smoothingTimeConstant
//
class AnalyserNode : public AudioBasicInspectorNode
{
    void shared_construction(int fftSize);

    // The analyser adds no delay to the graph.
    virtual double tailTime(ContextRenderLock & r) const override { return 0; }
    virtual double latencyTime(ContextRenderLock & r) const override { return 0; }

    // pimpl: FFT/analysis state lives in the implementation file.
    struct Detail;
    Detail * _detail = nullptr;

public:
    AnalyserNode(AudioContext & ac);
    AnalyserNode(AudioContext & ac, int fftSize);
    virtual ~AnalyserNode();

    static const char* static_name() { return "Analyser"; }
    virtual const char* name() const override { return static_name(); }

    virtual void process(ContextRenderLock &, int bufferSize) override;
    virtual void reset(ContextRenderLock &) override;

    void setFftSize(ContextRenderLock &, int fftSize);
    size_t fftSize() const;

    // a value large enough to hold all the data returned from get*FrequencyData
    size_t frequencyBinCount() const;

    void setMinDecibels(double k);
    double minDecibels() const;
    void setMaxDecibels(double k);
    double maxDecibels() const;
    void setSmoothingTimeConstant(double k);
    double smoothingTimeConstant() const;

    // frequency bins, reported in db
    void getFloatFrequencyData(std::vector<float> & array);

    // frequency bins, reported as a linear mapping of minDecibels to maxDecibels onto 0-255.
    // if resample is true, then the computed values will be linearly resampled
    void getByteFrequencyData(std::vector<uint8_t> & array, bool resample = false);

    void getFloatTimeDomainData(std::vector<float> & array);
    void getByteTimeDomainData(std::vector<uint8_t> & array);
};

} // namespace lab

#endif // AnalyserNode_h

View File

@ -0,0 +1,138 @@
// License: BSD 3 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.

#ifndef AudioArray_h
#define AudioArray_h

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

namespace lab
{

// AudioArray<T> is a heap-allocated, zero-initialized buffer whose data
// pointer is aligned to a 16-byte boundary (suitable for SIMD access).
// It is intended for trivially-copyable sample types (float/double).
template <typename T>
class AudioArray
{
public:
    AudioArray()
        : m_allocation(0)
        , m_alignedData(0)
        , m_size(0)
    {
    }

    explicit AudioArray(size_t n)
        : m_allocation(0)
        , m_alignedData(0)
        , m_size(0)
    {
        allocate(n);
    }

    ~AudioArray()
    {
        free(m_allocation);
    }

    // It's OK to call allocate() multiple times, but data will *not* be copied from an initial allocation
    // if re-allocated. Allocations are zero-initialized.
    // On allocation failure the array is left empty (size() == 0) instead of
    // continuing with a null pointer as the previous implementation did.
    void allocate(size_t n)
    {
        const size_t alignment = 16;

        if (m_allocation)
            free(m_allocation);
        m_allocation = 0;
        m_alignedData = 0;
        m_size = 0;

        // Over-allocate by the alignment so an aligned pointer always fits
        // inside the buffer. This replaces the previous retry loop, which
        // kept its "extra bytes" state in a function-local static — shared
        // across threads and across every instantiation's call sites.
        T * allocation = static_cast<T *>(malloc(sizeof(T) * n + alignment));
        if (!allocation)
            return;  // out of memory: remain empty rather than crash later

        m_allocation = allocation;
        m_alignedData = alignedAddress(allocation, alignment);
        m_size = n;
        zero();
    }

    T * data() { return m_alignedData; }
    const T * data() const { return m_alignedData; }
    size_t size() const { return m_size; }

    // Unchecked element access; the caller must ensure i < size().
    T & at(size_t i)
    {
        return data()[i];
    }

    T & operator[](size_t i) { return at(i); }

    // Zeroes out all sample values in buffer.
    void zero()
    {
        memset(this->data(), 0, sizeof(T) * this->size());
    }

    // Zeroes elements in [start, end); out-of-range or inverted ranges are
    // silently ignored (matching the original contract).
    void zeroRange(unsigned start, unsigned end)
    {
        bool isSafe = (start <= end) && (end <= this->size());
        if (!isSafe)
            return;
        memset(this->data() + start, 0, sizeof(T) * (end - start));
    }

    // Copies (end - start) elements from sourceData into [start, end);
    // out-of-range or inverted ranges are silently ignored.
    void copyToRange(const T * sourceData, unsigned start, unsigned end)
    {
        bool isSafe = (start <= end) && (end <= this->size());
        if (!isSafe)
            return;
        memcpy(this->data() + start, sourceData, sizeof(T) * (end - start));
    }

private:
    // Rounds address up to the next multiple of alignment (a power of two).
    static T * alignedAddress(T * address, intptr_t alignment)
    {
        intptr_t value = reinterpret_cast<intptr_t>(address);
        return reinterpret_cast<T *>((value + alignment - 1) & ~(alignment - 1));
    }

    T * m_allocation;   // pointer returned by malloc (what free() requires)
    T * m_alignedData;  // 16-byte-aligned pointer within m_allocation
    size_t m_size;      // element count
};

typedef AudioArray<float> AudioFloatArray;
typedef AudioArray<double> AudioDoubleArray;

}  // lab

#endif  // AudioArray_h

View File

@ -0,0 +1,34 @@
// License: BSD 2 Clause
// Copyright (C) 2012, Intel Corporation. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioBasicInspectorNode_h
#define AudioBasicInspectorNode_h
#include "LabSound/core/AudioNode.h"
namespace lab
{
// AudioBasicInspectorNode is an AudioNode with one input and one output where
// the output might not necessarily connect to another node's input.
// If the output is not connected to any other node, then the
// AudioBasicInspectorNode's processIfNecessary() function can be called
// automatically by AudioContext before the end of each render quantum so that
// it can inspect the audio stream.
// In order that the automatic behavior is invoked, the AudioBasicInspectorNode
// must be registered with the AudioContext, via addAutomaticPullNode.
// NONFINAL NODE
class AudioBasicInspectorNode : public AudioNode
{
public:
// outputChannelCount presumably sizes the single output —
// NOTE(review): confirm in the implementation; it is not visible here.
AudioBasicInspectorNode(AudioContext & ac, int outputChannelCount);
virtual ~AudioBasicInspectorNode() = default;
};
} // namespace lab
#endif // AudioBasicInspectorNode_h

View File

@ -0,0 +1,48 @@
// License: BSD 2 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioBasicProcessorNode_h
#define AudioBasicProcessorNode_h
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioProcessor.h"
#include "LabSound/extended/AudioContextLock.h"
#include <memory>
namespace lab
{
class AudioBus;
class AudioNodeInput;
// AudioBasicProcessorNode is an AudioNode with one input and one output where
// the input and output have the same number of channels.
// The actual signal processing is delegated to the owned AudioProcessor
// (m_processor); subclasses install a concrete processor.
class AudioBasicProcessorNode : public AudioNode
{
public:
AudioBasicProcessorNode(AudioContext &);
virtual ~AudioBasicProcessorNode() = default;
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
virtual void initialize() override;
virtual void uninitialize() override;
// Returns the number of channels for both the input and the output.
int numberOfChannels();
protected:
// Tail/latency are forwarded — NOTE(review): presumably to m_processor;
// confirm in the implementation, which is not visible here.
virtual double tailTime(ContextRenderLock & r) const override;
virtual double latencyTime(ContextRenderLock & r) const override;
// Accessors for the owned processor (may be null before initialize()).
AudioProcessor * processor();
AudioProcessor * processor() const;
std::unique_ptr<AudioProcessor> m_processor;
};
} // namespace lab
#endif // AudioBasicProcessorNode_h

View File

@ -0,0 +1,148 @@
// License: BSD 3 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.

#ifndef AudioBus_h
#define AudioBus_h

#include "LabSound/core/AudioChannel.h"
#include "LabSound/core/Mixing.h"

#include <iostream>  // NOTE(review): nothing in this header uses it; kept in case consumers rely on it transitively
#include <memory>    // std::unique_ptr (was relying on transitive includes)
#include <vector>

namespace lab
{

using lab::Channel;
using lab::ChannelInterpretation;

// An AudioBus represents a collection of one or more AudioChannels.
// The data layout is "planar" as opposed to "interleaved".
// An AudioBus with one channel is mono, an AudioBus with two channels is stereo, etc.
class AudioBus
{
    AudioBus(const AudioBus &); // noncopyable

public:
    // Can define non-standard layouts here:
    enum
    {
        LayoutCanonical = 0
    };

    // allocate indicates whether or not to initially have the AudioChannels created with managed storage.
    // Normal usage is to pass true here, in which case the AudioChannels will memory-manage their own storage.
    // If allocate is false then setChannelMemory() has to be called later on for each channel before the AudioBus is useable...
    AudioBus(int numberOfChannels, int length, bool allocate = true);

    // Tells the given channel to use an externally allocated buffer.
    void setChannelMemory(int channelIndex, float * storage, int length);

    // Channels
    int numberOfChannels() const { return static_cast<int>(m_channels.size()); }

    // Use this when looping over channels. Access is unchecked: the caller
    // must keep channel < numberOfChannels().
    AudioChannel * channel(int channel) { return m_channels[channel].get(); }
    const AudioChannel * channel(int channel) const
    {
        return const_cast<AudioBus *>(this)->m_channels[channel].get();
    }

    // use this when accessing channels semantically
    AudioChannel * channelByType(Channel type);
    const AudioChannel * channelByType(Channel type) const;

    // Number of sample-frames
    int length() const { return m_length; }

    // resizeSmaller() can only be called with a new length <= the current length.
    // The data stored in the bus will remain undisturbed.
    void resizeSmaller(int newLength);

    // Sample-rate : 0.0 if unknown or "don't care"
    float sampleRate() const { return m_sampleRate; }
    void setSampleRate(float sampleRate) { m_sampleRate = sampleRate; }

    // Zeroes all channels.
    void zero();

    // Clears the silent flag on all channels.
    void clearSilentFlag();

    // Returns true if the silent bit is set on all channels.
    bool isSilent() const;

    // Returns true if the channel count and frame-size match.
    bool topologyMatches(const AudioBus & sourceBus) const;

    // Scales all samples by the same amount.
    void scale(float scale);

    void reset() { m_isFirstTime = true; } // for de-zippering

    // Assuming sourceBus has the same topology, copies sample data from each channel of sourceBus to our corresponding channel.
    void copyFrom(const AudioBus & sourceBus, ChannelInterpretation = ChannelInterpretation::Speakers);

    // Sums the sourceBus into our bus with unity gain.
    // Our own internal gain m_busGain is ignored.
    void sumFrom(const AudioBus & sourceBus, ChannelInterpretation = ChannelInterpretation::Speakers);

    // Copy each channel from sourceBus into our corresponding channel.
    // We scale by targetGain (and our own internal gain m_busGain), performing "de-zippering" to smoothly change from *lastMixGain to (targetGain*m_busGain).
    // The caller is responsible for setting up lastMixGain to point to storage which is unique for every "stream" which will be applied to this bus.
    // This represents the dezippering memory.
    void copyWithGainFrom(const AudioBus & sourceBus, float * lastMixGain, float targetGain);

    // Copies the sourceBus by scaling with sample-accurate gain values.
    void copyWithSampleAccurateGainValuesFrom(const AudioBus & sourceBus, float * gainValues, int numberOfGainValues);

    // Returns maximum absolute value across all channels (useful for normalization).
    float maxAbsValue() const;

    // Makes maximum absolute value == 1.0 (if possible).
    void normalize();

    bool isFirstTime() { return m_isFirstTime; }

    // Static Functions

    // Creates a new buffer from a range in the source buffer.
    // 0 may be returned if the range does not fit in the sourceBuffer
    static std::unique_ptr<AudioBus> createBufferFromRange(const AudioBus * sourceBus, int startFrame, int endFrame);

    // Creates a new AudioBus by sample-rate converting sourceBus to the newSampleRate.
    // setSampleRate() must have been previously called on sourceBus.
    static std::unique_ptr<AudioBus> createBySampleRateConverting(const AudioBus * sourceBus, bool mixToMono, float newSampleRate);

    // Creates a new AudioBus by mixing all the channels down to mono.
    // If sourceBus is already mono, then the returned AudioBus will simply be a copy.
    static std::unique_ptr<AudioBus> createByMixingToMono(const AudioBus * sourceBus);

    // Creates a new AudioBus by cloning an existing one
    static std::unique_ptr<AudioBus> createByCloning(const AudioBus * sourceBus);

protected:
    // Default constructor reserved for the static factory functions above.
    AudioBus() = default;

    // Channel-interpretation-specific copy/sum helpers.
    void speakersCopyFrom(const AudioBus &);
    void discreteCopyFrom(const AudioBus &);
    void speakersSumFrom(const AudioBus &);
    void discreteSumFrom(const AudioBus &);
    void speakersSumFrom5_1_ToMono(const AudioBus &);
    void speakersSumFrom7_1_ToMono(const AudioBus &);

    std::unique_ptr<AudioFloatArray> m_dezipperGainValues;
    std::vector<std::unique_ptr<AudioChannel>> m_channels;
    bool m_isFirstTime = true;
    float m_sampleRate = 0.0f;
    float m_busGain = 1.0f;
    int m_layout = LayoutCanonical;
    int m_length = 0;
};

}  // lab

#endif  // AudioBus_h

View File

@ -0,0 +1,122 @@
// License: BSD 3 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.

#ifndef AudioChannel_h
#define AudioChannel_h

#include "LabSound/core/AudioArray.h"

#include <memory>

namespace lab
{

// An AudioChannel represents a buffer of non-interleaved floating-point audio samples.
// The PCM samples are normally assumed to be in a nominal range -1.0 -> +1.0
//
// Storage is either externally owned (m_rawPointer, never freed here) or
// internally managed (m_memBuffer); set() switches to the external form.
class AudioChannel
{
    // Noncopyable (was a declared-but-undefined private ctor; this file is
    // already C++11, so express the intent directly).
    AudioChannel(const AudioChannel &) = delete;

public:
    // Memory can be externally referenced, or can be internally allocated with an AudioFloatArray.

    // Reference an external buffer.
    AudioChannel(float * storage, int length)
        : m_length(length)
        , m_rawPointer(storage)
        , m_silent(false)
    {
    }

    // Manage storage for us.
    explicit AudioChannel(int length)
        : m_length(length)
        , m_silent(true)
    {
        m_memBuffer.reset(new AudioFloatArray(length));
    }

    // An empty audio channel -- must call set() before it's useful...
    AudioChannel()
        : m_length(0)
        , m_silent(true)
    {
    }

    // Redefine the memory for this channel.
    // storage represents external memory not managed by this object.
    void set(float * storage, int length)
    {
        m_memBuffer.reset();  // clean up managed storage
        m_rawPointer = storage;
        m_length = length;
        m_silent = false;
    }

    // How many sample-frames do we contain?
    int length() const { return m_length; }

    // resizeSmaller() can only be called with a new length <= the current length.
    // The data stored in the bus will remain undisturbed.
    void resizeSmaller(int newLength);

    // Direct access to PCM sample data. Non-const accessor clears silent flag.
    float * mutableData()
    {
        clearSilentFlag();
        // const_cast is safe: data() returns either m_rawPointer or the
        // buffer owned by m_memBuffer, both mutable through this object.
        return const_cast<float *>(data());
    }

    const float * data() const
    {
        if (m_rawPointer)
            return m_rawPointer;
        if (m_memBuffer)
            return m_memBuffer->data();
        return nullptr;
    }

    // Zeroes out all sample values in buffer.
    void zero()
    {
        if (m_silent) return;  // already all-zero; skip the memset
        m_silent = true;
        if (m_memBuffer)
            m_memBuffer->zero();
        else if (m_rawPointer)
            memset(m_rawPointer, 0, sizeof(float) * m_length);
    }

    // Clears the silent flag.
    void clearSilentFlag() { m_silent = false; }

    bool isSilent() const { return m_silent; }

    // Scales all samples by the same amount.
    void scale(float scale);

    // A simple memcpy() from the source channel
    void copyFrom(const AudioChannel * sourceChannel);

    // Copies the given range from the source channel.
    void copyFromRange(const AudioChannel * sourceChannel, int startFrame, int endFrame);

    // Sums (with unity gain) from the source channel.
    void sumFrom(const AudioChannel * sourceChannel);

    // Returns maximum absolute value (useful for normalization).
    float maxAbsValue() const;

private:
    int m_length = 0;                            // sample-frame count
    float * m_rawPointer = nullptr;              // external, unowned storage
    std::unique_ptr<AudioFloatArray> m_memBuffer;  // internally managed storage
    bool m_silent = true;                        // true => buffer is known to be all zeros
};

}  // lab

#endif  // AudioChannel_h

View File

@ -0,0 +1,219 @@
// License: BSD 2 Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef lab_audio_context_h
#define lab_audio_context_h
#include "LabSound/core/AudioDevice.h"
#include "LabSound/core/AudioHardwareDeviceNode.h"
#include "LabSound/core/AudioScheduledSourceNode.h"
#include <atomic>
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <set>
#include <string>
#include <thread>
#include <vector>
namespace lab
{
class AudioHardwareDeviceNode;
class AudioListener;
class AudioNode;
class AudioScheduledSourceNode;
class AudioHardwareInputNode;
class AudioNodeInput;
class AudioNodeOutput;
class ContextGraphLock;
class ContextRenderLock;
class AudioContext
{
friend class ContextGraphLock;
friend class ContextRenderLock;
public:
// AudioNodes should not retain the AudioContext. The majority of
// methods that take AudioContext as a parameter do so via reference.
// If methods from the context, such as currentTime() are
// required in methods that do not take AudioContext as a parameter,
// they should instead retain a weak pointer to the
// AudioNodeInterface from the AudioContext they were instantiated from.
// Locking the AudioNodeInterface
// pointer is a way to check if the AudioContext is still instantiated.
// AudioContextInterface contains data that is safe for any thread, or
// audio processing callback to read, even if the audio context is in
// the process of stopping or destruction.
class AudioContextInterface
{
friend class AudioContext;
int _id = 0;
AudioContext * _ac = nullptr;
double _currentTime = 0;
public:
AudioContextInterface(AudioContext * ac, int id)
: _id(id)
, _ac(ac)
{
}
~AudioContextInterface() { }
// the contextId of two AudioNodeInterfaces can be compared
// to discover if they refer to the same context.
int contextId() const { return _id; }
double currentTime() const { return _currentTime; }
};
std::weak_ptr<AudioContextInterface> audioContextInterface() { return m_audioContextInterface; }
// Debugging/Sanity Checking
std::string m_graphLocker;
std::string m_renderLocker;
explicit AudioContext(bool isOffline, bool autoDispatchEvents = true);
~AudioContext();
bool isInitialized() const;
// External users shouldn't use this; it should be called by
// LabSound::MakeRealtimeAudioContext(lab::Channels::Stereo)
// It *is* harmless to call it though, it's just not necessary.
void lazyInitialize();
void setDeviceNode(std::shared_ptr<AudioNode> device);
std::shared_ptr<AudioNode> device();
bool isOfflineContext() const;
// The current time, measured at the start of the render quantum currently
// being processed. Most useful in a Node's process routine.
double currentTime() const;
// The current epoch (total count of previously processed render quanta)
// Useful in recurrent graphs to discover if a node has already done its
// processing for the present quanta.
uint64_t currentSampleFrame() const;
// The current time, accurate versus the audio clock. Whereas currentTime
// advances discretely, once per render quanta, predictedCurrentTime
// is the sum of currentTime, and the amount of realworld time that has
// elapsed since the start of the current render quanta. This is useful on
// the main thread of an application in order to precisely synchronize
// expected audio events and other systems.
double predictedCurrentTime() const;
float sampleRate() const;
std::shared_ptr<AudioListener> listener();
// Called at the start of each render quantum.
void handlePreRenderTasks(ContextRenderLock &);
// Called at the end of each render quantum.
void handlePostRenderTasks(ContextRenderLock &);
// AudioContext can pull node(s) at the end of each render quantum even
// when they are not connected to any downstream nodes. These two methods
// are called by the nodes who want to add/remove themselves into/from the
// automatic pull lists.
void addAutomaticPullNode(std::shared_ptr<AudioNode>);
void removeAutomaticPullNode(std::shared_ptr<AudioNode>);
// Called right before handlePostRenderTasks() to handle nodes which need to
// be pulled even when they are not connected to anything.
// Only an AudioHardwareDeviceNode should call this.
void processAutomaticPullNodes(ContextRenderLock &, int framesToProcess);
void connect(std::shared_ptr<AudioNode> destination, std::shared_ptr<AudioNode> source, int destIdx = 0, int srcIdx = 0);
void disconnect(std::shared_ptr<AudioNode> destination, std::shared_ptr<AudioNode> source, int destIdx = 0, int srcidx = 0);
bool isConnected(std::shared_ptr<AudioNode> destination, std::shared_ptr<AudioNode> source);
// completely disconnect the node from the graph
void disconnect(std::shared_ptr<AudioNode> node, int destIdx = 0);
// connect a parameter to receive the indexed output of a node
void connectParam(std::shared_ptr<AudioParam> param, std::shared_ptr<AudioNode> driver, int index);
// connect destinationNode's named parameter input to driver's indexed output
void connectParam(std::shared_ptr<AudioNode> destinationNode, char const*const parameterName, std::shared_ptr<AudioNode> driver, int index);
// disconnect a parameter from the indexed output of a node
void disconnectParam(std::shared_ptr<AudioParam> param, std::shared_ptr<AudioNode> driver, int index);
// connecting and disconnecting busses and parameters occurs asynchronously.
// synchronizeConnections will block until there are no pending connections,
// or until the timeout occurs.
void synchronizeConnections(int timeOut_ms = 1000);
void startOfflineRendering();
std::function<void()> offlineRenderCompleteCallback;
// suspend the progression of time in the audio context, any queued samples will play
void suspend();
// if the context was suspended, resume the progression of time and processing in the audio context
void resume();
// event dispatching will be called automatically, depending on constructor
// argument. If not automatically dispatching, it is the user's responsibility
// to call dispatchEvents often enough to satisfy the user's needs.
void enqueueEvent(std::function<void()> &);
void dispatchEvents();
void appendDebugBuffer(AudioBus* bus, int channel, int count);
void flushDebugBuffer(char const* const wavFilePath);
private:
// @TODO migrate most of the internal datastructures such as
// PendingConnection into Internals as there's no need to expose these at all.
struct Internals;
std::unique_ptr<Internals> m_internal;
std::shared_ptr<AudioContextInterface> m_audioContextInterface;
std::mutex m_graphLock;
std::mutex m_renderLock;
std::mutex m_updateMutex;
std::condition_variable cv;
// -1 means run forever, 0 means stop, n > 0 means run this many times
// n > 0 will decrement to zero each time update runs.
std::atomic<int> updateThreadShouldRun{-1};
std::thread graphUpdateThread;
float graphKeepAlive{0.f};
float lastGraphUpdateTime{0.f};
bool m_isInitialized = false;
bool m_isAudioThreadFinished = false;
bool m_isOfflineContext = false;
bool m_automaticPullNodesNeedUpdating = false; // indicates m_automaticPullNodes was modified.
friend class NullDeviceNode; // needs to be able to call update()
void update();
void updateAutomaticPullNodes();
void uninitialize();
AudioDeviceRenderCallback * device_callback{nullptr};
std::shared_ptr<AudioNode> m_device;
std::shared_ptr<AudioListener> m_listener;
std::set<std::shared_ptr<AudioNode>> m_automaticPullNodes; // queue for added pull nodes
std::vector<std::shared_ptr<AudioNode>> m_renderingAutomaticPullNodes; // vector of known pull nodes
};
} // End namespace lab
#endif // lab_audio_context_h

View File

@ -0,0 +1,125 @@
// License: BSD 3 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef lab_audiodevice_h
#define lab_audiodevice_h
#include <chrono>
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <vector>
namespace lab
{
class AudioBus;
class AudioContext;
class AudioNodeInput;
class AudioHardwareInput;
class AudioDevice;
/////////////////////////////////////////////
// Audio Device Configuration Settings //
/////////////////////////////////////////////
// Description of one audio device as reported by the platform backend.
// Instances are produced by AudioDevice::MakeAudioDeviceList().
struct AudioDeviceInfo
{
    int32_t index{-1};                         // backend device index; -1 means not set / invalid
    std::string identifier;                    // backend-supplied device name
    uint32_t num_output_channels{0};
    uint32_t num_input_channels{0};
    std::vector<float> supported_samplerates;  // sample rates the device reports as usable
    float nominal_samplerate{0};               // device's default rate — presumably Hz; confirm against backend
    bool is_default_output{false};             // true if this is the system default output device
    bool is_default_input{false};              // true if this is the system default input device
};
// Input and Output
// Requested stream configuration. The defaults (-1 / 0) appear to mean
// "unspecified" — NOTE(review): confirm the backend treats these as "choose for me".
struct AudioStreamConfig
{
    int32_t device_index{-1};
    uint32_t desired_channels{0};
    float desired_samplerate{0};
};
// Timing snapshot for the device's render clock.
// low bit of current_sample_frame indexes time point 0 or 1
// (so that time and epoch are written atomically, after the alternative epoch has been filled in)
struct SamplingInfo
{
    uint64_t current_sample_frame{0};  // count of sample frames rendered so far
    double current_time{0.0};          // presumably current_sample_frame expressed in seconds — confirm in device code
    float sampling_rate{0.0};          // stream rate in Hz
    std::chrono::high_resolution_clock::time_point epoch[2];  // wall-clock anchors; selected by the frame counter's low bit
};
// This is a free function to consolidate and implement the required functionality to take buffers
// from the hardware (both input and output) and begin pulling the graph until fully rendered per quanta.
// This was formerly a function found on the removed `AudioDestinationNode` class (removed
// to de-complicate some annoying inheritance chains).
//
// `pull_graph(...)` will need to be called by a single AudioNode per-context. For instance,
// the `AudioHardwareDeviceNode` or the `NullDeviceNode`.
//
// `src`/`dst` mirror the buses passed to AudioDeviceRenderCallback::render();
// `optional_hardware_input`, when non-null, supplies live hardware input.
void pull_graph(AudioContext * ctx, AudioNodeInput * required_inlet, AudioBus * src, AudioBus * dst, int frames,
                const SamplingInfo & info, AudioHardwareInput * optional_hardware_input = nullptr);
///////////////////////////////////
// AudioDeviceRenderCallback //
///////////////////////////////////
// `render()` is called periodically to get the next render quantum of audio into destinationBus.
// Optional audio input is given in src (if not nullptr). This structure also keeps
// track of timing information with respect to the callback.
class AudioDeviceRenderCallback
{
    // Resolved stream configurations; private, presumably populated by AudioDevice via friendship.
    AudioStreamConfig outputConfig;
    AudioStreamConfig inputConfig;
    friend class AudioDevice;

public:
    virtual ~AudioDeviceRenderCallback() {}

    // Produce `frames` frames into dst; src carries live input when non-null.
    virtual void render(AudioBus * src, AudioBus * dst, int frames, const SamplingInfo & info) = 0;
    virtual void start() = 0;  // begin receiving periodic render() callbacks
    virtual void stop() = 0;   // stop receiving render() callbacks
    virtual bool isRunning() const = 0;
    virtual const SamplingInfo & getSamplingInfo() const = 0;
    virtual const AudioStreamConfig & getOutputConfig() const = 0;
    virtual const AudioStreamConfig & getInputConfig() const = 0;
};
/////////////////////
// AudioDevice //
/////////////////////
// The audio hardware periodically calls the AudioDeviceRenderCallback `render()` method asking it to
// render/output the next render quantum of audio. It optionally will pass in local/live audio
// input when it calls `render()`.
// A device index paired with a validity flag; `index` is only
// meaningful when `valid` is true.
struct AudioDeviceIndex
{
    uint32_t index;
    bool valid;
};
// Abstract handle to the platform audio hardware; a concrete backend
// instance is obtained from MakePlatformSpecificDevice().
class AudioDevice
{
public:
    // Enumerate all devices known to the platform backend.
    static std::vector<AudioDeviceInfo> MakeAudioDeviceList();
    static AudioDeviceIndex GetDefaultOutputAudioDeviceIndex() noexcept;
    static AudioDeviceIndex GetDefaultInputAudioDeviceIndex() noexcept;

    // Factory: create the backend device for this platform, wired to the given
    // callback and stream configurations. Returns a raw pointer — ownership
    // presumably passes to the caller; confirm at call sites.
    static AudioDevice * MakePlatformSpecificDevice(AudioDeviceRenderCallback &,
        const AudioStreamConfig & outputConfig, const AudioStreamConfig & inputConfig);

    virtual ~AudioDevice() {}
    virtual void start() = 0;              // start the hardware stream
    virtual void stop() = 0;               // stop the hardware stream
    virtual bool isRunning() const = 0;
    virtual void backendReinitialize() {}  // optional backend reopen/reset; default is a no-op
};
} // lab
#endif // lab_audiodevice_h

View File

@ -0,0 +1,66 @@
// License: BSD 2 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef audio_hardware_io_node_hpp
#define audio_hardware_io_node_hpp
#include "LabSound/core/AudioDevice.h"
#include "LabSound/core/AudioNode.h"
namespace lab
{
class AudioBus;
class AudioContext;
class AudioHardwareInput;
struct AudioSourceProvider;
// Terminal node of the graph: the hardware pulls this node each render
// quantum through the AudioDeviceRenderCallback interface, and it in turn
// pulls the rest of the graph. Also exposes live hardware input, if any.
class AudioHardwareDeviceNode : public AudioNode, public AudioDeviceRenderCallback
{
protected:
    // AudioNode interface
    virtual double tailTime(ContextRenderLock & r) const override { return 0; }
    virtual double latencyTime(ContextRenderLock & r) const override { return 0; }

    AudioContext * m_context;  // raw pointer; presumably non-owning — the context outlives the node

    // Platform specific implementation
    std::unique_ptr<AudioDevice> m_platformAudioDevice;

    AudioHardwareInput * m_audioHardwareInput{nullptr};  // live input source, if the platform provides one
    SamplingInfo last_info;  // presumably the timing info from the most recent render() — confirm in the .cpp
    const AudioStreamConfig outConfig;  // fixed at construction
    const AudioStreamConfig inConfig;   // fixed at construction

public:
    AudioHardwareDeviceNode(AudioContext & context,
        const AudioStreamConfig & outputConfig, const AudioStreamConfig & inputConfig);
    virtual ~AudioHardwareDeviceNode();

    static const char* static_name() { return "AudioHardwareDevice"; }
    virtual const char* name() const override { return static_name(); }

    // AudioNode interface
    virtual void process(ContextRenderLock &, int bufferSize) override {}  // AudioHardwareDeviceNode is pulled by hardware so this is never called
    virtual void reset(ContextRenderLock &) override;
    virtual void initialize() override;
    virtual void uninitialize() override;
    virtual void backendReinitialize();

    // AudioDeviceRenderCallback interface
    virtual void render(AudioBus * src, AudioBus * dst, int frames, const SamplingInfo & info) override;
    virtual void start() override;
    virtual void stop() override;
    virtual bool isRunning() const override;
    virtual const SamplingInfo & getSamplingInfo() const override;
    virtual const AudioStreamConfig & getOutputConfig() const override;
    virtual const AudioStreamConfig & getInputConfig() const override;

    // Provider for live hardware input, for wiring up an AudioHardwareInputNode.
    AudioSourceProvider * AudioHardwareInputProvider();
};
} // namespace lab
#endif // end audio_hardware_io_node_hpp

View File

@ -0,0 +1,39 @@
// License: BSD 2 Clause
// Copyright (C) 2012, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef audio_hardware_input_node
#define audio_hardware_input_node
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioSourceProvider.h"
namespace lab
{
class AudioContext;
// Source node that injects live hardware input (microphone/line-in) into the
// graph, fed by an AudioSourceProvider obtained from the context's device node.
class AudioHardwareInputNode : public AudioNode
{
    // As an audio source, we will never propagate silence.
    virtual bool propagatesSilence(ContextRenderLock & r) const override { return false; }

    AudioSourceProvider * m_audioSourceProvider;  // supplies live input samples; raw pointer, presumably owned by the context's device node
    int m_sourceNumberOfChannels{0};              // channel count of the provider's stream; 0 until known

public:
    AudioHardwareInputNode(AudioContext & ac, AudioSourceProvider * provider_from_context);
    virtual ~AudioHardwareInputNode();

    // Fix: previously returned "AudioHardwareDevice" (a copy-paste from
    // AudioHardwareDeviceNode::static_name()), which made two distinct node
    // types indistinguishable by name.
    static const char* static_name() { return "AudioHardwareInput"; }
    virtual const char* name() const override { return static_name(); }

    virtual double tailTime(ContextRenderLock & r) const override { return 0; }
    virtual double latencyTime(ContextRenderLock & r) const override { return 0; }

    // AudioNode interface
    virtual void process(ContextRenderLock &, int bufferSize) override;
    virtual void reset(ContextRenderLock &) override;
};
} // namespace lab
#endif

View File

@ -0,0 +1,84 @@
// License: BSD 2 Clause
// Copyright (C) 2018, The LabSound Authors. All rights reserved.
#ifndef AudioListener_h
#define AudioListener_h
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/FloatPoint3D.h"
#include <memory>
namespace lab
{
class AudioParam;
// AudioListener maintains the state of the listener in the audio scene as defined in the OpenAL specification.
class AudioListener
{
    // Every attribute is an AudioParam so it can be set directly or automated
    // like any other parameter.
    std::shared_ptr<AudioParam> m_dopplerFactor;
    std::shared_ptr<AudioParam> m_speedOfSound;

    std::shared_ptr<AudioParam> m_forwardX;
    std::shared_ptr<AudioParam> m_forwardY;
    std::shared_ptr<AudioParam> m_forwardZ;

    std::shared_ptr<AudioParam> m_upX;
    std::shared_ptr<AudioParam> m_upY;
    std::shared_ptr<AudioParam> m_upZ;

    std::shared_ptr<AudioParam> m_velocityX;
    std::shared_ptr<AudioParam> m_velocityY;
    std::shared_ptr<AudioParam> m_velocityZ;

    std::shared_ptr<AudioParam> m_positionX;
    std::shared_ptr<AudioParam> m_positionY;
    std::shared_ptr<AudioParam> m_positionZ;

public:
    AudioListener();
    ~AudioListener() = default;

    // Position
    void setPosition(float x, float y, float z) { setPosition({x, y, z}); }
    void setPosition(const FloatPoint3D & position);
    std::shared_ptr<AudioParam> positionX() const { return m_positionX; }
    std::shared_ptr<AudioParam> positionY() const { return m_positionY; }
    std::shared_ptr<AudioParam> positionZ() const { return m_positionZ; }

    // Orientation: convenience that sets both the forward and up vectors.
    void setOrientation(float x, float y, float z, float upX, float upY, float upZ)
    {
        setForward(FloatPoint3D(x, y, z));
        setUpVector(FloatPoint3D(upX, upY, upZ));
    }

    // Forward represents the horizontal position of the listener's forward
    // direction in the same Cartesian coordinate system as the position
    // values. The forward and up values are linearly independent of each other.
    void setForward(const FloatPoint3D & fwd);
    std::shared_ptr<AudioParam> forwardX() const { return m_forwardX; }
    std::shared_ptr<AudioParam> forwardY() const { return m_forwardY; }
    std::shared_ptr<AudioParam> forwardZ() const { return m_forwardZ; }

    // Up-vector
    void setUpVector(const FloatPoint3D & upVector);
    std::shared_ptr<AudioParam> upX() const { return m_upX; }
    std::shared_ptr<AudioParam> upY() const { return m_upY; }
    std::shared_ptr<AudioParam> upZ() const { return m_upZ; }

    // Velocity
    void setVelocity(float x, float y, float z) { setVelocity(FloatPoint3D(x, y, z)); }
    void setVelocity(const FloatPoint3D & velocity);
    std::shared_ptr<AudioParam> velocityX() const { return m_velocityX; }
    std::shared_ptr<AudioParam> velocityY() const { return m_velocityY; }
    std::shared_ptr<AudioParam> velocityZ() const { return m_velocityZ; }

    // Doppler factor
    void setDopplerFactor(float dopplerFactor) { m_dopplerFactor->setValue(dopplerFactor); }
    std::shared_ptr<AudioParam> dopplerFactor() const { return m_dopplerFactor; }

    // Speed of sound
    void setSpeedOfSound(float speedOfSound) { m_speedOfSound->setValue(speedOfSound); }
    std::shared_ptr<AudioParam> speedOfSound() const { return m_speedOfSound; }
};
} // lab
#endif // AudioListener_h

View File

@ -0,0 +1,268 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2020, The LabSound Authors. All rights reserved.
#pragma once
#ifndef AudioNode_h
#define AudioNode_h
#include "LabSound/core/Mixing.h"
#include "LabSound/core/Profiler.h"
#include <functional>
#include <memory>
#include <string>
#include <vector>
namespace lab
{
// clang-format off
// Spatialization algorithm selection for panner nodes.
enum PanningMode
{
    PANNING_NONE = 0,
    EQUALPOWER = 1,    // equal-power stereo panning
    HRTF = 2,          // head-related transfer function convolution
    _PanningModeCount  // sentinel: number of panning modes
};
// Filter response shape (names mirror the Web Audio BiquadFilterNode types).
enum FilterType
{
    FILTER_NONE = 0,
    LOWPASS = 1,
    HIGHPASS = 2,
    BANDPASS = 3,
    LOWSHELF = 4,
    HIGHSHELF = 5,
    PEAKING = 6,
    NOTCH = 7,
    ALLPASS = 8,
    _FilterTypeCount  // sentinel: number of filter types
};
// Waveform selection for oscillator nodes.
enum OscillatorType
{
    OSCILLATOR_NONE = 0,
    SINE = 1,
    FAST_SINE = 2,  // presumably a cheaper sine approximation — confirm in the oscillator implementation
    SQUARE = 3,
    SAWTOOTH = 4,
    TRIANGLE = 5,
    CUSTOM = 6,
    _OscillatorTypeCount  // sentinel: number of oscillator types
};
// Playback lifecycle of a schedulable source node; driven by AudioNodeScheduler.
enum class SchedulingState : int
{
    UNSCHEDULED = 0,  // Initial playback state. Created, but not yet scheduled
    SCHEDULED,        // Scheduled to play (via noteOn() or noteGrainOn()), but not yet playing
    FADE_IN,          // First epoch, fade in, then play
    PLAYING,          // Generating sound
    STOPPING,         // Transitioning to finished
    RESETTING,        // Node is resetting to initial, unscheduled state
    FINISHING,        // Playing has finished
    FINISHED          // Node has finished
};

// Human-readable name for a SchedulingState (e.g. for logging).
const char* schedulingStateName(SchedulingState);
// clang-format on
class AudioContext;
class AudioNodeInput;
class AudioNodeOutput;
class AudioParam;
class AudioSetting;
class ContextGraphLock;
class ContextRenderLock;
// Tracks requested start/stop times for a node, in sample frames relative to
// the render epoch, and advances the node's SchedulingState each quantum.
class AudioNodeScheduler
{
public:
    explicit AudioNodeScheduler() = delete;
    explicit AudioNodeScheduler(float sampleRate);
    ~AudioNodeScheduler() = default;

    // Scheduling. `when` is presumably in seconds, as elsewhere in the API — confirm.
    void start(double when);
    void stop(double when);

    void finish(ContextRenderLock&);  // Called when there is no more sound to play or the noteOff/stop() time has been reached.
    void reset();

    SchedulingState playbackState() const { return _playbackState; }
    bool hasFinished() const { return _playbackState == SchedulingState::FINISHED; }

    // Advance the scheduler by one epoch of `epoch_length` frames.
    bool update(ContextRenderLock&, int epoch_length);

    SchedulingState _playbackState = SchedulingState::UNSCHEDULED;

    // The epoch is a frame count at the context sample rate. Stored as
    // uint64_t, so rollover is not a practical concern at audio rates.
    uint64_t _epoch = 0;        // the epoch rendered currently in the busses
    uint64_t _epochLength = 0;  // number of frames in current epoch

    uint64_t _startWhen = std::numeric_limits<uint64_t>::max();  // requested start in epochal coordinate system
    uint64_t _stopWhen = std::numeric_limits<uint64_t>::max();   // requested end in epochal coordinate system

    int _renderOffset = 0;  // where rendering starts in the current frame
    int _renderLength = 0;  // number of rendered frames in the current frame

    float _sampleRate = 1;

    // User callbacks — NOTE(review): invocation sites are in the .cpp; confirm threading before use.
    std::function<void()> _onEnded;
    std::function<void(double when)> _onStart;
};
// An AudioNode is the basic building block for handling audio within an AudioContext.
// It may be an audio source, an intermediate processing module, or an audio destination.
// Each AudioNode can have inputs and/or outputs.
// An AudioHardwareDeviceNode has one input and no outputs and represents the final destination to the audio hardware.
// Most processing nodes such as filters will have one input and one output, although multiple inputs and outputs are possible.
class AudioNode
{
public:
    enum : int
    {
        ProcessingSizeInFrames = 128
    };

    AudioNode() = delete;
    virtual ~AudioNode();
    explicit AudioNode(AudioContext &);

    //--------------------------------------------------
    // required interface
    //
    virtual const char* name() const = 0;

    // The AudioNodeInput(s) (if any) will already have their input data available when process() is called.
    // Subclasses will take this input data and put the results in the AudioBus(s) of its AudioNodeOutput(s) (if any).
    // Called from context's audio thread.
    virtual void process(ContextRenderLock &, int bufferSize) = 0;

    // Resets DSP processing state (clears delay lines, filter memory, etc.)
    // Called from context's audio thread.
    virtual void reset(ContextRenderLock &) = 0;

    // tailTime() is the length of time (not counting latency time) where non-zero output may occur after continuous silent input.
    virtual double tailTime(ContextRenderLock & r) const = 0;

    // latencyTime() is the length of time it takes for non-zero output to appear after non-zero input is provided. This only applies to
    // processing delay which is an artifact of the processing algorithm chosen and is *not* part of the intrinsic desired effect. For
    // example, a "delay" effect is expected to delay the signal, and thus would not be considered latency.
    virtual double latencyTime(ContextRenderLock & r) const = 0;

    //--------------------------------------------------
    // optional interface (overridable virtuals with defaults, plus common plumbing)
    //
    // If the final node class has ScheduledNode in its class hierarchy, this will return true.
    // This is to save the cost of a dynamic_cast when scheduling nodes.
    virtual bool isScheduledNode() const { return false; }

    // No significant resources should be allocated until initialize() is called.
    // Processing may not occur until a node is initialized.
    virtual void initialize();
    virtual void uninitialize();
    bool isInitialized() const { return m_isInitialized; }

    // These locked versions can be called at run time.
    void addInput(ContextGraphLock&, std::unique_ptr<AudioNodeInput> input);
    void addOutput(ContextGraphLock&, std::unique_ptr<AudioNodeOutput> output);

    int numberOfInputs() const { return static_cast<int>(m_inputs.size()); }
    int numberOfOutputs() const { return static_cast<int>(m_outputs.size()); }

    std::shared_ptr<AudioNodeInput> input(int index);
    std::shared_ptr<AudioNodeOutput> output(int index);
    std::shared_ptr<AudioNodeOutput> output(char const* const str);  // look up an output by name (see AudioNodeOutput::name())

    // processIfNecessary() is called by our output(s) when the rendering graph needs this AudioNode to process.
    // This method ensures that the AudioNode will only process once per rendering time quantum even if it's called repeatedly.
    // This handles the case of "fanout" where an output is connected to multiple AudioNode inputs.
    // Called from context's audio thread.
    void processIfNecessary(ContextRenderLock & r, int bufferSize);

    // Called when a new connection has been made to one of our inputs or the connection number of channels has changed.
    // This potentially gives us enough information to perform a lazy initialization or, if necessary, a re-initialization.
    // Called from main thread.
    virtual void checkNumberOfChannelsForInput(ContextRenderLock &, AudioNodeInput *);

    virtual void conformChannelCounts();

    // propagatesSilence() should return true if the node will generate silent output when given silent input. By default, AudioNode
    // will take tailTime() and latencyTime() into account when determining whether the node will propagate silence.
    virtual bool propagatesSilence(ContextRenderLock & r) const;
    bool inputsAreSilent(ContextRenderLock &);
    void silenceOutputs(ContextRenderLock &);
    void unsilenceOutputs(ContextRenderLock &);

    int channelCount();
    void setChannelCount(ContextGraphLock & g, int channelCount);

    ChannelCountMode channelCountMode() const { return m_channelCountMode; }
    void setChannelCountMode(ContextGraphLock & g, ChannelCountMode mode);

    ChannelInterpretation channelInterpretation() const { return m_channelInterpretation; }
    void setChannelInterpretation(ChannelInterpretation interpretation) { m_channelInterpretation = interpretation; }

    // returns a vector of parameter names
    std::vector<std::string> paramNames() const;
    std::vector<std::string> paramShortNames() const;

    // returns a vector of setting names
    std::vector<std::string> settingNames() const;
    std::vector<std::string> settingShortNames() const;

    std::shared_ptr<AudioParam> param(char const * const str);
    std::shared_ptr<AudioSetting> setting(char const * const str);

    std::vector<std::shared_ptr<AudioParam>> params() const { return m_params; }
    std::vector<std::shared_ptr<AudioSetting>> settings() const { return m_settings; }

    AudioNodeScheduler _scheduler;

    // Profiling counters.
    ProfileSample graphTime;  // how much time the node spends pulling inputs
    ProfileSample totalTime;  // total time spent by the node. total-graph is the self time.

protected:
    // Inputs and outputs must be created before the AudioNode is initialized.
    // It is only legal to call this during a constructor.
    void addInput(std::unique_ptr<AudioNodeInput> input);
    void addOutput(std::unique_ptr<AudioNodeOutput> output);

    // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process.
    // Each rendering quantum, the audio data for each of the AudioNode's inputs will be available after this method is called.
    // Called from context's audio thread.
    void pullInputs(ContextRenderLock &, int bufferSize);

    friend class AudioContext;

    bool m_isInitialized {false};

    std::vector<std::shared_ptr<AudioNodeInput>> m_inputs;
    std::vector<std::shared_ptr<AudioNodeOutput>> m_outputs;

    std::vector<std::shared_ptr<AudioParam>> m_params;
    std::vector<std::shared_ptr<AudioSetting>> m_settings;

    int m_channelCount{ 0 };
    ChannelCountMode m_channelCountMode{ ChannelCountMode::Max };
    ChannelInterpretation m_channelInterpretation{ ChannelInterpretation::Speakers };

    // starts an immediate ramp to zero in preparation for disconnection
    void scheduleDisconnect() { _scheduler.stop(0); }

    // returns true if the disconnection ramp has reached zero.
    // This is intended to allow the AudioContext to manage popping artifacts
    bool disconnectionReady() const { return _scheduler._playbackState != SchedulingState::PLAYING; }
};
} // namespace lab
#endif // AudioNode_h

View File

@ -0,0 +1,60 @@
// License: BSD 2 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioNodeInput_h
#define AudioNodeInput_h
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioSummingJunction.h"
#include <set>
namespace lab
{
class AudioNode;
class AudioNodeOutput;
class AudioBus;
// An AudioNodeInput represents an input to an AudioNode and can be connected from one or more AudioNodeOutputs.
// In the case of multiple connections, the input will act as a unity-gain summing junction, mixing all the outputs.
// The number of channels of the input's bus is the maximum of the number of channels of all its connections.
class AudioNodeInput : public AudioSummingJunction
{
    AudioNode * m_destinationNode;                   // the node this input feeds; raw pointer, presumably non-owning
    std::unique_ptr<AudioBus> m_internalSummingBus;  // scratch bus used to sum multiple connections

public:
    explicit AudioNodeInput(AudioNode * audioNode, int processingSizeInFrames = AudioNode::ProcessingSizeInFrames);
    virtual ~AudioNodeInput();

    // Can be called from any thread.
    AudioNode * destinationNode() const { return m_destinationNode; }

    // Must be called with the context's graph lock. Static because a shared pointer to this is required
    static void connect(ContextGraphLock &, std::shared_ptr<AudioNodeInput> fromInput, std::shared_ptr<AudioNodeOutput> toOutput);
    static void disconnect(ContextGraphLock &, std::shared_ptr<AudioNodeInput> fromInput, std::shared_ptr<AudioNodeOutput> toOutput);
    static void disconnectAll(ContextGraphLock&, std::shared_ptr<AudioNodeInput> fromInput);

    // pull() processes all of the AudioNodes connected to this NodeInput.
    // In the case of multiple connections, the result is summed onto the internal summing bus.
    // In the single connection case, it allows in-place processing where possible using inPlaceBus.
    // It returns the bus which it rendered into, returning inPlaceBus if in-place processing was performed.
    AudioBus * pull(ContextRenderLock &, AudioBus * inPlaceBus, int bufferSize);

    // bus() contains the rendered audio after pull() has been called for each time quantum.
    AudioBus * bus(ContextRenderLock &);

    // updateInternalBus() updates m_internalSummingBus appropriately for the number of channels.
    // This must be called when we own the context's graph lock in the audio thread at the very start or end of the render quantum.
    void updateInternalBus(ContextRenderLock &);

    // The number of channels of the connection with the largest number of channels.
    // Only valid during render quantum because it is dependent on the active bus
    int numberOfChannels(ContextRenderLock &) const;
};
} // namespace lab
#endif // AudioNodeInput_h

View File

@ -0,0 +1,123 @@
// License: BSD 2 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioNodeOutput_h
#define AudioNodeOutput_h
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioParam.h"
#include <set>
namespace lab
{
class AudioContext;
class AudioNodeInput;
class AudioBus;
// AudioNodeOutput represents a single output for an AudioNode.
// It may be connected to one or more AudioNodeInputs.
class AudioNodeOutput
{
public:
    // It's OK to pass 0 for numberOfChannels in which case setNumberOfChannels() must be called later on.
    AudioNodeOutput(AudioNode * audioNode, int numberOfChannels, int processingSizeInFrames = AudioNode::ProcessingSizeInFrames);
    AudioNodeOutput(AudioNode * audioNode, char const*const name, int numberOfChannels, int processingSizeInFrames = AudioNode::ProcessingSizeInFrames);
    virtual ~AudioNodeOutput();

    // Can be called from any thread.
    AudioNode * sourceNode() const { return m_sourceNode; }

    // Causes our AudioNode to process if it hasn't already for this render quantum.
    // It returns the bus containing the processed audio for this output, returning inPlaceBus if in-place processing was possible.
    // Called from context's audio thread.
    AudioBus * pull(ContextRenderLock &, AudioBus * inPlaceBus, int bufferSize);

    // bus() will contain the rendered audio after pull() is called for each rendering time quantum.
    AudioBus * bus(ContextRenderLock &) const;

    // renderingFanOutCount() is the number of AudioNodeInputs that we're connected to during rendering.
    // Unlike fanOutCount() it will not change during the course of a render quantum.
    int renderingFanOutCount() const;

    // renderingParamFanOutCount() is the number of AudioParams that we're connected to during rendering.
    // Unlike paramFanOutCount() it will not change during the course of a render quantum.
    int renderingParamFanOutCount() const;

    void setNumberOfChannels(ContextRenderLock &, int);
    int numberOfChannels() const { return m_numberOfChannels; }
    bool isChannelCountKnown() const { return numberOfChannels() > 0; }

    bool isConnected() { return fanOutCount() > 0 || paramFanOutCount() > 0; }

    // updateRenderingState() is called in the audio thread at the start or end of the render quantum to handle any recent changes to the graph state.
    void updateRenderingState(ContextRenderLock &);

    const std::string& name() const { return m_name; }

    // Must be called within the context's graph lock.
    static void disconnectAll(ContextGraphLock &, std::shared_ptr<AudioNodeOutput>);
    static void disconnectAllInputs(ContextGraphLock &, std::shared_ptr<AudioNodeOutput>);
    static void disconnectAllParams(ContextGraphLock &, std::shared_ptr<AudioNodeOutput>);

private:
    AudioNode * m_sourceNode;  // the node that owns this output; raw pointer, presumably non-owning

    friend class AudioNodeInput;
    friend class AudioParam;

    // These are called from AudioNodeInput.
    // They must be called with the context's graph lock.
    void addInput(ContextGraphLock & g, std::shared_ptr<AudioNodeInput>);
    void removeInput(ContextGraphLock & g, std::shared_ptr<AudioNodeInput>);
    void addParam(ContextGraphLock & g, std::shared_ptr<AudioParam>);
    void removeParam(ContextGraphLock & g, std::shared_ptr<AudioParam>);

    // fanOutCount() is the number of AudioNodeInputs that we're connected to.
    // This method should not be called in audio thread rendering code, instead renderingFanOutCount() should be used.
    // It must be called with the context's graph lock.
    int fanOutCount();

    // Similar to fanOutCount(), paramFanOutCount() is the number of AudioParams that we're connected to.
    // This method should not be called in audio thread rendering code, instead renderingParamFanOutCount() should be used.
    // It must be called with the context's graph lock.
    int paramFanOutCount();

    // updateInternalBus() updates m_internalBus appropriately for the number of channels.
    // It is called in the constructor or in the audio thread with the context's graph lock.
    void updateInternalBus();

    // Announce to any nodes we're connected to that we changed our channel count for its input.
    void propagateChannelCount(ContextRenderLock &);

    std::string m_name;

    // m_numberOfChannels will only be changed in the audio thread.
    // The main thread sets m_desiredNumberOfChannels which will later get picked up in the audio thread
    int m_numberOfChannels;
    int m_desiredNumberOfChannels;

    // m_internalBus and m_inPlaceBus must only be changed in the audio thread with the context's render lock (or constructor).
    std::unique_ptr<AudioBus> m_internalBus;

    // Temporary, during render quantum
    // @tofix - Should this be some kind of shared pointer? It is only valid for a single render quantum, so probably no.
    AudioBus * m_inPlaceBus;

    std::vector<std::shared_ptr<AudioNodeInput>> m_inputs;

    // For the purposes of rendering, keeps track of the number of inputs and AudioParams we're connected to.
    // These value should only be changed at the very start or end of the rendering quantum.
    int m_renderingFanOutCount;
    int m_renderingParamFanOutCount;

    // connected params
    std::set<std::shared_ptr<AudioParam>> m_params;
    // NOTE(review): iterator over raw AudioParam pointers, but m_params holds
    // shared_ptr — this typedef appears stale/unused; confirm before removing.
    typedef std::set<AudioParam *>::iterator ParamsIterator;
};
} // namespace lab
#endif // AudioNodeOutput_h

View File

@ -0,0 +1,104 @@
// License: BSD 3 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioParam_h
#define AudioParam_h
#include "LabSound/core/AudioContext.h"
#include "LabSound/core/AudioParamTimeline.h"
#include "LabSound/core/AudioSummingJunction.h"
#include <string>
#include <sys/types.h>
namespace lab
{
class AudioNodeOutput;
// AudioParam is a named control value (e.g. gain, frequency) constrained to
// [minValue, maxValue]. It can be set directly, smoothed toward a target
// (de-zippering), automated through an AudioParamTimeline, or driven at audio
// rate by AudioNodeOutputs connected through the AudioSummingJunction base.
class AudioParam : public AudioSummingJunction
{
public:
static const double DefaultSmoothingConstant;
static const double SnapThreshold;
AudioParam(const std::string & name, const std::string & short_name, double defaultValue, double minValue, double maxValue, unsigned units = 0);
virtual ~AudioParam();
// Intrinsic value.
float value() const;
void setValue(float);
// Final value for k-rate parameters, otherwise use calculateSampleAccurateValues() for a-rate.
float finalValue(ContextRenderLock &);
std::string name() const { return m_name; }
std::string shortName() const { return m_shortName; }
// Range and default are stored as double internally and narrowed on access.
float minValue() const { return static_cast<float>(m_minValue); }
float maxValue() const { return static_cast<float>(m_maxValue); }
float defaultValue() const { return static_cast<float>(m_defaultValue); }
unsigned units() const { return m_units; }
// Value smoothing:
// When a new value is set with setValue(), in our internal use of the parameter we don't immediately jump to it.
// Instead we smoothly approach this value to avoid glitching.
float smoothedValue();
// Smoothly exponentially approaches to (de-zippers) the desired value.
// Returns true if smoothed value has already snapped exactly to value.
bool smooth(ContextRenderLock &);
void resetSmoothedValue() { m_smoothedValue = m_value; }
void setSmoothingConstant(double k) { m_smoothingConstant = k; }
// Parameter automation.
// Time is a double representing the time (in seconds) after the AudioContext was first created that the change in value will happen
// Returns a reference for chaining calls.
// NOTE(review): the comment above says "double" but these overloads take
// float times -- confirm which precision the timeline actually uses.
AudioParam & setValueAtTime(float value, float time) { m_timeline.setValueAtTime(value, time); return *this; }
AudioParam & linearRampToValueAtTime(float value, float time) { m_timeline.linearRampToValueAtTime(value, time); return *this; }
AudioParam & exponentialRampToValueAtTime(float value, float time) { m_timeline.exponentialRampToValueAtTime(value, time); return *this; }
AudioParam & setTargetAtTime(float target, float time, float timeConstant) { m_timeline.setTargetAtTime(target, time, timeConstant); return *this; }
AudioParam & setValueCurveAtTime(std::vector<float> curve, float time, float duration) { m_timeline.setValueCurveAtTime(curve, time, duration); return *this; }
AudioParam & cancelScheduledValues(float startTime) { m_timeline.cancelScheduledValues(startTime); return *this; }
// True when the value varies within a render quantum (timeline events or audio-rate connections).
bool hasSampleAccurateValues() { return m_timeline.hasValues() || numberOfConnections(); }
// Calculates numberOfValues parameter values starting at the context's current time.
// Must be called in the context's render thread.
void calculateSampleAccurateValues(ContextRenderLock &, float * values, int numberOfValues);
// NOTE(review): the top-level const on the returned pointer has no effect for callers.
AudioBus const* const bus() const;
// Connect an audio-rate signal to control this parameter.
static void connect(ContextGraphLock & g, std::shared_ptr<AudioParam>, std::shared_ptr<AudioNodeOutput>);
static void disconnect(ContextGraphLock & g, std::shared_ptr<AudioParam>, std::shared_ptr<AudioNodeOutput>);
static void disconnectAll(ContextGraphLock & g, std::shared_ptr<AudioParam>);
private:
// sampleAccurate corresponds to a-rate (audio rate) vs. k-rate in the Web Audio specification.
void calculateFinalValues(ContextRenderLock & r, float * values, int numberOfValues, bool sampleAccurate);
void calculateTimelineValues(ContextRenderLock & r, float * values, int numberOfValues);
std::string m_name;
std::string m_shortName;
double m_value;
double m_defaultValue;
double m_minValue;
double m_maxValue;
unsigned m_units;
// Smoothing (de-zippering)
double m_smoothedValue;
double m_smoothingConstant;
AudioParamTimeline m_timeline;
std::unique_ptr<AudioBus> m_internalSummingBus;
};
} // namespace lab
#endif // AudioParam_h

View File

@ -0,0 +1,117 @@
// License: BSD 3 Clause
// Copyright (C) 2011, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioParamTimeline_h
#define AudioParamTimeline_h
#include "LabSound/core/AudioContext.h"
#include <mutex>
#include <vector>
namespace lab
{
// AudioParamTimeline stores scheduled automation events (set / ramp / target /
// curve) for an AudioParam and evaluates them over a time range.
class AudioParamTimeline
{
public:
    AudioParamTimeline() {}

    // Event scheduling; times are in seconds (implementations in the .cpp).
    void setValueAtTime(float value, float time);
    void linearRampToValueAtTime(float value, float time);
    void exponentialRampToValueAtTime(float value, float time);
    void setTargetAtTime(float target, float time, float timeConstant);
    void setValueCurveAtTime(std::vector<float> & curve, float time, float duration);
    void cancelScheduledValues(float startTime);

    // hasValue is set to true if a valid timeline value is returned.
    // otherwise defaultValue is returned.
    float valueForContextTime(ContextRenderLock &, float defaultValue, bool & hasValue);

    // Given the time range, calculates parameter values into the values buffer
    // and returns the last parameter value calculated for "values" or the defaultValue if none were calculated.
    // controlRate is the rate (number per second) at which parameter values will be calculated.
    // It should equal sampleRate for sample-accurate parameter changes, and otherwise will usually match
    // the render quantum size such that the parameter value changes once per render quantum.
    float valuesForTimeRange(double startTime, double endTime, float defaultValue,
                             float * values, size_t numberOfValues, double sampleRate, double controlRate);

    // True when at least one automation event has been scheduled.
    bool hasValues() const { return m_events.size() > 0; }

private:
    // @tofix - move to implementation file to hide from public API
    // One scheduled automation event.
    class ParamEvent
    {
    public:
        enum Type
        {
            SetValue,
            LinearRampToValue,
            ExponentialRampToValue,
            SetTarget,
            SetValueCurve,
            LastType
        };

        ParamEvent(Type type, float value, float time, float timeConstant, float duration, std::vector<float> curve)
            : m_type(type)
            , m_value(value)
            , m_time(time)
            , m_timeConstant(timeConstant)
            , m_duration(duration)
            , m_curve(curve)
        {
        }

        // All members are plainly copyable, so the compiler-generated copy
        // operations are correct; the previous hand-written copy constructor
        // and assignment operator did exactly this member-by-member work.
        ParamEvent(const ParamEvent & rhs) = default;
        ParamEvent & operator=(const ParamEvent & rhs) = default;

        unsigned type() const { return m_type; }
        float value() const { return m_value; }
        float time() const { return m_time; }
        float timeConstant() const { return m_timeConstant; }
        float duration() const { return m_duration; }
        std::vector<float> & curve() { return m_curve; }

    private:
        unsigned m_type;
        float m_value;
        float m_time;
        float m_timeConstant;
        float m_duration;
        std::vector<float> m_curve;
    };

    // Inserts the event in time order (defined in the .cpp).
    void insertEvent(const ParamEvent &);
    float valuesForTimeRangeImpl(double startTime, double endTime, float defaultValue,
                                 float * values, size_t numberOfValues, double sampleRate, double controlRate);

    std::vector<ParamEvent> m_events;
};
} // namespace lab
#endif // AudioParamTimeline_h

View File

@ -0,0 +1,48 @@
// License: BSD 3 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioProcessor_h
#define AudioProcessor_h
#include <stddef.h>
namespace lab
{
class AudioBus;
class ContextRenderLock;
// AudioProcessor is an abstract base class representing an audio signal processing object with a single input and a single output,
// where the number of input channels equals the number of output channels. It can be used as one part of a complex DSP algorithm,
// or as the processor for a basic (one input - one output) AudioNode.
class AudioProcessor
{
public:
AudioProcessor()
{
}
virtual ~AudioProcessor() {}
// Full initialization can be done here instead of in the constructor.
virtual void initialize() = 0;
virtual void uninitialize() = 0;
// Processes the source to destination bus. The number of channels must match in source and destination.
virtual void process(ContextRenderLock &, const AudioBus * source, AudioBus * destination, int bufferSize) = 0;
// Resets filter state
virtual void reset() = 0;
// True after initialize(); subclasses are expected to maintain m_initialized.
bool isInitialized() const { return m_initialized; }
// NOTE(review): presumably the seconds of non-silent output remaining after
// input goes silent, and the processing delay in seconds -- confirm against
// concrete implementations.
virtual double tailTime(ContextRenderLock & r) const = 0;
virtual double latencyTime(ContextRenderLock & r) const = 0;
protected:
bool m_initialized = false;
};
} // namespace lab
#endif // AudioProcessor_h

View File

@ -0,0 +1,49 @@
// License: BSD 3 Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioScheduledSourceNode_h
#define AudioScheduledSourceNode_h
#include "LabSound/core/AudioNode.h"
namespace lab
{
/*
AudioScheduledSourceNode adds a scheduling interface to
AudioNode. The scheduler is in the base class, but only nodes
derived from AudioScheduledSourceNode can be scheduled.
*/
class AudioScheduledSourceNode : public AudioNode
{
public:
AudioScheduledSourceNode() = delete;
explicit AudioScheduledSourceNode(AudioContext & ac) : AudioNode(ac) { }
virtual ~AudioScheduledSourceNode() = default;
virtual bool isScheduledNode() const override { return true; }
// True while the node is anywhere in the SCHEDULED..STOPPING state range.
bool isPlayingOrScheduled() const
{
return _scheduler._playbackState >= SchedulingState::SCHEDULED &&
_scheduler._playbackState <= SchedulingState::STOPPING;
}
// Start time, measured as seconds from the current epochal time
void start(float when) { _scheduler.start(when); }
// NOTE(review): start() takes float seconds but _startWhen is surfaced here
// as uint64_t -- confirm the unit (samples vs. seconds) in AudioNode's scheduler.
uint64_t startWhen() const { return _scheduler._startWhen; }
// Stop time, measured as seconds from the current epochal time
void stop(float when) { _scheduler.stop(when); }
SchedulingState playbackState() const { return _scheduler._playbackState; }
bool hasFinished() const { return _scheduler.hasFinished(); }
// Registers a callback invoked by the scheduler when playback ends.
void setOnEnded(std::function<void()> fn) { _scheduler._onEnded = fn; }
};
} // namespace lab
#endif // AudioScheduledSourceNode_h

View File

@ -0,0 +1,158 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015, The LabSound Authors. All rights reserved.
#ifndef AudioSetting_h
#define AudioSetting_h
#include "LabSound/core/AudioBus.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <vector>
namespace lab
{
// value defaults to zero, assumed set as integer.
// floatAssigned() is provided so that a user interface or RPC binding can configure
// automatically for float or unsigned integer values.
//
// a value changed callback is provided so that if tools set values, they can
// be responded to.
// AudioSetting holds a single named, typed configuration value (bool, int,
// float, enumeration index, or an AudioBus) with an optional change callback.
class AudioSetting
{
public:
    // The kind of value this setting carries.
    enum class Type
    {
        None,
        Bool,
        Integer,
        Float,
        Enumeration,
        Bus
    };

private:
    std::string _name;
    std::string _shortName;
    Type _type;
    float _valf = 0;
    uint32_t _vali = 0;
    bool _valb = false;
    std::shared_ptr<AudioBus> _valBus;
    std::function<void()> _valueChanged;
    // Optional nullptr-terminated array of enumerant names; not owned.
    char const * const * _enums = nullptr;

public:
    explicit AudioSetting(const std::string & n, const std::string & sn, Type t)
        : _name(n)
        , _shortName(sn)
        , _type(t)
    {
    }
    explicit AudioSetting(char const * const n, char const * const sn, Type t)
        : _name(n)
        , _shortName(sn)
        , _type(t)
    {
    }
    explicit AudioSetting(const std::string & n, const std::string & sn, char const * const * enums)
        : _name(n)
        , _shortName(sn)
        , _type(Type::Enumeration)
        , _enums(enums)
    {
    }
    explicit AudioSetting(char const * const n, char const * const sn, char const * const * enums)
        : _name(n)
        , _shortName(sn)
        , _type(Type::Enumeration)
        , _enums(enums)
    {
    }

    std::string name() const { return _name; }
    std::string shortName() const { return _shortName; }
    Type type() const { return _type; }
    char const * const * enums() const { return _enums; }

    // Returns the index of enumerant name e within _enums, or -1 when e is
    // unknown or this setting has no enumeration table.
    int enumFromName(char const* const e)
    {
        if (!_enums || !e)
            return -1;
        // std::string comparison instead of strcmp: <cstring> is not included
        // by this header, so strcmp previously relied on a transitive include.
        const std::string target(e);
        int enum_idx = 0;
        for (char const* const* names_p = _enums; *names_p != nullptr; ++names_p, ++enum_idx)
        {
            if (target == *names_p)
                return enum_idx;
        }
        return -1;
    }

    bool valueBool() const { return _valb; }
    float valueFloat() const { return _valf; }
    uint32_t valueUint32() const { return _vali; }
    std::shared_ptr<AudioBus> valueBus() const { return _valBus; }

    // Each setter is a no-op when the value is unchanged; otherwise it stores
    // the value and (when notify is true) invokes the change callback.
    void setBool(bool v, bool notify = true)
    {
        if (v == _valb) return;
        _valb = v;
        if (notify && _valueChanged) _valueChanged();
    }
    void setFloat(float v, bool notify = true)
    {
        if (v == _valf) return;
        _valf = v;
        if (notify && _valueChanged) _valueChanged();
    }
    void setUint32(uint32_t v, bool notify = true)
    {
        if (v == _vali) return;
        _vali = v;
        if (notify && _valueChanged) _valueChanged();
    }
    void setEnumeration(int v, bool notify = true)
    {
        // Reject negative indices before comparing against the unsigned stored
        // value (the previous code compared signed to unsigned first, and also
        // cast the stored value through int instead of uint32_t).
        if (v < 0) return;
        if (static_cast<uint32_t>(v) == _vali) return;
        _vali = static_cast<uint32_t>(v);
        if (notify && _valueChanged) _valueChanged();
    }
    void setEnumeration(char const*const v, bool notify = true)
    {
        if (v) setEnumeration(enumFromName(v), notify);
    }
    // NOTE(review): the string value is ignored; only the callback fires.
    void setString(char const*const, bool notify = true)
    {
        if (notify && _valueChanged) _valueChanged();
    }
    // nb: Invoking setBus will create and cache a duplicate of the supplied bus.
    void setBus(const AudioBus * incoming, bool notify = true)
    {
        std::unique_ptr<AudioBus> new_bus = AudioBus::createByCloning(incoming);
        _valBus = std::move(new_bus);
        if (notify && _valueChanged)
            _valueChanged();
    }

    void setValueChanged(std::function<void()> fn) { _valueChanged = fn; }
};
} // namespace lab
#endif

View File

@ -0,0 +1,62 @@
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioSourceProvider_h
#define AudioSourceProvider_h
#include "LabSound/extended/AudioContextLock.h"
#include "LabSound/core/AudioBus.h"
namespace lab
{
class AudioBus;
/////////////////////////////
// AudioSourceProvider //
/////////////////////////////
// Abstract base-class for a pull-model client.
// provideInput() gets called repeatedly to render time-slices of a continuous audio stream.
struct AudioSourceProvider
{
// Renders the next bufferSize frames of the stream into bus.
virtual void provideInput(AudioBus * bus, int bufferSize) = 0;
virtual ~AudioSourceProvider() {}
};
////////////////////////////
// AudioHardwareInput //
////////////////////////////
// AudioHardwareInput allows us to expose an AudioSourceProvider for local/live audio input.
// If there is local/live audio input, we call set() with the audio input data every render quantum.
// `set()` is called in ... which is one or two frames above the actual hardware io.
class AudioHardwareInput : public AudioSourceProvider
{
AudioBus m_sourceBus;
public:
AudioHardwareInput(int channelCount)
: m_sourceBus(channelCount, AudioNode::ProcessingSizeInFrames)
{
}
virtual ~AudioHardwareInput() {}
void set(AudioBus * bus)
{
if (bus) m_sourceBus.copyFrom(*bus);
}
// Satisfy the AudioSourceProvider interface
virtual void provideInput(AudioBus * destinationBus, int bufferSize)
{
bool isGood = destinationBus && destinationBus->length() == bufferSize && m_sourceBus.length() == bufferSize;
//ASSERT(isGood);
if (isGood) destinationBus->copyFrom(m_sourceBus);
}
};
} // lab
#endif // AudioSourceProvider_h

View File

@ -0,0 +1,78 @@
// License: BSD 2 Clause
// Copyright (C) 2012, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioSummingJunction_h
#define AudioSummingJunction_h
#include <memory>
#include <vector>
namespace lab
{
class AudioContext;
class AudioNodeOutput;
class AudioBus;
class ContextGraphLock;
class ContextRenderLock;
// An AudioSummingJunction represents a point where zero, one, or more AudioNodeOutputs connect.
class AudioSummingJunction
{
public:
    explicit AudioSummingJunction();
    virtual ~AudioSummingJunction();

    // This must be called whenever we modify m_outputs.
    void changedOutputs(ContextGraphLock &);

    // This copies m_outputs to m_renderingOutputs. See comments for these lists below.
    void updateRenderingState(ContextRenderLock & r);

    // will count expired pointers
    int numberOfConnections() const { return static_cast<int>(m_connectedOutputs.size()); }

    // Rendering code accesses its version of the current connections here.
    int numberOfRenderingConnections(ContextRenderLock &) const;

    // Returns the i'th rendering output, or nullptr when i is out of range or
    // expired. The bounds check is now explicit: the previous signed/unsigned
    // comparison only rejected negative i by accidental wraparound.
    std::shared_ptr<AudioNodeOutput> renderingOutput(ContextRenderLock &, int i)
    {
        return (i >= 0 && i < static_cast<int>(m_renderingOutputs.size())) ? m_renderingOutputs[i].lock() : nullptr;
    }
    const std::shared_ptr<AudioNodeOutput> renderingOutput(ContextRenderLock &, int i) const
    {
        return (i >= 0 && i < static_cast<int>(m_renderingOutputs.size())) ? m_renderingOutputs[i].lock() : nullptr;
    }

    bool isConnected() const { return numberOfConnections() > 0; }

    void junctionConnectOutput(std::shared_ptr<AudioNodeOutput>);
    void junctionDisconnectOutput(std::shared_ptr<AudioNodeOutput>);
    void junctionDisconnectAllOutputs();

    // Marks m_renderingOutputs as stale so it is refreshed at a safe time.
    void setDirty() { m_renderingStateNeedUpdating = true; }
    static void handleDirtyAudioSummingJunctions(ContextRenderLock & r);

    bool isConnected(std::shared_ptr<AudioNodeOutput> o) const;

protected:
    // m_outputs contains the AudioNodeOutputs representing current connections.
    // The rendering code should never use this directly, but instead uses m_renderingOutputs.
    std::vector<std::weak_ptr<AudioNodeOutput>> m_connectedOutputs;

    // m_renderingOutputs is a copy of m_connectedOutputs which will never be modified during the graph rendering on the audio thread.
    // This is the list which is used by the rendering code.
    // Whenever m_outputs is modified, the context is told so it can later update m_renderingOutputs from m_outputs at a safe time.
    // Most of the time, m_renderingOutputs is identical to m_outputs.
    std::vector<std::weak_ptr<AudioNodeOutput>> m_renderingOutputs;

    // m_renderingStateNeedUpdating indicates outputs were changed
    // NOTE(review): no in-class initializer here -- presumably the constructor
    // in the .cpp initializes it; confirm.
    bool m_renderingStateNeedUpdating;
};
} // namespace lab
#endif // AudioSummingJunction_h

View File

@ -0,0 +1,43 @@
// License: BSD 2 Clause
// Copyright (C) 2011, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef BiquadFilterNode_h
#define BiquadFilterNode_h
#include "LabSound/core/AudioBasicProcessorNode.h"
namespace lab
{
class AudioParam;
class Biquad;
class BiquadFilterNode : public AudioBasicProcessorNode
{
// Pimpl: raw owned pointer -- presumably deleted in the destructor; confirm in the .cpp.
class BiquadFilterNodeInternal;
BiquadFilterNodeInternal * biquad_impl;
public:
BiquadFilterNode(AudioContext& ac);
virtual ~BiquadFilterNode();
static const char* static_name() { return "BiquadFilter"; }
virtual const char* name() const override { return static_name(); }
// The filter response type (see FilterType for the available shapes).
FilterType type() const;
void setType(FilterType type);
// Automation parameters of the biquad.
std::shared_ptr<AudioParam> frequency();
std::shared_ptr<AudioParam> q();
std::shared_ptr<AudioParam> gain();
std::shared_ptr<AudioParam> detune();
// Get the magnitude and phase response of the filter at the given
// set of frequencies (in Hz). The phase response is in radians.
void getFrequencyResponse(ContextRenderLock &, const std::vector<float> & frequencyHz, std::vector<float> & magResponse, std::vector<float> & phaseResponse);
};
} // namespace lab
#endif // BiquadFilterNode_h

View File

@ -0,0 +1,40 @@
// License: BSD 3 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef ChannelMergerNode_h
#define ChannelMergerNode_h
#include "LabSound/core/AudioNode.h"
namespace lab
{
class AudioContext;
// Merges several (typically mono) inputs into one multi-channel output.
class ChannelMergerNode : public AudioNode
{
public:
ChannelMergerNode(AudioContext& ac, int numberOfInputs = 1);
virtual ~ChannelMergerNode() = default;
static const char* static_name(){ return "ChannelMerger"; }
virtual const char* name() const override { return static_name(); }
// Adds n further inputs to be merged.
void addInputs(int n);
// Sets the desired channel count of the merged output (defaults to 1).
void setOutputChannelCount(int n) { m_desiredNumberOfOutputChannels = n; }
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock&) override {}
private:
// A merger adds no tail or latency of its own.
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
int m_desiredNumberOfOutputChannels = 1; // default
};
} // namespace lab
#endif // ChannelMergerNode_h

View File

@ -0,0 +1,38 @@
// License: BSD 2 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef ChannelSplitterNode_h
#define ChannelSplitterNode_h
#include "LabSound/core/AudioNode.h"
namespace lab
{
class AudioContext;
// Splits a multi-channel input into separate outputs.
class ChannelSplitterNode : public AudioNode
{
public:
ChannelSplitterNode(AudioContext& ac, int numberOfOutputs = 1);
virtual ~ChannelSplitterNode() = default;
static const char* static_name() { return "ChannelSplitter"; }
virtual const char* name() const override { return static_name(); }
// Adds further outputs -- presumably one per source channel to split out; confirm in the .cpp.
void addOutputs(int numberOfOutputs);
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
private:
// A splitter adds no tail or latency of its own.
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
};
} // namespace lab
#endif // ChannelSplitterNode_h

View File

@ -0,0 +1,61 @@
// License: BSD 2 Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#include <condition_variable>
#include <mutex>
#include <queue>
namespace lab
{
// A minimal thread-safe FIFO queue: a std::queue guarded by a mutex, with a
// condition variable so consumers can block until data arrives.
template <typename Data>
class ConcurrentQueue
{
    std::queue<Data> the_queue;
    mutable std::mutex the_mutex;
    std::condition_variable the_condition_variable;

public:
    // Enqueues a copy of data and wakes one waiting consumer. The notify is
    // issued after the lock is released so the woken thread does not
    // immediately block on the still-held mutex.
    void push(Data const & data)
    {
        {
            std::lock_guard<std::mutex> lock(the_mutex);
            the_queue.push(data);
        }
        the_condition_variable.notify_one();
    }

    // True if the queue held no elements at the moment of the check.
    // NOTE: the answer may be stale by the time the caller acts on it.
    bool empty() const
    {
        std::lock_guard<std::mutex> lock(the_mutex);
        return the_queue.empty();
    }

    // Non-blocking pop: returns false immediately when the queue is empty,
    // otherwise copies the front element into popped_value, removes it, and
    // returns true.
    bool try_pop(Data & popped_value)
    {
        std::lock_guard<std::mutex> lock(the_mutex);
        if (the_queue.empty())
        {
            return false;
        }
        popped_value = the_queue.front();
        the_queue.pop();
        return true;
    }

    // Blocking pop: waits until an element is available, then copies it into
    // popped_value and removes it. The predicate overload of wait() handles
    // spurious wakeups, replacing the previous hand-written while loop.
    void wait_and_pop(Data & popped_value)
    {
        std::unique_lock<std::mutex> lock(the_mutex);
        the_condition_variable.wait(lock, [this] { return !the_queue.empty(); });
        popped_value = the_queue.front();
        the_queue.pop();
    }
};
}

View File

@ -0,0 +1,76 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef ConvolverNode_h
#define ConvolverNode_h
#include "LabSound/core/AudioScheduledSourceNode.h"
#include "LabSound/core/AudioSetting.h"
#include <memory>
#include <mutex>
namespace lab
{
class AudioBus;
class AudioSetting;
class Reverb;
// private data for reverb computations
struct sp_data;
struct sp_conv;
struct sp_ftbl;
// ConvolverNode convolves its input with a (possibly multi-channel) impulse
// response, one reverb kernel per impulse-response channel.
class ConvolverNode final : public AudioScheduledSourceNode
{
public:
    ConvolverNode(AudioContext& ac);
    virtual ~ConvolverNode();

    static const char* static_name() { return "Convolver"; }
    virtual const char* name() const override { return static_name(); }

    // Whether the impulse response is normalized before use.
    bool normalize() const;
    void setNormalize(bool new_n);

    // set impulse will schedule the convolver to begin processing immediately
    // The supplied bus is copied for use as an impulse response.
    void setImpulse(std::shared_ptr<AudioBus> bus);
    std::shared_ptr<AudioBus> getImpulse() const;

    virtual void process(ContextRenderLock & r, int bufferSize) override;
    virtual void reset(ContextRenderLock &) override;

protected:
    virtual double tailTime(ContextRenderLock & r) const override { return 0; }
    virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
    virtual bool propagatesSilence(ContextRenderLock & r) const override;

    double now() const { return _now; }

    // Swaps the pending kernels in as the active set (defined in the .cpp).
    void _activateNewImpulse();

    double _now = 0.0;
    float _scale = 1.f;  // normalization value
    // Presumably a soundpipe handle (sp_ prefix) -- confirm in the .cpp.
    sp_data * _sp = nullptr;

    // Normalize the impulse response or not. Must default to true.
    std::shared_ptr<AudioSetting> _normalize;
    std::shared_ptr<AudioSetting> _impulseResponseClip;

    // Per-channel convolution state; move-only with manual cleanup.
    struct ReverbKernel
    {
        ReverbKernel() = default;
        ReverbKernel(ReverbKernel && rh) noexcept;
        ~ReverbKernel();
        sp_conv * conv = nullptr;
        sp_ftbl * ft = nullptr;
    };
    std::vector<ReverbKernel> _kernels;          // one per impulse response channel
    std::vector<ReverbKernel> _pending_kernels;  // new kernels when an impulse has been computed
    // In-class initialized to false, matching the other members; previously it
    // was the only member left without an initializer here.
    bool _swap_ready = false;
    std::mutex _kernel_mutex;
};
} // namespace lab
#endif // ConvolverNode_h

View File

@ -0,0 +1,54 @@
// License: BSD 2 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef DelayNode_h
#define DelayNode_h
#include "LabSound/core/AudioBasicProcessorNode.h"
#include "LabSound/core/Macros.h"
namespace lab
{
class AudioParam;
class DelayProcessor;
// TempoSync is commonly used by subclasses of DelayNode
// Note divisions for tempo-synchronized delay times. The naming convention
// appears to be TS_<division>, with T = triplet and D = dotted -- confirm
// against the subclasses that consume these values.
enum TempoSync
{
TS_32,
TS_16T,
TS_32D,
TS_16,
TS_8T,
TS_16D,
TS_8,
TS_4T,
TS_8D,
TS_4,
TS_2T,
TS_4D,
TS_2,
TS_2D,
};
class DelayNode : public AudioBasicProcessorNode
{
DelayProcessor * delayProcessor();
public:
// maxDelayTime defaults to 2.0 seconds. (An earlier comment here claimed a
// 100 ms default, which did not match the default argument below.)
DelayNode(AudioContext & ac, double maxDelayTime = 2.0);
virtual ~DelayNode() = default;
static const char* static_name() { return "Delay"; }
virtual const char* name() const override { return static_name(); }
// NOTE(review): exposed as an AudioSetting rather than an AudioParam, so the
// delay time is not audio-rate automatable through this accessor.
std::shared_ptr<AudioSetting> delayTime();
};
} // namespace lab
#endif // DelayNode_h

View File

@ -0,0 +1,64 @@
// License: BSD 2 Clause
// Copyright (C) 2011, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef DynamicsCompressorNode_h
#define DynamicsCompressorNode_h
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioParam.h"
namespace lab
{
class DynamicsCompressor;
// Fun trick:
//
// source ---> gain(100) ---> dynamics_compressor ---> output
//
// sounds like an old radio
//
class DynamicsCompressorNode : public AudioNode
{
public:
DynamicsCompressorNode(AudioContext& ac);
virtual ~DynamicsCompressorNode();
static const char* static_name() { return "DynamicsCompressor"; }
virtual const char* name() const override { return static_name(); }
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
virtual void initialize() override;
virtual void uninitialize() override;
// Static compression curve parameters.
std::shared_ptr<AudioParam> threshold() { return m_threshold; }
std::shared_ptr<AudioParam> knee() { return m_knee; }
std::shared_ptr<AudioParam> ratio() { return m_ratio; }
std::shared_ptr<AudioParam> attack() { return m_attack; }
std::shared_ptr<AudioParam> release() { return m_release; }
// Amount by which the compressor is currently compressing the signal in decibels.
std::shared_ptr<AudioParam> reduction() { return m_reduction; }
private:
// Delegated to the DynamicsCompressor implementation in the .cpp.
virtual double tailTime(ContextRenderLock & r) const override;
virtual double latencyTime(ContextRenderLock & r) const override;
std::unique_ptr<DynamicsCompressor> m_dynamicsCompressor;
std::shared_ptr<AudioParam> m_threshold;
std::shared_ptr<AudioParam> m_knee;
std::shared_ptr<AudioParam> m_ratio;
std::shared_ptr<AudioParam> m_reduction;
std::shared_ptr<AudioParam> m_attack;
std::shared_ptr<AudioParam> m_release;
};
} // namespace lab
#endif // DynamicsCompressorNode_h

View File

@ -0,0 +1,106 @@
#ifndef FLOAT_POINT_3D_H
#define FLOAT_POINT_3D_H
#include <float.h>
#include <math.h>
namespace lab
{
// A minimal 3-component float vector (x, y, z) with value-semantics math
// operators. Equality is exact componentwise float comparison (no epsilon).
struct float3
{
    float x {0.f}, y {0.f}, z {0.f};
    float3(float x, float y, float z) : x(x), y(y), z(z) {};
    float3() = default;
    // Index access: 0 -> x, 1 -> y, 2 -> z.
    // Relies on the three members being laid out contiguously.
    float & operator[](int i) { return (&x)[i]; }
    const float & operator[](int i) const { return (&x)[i]; }
};
// Exact componentwise comparison.
inline bool operator==(const float3 & a, const float3 & b)
{
    return a.x == b.x && a.y == b.y && a.z == b.z;
}
inline bool operator!=(const float3 & a, const float3 & b)
{
    return !(a == b);
}
inline float3 operator+(const float3 & a, const float3 & b)
{
    return {a.x + b.x, a.y + b.y, a.z + b.z};
}
// Unary negation.
inline float3 operator-(const float3 & v)
{
    return {-v.x, -v.y, -v.z};
}
inline float3 operator-(const float3 & a, const float3 & b)
{
    return {a.x - b.x, a.y - b.y, a.z - b.z};
}
inline float3 operator*(const float3 & v, float s)
{
    return {v.x * s, v.y * s, v.z * s};
}
inline float3 operator*(float s, const float3 & v)
{
    return v * s;
}
// Componentwise division by a scalar (computed as multiplication by 1/s).
inline float3 operator/(const float3 & v, float s)
{
    return v * (1.0f / s);
}
// Compound assignments return a reference to the updated operand, as the
// canonical operator form requires. (They previously returned a copy by
// value, so a chained update would silently modify a temporary.)
inline float3 & operator+=(float3 & a, const float3 & b)
{
    return a = a + b;
}
inline float3 & operator-=(float3 & a, const float3 & b)
{
    return a = a - b;
}
inline float3 & operator*=(float3 & v, const float & s)
{
    return v = v * s;
}
inline float3 & operator/=(float3 & v, const float & s)
{
    return v = v / s;
}
inline float dot(const float3 & a, const float3 & b)
{
    return a.x * b.x + a.y * b.y + a.z * b.z;
}
inline float3 cross(const float3 & a, const float3 & b)
{
    return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
}
// Euclidean length.
inline float magnitude(const float3 & v)
{
    return sqrtf(dot(v, v));
}
// NOTE: no zero-length guard; normalizing a zero vector divides by zero.
inline float3 normalize(const float3 & v)
{
    return v / magnitude(v);
}
// True when every component is within FLT_EPSILON of zero.
inline bool is_zero(const float3 & v)
{
    return fabsf(v.x) < FLT_EPSILON && fabsf(v.y) < FLT_EPSILON && fabsf(v.z) < FLT_EPSILON;
}
typedef float3 FloatPoint3D;
}
#endif

View File

@ -0,0 +1,50 @@
// License: BSD 2 Clause
// Copyright (C) 2011, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef GainNode_h
#define GainNode_h
#include "LabSound/core/AudioArray.h"
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioParam.h"
namespace lab
{
class AudioContext;
// GainNode is an AudioNode with one input and one output which applies a gain (volume) change to the audio signal.
// De-zippering (smoothing) is applied when the gain value is changed dynamically.
//
// params: gain
// settings:
//
class GainNode : public AudioNode
{
public:
GainNode(AudioContext& ac);
virtual ~GainNode();
// NOTE(review): sibling nodes drop the "Node" suffix in static_name()
// ("Delay", "Convolver", ...). "GainNode" is inconsistent, but changing the
// string would alter runtime name lookup/display, so it is left as-is.
static const char* static_name() { return "GainNode"; }
virtual const char* name() const override { return static_name(); }
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
// The gain (volume) automation parameter.
std::shared_ptr<AudioParam> gain() const { return m_gain; }
protected:
// A pure gain stage adds no tail or latency.
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
float m_lastGain; // for de-zippering
std::shared_ptr<AudioParam> m_gain;
AudioFloatArray m_sampleAccurateGainValues;
};
} // namespace lab
#endif // GainNode_h

View File

@ -0,0 +1,71 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef labsound_macros_h
#define labsound_macros_h
// Library-wide defaults.
#define LABSOUND_DEFAULT_SAMPLERATE 48000.0f
#define LABSOUND_DEFAULT_CHANNELS (uint32_t) lab::Channels::Stereo
// Platform detection.
#if (defined(__linux) || defined(__unix) || defined(__posix) || defined(__LINUX__) || defined(__linux__))
#define LABSOUND_PLATFORM_LINUX 1
#elif (defined(_WIN64) || defined(_WIN32) || defined(__CYGWIN32__) || defined(__MINGW32__))
#define LABSOUND_PLATFORM_WINDOWS 1
#elif (defined(MACOSX) || defined(__DARWIN__) || defined(__APPLE__))
#define LABSOUND_PLATFORM_OSX 1
#endif
// Architecture detection (32- vs. 64-bit).
#if (defined(WIN_32) || defined(__i386__) || defined(i386) || defined(__x86__))
#define LABSOUND_ARCH_32 1
#elif (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64__) || defined(_M_IA64))
#define LABSOUND_ARCH_64 1
#endif
// Compiler detection.
#if (defined(__clang__))
#define LABSOUND_COMPILER_CLANG 1
#elif (defined(__GNUC__))
#define LABSOUND_COMPILER_GCC 1
#elif (defined _MSC_VER)
#define LABSOUND_COMPILER_VISUAL_STUDIO 1
#endif
// MSVC does not define __SSE2__ itself; synthesize it from /arch settings.
// NOTE(review): __SSE2__ is a reserved identifier (leading double underscore);
// defining it works in practice but is formally out of bounds for user code.
#if ((_M_IX86_FP) && (_M_IX86_FP >= 2)) || (_M_AMD64) || defined(_M_X64)
#define __SSE2__
#endif
#if defined(LABSOUND_COMPILER_VISUAL_STUDIO)
#include <stdint.h>
// Make M_PI and friends visible from <math.h>/<cmath> on MSVC.
#define _USE_MATH_DEFINES
#include <cmath>
#include <math.h>
#endif
#if defined(__ARM_NEON__)
#define ARM_NEON_INTRINSICS 1
#endif
#if defined(LABSOUND_PLATFORM_OSX) || defined(LABSOUND_PLATFORM_LINUX)
# include <cmath>
# include <math.h>
#endif
#if defined(LABSOUND_PLATFORM_WINDOWS)
#define USE_KISS_FFT
#endif
// Common mathematical constants (double-precision literals).
#define LAB_PI 3.1415926535897931
#define LAB_HALF_PI 1.5707963267948966
#define LAB_QUARTER_PI 0.7853981633974483
#define LAB_TWO_PI 6.2831853071795862
#define LAB_TAU 6.2831853071795862
#define LAB_INV_PI 0.3183098861837907
#define LAB_INV_TWO_PI 0.1591549430918953
#define LAB_INV_HALF_PI 0.6366197723675813
#define LAB_SQRT_2 1.4142135623730951
#define LAB_INV_SQRT_2 0.7071067811865475
#define LAB_LN_2 0.6931471805599453
#define LAB_INV_LN_2 1.4426950408889634
#define LAB_LN_10 2.3025850929940459
#define LAB_INV_LN_10 0.4342944819032517
#endif // end labsound_macros_h

View File

@ -0,0 +1,55 @@
// License: BSD 2 Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef labsound_mixing_h
#define labsound_mixing_h
#include <stdint.h>
namespace lab
{
// How channels are mapped when mixing between busses of different widths.
enum class ChannelInterpretation
{
Speakers,
Discrete,
};
// Speaker-position index of a channel within a bus.
enum class Channel : int
{
First = 0,
Left = 0,
Right = 1,
Center = 2, // center and mono are the same
Mono = 2,
LFE = 3,
SurroundLeft = 4,
SurroundRight = 5,
BackLeft = 6,
BackRight = 7
};
// Channel counts for common speaker layouts.
namespace Channels
{
enum : uint32_t
{
Mono = 1,
Stereo = 2,
Quad = 4,
Surround_5_0 = 5,
Surround_5_1 = 6,
Surround_7_1 = 8
};
};
// How a node determines its channel count from its inputs.
enum class ChannelCountMode
{
Max,
ClampedMax,
Explicit,
End
};
} // end namespace lab
#endif // end labsound_mixing_h

View File

@ -0,0 +1,71 @@
// License: BSD 2 Clause
// Copyright (C) 2011, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef lab_null_device_node_h
#define lab_null_device_node_h
#include "LabSound/core/AudioDevice.h"
#include "LabSound/core/AudioHardwareDeviceNode.h"
#include <atomic>
#include <memory>
#include <thread>
namespace lab
{
class AudioBus;
class AudioContext;
// NullDeviceNode renders the graph offline: instead of a hardware device, an
// internal thread pulls the graph for a fixed duration (lengthSeconds).
class NullDeviceNode final : public AudioNode, public AudioDeviceRenderCallback
{
    std::unique_ptr<AudioBus> m_renderBus;
    std::thread m_renderThread;
    std::atomic<bool> shouldExit{false};
    bool m_startedRendering{false};
    uint32_t m_numChannels;
    double m_lengthSeconds;
    AudioContext * m_context;
    AudioStreamConfig outConfig;
    SamplingInfo info;

public:
    NullDeviceNode(AudioContext & context,
                   const AudioStreamConfig & outputConfig, const double lengthSeconds);
    virtual ~NullDeviceNode();

    // Fixed the stray capital: the name was previously "NulLDevice".
    static const char* static_name() { return "NullDevice"; }
    virtual const char* name() const override { return static_name(); }

    // AudioNode Interface
    virtual void initialize() override;
    virtual void uninitialize() override;
    virtual double tailTime(ContextRenderLock & r) const override { return 0; }
    virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
    virtual void process(ContextRenderLock &, int bufferSize) override {} // NullDeviceNode is pulled by its own internal thread so this is never called
    virtual void reset(ContextRenderLock &) override{}; // @fixme

    // AudioDeviceRenderCallback interface
    virtual void render(AudioBus * src, AudioBus * dst, int frames, const SamplingInfo & info) override final;
    virtual void start() override final;
    virtual void stop() override final;
    virtual bool isRunning() const override final;
    virtual const SamplingInfo & getSamplingInfo() const override final;
    virtual const AudioStreamConfig & getOutputConfig() const override final;
    virtual const AudioStreamConfig & getInputConfig() const override final;

    // Offline render entry points (run on m_renderThread; see the .cpp).
    void offlineRender();
    void offlineRenderFrames(size_t framesToProcess);
};
} // namespace lab
#endif // end lab_null_device_node_h

View File

@ -0,0 +1,66 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef OscillatorNode_h
#define OscillatorNode_h
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/AudioScheduledSourceNode.h"
#include "LabSound/core/Macros.h"
#include "LabSound/core/WaveTable.h"
namespace lab
{
class AudioBus;
class AudioContext;
class AudioSetting;
// params: frequency, detune, amplitude, and bias
// settings: type
//
// @TODO add duty param for the square wave to complete the oscillator, default value is 0.5
// Periodic waveform generator. Waveform selection lives in the m_type
// setting; frequency/detune/amplitude/bias are sample-accurate params.
class OscillatorNode : public AudioScheduledSourceNode
{
// Current oscillator phase, carried across process() calls.
double phase = 0.0;
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
virtual bool propagatesSilence(ContextRenderLock & r) const override;
// Waveform type (see OscillatorType), stored as a setting.
std::shared_ptr<AudioSetting> m_type;
public:
OscillatorNode(AudioContext& ac);
virtual ~OscillatorNode();
static const char* static_name() { return "Oscillator"; }
virtual const char* name() const override { return static_name(); }
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override { }
OscillatorType type() const;
void setType(OscillatorType type);
// Param accessors. NOTE(review): the backing members below are also public,
// so the accessors are redundant -- callers can reach m_* directly.
std::shared_ptr<AudioParam> amplitude() { return m_amplitude; }
std::shared_ptr<AudioParam> frequency() { return m_frequency; }
std::shared_ptr<AudioParam> detune() { return m_detune; }
std::shared_ptr<AudioParam> bias() { return m_bias; }
std::shared_ptr<AudioParam> m_amplitude; // default 1.0
std::shared_ptr<AudioParam> m_frequency; // hz
std::shared_ptr<AudioParam> m_bias; // default 0.0
std::shared_ptr<AudioParam> m_detune; // Detune value in Cents.
// Renders [offset, offset+count) of the buffer; called from process().
void process_oscillator(ContextRenderLock & r, int bufferSize, int offset, int count);
// Per-render scratch arrays holding sample-accurate param values.
AudioFloatArray m_phaseIncrements;
AudioFloatArray m_biasValues;
AudioFloatArray m_detuneValues;
AudioFloatArray m_amplitudeValues;
};
} // namespace lab
#endif // OscillatorNode_h

View File

@ -0,0 +1,158 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef PannerNode_h
#define PannerNode_h
#include "LabSound/core/AudioListener.h"
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/FloatPoint3D.h"
#include "LabSound/core/Macros.h"
namespace lab
{
// PannerNode is an AudioNode with one input and one output.
// It positions a sound in 3D space, with the exact effect dependent on the panning model.
// It has a position and an orientation in 3D space which is relative to the position and orientation of the context's AudioListener.
// A distance effect will attenuate the gain as the position moves away from the listener.
// A cone effect will attenuate the gain as the orientation moves away from the listener.
// All of these effects follow the OpenAL specification very closely.
class AudioBus;
class ConeEffect;
class DistanceEffect;
class HRTFDatabaseLoader;
class Panner;
// params: orientation[XYZ], velocity[XYZ], position[XYZ]
// settings: distanceModel, refDistance, maxDistance, rolloffFactor,
// coneKInnerAngle, coneOuterAngle, panningMode
//
class PannerNode : public AudioNode
{
// Source orientation, velocity, and position in listener-relative 3D space.
std::shared_ptr<AudioParam> m_orientationX;
std::shared_ptr<AudioParam> m_orientationY;
std::shared_ptr<AudioParam> m_orientationZ;
std::shared_ptr<AudioParam> m_velocityX;
std::shared_ptr<AudioParam> m_velocityY;
std::shared_ptr<AudioParam> m_velocityZ;
std::shared_ptr<AudioParam> m_positionX;
std::shared_ptr<AudioParam> m_positionY;
std::shared_ptr<AudioParam> m_positionZ;
// Dynamically computed attenuation values (see distanceGain()/coneGain()).
std::shared_ptr<AudioParam> m_distanceGain;
std::shared_ptr<AudioParam> m_coneGain;
// Distance/cone/panning configuration settings.
std::shared_ptr<AudioSetting> m_distanceModel;
std::shared_ptr<AudioSetting> m_refDistance;
std::shared_ptr<AudioSetting> m_maxDistance;
std::shared_ptr<AudioSetting> m_rolloffFactor;
std::shared_ptr<AudioSetting> m_coneInnerAngle;
std::shared_ptr<AudioSetting> m_coneOuterAngle;
std::shared_ptr<AudioSetting> m_panningModel;
public:
// How gain falls off with distance from the listener.
enum DistanceModel
{
LINEAR_DISTANCE = 0,
INVERSE_DISTANCE = 1,
EXPONENTIAL_DISTANCE = 2,
};
// hrtf_root_dir_path: where to find HRTF data; empty presumably disables
// or defers HRTF loading -- confirm against HRTFDatabaseLoader.
PannerNode(AudioContext & ac, const std::string & hrtf_root_dir_path = "");
virtual ~PannerNode();
static const char* static_name() { return "Panner"; }
virtual const char* name() const override { return static_name(); }
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
virtual void initialize() override;
virtual void uninitialize() override;
// Panning model
PanningMode panningModel() const;
void setPanningModel(PanningMode m);
// Position
void setPosition(float x, float y, float z) { setPosition(FloatPoint3D(x, y, z)); }
void setPosition(const FloatPoint3D & position);
std::shared_ptr<AudioParam> positionX() const { return m_positionX; }
std::shared_ptr<AudioParam> positionY() const { return m_positionY; }
std::shared_ptr<AudioParam> positionZ() const { return m_positionZ; }
// The orientation property indicates the X component of the direction in
// which the audio source is facing, in cartesian space. The complete
// vector is defined by the position of the audio source, and the direction
// in which it is facing.
void setOrientation(const FloatPoint3D & fwd);
std::shared_ptr<AudioParam> orientationX() const { return m_orientationX; }
std::shared_ptr<AudioParam> orientationY() const { return m_orientationY; }
std::shared_ptr<AudioParam> orientationZ() const { return m_orientationZ; }
// Velocity
void setVelocity(float x, float y, float z) { setVelocity(FloatPoint3D(x, y, z)); }
void setVelocity(const FloatPoint3D & velocity);
std::shared_ptr<AudioParam> velocityX() const { return m_velocityX; }
std::shared_ptr<AudioParam> velocityY() const { return m_velocityY; }
std::shared_ptr<AudioParam> velocityZ() const { return m_velocityZ; }
// Distance parameters
DistanceModel distanceModel();
void setDistanceModel(DistanceModel);
float refDistance();
void setRefDistance(float refDistance);
float maxDistance();
void setMaxDistance(float maxDistance);
float rolloffFactor();
void setRolloffFactor(float rolloffFactor);
// Sound cones - angles in degrees
float coneInnerAngle() const;
void setConeInnerAngle(float angle);
float coneOuterAngle() const;
void setConeOuterAngle(float angle);
float coneOuterGain() const;
// NOTE(review): parameter is a gain value despite being named "angle".
void setConeOuterGain(float angle);
// Computes the source's azimuth/elevation relative to the listener.
void getAzimuthElevation(ContextRenderLock & r, double * outAzimuth, double * outElevation);
float dopplerRate(ContextRenderLock & r);
// Accessors for dynamically calculated gain values.
std::shared_ptr<AudioParam> distanceGain() { return m_distanceGain; }
std::shared_ptr<AudioParam> coneGain() { return m_coneGain; }
virtual double tailTime(ContextRenderLock & r) const override;
virtual double latencyTime(ContextRenderLock & r) const override;
protected:
std::shared_ptr<HRTFDatabaseLoader> m_hrtfDatabaseLoader;
// Returns the combined distance and cone gain attenuation.
virtual float distanceConeGain(ContextRenderLock & r);
// Notifies any SampledAudioNodes connected to us either directly or indirectly about our existence.
// This is in order to handle the pitch change necessary for the doppler shift.
// @tofix - broken?
void notifyAudioSourcesConnectedToNode(ContextRenderLock & r, AudioNode *);
std::unique_ptr<Panner> m_panner;
std::unique_ptr<DistanceEffect> m_distanceEffect;
std::unique_ptr<ConeEffect> m_coneEffect;
float m_lastGain = -1.0f;
float m_sampleRate;
};
} // namespace lab
#endif // PannerNode_h

View File

@ -0,0 +1,45 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2020, The LabSound Authors. All rights reserved.
#include <chrono>
namespace lab
{
// One accumulated timing measurement, in fractional microseconds.
// `finalized` is true whenever the value is safe to read.
struct ProfileSample
{
    ProfileSample() { zero(); }

    bool finalized = false;
    std::chrono::duration<float, std::micro> microseconds;

    // Reset the measurement to zero and mark it complete.
    void zero()
    {
        microseconds = std::chrono::duration<float, std::micro>::zero();
        finalized = true;
    }
};
// RAII timer: marks the sample as in-progress on construction and stores the
// elapsed wall-clock time into it on destruction (or on explicit finalize()).
// NOTE(review): uses system_clock, which is not monotonic; steady_clock would
// be the conventional choice for interval timing -- confirm before changing.
struct ProfileScope
{
    explicit ProfileScope(ProfileSample& sample)
        : s(&sample)
    {
        _start = std::chrono::system_clock::now();
        sample.finalized = false;
    }

    ~ProfileScope()
    {
        finalize();
    }

    // Record the elapsed time once; subsequent calls are no-ops.
    void finalize()
    {
        if (s->finalized)
            return;
        s->microseconds = std::chrono::system_clock::now() - _start;
        s->finalized = true;
    }

    ProfileSample* s = nullptr;
    std::chrono::system_clock::time_point _start;
};
} // lab

View File

@ -0,0 +1,104 @@
// License: BSD 2 Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef SampledAudioNode_h
#define SampledAudioNode_h
#include "LabSound/core/AudioScheduledSourceNode.h"
#include <memory>
namespace lab
{
class AudioContext;
class AudioBus;
class AudioParam;
class ContextRenderLock;
class SampledAudioNode;
struct SRC_Resampler;
// SampledAudioNode is intended for in-memory sounds. It provides a high degree of scheduling
// flexibility (can playback in rhythmically exact ways).
class SampledAudioNode final : public AudioScheduledSourceNode
{
virtual void reset(ContextRenderLock& r) override {}
virtual double tailTime(ContextRenderLock& r) const override { return 0; }
virtual double latencyTime(ContextRenderLock& r) const override { return 0; }
virtual bool propagatesSilence(ContextRenderLock& r) const override { return false; }
// Opaque implementation details (defined in the .cpp).
struct Scheduled;
struct Internals;
Internals* _internals;
std::shared_ptr<AudioSetting> m_sourceBus;
std::shared_ptr<AudioBus> m_pendingSourceBus; // the most recently assigned bus
std::shared_ptr<AudioBus> m_retainedSourceBus; // the bus used in computation, eventually agrees with m_pendingSourceBus.
std::shared_ptr<AudioParam> m_playbackRate;
std::shared_ptr<AudioParam> m_detune;
std::shared_ptr<AudioParam> m_dopplerRate;
// One resampler per channel, presumably -- confirm in the implementation.
std::vector<std::shared_ptr<SRC_Resampler>> _resamplers;
// totalPitchRate() returns the instantaneous pitch rate (non-time preserving).
// It incorporates the base pitch rate, any sample-rate conversion factor from the buffer,
// and any doppler shift from an associated panner node.
float totalPitchRate(ContextRenderLock&);
// Renders one scheduled playback instance into the output at the given offset.
bool renderSample(ContextRenderLock& r, Scheduled&, size_t destinationSampleOffset, size_t frameSize);
virtual void process(ContextRenderLock&, int framesToProcess) override;
public:
SampledAudioNode() = delete;
explicit SampledAudioNode(AudioContext&);
virtual ~SampledAudioNode();
static const char* static_name() { return "SampledAudio"; }
virtual const char* name() const override { return static_name(); }
// setting the bus is an asynchronous operation. getBus returns the most
// recent set request in order that the interface work in a predictable way.
// In the future, setBus and getBus could be deprecated in favor of another
// schedule method that takes a source bus as an argument.
void setBus(ContextRenderLock&, std::shared_ptr<AudioBus> sourceBus);
std::shared_ptr<AudioBus> getBus() const { return m_pendingSourceBus; }
// loopCount of -1 will loop forever
// all the schedule routines will call start(0) if necessary, so that a
// schedule is sufficient for this node to start producing a signal.
//
// relative_when: seconds from now; grainOffset/grainDuration: seconds into the bus.
void schedule(float relative_when);
void schedule(float relative_when, int loopCount);
void schedule(float relative_when, float grainOffset, int loopCount);
void schedule(float relative_when, float grainOffset, float grainDuration, int loopCount);
// note: start is not virtual. start on the ScheduledAudioNode is relative,
// but start here is in absolute time.
void start(float abs_when);
void start(float abs_when, int loopCount);
void start(float abs_when, float grainOffset, int loopCount);
void start(float abs_when, float grainOffset, float grainDuration, int loopCount);
// this will clear anything playing or pending, without stopping the node itself.
void clearSchedules();
std::shared_ptr<AudioParam> playbackRate() { return m_playbackRate; }
std::shared_ptr<AudioParam> detune() { return m_detune; }
std::shared_ptr<AudioParam> dopplerRate() { return m_dopplerRate; }
// returns the greatest sample index played back by any of the scheduled
// instances in the most recent render quantum. A value less than zero
// indicates nothing's playing.
int32_t getCursor() const;
};
} // namespace lab
#endif // SampledAudioNode

View File

@ -0,0 +1,58 @@
// License: BSD 2 Clause
// Copyright (C) 2014, The Chromium Authors. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef StereoPannerNode_h
#define StereoPannerNode_h
#include "LabSound/core/AudioArray.h"
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/Macros.h"
#include <memory>
#include <string>
namespace lab
{
class Spatializer;
// StereoPannerNode is an AudioNode with one input and one output. It is
// specifically designed for equal-power stereo panning.
// irrespective of the number of input channels, the output channel count is always two.
class StereoPannerNode : public AudioNode
{
public:
StereoPannerNode() = delete;
explicit StereoPannerNode(AudioContext& ac);
virtual ~StereoPannerNode();
static const char* static_name() { return "StereoPanner"; }
virtual const char* name() const override { return static_name(); }
// Pan position param. Range is presumably [-1, +1] (full left to full
// right), per the Web Audio StereoPannerNode convention -- confirm.
std::shared_ptr<AudioParam> pan() { return m_pan; }
private:
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
virtual void initialize() override;
virtual void uninitialize() override;
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
// No-op: output is always stereo regardless of input channel count.
virtual void conformChannelCounts() override {};
std::shared_ptr<AudioParam> m_pan;
// Spatializer implementation that performs the equal-power panning.
std::unique_ptr<Spatializer> m_stereoPanner;
// Scratch buffer for sample-accurate pan values within a render quantum.
std::unique_ptr<AudioFloatArray> m_sampleAccuratePanValues;
};
} // namespace lab
#endif // StereoPannerNode_h

View File

@ -0,0 +1,40 @@
// License: BSD 2 Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef WaveShaperNode_h
#define WaveShaperNode_h
#include "LabSound/core/AudioNode.h"
#include <vector>
namespace lab {
// Applies a non-linear shaping curve to the signal (distortion-style effect):
// each input sample is mapped through m_curve.
class WaveShaperNode : public AudioNode
{
public:
WaveShaperNode(AudioContext & ac);
virtual ~WaveShaperNode();
static const char* static_name() { return "WaveShaper"; }
virtual const char* name() const override { return static_name(); }
// copies the curve
void setCurve(std::vector<float> & curve);
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock&) override {}
protected:
// Maps framesToProcess samples from source through the curve into destination.
void processBuffer(ContextRenderLock&, const float* source, float* destination, int framesToProcess);
std::vector<float> m_curve;
// NOTE(review): raw owning(?) pointer, presumably a pending curve swapped in
// on the render thread -- confirm ownership/lifetime in the implementation.
std::vector<float>* m_newCurve = nullptr;
};
} // namespace lab
#endif

View File

@ -0,0 +1,67 @@
// License: BSD 3 Clause
// Copyright (C) 2012, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef WaveTable_h
#define WaveTable_h
#include "LabSound/core/AudioArray.h"
#include "LabSound/core/AudioNode.h"
#include <memory>
#include <vector>
namespace lab
{
// Band-limited wavetable for the oscillator: stores multiple pre-rendered
// tables ("pitch ranges") with decreasing partial counts to avoid aliasing
// at higher fundamental frequencies.
class WaveTable
{
public:
WaveTable(const float sampleRate, OscillatorType basicWaveform);
// Builds the table from caller-supplied Fourier coefficients (real/imag).
WaveTable(const float sampleRate, OscillatorType basicWaveform, std::vector<float> & real, std::vector<float> & imag);
~WaveTable();
// Returns pointers to the lower and higher wavetable data for the pitch range containing
// the given fundamental frequency. These two tables are in adjacent "pitch" ranges
// where the higher table will have the maximum number of partials which won't alias when played back
// at this fundamental frequency. The lower wavetable is the next range containing fewer partials than the higher wavetable.
// Interpolation between these two tables can be made according to tableInterpolationFactor.
// Where values from 0 -> 1 interpolate between lower -> higher.
void waveDataForFundamentalFrequency(float, float *& lowerWaveData, float *& higherWaveData, float & tableInterpolationFactor);
// Returns the scalar multiplier to the oscillator frequency to calculate wave table phase increment.
float rateScale() const { return m_rateScale; }
unsigned periodicWaveSize() const;
private:
// Fills the table with coefficients for one of the built-in waveforms.
void generateBasicWaveform(OscillatorType);
int m_numberOfRanges;
// Spacing between adjacent pitch ranges, in cents.
float m_centsPerRange;
// The lowest frequency (in Hertz) where playback will include all of the partials.
// Playing back lower than this frequency will gradually lose more high-frequency information.
// This frequency is quite low (~10Hz @ 44.1KHz)
float m_lowestFundamentalFrequency;
float m_sampleRate;
float m_rateScale;
int numberOfRanges() const { return m_numberOfRanges; }
// Maximum possible number of partials (before culling).
int maxNumberOfPartials() const;
int numberOfPartialsForRange(int rangeIndex) const;
// Creates tables based on numberOfComponents Fourier coefficients.
void createBandLimitedTables(const float * real, const float * imag, int numberOfComponents);
// One band-limited table per pitch range.
std::vector<std::unique_ptr<AudioFloatArray>> m_bandLimitedTables;
};
} // namespace lab
#endif // WaveTable_h

View File

@ -0,0 +1,265 @@
// License: BSD 2 Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef lab_window_functions_h
#define lab_window_functions_h
#include <cmath>
#include <vector>
#include "LabSound/core/Macros.h"
namespace lab
{
enum class WindowFunction
{
rectangle, // aka the boxcar or Dirichlet window
cosine, // aka the sine window
hann, // generalized raised cosine, order 1, aka the raised cosine window
hamming, // generalized raised cosine, order 1 (modified hann)
blackman, // generalized raised cosine, order 2
nutall, // generalized raised cosine, order 3
blackman_harris, // generalized raised cosine, order 3
blackman_nutall, // generalized raised cosine, order 3
hann_poisson, // Hann window multiplied by a Poisson window
gaussian50, // gaussian with a sigma of 0.50
gaussian25, // gaussian with a sigma of 0.25
welch, //
bartlett, // aka the (symmetric) triangular window
bartlett_hann, //
parzen, // B-spline, order 4 (a triangle shape)
flat_top, // generalized raised cosine, order 4
lanczos // aka the sinc window
};
static constexpr char const * const s_window_types[] = {
"rectangle",
"cosine",
"hann",
"hamming",
"blackman",
"nutall",
"blackman_harris",
"blackman_nutall",
"hann_poisson",
"gaussian50",
"gaussian25",
"welch",
"bartlett",
"bartlett_hann",
"parzen",
"flat_top",
"lanczos",
nullptr
};
// Inspired by https://github.com/idiap/libssp/blob/master/ssp/window.cpp
// These are implementations of the generalized raised cosine window, up to order 4
// https://ccrma.stanford.edu/~jos/sasp/Hann_Hanning_Raised_Cosine.html
namespace detail
{
    // Normalized sinc: sin(pi*x) / (pi*x), with the removable singularity
    // at x == 0 defined as 1.
    inline float sinc(const float x)
    {
        if (x == 0.0f)
            return 1.0f;
        const float px = x * static_cast<float>(LAB_PI);
        return std::sin(px) / px;
    }

    // Generalized raised cosine, order 1 (Hann/Hamming family).
    inline float gen_cosine_1(const float max_index, const uint32_t idx, const float alpha, const float beta)
    {
        const float w = (static_cast<float>(LAB_TAU) * idx) / max_index;
        return alpha - beta * std::cos(w);
    }

    // Order 2: adds the second harmonic term (Blackman family).
    inline float gen_cosine_2(const float max_index, const uint32_t idx, const float alpha, const float beta, const float gamma)
    {
        const float w = (2 * static_cast<float>(LAB_TAU) * idx) / max_index;
        return gen_cosine_1(max_index, idx, alpha, beta) + gamma * std::cos(w);
    }

    // Order 3: adds the third harmonic term (Nuttall / Blackman-Harris family).
    inline float gen_cosine_3(const float max_index, const uint32_t idx, const float alpha, const float beta, const float gamma, const float delta)
    {
        const float w = (3 * static_cast<float>(LAB_TAU) * idx) / max_index;
        return gen_cosine_2(max_index, idx, alpha, beta, gamma) - delta * std::cos(w);
    }

    // Order 4: adds the fourth harmonic term (flat-top family).
    inline float gen_cosine_4(const float max_index, const uint32_t idx, const float alpha, const float beta, const float gamma, const float delta, const float epsilon)
    {
        const float w = (4 * static_cast<float>(LAB_TAU) * idx) / max_index;
        return gen_cosine_3(max_index, idx, alpha, beta, gamma, delta) + epsilon * std::cos(w);
    }

    // Gaussian term centered on the window midpoint with the given sigma.
    inline float gaussian(const float max_index, const uint32_t idx, const float sigma)
    {
        const float half = max_index * 0.5f;
        const float fract = (idx - half) / (sigma * half);
        return std::exp(-0.5f * (fract * fract));
    }
}
// Reference https://github.com/spurious/snd-mirror/blob/master/clm.c
//
// Applies the selected window to `buffer` (length `window_size`) in place:
// each sample is multiplied by the window coefficient for its index.
// NOTE(review): the rectangle case *overwrites* samples with 1.f instead of
// multiplying (a true rectangle window would leave the buffer untouched).
// That is inconsistent with every other case; preserved here in case callers
// rely on it to fill a coefficient table -- confirm intent before changing.
inline void ApplyWindowFunctionInplace(const WindowFunction type, float * buffer, const int window_size)
{
    const float max_index = static_cast<float>(window_size) - 1.f;
    switch (type)
    {
        case WindowFunction::rectangle:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] = 1.f; // or * 0.5f?
            }
        }
        break;
        case WindowFunction::cosine:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= std::sin((static_cast<float>(LAB_PI) * i) / max_index);
            }
        }
        break;
        case WindowFunction::hann:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gen_cosine_1(max_index, i, 0.5f, 0.5f);
            }
        }
        break;
        case WindowFunction::hamming:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gen_cosine_1(max_index, i, 0.54f, 0.46f);
            }
        }
        break;
        case WindowFunction::blackman:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gen_cosine_2(max_index, i, 0.42f, 0.50f, 0.08f);
            }
        }
        break;
        case WindowFunction::nutall:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gen_cosine_3(max_index, i, 0.355768f, 0.487396f, 0.144232f, 0.012604f);
            }
        }
        break;
        case WindowFunction::blackman_harris:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gen_cosine_3(max_index, i, 0.35875f, 0.48829f, 0.14128f, 0.01168f);
            }
        }
        break;
        case WindowFunction::blackman_nutall:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gen_cosine_3(max_index, i, 0.3635819f, 0.4891775f, 0.1365995f, 0.0106411f);
            }
        }
        break;
        case WindowFunction::hann_poisson:
        {
            for (int i = 0; i < window_size; ++i)
            {
                const float alpha = 2.f;
                const float a = 1.f - std::cos((static_cast<float>(LAB_TAU) * i) / max_index);
                const float b = (-alpha * std::abs(max_index - 2.f * i)) / max_index;
                // std::exp (was unqualified exp): use the float overload and
                // avoid depending on a global-namespace C declaration.
                buffer[i] *= 0.5f * a * std::exp(b);
            }
        }
        break;
        case WindowFunction::gaussian50:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gaussian(max_index, i, 0.50f);
            }
        }
        break;
        case WindowFunction::gaussian25:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gaussian(max_index, i, 0.25f);
            }
        }
        break;
        case WindowFunction::welch:
        {
            for (int i = 0; i < window_size; ++i)
            {
                const float num = i - (max_index * 0.5f);
                const float denom = (window_size + 1.f) * 0.5f;
                const float fract = num / denom;
                buffer[i] *= 1.f - fract * fract;
            }
        }
        break;
        case WindowFunction::bartlett:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= 2.f / (window_size - 1.f) * (max_index / 2.f - std::abs(i - max_index / 2.f));
            }
        }
        break;
        case WindowFunction::bartlett_hann:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gen_cosine_2(max_index, i, 0.63f, 0.48f, 0.38f);
            }
        }
        break;
        case WindowFunction::parzen:
        {
            for (int i = 0; i < window_size; ++i)
            {
                // std::abs (was unqualified abs): the unqualified name can
                // resolve to the C `int abs(int)` overload and silently
                // truncate the float argument.
                buffer[i] *= 1.f - std::abs((2.f * i - window_size) / (window_size + 1.f));
            }
        }
        break;
        case WindowFunction::flat_top:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::gen_cosine_4(max_index, i, 1.f, 1.93f, 1.29f, 0.388f, 0.028f);
            }
        }
        break;
        case WindowFunction::lanczos:
        {
            for (int i = 0; i < window_size; ++i)
            {
                buffer[i] *= detail::sinc(2.f * i / max_index - 1.f);
            }
        }
        break;
    }
}
} // namespace lab
#endif // end lab_window_functions_h

View File

@ -0,0 +1,50 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015, The LabSound Authors. All rights reserved.
#ifndef ADSR_NODE_H
#define ADSR_NODE_H
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioContext.h"
#include "LabSound/core/AudioSetting.h"
namespace lab
{
// Attack/Decay/Sustain/Release envelope generator, driven by the gate param.
class ADSRNode : public AudioNode
{
// Opaque implementation (defined in the .cpp).
class ADSRNodeImpl;
ADSRNodeImpl * adsr_impl;
public:
ADSRNode(AudioContext &);
virtual ~ADSRNode();
static const char* static_name() { return "ADSR"; }
virtual const char* name() const override { return static_name(); }
// This function will return true after the release period (only if a noteOff has been issued).
bool finished(ContextRenderLock &);
// Convenience setter for all envelope stages at once (times in seconds).
void set(float attack_time, float attack_level, float decay_time, float sustain_time, float sustain_level, float release_time);
virtual void process(ContextRenderLock& r, int bufferSize) override;
virtual void reset(ContextRenderLock&) override;
virtual double tailTime(ContextRenderLock& r) const override { return 0.; }
virtual double latencyTime(ContextRenderLock& r) const override { return 0.f; }
std::shared_ptr<AudioParam> gate() const; // gate signal
std::shared_ptr<AudioSetting> oneShot() const; // If zero, gate controls attack and sustain, else sustainTime controls sustain
std::shared_ptr<AudioSetting> attackTime() const; // Duration in seconds
std::shared_ptr<AudioSetting> attackLevel() const; // Level
std::shared_ptr<AudioSetting> decayTime() const; // Duration in seconds
std::shared_ptr<AudioSetting> sustainTime() const; // Duration in seconds
std::shared_ptr<AudioSetting> sustainLevel() const; // Level
std::shared_ptr<AudioSetting> releaseTime() const; // Duration in seconds
};
}
#endif

View File

@ -0,0 +1,94 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
//#define DEBUG_LOCKS
#ifndef AUDIO_CONTEXT_LOCK_H
#define AUDIO_CONTEXT_LOCK_H
#include "LabSound/core/AudioContext.h"
#include "LabSound/extended/Logging.h"
#include <iostream>
#include <mutex>
//#define DEBUG_LOCKS
namespace lab
{
// RAII guard for an AudioContext's graph mutex. `lockSuitor` records who
// holds the lock; a trailing '~' appended on release marks it as free.
class ContextGraphLock
{
    AudioContext * m_context = nullptr;

public:
    ContextGraphLock(AudioContext * context, const std::string & lockSuitor)
    {
#if defined(DEBUG_LOCKS)
        // Fixed: the original dereferenced `context` here before the null
        // check below, crashing debug builds on a null context.
        if (context)
        {
            bool reentrant = context->m_graphLocker.size() > 1 && context->m_graphLocker.back() != '~';
            if (reentrant)
            {
                LOG_ERROR("%s cannot acquire an AudioContext ContextGraphLock. Currently held by: %s.", lockSuitor.c_str(), context->m_graphLocker.c_str());
            }
        }
#endif
        if (context)
        {
            context->m_graphLock.lock();
            m_context = context;
            m_context->m_graphLocker = lockSuitor;
        }
    }

    ~ContextGraphLock()
    {
        if (m_context)
        {
            // Mark released by appending '~' to the suitor string.
            m_context->m_graphLocker += '~';
            m_context->m_graphLock.unlock();
        }
    }

    AudioContext * context() { return m_context; }
};
// RAII guard for an AudioContext's render mutex (same protocol as
// ContextGraphLock, but for m_renderLock/m_renderLocker).
class ContextRenderLock
{
    AudioContext * m_context = nullptr;

public:
    ContextRenderLock(AudioContext * context, const std::string & lockSuitor)
    {
#if defined(DEBUG_LOCKS)
        // Fixed two copy-paste bugs from ContextGraphLock: the check now
        // inspects m_renderLocker (was m_graphLocker) and the message names
        // the render lock; also guard against a null context before
        // dereferencing it (the null check previously came only later).
        if (context)
        {
            bool reentrant = context->m_renderLocker.size() > 1 && context->m_renderLocker.back() != '~';
            if (reentrant)
            {
                LOG_ERROR("%s cannot acquire an AudioContext ContextRenderLock. Currently held by: %s.", lockSuitor.c_str(), context->m_renderLocker.c_str());
            }
        }
#endif
        if (context)
        {
            context->m_renderLock.lock();
            m_context = context;
            m_context->m_renderLocker = lockSuitor;
        }
    }

    ~ContextRenderLock()
    {
        if (m_context)
        {
            // Mark released by appending '~' to the suitor string.
            m_context->m_renderLocker += '~';
            m_context->m_renderLock.unlock();
        }
    }

    AudioContext * context() { return m_context; }
};
} // end namespace lab
#endif

View File

@ -0,0 +1,36 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef AudioFileReader_H
#define AudioFileReader_H
#include "LabSound/core/AudioBus.h"
#include "LabSound/extended/AudioContextLock.h"
#include <memory>
#include <stdint.h>
#include <string>
#include <vector>
namespace lab
{
// Performs filesystem i/o to decode a file (using libnyquist)
// mixToMono collapses multi-channel sources to a single channel.
// Returns the decoded bus; behavior on failure (null vs. throw) is defined
// by the implementation -- confirm before relying on it.
std::shared_ptr<AudioBus> MakeBusFromFile(const char * filePath, bool mixToMono);
std::shared_ptr<AudioBus> MakeBusFromFile(const std::string & path, bool mixToMono);
// Performs filesystem i/o to decode a file (using libnyquist)
// Resamples the data to the specified rate. A typical application is to resample an audio
// file to the device rate; perhaps from 44.1 to 48 khz. Resampling at load time saves
// run time overhead.
std::shared_ptr<AudioBus> MakeBusFromFile(const char * filePath, bool mixToMono, float targetSampleRate);
std::shared_ptr<AudioBus> MakeBusFromFile(const std::string & path, bool mixToMono, float targetSampleRate);
// Loads and decodes a raw binary memory chunk making use of magic numbers to determine filetype.
std::shared_ptr<AudioBus> MakeBusFromMemory(const std::vector<uint8_t> & buffer, bool mixToMono);
// Loads and decodes a raw binary memory chunk where the file extension (mp3, wav, ogg, etc) is already known.
std::shared_ptr<AudioBus> MakeBusFromMemory(const std::vector<uint8_t> & buffer, const std::string & extension, bool mixToMono);
}
#endif

View File

@ -0,0 +1,42 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef BPM_DELAY_NODE_H
#define BPM_DELAY_NODE_H
#include "LabSound/core/AudioBasicProcessorNode.h"
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioSetting.h"
#include "LabSound/core/DelayNode.h"
namespace lab
{
// A DelayNode whose delay time is expressed musically, as a tempo (BPM)
// plus a note division, instead of directly in seconds.
class BPMDelay : public DelayNode
{
// Tempo in beats per minute.
float tempo;
// Multiplier applied to one beat; presumably set via SetDelayIndex from the
// `times` table -- confirm in the implementation.
int noteDivision;
std::vector<float> times;
// Delay seconds = (60 / tempo) * noteDivision, i.e. noteDivision beats.
void recomputeDelay()
{
float dT = float(60.0f * noteDivision) / tempo;
delayTime()->setFloat(dT);
}
public:
BPMDelay(AudioContext & ac, float tempo);
virtual ~BPMDelay();
void SetTempo(float newTempo)
{
tempo = newTempo;
recomputeDelay();
}
// Selects a musical note division (e.g. quarter, eighth) for the delay.
void SetDelayIndex(TempoSync value);
};
}
#endif

View File

@ -0,0 +1,46 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef CLIP_NODE_H
#define CLIP_NODE_H
#include "LabSound/core/AudioBasicProcessorNode.h"
#include "LabSound/core/AudioParam.h"
namespace lab
{
// ClipNode clips a signal, using either thresholding or tanh
//
// params: a, b
// settings: mode
//
class ClipNode : public AudioBasicProcessorNode
{
// Opaque implementation (defined in the .cpp).
class ClipNodeInternal;
ClipNodeInternal * internalNode;
public:
// CLIP: hard threshold clipping; TANH: soft saturation via tanh.
enum Mode
{
CLIP = 0,
TANH = 1,
_Count = 2
};
ClipNode(AudioContext & ac);
virtual ~ClipNode();
static const char* static_name() { return "Clip"; }
virtual const char* name() const override { return static_name(); }
void setMode(Mode m);
// in CLIP mode, a is the min value, and b is the max value.
// in TANH mode, a is the overall gain, and b is the input gain.
// The higher the input gain the more severe the distortion.
std::shared_ptr<AudioParam> aVal();
std::shared_ptr<AudioParam> bVal();
};
}
#endif

View File

@ -0,0 +1,46 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef DIODE_NODE_H
#define DIODE_NODE_H
#include "LabSound/core/WaveShaperNode.h"
namespace lab
{
// The diode node, used in conjunction with a gain node, will modulate
// a source so that it sounds like audio being over-driven through a vacuum tube diode.
//
// source -+-> diode ----> gain_level
// | |
// +-------------> gain ---------------> output
//
// params:
// settings: distortion, vb, vl
//
// Wave-shaper configured with a diode transfer curve; sounds like audio
// over-driven through a vacuum tube diode (see the diagram above).
class DiodeNode : public WaveShaperNode
{
// Rebuild the wave-shaping curve from _distortion/_vb/_vl.
void _precalc();
// NOTE(review): a separate WaveShaperNode member although this class
// already derives from WaveShaperNode — confirm which one the
// implementation actually drives.
std::shared_ptr<WaveShaperNode> waveShaper;
std::shared_ptr<AudioSetting> _distortion;
std::shared_ptr<AudioSetting> _vb; // curve shape control
std::shared_ptr<AudioSetting> _vl; // curve shape control
public:
DiodeNode(AudioContext &);
virtual ~DiodeNode() = default;
static const char* static_name() { return "Diode"; }
virtual const char* name() const override { return static_name(); }
// Set the distortion amount and re-derive the shaping curve.
void setDistortion(float distortion = 1.0);
std::shared_ptr<AudioSetting> distortion() const { return _distortion; };
std::shared_ptr<AudioSetting> vb() const { return _vb; };
std::shared_ptr<AudioSetting> vl() const { return _vl; }
};
}
#endif

View File

@ -0,0 +1,46 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef FUNCTION_NODE_H
#define FUNCTION_NODE_H
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/AudioScheduledSourceNode.h"
namespace lab
{
// A scheduled source node whose output is produced by a user-supplied
// callback, invoked per channel with the buffer to fill.
class FunctionNode : public AudioScheduledSourceNode
{
public:
FunctionNode(AudioContext & ac, int channels = 1);
virtual ~FunctionNode();
static const char* static_name() { return "Function"; }
virtual const char* name() const override { return static_name(); }
// Install the render callback. Called from the render thread; the
// callback fills `buffer` (bufferSize samples) for `channel`.
void setFunction(std::function<void(ContextRenderLock & r, FunctionNode * me, int channel, float * buffer, int bufferSize)> fn)
{
_function = fn;
}
virtual void process(ContextRenderLock & r, int bufferSize) override;
virtual void reset(ContextRenderLock & r) override;
// Current render time in seconds, advanced by process().
double now() const { return _now; }
private:
virtual bool propagatesSilence(ContextRenderLock & r) const override;
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
std::function<void(ContextRenderLock & r, FunctionNode * me, int channel, float * values, int bufferSize)> _function;
double _now = 0.0;
};
} // end namespace lab
#endif

View File

@ -0,0 +1,124 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2020, The LabSound Authors. All rights reserved.
#ifndef labsound_granulation_node_h
#define labsound_granulation_node_h
#include "LabSound/core/AudioContext.h"
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/AudioScheduledSourceNode.h"
#include "LabSound/core/AudioSetting.h"
#include <algorithm>
#include <cmath>
namespace lab
{
// Granular synthesis source: renders a pool of short windowed "grains"
// cut from a source AudioBus.
class GranulationNode : public AudioScheduledSourceNode
{
// One playing grain: a region of `sample`, enveloped by `window`.
struct grain
{
std::shared_ptr<AudioBus> sample;
std::shared_ptr<AudioBus> window;
bool in_use {true};
// Values in samples
uint64_t grain_start {0};
uint64_t grain_duration {0};
uint64_t grain_end {0};
uint64_t envelope_index {0};
double playback_frequency;
double sample_increment;
double sample_accurate_time; // because of double precision, it's actually subsample-accurate
// position between 0-1, duration in seconds
grain(std::shared_ptr<AudioBus> _sample, std::shared_ptr<AudioBus> _window, const float sample_rate,
const double _position_offset, const double _duration, const double _speed) : sample(_sample), window(_window)
{
grain_start = static_cast<uint64_t>(sample->length() * _position_offset);
grain_duration = static_cast<uint64_t>(_duration * sample_rate);
// Clamp the grain to the end of the source material.
grain_end = std::min(static_cast<uint64_t>(sample->length()), grain_start + grain_duration);
playback_frequency = (1.0 / _duration) * _speed;
// Negative playback starts at the end of the region.
sample_accurate_time = static_cast<double>((playback_frequency > 0) ? grain_start : grain_end);
sample_increment = (playback_frequency != 0.0) ? grain_duration / (sample_rate / playback_frequency) : 0.0;
}
// Accumulate num_frames of this grain into out_buffer (+=, not =);
// the caller is expected to have zeroed or mixed the buffer.
// NOTE(review): assumes both sample and window have at least one
// channel, and that the window bus holds at least grain_duration
// frames — confirm against GranulationNode.cpp.
void tick(float * out_buffer, const int num_frames)
{
const float * windowSamples = window->channel(0)->data();
for (int i = 0; i < num_frames; ++i)
{
double result = 0.f;
if (in_use)
{
sample_accurate_time += sample_increment;
// Looping behavior
if (sample_accurate_time >= grain_end)
{
sample_accurate_time = static_cast<double>(grain_start);
}
const uint64_t approximate_sample_index = static_cast<uint64_t>(std::floor(sample_accurate_time));
const double remainder = sample_accurate_time - approximate_sample_index;
uint64_t left = approximate_sample_index;
uint64_t right = approximate_sample_index + 1;
// Wrap the interpolation neighbor at the end of the source.
if (right >= sample->length()) right = 0;
// interpolate sample positions (primarily for speed alterations, can be more sophisticated with this later)
result = (double) ((1.0 - remainder) * sample->channel(0)->data()[left] + remainder * sample->channel(0)->data()[right]);
}
// Envelope position wraps every grain_duration frames; the
// commented-out line would instead retire the grain.
envelope_index++;
if (envelope_index == grain_duration)
{
//in_use = false;
envelope_index = 0;
}
// Apply envelope
out_buffer[i] += static_cast<float>(result * windowSamples[envelope_index]);
}
}
};
virtual bool propagatesSilence(ContextRenderLock & r) const override;
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
// Mix all grains in grain_pool into the destination bus.
bool RenderGranulation(ContextRenderLock &, AudioBus *, int destinationFrameOffset, int numberOfFrames);
std::vector<grain> grain_pool;
std::shared_ptr<lab::AudioBus> window_bus;
public:
GranulationNode(AudioContext & ac);
virtual ~GranulationNode();
static const char* static_name() { return "Granulation"; }
virtual const char* name() const override { return static_name(); }
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
// Set the AudioBus that grains are cut from; returns success.
bool setGrainSource(ContextRenderLock &, std::shared_ptr<AudioBus> sourceBus);
std::shared_ptr<AudioBus> getGrainSource() const { return grainSourceBus->valueBus(); }
// Settings and parameters controlling grain generation.
std::shared_ptr<AudioSetting> grainSourceBus;
std::shared_ptr<AudioSetting> windowFunc;
std::shared_ptr<AudioParam> numGrains;
std::shared_ptr<AudioParam> grainDuration;
std::shared_ptr<AudioParam> grainPositionMin;
std::shared_ptr<AudioParam> grainPositionMax;
std::shared_ptr<AudioParam> grainPlaybackFreq;
};
} // end namespace lab
#endif // end labsound_granulation_node_h

View File

@ -0,0 +1,58 @@
// SPDX-License-Identifier: MIT
/**
* Copyright (c) 2017 rxi
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the MIT license. See `log.c` for details.
*/
// retrieved from https://github.com/rxi/log.c
#ifndef LABSOUND_LOGGING_H
#define LABSOUND_LOGGING_H
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
#define LOG_VERSION "0.1.0"
// Optional lock callback; `lock` is nonzero to acquire, zero to release.
typedef void (*log_LockFn)(void *udata, int lock);
// Severity levels, lowest (most verbose) first.
enum { LOGLEVEL_TRACE = 0, LOGLEVEL_DEBUG, LOGLEVEL_INFO,
LOGLEVEL_WARN, LOGLEVEL_ERROR, LOGLEVEL_FATAL };
// Convenience macros that capture the call site automatically.
#define LOG_TRACE(...) LabSoundLog(LOGLEVEL_TRACE, __FILE__, __LINE__, __VA_ARGS__)
#define LOG_DEBUG(...) LabSoundLog(LOGLEVEL_DEBUG, __FILE__, __LINE__, __VA_ARGS__)
#define LOG_INFO(...) LabSoundLog(LOGLEVEL_INFO, __FILE__, __LINE__, __VA_ARGS__)
#define LOG_WARN(...) LabSoundLog(LOGLEVEL_WARN, __FILE__, __LINE__, __VA_ARGS__)
#define LOG_ERROR(...) LabSoundLog(LOGLEVEL_ERROR, __FILE__, __LINE__, __VA_ARGS__)
#define LOG_FATAL(...) LabSoundLog(LOGLEVEL_FATAL, __FILE__, __LINE__, __VA_ARGS__)
// Logger configuration (see log.c from rxi/log.c for semantics).
void log_set_udata(void *udata);
void log_set_lock(log_LockFn fn);
void log_set_fp(FILE *fp);
void log_set_level(int level);
void log_set_quiet(int enable);
// Core printf-style logging entry point used by the macros above.
void LabSoundLog(int level, const char *file, int line, const char *fmt, ...);
void LabSoundAssertLog(const char * file, int line, const char * function, const char * assertion);
// Dead legacy API kept for reference; never compiled (#if 0).
#if 0
#if defined(_DEBUG) || defined(DEBUG) || defined(LABSOUND_ENABLE_LOGGING)
#define LOG(...) LabSoundLog(__FILE__, __LINE__, __VA_ARGS__);
#define LOG_ERROR(...) LabSoundLog(__FILE__, __LINE__, __VA_ARGS__)
#define LOG_VERBOSE(channel, ...) LabSoundLog(__FILE__, __LINE__, __VA_ARGS__)
#else
#define LOG(...)
#define LOG_ERROR(channel, ...)
#define LOG_VERBOSE(channel, ...)
#endif
void LabSoundLog(const char * file, int line, const char * fmt, ...);
void LabSoundAssertLog(const char * file, int line, const char * function, const char * assertion);
#endif
#endif

View File

@ -0,0 +1,62 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef NOISE_NODE_H
#define NOISE_NODE_H
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/AudioScheduledSourceNode.h"
namespace lab
{
class AudioSetting;
// Scheduled source producing white, pink, or brown noise.
class NoiseNode : public AudioScheduledSourceNode
{
public:
enum NoiseType
{
WHITE = 0,
PINK = 1,
BROWN = 2,
_Count = 3
};
NoiseNode(AudioContext & ac);
virtual ~NoiseNode();
static const char* static_name() { return "Noise"; }
virtual const char* name() const override { return static_name(); }
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
// Noise color currently selected (stored in the _type setting).
NoiseType type() const;
void setType(NoiseType newType);
private:
virtual bool propagatesSilence(ContextRenderLock & r) const override;
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
std::shared_ptr<AudioSetting> _type;
// PRNG seed for white noise generation.
uint32_t whiteSeed = 1489853723;
// Filter state for brown noise (integrated white noise).
float lastBrown = 0;
// Filter bank state for pink noise approximation.
float pink0 = 0;
float pink1 = 0;
float pink2 = 0;
float pink3 = 0;
float pink4 = 0;
float pink5 = 0;
float pink6 = 0;
};
}
#endif

View File

@ -0,0 +1,32 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef PWM_NODE_H
#define PWM_NODE_H
#include "LabSound/core/AudioBasicProcessorNode.h"
#include "LabSound/core/AudioParam.h"
namespace lab
{
// PWMNode implements a comparison based PWM. That could be improved.
// Expects two inputs.
// input 0 is the carrier, and input 1 is the modulator.
// If there is no modulator, then the node is a pass-through.
// Comparison-based pulse-width modulation (see file comment above):
// input 0 is the carrier, input 1 the modulator; with no modulator the
// node passes the carrier through unchanged.
class PWMNode : public AudioBasicProcessorNode
{
class PWMNodeInternal;
// Implementation detail; ownership handled in PWMNode.cpp.
PWMNodeInternal * internalNode;
public:
PWMNode(AudioContext & ac);
virtual ~PWMNode();
static const char* static_name() { return "PWM"; }
virtual const char* name() const override { return static_name(); }
};
}
#endif

View File

@ -0,0 +1,44 @@
// Copyright (c) 2003-2013 Nick Porcino, All rights reserved.
// License is MIT: http://opensource.org/licenses/MIT
// PureDataNode wraps an instance of pure-data as a signal processing node
#pragma once
#ifdef PD
#include "LabSound/core/AudioBasicProcessorNode.h"
namespace pd
{
class PdBase;
class PdMidiReceiver;
class PdReceiver;
}
namespace lab
{
// Wraps an embedded pure-data (libpd) instance as a processing node.
// Only compiled when PD is defined (see the #ifdef above).
class PureDataNode : public AudioBasicProcessorNode
{
public:
// NOTE(review): takes AudioContext* unlike the other nodes in this
// library which take AudioContext& — older API surface, verify callers.
PureDataNode(AudioContext *, float sampleRate);
virtual ~PureDataNode();
static const char* static_name() { return "PureData"; }
virtual const char* name() const override { return static_name(); }
// Access the underlying pd instance for patch loading / messaging.
pd::PdBase & pd() const;
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
private:
class PureDataNodeInternal;
PureDataNodeInternal * data;
};
} // end namespace lab
#endif

View File

@ -0,0 +1,57 @@
// SPDX-License-Identifier: MIT
// Copyright (C) 2015, The LabSound Authors. All rights reserved.
#ifndef PEAK_COMP_NODE_H
#define PEAK_COMP_NODE_H
// PeakComp ported to LabSound from https://github.com/twobigears/tb_peakcomp
// Copyright (C) 2013, Two Big Ears Ltd (http://twobigears.com)
/*
Stereo L+R peak compressor with variable knee-smooth, attack, release and makeup gain.
Varun Nair. varun@twobigears.com
Inspired by:
http://www.eecs.qmul.ac.uk/~josh/documents/GiannoulisMassbergReiss-dynamicrangecompression-JAES2012.pdf
https://ccrma.stanford.edu/~jos/filters/Nonlinear_Filter_Example_Dynamic.htm
*/
#include "LabSound/core/AudioBasicProcessorNode.h"
#include "LabSound/core/AudioParam.h"
namespace lab
{
class PeakCompNode : public lab::AudioBasicProcessorNode
{
class PeakCompNodeInternal;
PeakCompNodeInternal * internalNode = nullptr; // We do not own this!
public:
PeakCompNode(AudioContext & ac);
virtual ~PeakCompNode();
static const char* static_name() { return static_name(); }
virtual const char* name() const { return "PeakComp"; }
// Threshold given in dB, default 0
std::shared_ptr<AudioParam> threshold() const;
// Ratio, default 1:1
std::shared_ptr<AudioParam> ratio() const;
// Attack in ms, default .0001f
std::shared_ptr<AudioParam> attack() const;
// Release in ms, default .0001f
std::shared_ptr<AudioParam> release() const;
// Makeup gain in dB, default 0
std::shared_ptr<AudioParam> makeup() const;
// Knee smoothing (0 = hard, 1 = smooth), default 0
std::shared_ptr<AudioParam> knee() const;
};
}
#endif

View File

@ -0,0 +1,56 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef PING_PONG_DELAY_NODE_H
#define PING_PONG_DELAY_NODE_H
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/ChannelMergerNode.h"
#include "LabSound/core/ChannelSplitterNode.h"
#include "LabSound/core/GainNode.h"
#include "LabSound/extended/BPMDelayNode.h"
namespace lab
{
class ContextGraphLock;
class AudioContext;
// Base class for composite "nodes" built from a small graph of real
// nodes: exposes an input and an output GainNode; subclasses wire the
// interior graph in BuildSubgraph().
class Subgraph
{
public:
std::shared_ptr<GainNode> output;
std::shared_ptr<GainNode> input;
// Construct and connect the interior nodes between input and output.
virtual void BuildSubgraph(AudioContext & ac) = 0;
virtual ~Subgraph() {}
};
// Tempo-synced stereo ping-pong delay built as a subgraph of two
// BPMDelays cross-fed through splitter/merger and feedback gain stages.
class PingPongDelayNode : public Subgraph
{
// Shared tempo (BPM) applied to both delay lines.
float tempo;
std::shared_ptr<BPMDelay> leftDelay;
std::shared_ptr<BPMDelay> rightDelay;
std::shared_ptr<GainNode> splitterGain;
std::shared_ptr<GainNode> wetGain;
std::shared_ptr<GainNode> feedbackGain;
std::shared_ptr<ChannelMergerNode> merger;
std::shared_ptr<ChannelSplitterNode> splitter;
public:
PingPongDelayNode(AudioContext &, float tempo);
// Update tempo on both delay lines.
void SetTempo(float t);
// Feedback amount (gain applied to the fed-back signal).
void SetFeedback(float f);
// Wet level of the effect.
void SetLevel(float f);
// Note division for both delays (see BPMDelay::SetDelayIndex).
void SetDelayIndex(TempoSync value);
virtual void BuildSubgraph(AudioContext & ac) override;
};
}
#endif

View File

@ -0,0 +1,88 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2020+, The LabSound Authors. All rights reserved.
#ifndef lab_poly_blep_node_h
#define lab_poly_blep_node_h
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/AudioScheduledSourceNode.h"
#include "LabSound/core/Macros.h"
#include "LabSound/core/WaveTable.h"
namespace lab
{
class AudioBus;
class AudioContext;
class AudioSetting;
/*
* Designing anti-aliased (sometimes called virtual analog) digital oscillators that approximate the behavior of
* their analog counterparts is an active area of DSP research. Some seminal techniques are outlined
* below with several references that inform this implementation.
* \ref Band-limited Impulse Trains (BLIT) (Stilson & Smith 1996)
* \ref Band-limited Step Functions (BLEP) (Brandt 2001, Leary & Bright 2009)
* \ref MiniBLEP (minimum phase lowpass filtered prior to BLEP integration) (Brandt 2001)
* \ref Differentiated Parabolic Waveform Oscillators (DPW) (Valimaki 2010)
* \ref Polynomial Band-limited Step Functions (PolyBLEP) (Valimaki et. al 2010)
* \ref Improved Polynomial Transition Regions Algorithm for Alias-Suppressed Signal Synthesis(EPTR) (Ambrits and Bank, 2013)
* \ref Rounding Corners with BLAMP (PolyBLAMP) - (Esqueda, Valimaki, et. al 2016)
* [1] http://www.martin-finke.de/blog/articles/audio-plugins-018-polyblep-oscillator/
* [2] http://metafunction.co.uk/all-about-digital-oscillators-part-2-blits-bleps/
* [3] https://www.experimentalscene.com/articles/minbleps.php
*/
// Waveform shapes supported by PolyBLEPNode. _PolyBLEPCount is a
// sentinel for iteration/validation, not a selectable waveform.
enum class PolyBLEPType
{
TRIANGLE,
SQUARE,
RECTANGLE,
SAWTOOTH,
RAMP,
MODIFIED_TRIANGLE,
MODIFIED_SQUARE,
HALF_WAVE_RECTIFIED_SINE,
FULL_WAVE_RECTIFIED_SINE,
TRIANGULAR_PULSE,
TRAPEZOID_FIXED,
TRAPEZOID_VARIABLE,
_PolyBLEPCount
};
class PolyBlepImpl;
// Band-limited (PolyBLEP) oscillator source; see the references in the
// file comment above for the underlying technique.
class PolyBLEPNode : public AudioScheduledSourceNode
{
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
virtual bool propagatesSilence(ContextRenderLock & r) const override;
// Waveform selection, stored as a setting (see PolyBLEPType).
std::shared_ptr<AudioSetting> m_type;
std::unique_ptr<PolyBlepImpl> polyblep;
public:
PolyBLEPNode(AudioContext & ac);
virtual ~PolyBLEPNode();
static const char* static_name() { return "PolyBLEP"; }
virtual const char* name() const override { return static_name(); }
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override { }
PolyBLEPType type() const;
void setType(PolyBLEPType type);
std::shared_ptr<AudioParam> amplitude() { return m_amplitude; }
std::shared_ptr<AudioParam> frequency() { return m_frequency; }
// NOTE(review): public members duplicating the accessors above —
// presumably for implementation convenience.
std::shared_ptr<AudioParam> m_amplitude; // default 1.0
std::shared_ptr<AudioParam> m_frequency; // hz
// Render `count` frames starting at `offset` into the output.
void processPolyBLEP(ContextRenderLock & r, int bufferSize, int offset, int count);
// Scratch buffer for sample-accurate amplitude values.
AudioFloatArray m_amplitudeValues;
};
} // namespace lab
#endif // lab_poly_blep_node_h

View File

@ -0,0 +1,59 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2013, The LabSound Authors. All rights reserved.
#pragma once
#ifndef POWER_MONITOR_NODE_H
#define POWER_MONITOR_NODE_H
#include "LabSound/core/AudioBasicInspectorNode.h"
#include <mutex>
#include <vector>
namespace lab
{
class AudioSetting;
// params:
// settings: windowSize
//
// Inspector node reporting the instantaneous signal power in dB over a
// configurable window; intended for VU meters or ducking algorithms.
class PowerMonitorNode : public AudioBasicInspectorNode
{
public:
PowerMonitorNode(AudioContext & ac);
virtual ~PowerMonitorNode();
static const char* static_name() { return "PowerMonitor"; }
virtual const char* name() const override { return static_name(); }
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
// instantaneous estimation of power
float db() const { return _db; }
// Could be better. Power is computed on the most recent frame. If the framesize is greater
// than the windowSize, then power is returned for the windowed end-of-frame. If framesize
// is less than windowSize, power is computed only on framesize. This could be a problem
// if the framesize is very large compared to the sample rate, for example, a 4k framesize
// on a 44khz samplerate is going to give you usable power measurements 11 times a second.
// If better resolution is required, it's probably better to use a RecorderNode, and perform
// analysis on the full data stream pulled from it.
//
// The intent of the power monitor node is to provide levels that can be used for a VU meter
// or a ducking algorithm.
//
// Window size in samples over which power is estimated.
void windowSize(int ws);
int windowSize() const;
private:
virtual double tailTime(ContextRenderLock & r) const override { return 0; } // required for BasicInspector
virtual double latencyTime(ContextRenderLock & r) const override { return 0; } // required for BasicInspector
// Most recent power estimate, in dB; read via db().
float _db;
std::shared_ptr<AudioSetting> _windowSize;
};
}
#endif

View File

@ -0,0 +1,86 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef REALTIME_ANALYSER_H
#define REALTIME_ANALYSER_H
#include "LabSound/core/AudioArray.h"
#include "LabSound/extended/AudioContextLock.h"
#include <vector>
namespace lab
{
class AudioBus;
class FFTFrame;
// FFT-based analyser (modeled on the Web Audio AnalyserNode): buffers
// input written from the render thread and exposes smoothed frequency-
// and time-domain data.
class RealtimeAnalyser
{
RealtimeAnalyser(const RealtimeAnalyser &); // noncopyable
public:
RealtimeAnalyser(int fftSize);
virtual ~RealtimeAnalyser();
void reset();
int fftSize() const { return m_fftSize; }
void setFftSize(int fftSize);
// Half the FFT size: number of usable magnitude bins.
uint32_t frequencyBinCount() const { return m_fftSize / 2; }
// dB range used when scaling byte-valued frequency data.
void setMinDecibels(double k) { m_minDecibels = k; }
double minDecibels() const { return m_minDecibels; }
void setMaxDecibels(double k) { m_maxDecibels = k; }
double maxDecibels() const { return m_maxDecibels; }
// 0..1 blend of previous and current magnitude data.
void setSmoothingTimeConstant(double k) { m_smoothingTimeConstant = k; }
double smoothingTimeConstant() const { return m_smoothingTimeConstant; }
void getFloatFrequencyData(std::vector<float> &);
void getFloatTimeDomainData(std::vector<float> &);
// if the destination array size differs from the fft size, the
// data will be interpolated if resample is true. Otherwise, the
// destination array will be resized to fit.
void getByteFrequencyData(std::vector<uint8_t> &, bool resample);
void getByteTimeDomainData(std::vector<uint8_t> &);
// Called from the render thread to feed audio into m_inputBuffer.
void writeInput(ContextRenderLock & r, AudioBus *, int bufferSize);
static const double DefaultSmoothingTimeConstant;
static const double DefaultMinDecibels;
static const double DefaultMaxDecibels;
static const int DefaultFFTSize;
static const int MinFFTSize;
static const int MaxFFTSize;
static const int InputBufferSize;
private:
// The audio thread writes the input audio here.
AudioFloatArray m_inputBuffer;
int m_writeIndex;
int m_fftSize;
std::unique_ptr<FFTFrame> m_analysisFrame;
void doFFTAnalysis();
// doFFTAnalysis() stores the floating-point magnitude analysis data here.
AudioFloatArray m_magnitudeBuffer;
AudioFloatArray & magnitudeBuffer() { return m_magnitudeBuffer; }
// A value between 0 and 1 which averages the previous version of m_magnitudeBuffer with the current analysis magnitude data.
double m_smoothingTimeConstant;
// The range used when converting when using getByteFrequencyData().
double m_minDecibels;
double m_maxDecibels;
};
} // namespace lab
#endif // RealtimeAnalyser_h

View File

@ -0,0 +1,58 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015, The LabSound Authors. All rights reserved.
#ifndef RECORDER_NODE_H
#define RECORDER_NODE_H
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioContext.h"
#include <mutex>
#include <vector>
namespace lab
{
// Captures the audio flowing through it into an in-memory,
// non-interleaved float buffer, guarded by a recursive mutex.
class RecorderNode : public AudioNode
{
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
bool m_recording{false};
std::vector<std::vector<float>> m_data; // non-interleaved
// Guards m_data; recursive because the public helpers may call each
// other while holding the lock.
mutable std::recursive_mutex m_mutex;
float m_sampleRate;
public:
// create a recorder
RecorderNode(AudioContext & r, int channelCount = 2);
// create a recorder with a specific configuration
RecorderNode(AudioContext & r, const AudioStreamConfig & outConfig);
virtual ~RecorderNode();
static const char* static_name() { return "Recorder"; }
virtual const char* name() const override { return static_name(); }
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
// Toggle capture; process() only appends while m_recording is true.
void startRecording() { m_recording = true; }
void stopRecording() { m_recording = false; }
float recordedLengthInSeconds() const;
// create a bus from the recording; it can be used by other nodes
// such as a SampledAudioNode.
std::unique_ptr<AudioBus> createBusFromRecording(bool mixToMono);
// returns true for success
bool writeRecordingToWav(const std::string & filenameWithWavExtension, bool mixToMono);
};
} // end namespace lab
#endif

View File

@ -0,0 +1,28 @@
#pragma once
#include "LabSound/core/AudioNode.h"
#include <string>
#include <vector>
namespace lab {
// Factory/destructor signatures for registered node types.
typedef AudioNode* (*CreateNodeFn)(AudioContext&);
typedef void (*DeleteNodeFn)(AudioNode*);
// Global singleton mapping node type names to factory functions, so
// nodes can be created by string name (e.g. from serialized graphs).
class NodeRegistry
{
struct Detail;
Detail* _detail;
NodeRegistry();
~NodeRegistry();
public:
static NodeRegistry& Instance();
// Register a node type; returns success (false presumably on
// duplicate names — confirm in the implementation).
bool Register(char const*const name, CreateNodeFn, DeleteNodeFn);
// All currently registered type names.
std::vector<std::string> Names() const;
// Instantiate a node by registered name; caller takes ownership.
lab::AudioNode* Create(const std::string& n, lab::AudioContext& ac);
};
} // lab
// Registers all built-in LabSound node types with the registry.
void LabSoundRegistryInit();

View File

@ -0,0 +1,133 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef SFXR_NODE_H
#define SFXR_NODE_H
#include "LabSound/core/AudioScheduledSourceNode.h"
#include "LabSound/core/Macros.h"
namespace lab
{
// Port of DrPetter's sfxr retro sound-effect generator as a scheduled
// source node. Parameters are expressed in sfxr's own unit system; the
// *InHz / *InSfxrUnits helpers convert to and from Hz.
class SfxrNode : public AudioScheduledSourceNode
{
public:
SfxrNode(AudioContext & ac);
virtual ~SfxrNode();
static const char* static_name() { return "SFXR"; }
virtual const char* name() const override { return static_name(); }
// AudioNode
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
std::shared_ptr<AudioSetting> preset() const { return _preset; }
// SfxrNode - values in sfxr units
std::shared_ptr<AudioSetting> waveType() const { return _waveType; }
std::shared_ptr<AudioParam> attackTime() const { return _attack; }
std::shared_ptr<AudioParam> sustainTime() const { return _sustainTime; }
std::shared_ptr<AudioParam> sustainPunch() const { return _sustainPunch; }
std::shared_ptr<AudioParam> decayTime() const { return _decayTime; }
std::shared_ptr<AudioParam> startFrequency() const { return _startFrequency; }
std::shared_ptr<AudioParam> minFrequency() const { return _minFrequency; }
std::shared_ptr<AudioParam> slide() const { return _slide; }
std::shared_ptr<AudioParam> deltaSlide() const { return _deltaSlide; }
std::shared_ptr<AudioParam> vibratoDepth() const { return _vibratoDepth; }
std::shared_ptr<AudioParam> vibratoSpeed() const { return _vibratoSpeed; }
std::shared_ptr<AudioParam> changeAmount() const { return _changeAmount; }
std::shared_ptr<AudioParam> changeSpeed() const { return _changeSpeed; }
std::shared_ptr<AudioParam> squareDuty() const { return _squareDuty; }
std::shared_ptr<AudioParam> dutySweep() const { return _dutySweep; }
std::shared_ptr<AudioParam> repeatSpeed() const { return _repeatSpeed; }
std::shared_ptr<AudioParam> phaserOffset() const { return _phaserOffset; }
std::shared_ptr<AudioParam> phaserSweep() const { return _phaserSweep; }
std::shared_ptr<AudioParam> lpFilterCutoff() const { return _lpFilterCutoff; }
std::shared_ptr<AudioParam> lpFilterCutoffSweep() const { return _lpFilterCutoffSweep; }
// NOTE(review): "Fiter" is a long-standing spelling in the public API;
// renaming would break callers, so it is preserved as-is.
std::shared_ptr<AudioParam> lpFiterResonance() const { return _lpFiterResonance; }
std::shared_ptr<AudioParam> hpFilterCutoff() const { return _hpFilterCutoff; }
std::shared_ptr<AudioParam> hpFilterCutoffSweep() const { return _hpFilterCutoffSweep; }
// utility functions that set the params with Hz values
void setStartFrequencyInHz(float);
void setVibratoSpeedInHz(float);
// sfxr uses a lot of weird parameters. These are utility functions to help with that.
float envelopeTimeInSeconds(float sfxrEnvTime);
float envelopeTimeInSfxrUnits(float t);
float frequencyInSfxrUnits(float hz);
float frequencyInHz(float sfxr);
float vibratoInSfxrUnits(float hz);
float vibratoInHz(float sfxr);
float filterFreqInHz(float sfxr);
float filterFreqInSfxrUnits(float hz);
// Oscillator waveform choices, in sfxr's order.
enum WaveType
{
SQUARE = 0,
SAWTOOTH,
SINE,
NOISE
};
// some presets
void setDefaultBeep();
void coin();
void laser();
void explosion();
void powerUp();
void hit();
void jump();
void select();
// mutate the current sound
void mutate();
void randomize();
private:
virtual bool propagatesSilence(ContextRenderLock & r) const override;
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
// Backing settings/params for the accessors above, in the same order.
std::shared_ptr<AudioSetting> _preset;
std::shared_ptr<AudioSetting> _waveType;
std::shared_ptr<AudioParam> _attack;
std::shared_ptr<AudioParam> _sustainTime;
std::shared_ptr<AudioParam> _sustainPunch;
std::shared_ptr<AudioParam> _decayTime;
std::shared_ptr<AudioParam> _startFrequency;
std::shared_ptr<AudioParam> _minFrequency;
std::shared_ptr<AudioParam> _slide;
std::shared_ptr<AudioParam> _deltaSlide;
std::shared_ptr<AudioParam> _vibratoDepth;
std::shared_ptr<AudioParam> _vibratoSpeed;
std::shared_ptr<AudioParam> _changeAmount;
std::shared_ptr<AudioParam> _changeSpeed;
std::shared_ptr<AudioParam> _squareDuty;
std::shared_ptr<AudioParam> _dutySweep;
std::shared_ptr<AudioParam> _repeatSpeed;
std::shared_ptr<AudioParam> _phaserOffset;
std::shared_ptr<AudioParam> _phaserSweep;
std::shared_ptr<AudioParam> _lpFilterCutoff;
std::shared_ptr<AudioParam> _lpFilterCutoffSweep;
std::shared_ptr<AudioParam> _lpFiterResonance;
std::shared_ptr<AudioParam> _hpFilterCutoff;
std::shared_ptr<AudioParam> _hpFilterCutoffSweep;
// Opaque synthesis engine (sfxr port); owned by this node.
class Sfxr;
Sfxr * sfxr;
};
}
#endif

View File

@ -0,0 +1,80 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#pragma once
#ifndef SPATIALIZATION_NODE_H
#define SPATIALIZATION_NODE_H
#include "LabSound/core/PannerNode.h"
#include <map>
namespace lab
{
// A spherical sound occluder. Attenuation ramps between innerRadius
// (fixed at 75% of the construction radius) and outerRadius, up to
// maxAttenuation.
struct Occluder
{
    // World-space center of the occluding sphere.
    float x = 0.f, y = 0.f, z = 0.f;
    float innerRadius = 0.f;
    float outerRadius = 0.f;
    float maxAttenuation = 0.f;

    // Bug fix: the original default constructor left every member
    // uninitialized (undefined behavior if read before assignment);
    // the in-class initializers above now zero everything.
    Occluder() = default;

    Occluder(float x, float y, float z, float radius)
        : x(x)
        , y(y)
        , z(z)
        , innerRadius(radius * 0.75f)  // attenuation starts at 3/4 radius
        , outerRadius(radius)
        , maxAttenuation(0)
    {
    }

    // Copying is memberwise; the compiler-generated operations are
    // identical to the previously hand-written ones.
    Occluder(const Occluder & rhs) = default;
    Occluder & operator=(const Occluder & rhs) = default;
};
// A collection of Occluder spheres keyed by caller-chosen integer ids;
// computes the combined occlusion between a source and a listener.
class Occluders
{
std::map<int, Occluder> occluders;
public:
// Add or replace the occluder with the given id.
void setOccluder(int id, float x, float y, float z, float radius);
void removeOccluder(int id);
// Combined attenuation factor for the source→listener path.
float occlusion(const FloatPoint3D & sourcePos, const FloatPoint3D & listenerPos) const;
};
typedef std::shared_ptr<Occluders> OccludersPtr;
// A PannerNode that additionally attenuates according to a shared set
// of Occluders placed between source and listener.
class SpatializationNode : public PannerNode
{
// NOTE(review): presumably overrides a virtual in PannerNode (no
// `override` keyword here) — confirm against PannerNode.h.
virtual float distanceConeGain(ContextRenderLock & r);
std::shared_ptr<Occluders> occluders;
public:
SpatializationNode(AudioContext & ac);
virtual ~SpatializationNode() = default;
// Install the (possibly shared) occluder set used during rendering.
void setOccluders(OccludersPtr ptr);
};
}
#endif

View File

@ -0,0 +1,44 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015, The LabSound Authors. All rights reserved.
#pragma once
#ifndef SPECTRAL_MONITOR_NODE_H
#define SPECTRAL_MONITOR_NODE_H
#include "LabSound/core/AudioBasicInspectorNode.h"
#include "LabSound/core/AudioContext.h"
#include <mutex>
#include <vector>
namespace lab
{
// params:
// settings: windowSize
// Inspector node exposing a windowed spectral magnitude analysis of the
// signal passing through it.
class SpectralMonitorNode : public AudioBasicInspectorNode
{
class SpectralMonitorNodeInternal;
SpectralMonitorNodeInternal * internalNode = nullptr;
public:
SpectralMonitorNode(AudioContext & ac);
virtual ~SpectralMonitorNode();
static const char* static_name() { return "SpectralMonitor"; }
virtual const char* name() const override { return static_name(); }
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override;
// Copy the current spectral magnitudes into `result`.
void spectralMag(std::vector<float> & result);
// Analysis window size in samples.
void windowSize(unsigned int ws);
unsigned int windowSize() const;
private:
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
};
}
#endif

View File

@ -0,0 +1,45 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015, The LabSound Authors. All rights reserved.
#ifndef SUPERSAW_NODE_H
#define SUPERSAW_NODE_H
#include "LabSound/core/AudioContext.h"
#include "LabSound/core/AudioNode.h"
#include "LabSound/core/AudioParam.h"
#include "LabSound/core/AudioScheduledSourceNode.h"
namespace lab
{
// "Supersaw" source: a bank of detuned sawtooth oscillators (count set
// via the sawCount setting) mixed into one output.
class SupersawNode : public AudioScheduledSourceNode
{
class SupersawNodeInternal;
std::unique_ptr<SupersawNodeInternal> internalNode;
public:
SupersawNode(AudioContext & ac);
virtual ~SupersawNode();
static const char* static_name() { return "SuperSaw"; }
virtual const char* name() const override { return static_name(); }
// Number of saw oscillators in the bank.
std::shared_ptr<AudioSetting> sawCount() const;
std::shared_ptr<AudioParam> frequency() const;
// Detune spread across the bank.
std::shared_ptr<AudioParam> detune() const;
void update(ContextRenderLock & r); // call if sawCount is changed. CBB: update automatically
private:
virtual void process(ContextRenderLock &, int bufferSize) override;
virtual void reset(ContextRenderLock &) override {}
virtual double tailTime(ContextRenderLock & r) const override { return 0; }
virtual double latencyTime(ContextRenderLock & r) const override { return 0; }
virtual bool propagatesSilence(ContextRenderLock & r) const override;
};
}
#endif

View File

@ -0,0 +1,102 @@
// SPDX-License-Identifier: BSD-2-Clause
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#ifndef LABSOUND_UTIL_H
#define LABSOUND_UTIL_H
#include <algorithm>
#include <random>
#include <string>
// clang-format off
// Delete copy construction/assignment for class C.
#define NO_COPY(C) C(const C &) = delete; C & operator = (const C &) = delete
// Delete copy and move operations for class C.
// NOTE(review): deletes operator=(const C &&) — a const rvalue-ref
// overload — rather than the conventional operator=(C &&); kept as-is
// since declared copy assignment already suppresses implicit moves.
#define NO_MOVE(C) NO_COPY(C); C(C &&) = delete; C & operator = (const C &&) = delete
// Windows headers define min/max macros that break std::min/std::max.
#if defined(_MSC_VER)
# if defined(min)
# undef min
# endif
# if defined(max)
# undef max
# endif
#endif
namespace lab
{
// Produces uniformly distributed random values, seeded once from the
// system entropy source (std::random_device) at construction.
class UniformRandomGenerator
{
    std::random_device rd;
    std::mt19937_64 gen;
    std::uniform_real_distribution<float> dist_full {0.f, 1.f};

public:
    UniformRandomGenerator() : rd(), gen(rd()) {}

    // Uniform float in [0.f, 1.f].
    float random_float() { return dist_full(gen); }

    // Uniform float in [0.f, max].
    float random_float(float max)
    {
        std::uniform_real_distribution<float> span(0.f, max);
        return span(gen);
    }

    // Uniform float in [min, max].
    float random_float(float min, float max)
    {
        std::uniform_real_distribution<float> span(min, max);
        return span(gen);
    }

    // Uniform integer in [0, max] (inclusive).
    uint32_t random_uint(uint32_t max)
    {
        std::uniform_int_distribution<uint32_t> span(0, max);
        return span(gen);
    }

    // Uniform integer in [min, max] (inclusive).
    int32_t random_int(int32_t min, int32_t max)
    {
        std::uniform_int_distribution<int32_t> span(min, max);
        return span(gen);
    }
};
// clang-format on
// Rounds v up to the next power of two; a value that is already a power of
// two is returned unchanged (0 maps to 0).
// Generalized: the original bit-smearing sequence stopped at a 16-bit shift,
// which produced wrong results for 64-bit integers above 2^32. The shift
// distance is now driven by the width of T, so any unsigned integer width
// (and the original 32-bit uses) are handled correctly.
template <typename T>
inline T RoundNextPow2(T v)
{
    v--;
    // Smear the highest set bit into every lower position.
    for (unsigned int shift = 1; shift < sizeof(T) * 8; shift <<= 1)
        v |= v >> shift;
    v++;
    return v;
}
// Clamps value (of source type S) into [min, max] and converts it to T.
// Comparisons are performed in S after casting the bounds, matching the
// precision of the incoming value.
template <typename S, typename T>
inline T clampTo(S value, T min, T max)
{
    const S hi = static_cast<S>(max);
    const S lo = static_cast<S>(min);
    if (value >= hi) return max;
    return (value <= lo) ? min : static_cast<T>(value);
}
// Hard-coded to the IRCAM HRTF Database
struct HRTFDatabaseInfo
{
std::string subjectName;
std::string searchPath;
float sampleRate;
int minElevation = -45;
int maxElevation = 90;
int rawElevationAngleSpacing = 15;
// Number of elevations loaded from resource
int numberOfRawElevations = 10; // -45 -> +90 (each 15 degrees)
// Interpolates by this factor to get the total number of elevations from every elevation loaded from resource
int interpolationFactor = 1;
// Total number of elevations after interpolation.
int numTotalElevations;
HRTFDatabaseInfo(const std::string & subjectName, const std::string & searchPath, float sampleRate)
: subjectName(subjectName)
, searchPath(searchPath)
, sampleRate(sampleRate)
{
numTotalElevations = numberOfRawElevations * interpolationFactor;
}
// Returns the index for the correct HRTFElevation given the elevation angle.
int indexFromElevationAngle(double elevationAngle)
{
elevationAngle = std::max((double) minElevation, elevationAngle);
elevationAngle = std::min((double) maxElevation, elevationAngle);
return (int) (interpolationFactor * (elevationAngle - minElevation) / rawElevationAngleSpacing);
}
};
}
#endif

View File

@ -0,0 +1,162 @@
// License: BSD 3 Clause
// Copyright (C) 2010, Google Inc. All rights reserved.
// Copyright (C) 2015+, The LabSound Authors. All rights reserved.
#include "LabSound/core/Macros.h"
#if defined(LABSOUND_PLATFORM_OSX) && !defined(WEBAUDIO_KISSFFT)
#include "internal/Assertions.h"
#include "internal/FFTFrame.h"
#include "internal/VectorMath.h"
namespace lab
{
// Largest supported FFT size expressed as a power of two (2^24 points).
const int kMaxFFTPow2Size = 24;

// Lazily-allocated global table of shared vDSP FFT setups, indexed by
// log2(fftSize); populated by fftSetupForSize() and released by cleanup().
FFTSetup * FFTFrame::fftSetups = 0;
// Normal constructor: allocates for a given fftSize
// Allocates the split-complex real/imag arrays and points the vDSP frame
// at their storage. fftSize must be a power of two.
FFTFrame::FFTFrame(int fftSize)
    : m_realData(fftSize)
    , m_imagData(fftSize)
{
    m_FFTSize = fftSize;
    m_log2FFTSize = static_cast<size_t>(log2(fftSize));

    // We only allow power of two
    ASSERT(1UL << m_log2FFTSize == m_FFTSize);

    // Lazily create and share fftSetup with other frames
    m_FFTSetup = fftSetupForSize(fftSize);

    // Setup frame data
    m_frame.realp = m_realData.data();
    m_frame.imagp = m_imagData.data();
}
// Creates a blank/empty frame (interpolate() must later be called)
// All sizes and pointers are zeroed; the frame is unusable until populated.
// NOTE(review): m_FFTSetup is left uninitialized here — presumably assigned
// when the frame is later filled in; confirm against interpolate().
FFTFrame::FFTFrame()
    : m_realData(0)
    , m_imagData(0)
{
    // Later will be set to correct values when interpolate() is called
    m_frame.realp = 0;
    m_frame.imagp = 0;

    m_FFTSize = 0;
    m_log2FFTSize = 0;
}
// Copy constructor
// Shares the source frame's (globally cached) FFT setup and deep-copies its
// split-complex spectral data into freshly allocated arrays.
FFTFrame::FFTFrame(const FFTFrame & frame)
    : m_FFTSize(frame.m_FFTSize)
    , m_log2FFTSize(frame.m_log2FFTSize)
    , m_FFTSetup(frame.m_FFTSetup)
    , m_realData(frame.m_FFTSize)
    , m_imagData(frame.m_FFTSize)
{
    // Setup frame data
    m_frame.realp = m_realData.data();
    m_frame.imagp = m_imagData.data();

    // Copy/setup frame data
    size_t nbytes = sizeof(float) * m_FFTSize;
    memcpy(realData(), frame.m_frame.realp, nbytes);
    memcpy(imagData(), frame.m_frame.imagp, nbytes);
}
// The data arrays are owning members and free themselves; the shared
// FFTSetup is deliberately NOT destroyed here — see FFTFrame::cleanup().
FFTFrame::~FFTFrame()
{
}
void FFTFrame::cleanup()
{
if (!fftSetups)
return;
for (int i = 0; i < kMaxFFTPow2Size; ++i)
{
if (fftSetups[i])
vDSP_destroy_fftsetup(fftSetups[i]);
}
free(fftSetups);
fftSetups = 0;
}
// Pointwise complex multiply of this frame's spectrum by |frame|'s
// (frequency-domain convolution); the result is written in place.
void FFTFrame::multiply(const FFTFrame & frame)
{
    FFTFrame & frame1 = *this;
    const FFTFrame & frame2 = frame;

    float * realP1 = frame1.realData();
    float * imagP1 = frame1.imagData();
    const float * realP2 = frame2.realData();
    const float * imagP2 = frame2.imagData();

    size_t halfSize = m_FFTSize / 2;

    // In vDSP's packed real-FFT format realp[0] holds DC and imagp[0] holds
    // the Nyquist bin; save them so zvmul's cross terms don't corrupt them.
    float real0 = realP1[0];
    float imag0 = imagP1[0];

    // Complex multiply
    VectorMath::zvmul(realP1, imagP1, realP2, imagP2, realP1, imagP1, halfSize);

    // Multiply the packed DC/nyquist component
    // (both are purely real, so they multiply element-wise)
    realP1[0] = real0 * realP2[0];
    imagP1[0] = imag0 * imagP2[0];

    // Scale accounts for vecLib's peculiar scaling
    // This ensures the right scaling all the way back to inverse FFT
    float scale = 0.5f;

    VectorMath::vsmul(realP1, 1, &scale, realP1, 1, halfSize);
    VectorMath::vsmul(imagP1, 1, &scale, imagP1, 1, halfSize);
}
// Forward real FFT of |data| (m_FFTSize time-domain samples) into this
// frame's packed split-complex spectrum.
void FFTFrame::computeForwardFFT(const float * data)
{
    // Deinterleave the real input into split-complex form (vDSP_ctoz maps
    // even samples to realp, odd samples to imagp), then run the in-place
    // packed real-to-complex FFT.
    vDSP_ctoz((DSPComplex *) data, 2, &m_frame, 1, m_FFTSize / 2);
    vDSP_fft_zrip(m_FFTSetup, &m_frame, 1, m_log2FFTSize, FFT_FORWARD);
}
// Inverse FFT of this frame's packed spectrum into |data| (m_FFTSize
// time-domain samples).
void FFTFrame::computeInverseFFT(float * data)
{
    // In-place packed complex-to-real inverse FFT, then re-interleave the
    // split-complex result back into |data|.
    vDSP_fft_zrip(m_FFTSetup, &m_frame, 1, m_log2FFTSize, FFT_INVERSE);
    vDSP_ztoc(&m_frame, 1, (DSPComplex *) data, 2, m_FFTSize / 2);

    // Do final scaling so that x == IFFT(FFT(x))
    // (vDSP's forward+inverse real FFT round trip scales the signal by
    // 2 * fftSize, hence the 0.5 / m_FFTSize factor here).
    float scale = 0.5f / m_FFTSize;
    vDSP_vsmul(data, 1, &scale, data, 1, m_FFTSize);
}
// Returns (lazily creating) the shared vDSP FFT setup for the given size.
// Setups are cached in a global table indexed by log2(fftSize) and shared by
// all frames; they are released in FFTFrame::cleanup().
FFTSetup FFTFrame::fftSetupForSize(int fftSize)
{
    if (!fftSetups)
    {
        // calloc replaces the previous malloc + memset pair.
        fftSetups = (FFTSetup *) calloc(kMaxFFTPow2Size, sizeof(FFTSetup));
    }

    int pow2size = static_cast<int>(log2(fftSize));
    ASSERT(pow2size < kMaxFFTPow2Size);

    // Fix: keep the bounds check in release builds too. Previously only the
    // ASSERT guarded the index, so an oversized fftSize indexed past the end
    // of the table when assertions compile away.
    if (pow2size < 0 || pow2size >= kMaxFFTPow2Size)
        return 0;

    if (!fftSetups[pow2size])
        fftSetups[pow2size] = vDSP_create_fftsetup(pow2size, FFT_RADIX2);

    return fftSetups[pow2size];
}
// Accessor for the real half of the packed split-complex spectrum.
float * FFTFrame::realData() const
{
    return m_frame.realp;
}
// Accessor for the imaginary half of the packed split-complex spectrum
// (element 0 carries the Nyquist bin in vDSP's packing).
float * FFTFrame::imagData() const
{
    return m_frame.imagp;
}
} // namespace lab
#endif  // defined(LABSOUND_PLATFORM_OSX) && !defined(WEBAUDIO_KISSFFT)

View File

@ -0,0 +1,313 @@
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (C) 2020, The LabSound Authors. All rights reserved.
#include "AudioDevice_RtAudio.h"
#include "internal/Assertions.h"
#include "internal/VectorMath.h"
#include "LabSound/core/AudioDevice.h"
#include "LabSound/core/AudioHardwareDeviceNode.h"
#include "LabSound/core/AudioNode.h"
#include "LabSound/extended/Logging.h"
#include "RtAudio.h"
namespace lab
{
////////////////////////////////////////////////////
// Platform/backend specific static functions //
////////////////////////////////////////////////////
// Enumerates every device RtAudio can see and translates RtAudio's
// DeviceInfo into LabSound's AudioDeviceInfo. Throws std::runtime_error if
// RtAudio reports no devices at all; devices that fail probing are logged
// and skipped.
std::vector<AudioDeviceInfo> AudioDevice::MakeAudioDeviceList()
{
    // Display names indexed by the integer value of RtAudio::Api.
    // NOTE(review): this table must stay in sync with the bundled RtAudio's
    // Api enum order — verify when upgrading RtAudio.
    std::vector<std::string> rt_audio_apis{
        "unspecified",
        "linux_alsa",
        "linux_pulse",
        "linux_oss",
        "unix_jack",
        "macos_coreaudio",
        "windows_wasapi",
        "windows_asio",
        "windows_directsound",
        "rtaudio_dummy"};

    RtAudio rt;
    if (rt.getDeviceCount() <= 0) throw std::runtime_error("no rtaudio devices available!");

    const auto api_enum = rt.getCurrentApi();
    LOG_INFO("using rtaudio api %s", rt_audio_apis[static_cast<int>(api_enum)].c_str());

    // Widens RtAudio's unsigned sample-rate list to LabSound's float list.
    auto to_flt_vec = [](const std::vector<unsigned int> & vec) {
        std::vector<float> result;
        for (auto & i : vec) result.push_back(static_cast<float>(i));
        return result;
    };

    std::vector<AudioDeviceInfo> devices;
    for (uint32_t i = 0; i < rt.getDeviceCount(); ++i)
    {
        RtAudio::DeviceInfo info = rt.getDeviceInfo(i);

        if (info.probed)
        {
            AudioDeviceInfo lab_device_info;
            lab_device_info.index = i;
            lab_device_info.identifier = info.name;
            lab_device_info.num_output_channels = info.outputChannels;
            lab_device_info.num_input_channels = info.inputChannels;
            lab_device_info.supported_samplerates = to_flt_vec(info.sampleRates);
            lab_device_info.nominal_samplerate = static_cast<float>(info.preferredSampleRate);
            lab_device_info.is_default_output = info.isDefaultOutput;
            lab_device_info.is_default_input = info.isDefaultInput;

            devices.push_back(lab_device_info);
        }
        else
        {
            LOG_ERROR("probing failed %s", info.name.c_str());
        }
    }

    return devices;
}
// Reports RtAudio's default output device. The second member of the result
// is false when no audio devices exist at all.
AudioDeviceIndex AudioDevice::GetDefaultOutputAudioDeviceIndex() noexcept
{
    RtAudio probe;
    if (probe.getDeviceCount() > 0)
        return {probe.getDefaultOutputDevice(), true};
    return {0, false};
}
// Reports RtAudio's default input device. The second member of the result
// is false when no audio devices exist at all.
AudioDeviceIndex AudioDevice::GetDefaultInputAudioDeviceIndex() noexcept
{
    RtAudio probe;
    if (probe.getDeviceCount() > 0)
        return {probe.getDefaultInputDevice(), true};
    return {0, false};
}
// Factory for the platform audio device; this backend always produces an
// RtAudio-based implementation. Returns a raw pointer allocated with new.
AudioDevice * AudioDevice::MakePlatformSpecificDevice(
    AudioDeviceRenderCallback & callback,
    const AudioStreamConfig & outputConfig,
    const AudioStreamConfig & inputConfig)
{
    return new AudioDevice_RtAudio(callback, outputConfig, inputConfig);
}
/////////////////////////////
// AudioDevice_RtAudio //
/////////////////////////////
// Clipping bounds applied to every sample exchanged with the device
// ([-1, 1], i.e. 0 dBFS).
const float kLowThreshold = -1.0f;
const float kHighThreshold = 1.0f;
// When false, streams are opened RTAUDIO_NONINTERLEAVED: each channel is a
// contiguous plane rather than samples interleaved per frame.
const bool kInterleaved = false;
// Opens (but does not start) an RtAudio stream matching the requested
// output/input configurations. Input channel counts are clamped to what the
// device actually offers; failures are logged rather than thrown.
AudioDevice_RtAudio::AudioDevice_RtAudio(
    AudioDeviceRenderCallback & callback,
    const AudioStreamConfig & _outputConfig, const AudioStreamConfig & _inputConfig)
    : _callback(callback)
    , outputConfig(_outputConfig)
    , inputConfig(_inputConfig)
{
    if (rtaudio_ctx.getDeviceCount() < 1)
    {
        LOG_ERROR("no audio devices available");
    }

    rtaudio_ctx.showWarnings(true);

    // Translate AudioStreamConfig into RTAudio-native data structures
    RtAudio::StreamParameters outputParams;
    outputParams.deviceId = outputConfig.device_index;
    outputParams.nChannels = outputConfig.desired_channels;
    LOG_INFO("using output device idx: %i", outputConfig.device_index);
    if (outputConfig.device_index >= 0) LOG_INFO("using output device name: %s", rtaudio_ctx.getDeviceInfo(outputParams.deviceId).name.c_str());

    RtAudio::StreamParameters inputParams;
    inputParams.deviceId = inputConfig.device_index;
    inputParams.nChannels = inputConfig.desired_channels;
    LOG_INFO("using input device idx: %i", inputConfig.device_index);
    if (inputConfig.device_index >= 0) LOG_INFO("using input device name: %s", rtaudio_ctx.getDeviceInfo(inputParams.deviceId).name.c_str());

    // The output configuration's sample rate is treated as authoritative for
    // the whole (duplex) stream.
    authoritativeDeviceSampleRateAtRuntime = outputConfig.desired_samplerate;

    if (inputConfig.desired_channels > 0)
    {
        auto inDeviceInfo = rtaudio_ctx.getDeviceInfo(inputParams.deviceId);
        if (inDeviceInfo.probed && inDeviceInfo.inputChannels > 0)
        {
            // ensure that the number of input channels buffered does not exceed the number available.
            inputParams.nChannels = (inDeviceInfo.inputChannels < inputConfig.desired_channels) ? inDeviceInfo.inputChannels : inputConfig.desired_channels;
            inputConfig.desired_channels = inputParams.nChannels;
            LOG_INFO("[AudioDevice_RtAudio] adjusting number of input channels: %i ", inputParams.nChannels);
        }
    }

    RtAudio::StreamOptions options;
    // RTAUDIO_MINIMIZE_LATENCY tells RtAudio to use the hardware's minimum buffer size
    // which is not desirable as the minimum may be too small, and a non-power of 2.
    //options.flags = RTAUDIO_MINIMIZE_LATENCY;
    if (!kInterleaved) options.flags |= RTAUDIO_NONINTERLEAVED;

    // Note! RtAudio has a hard limit on a power of two buffer size, non-power of two sizes will result in
    // heap corruption, for example, when dac.stopStream() is invoked.
    uint32_t bufferFrames = AudioNode::ProcessingSizeInFrames;

    samplingInfo.epoch[0] = samplingInfo.epoch[1] = std::chrono::high_resolution_clock::now();

    try
    {
        // Input is only requested when at least one input channel survived
        // the availability checks above.
        rtaudio_ctx.openStream(&outputParams, (inputParams.nChannels > 0) ? &inputParams : nullptr, RTAUDIO_FLOAT32,
                               static_cast<unsigned int>(authoritativeDeviceSampleRateAtRuntime), &bufferFrames, &rt_audio_callback, this, &options);
    }
    catch (const RtAudioError & e)
    {
        LOG_ERROR(e.getMessage().c_str());
    }
}
// Closes the stream if construction managed to open one.
AudioDevice_RtAudio::~AudioDevice_RtAudio()
{
    if (rtaudio_ctx.isStreamOpen()) rtaudio_ctx.closeStream();
}
// Begins audio I/O on the previously-opened stream. RtAudio errors are
// logged and swallowed rather than propagated.
void AudioDevice_RtAudio::start()
{
    ASSERT(authoritativeDeviceSampleRateAtRuntime != 0.f);  // something went very wrong
    try
    {
        rtaudio_ctx.startStream();
    }
    catch (const RtAudioError & e)
    {
        LOG_ERROR(e.getMessage().c_str());
    }
}
// Stops audio I/O. RtAudio errors are logged and swallowed rather than
// propagated.
void AudioDevice_RtAudio::stop()
{
    try
    {
        rtaudio_ctx.stopStream();
    }
    catch (const RtAudioError & e)
    {
        LOG_ERROR(e.getMessage().c_str());
    }
}
// Queries the stream's running state; reports false (after logging) if the
// query itself fails.
bool AudioDevice_RtAudio::isRunning() const
{
    try
    {
        return rtaudio_ctx.isStreamRunning();
    }
    catch (const RtAudioError & e)
    {
        LOG_ERROR(e.getMessage().c_str());
        return false;
    }
}
// Pulls on our provider to get rendered audio stream.
void AudioDevice_RtAudio::render(int numberOfFrames, void * outputBuffer, void * inputBuffer)
{
float * fltOutputBuffer = reinterpret_cast<float *>(outputBuffer);
float * fltInputBuffer = reinterpret_cast<float *>(inputBuffer);
if (outputConfig.desired_channels)
{
if (!_renderBus || _renderBus->length() < numberOfFrames)
{
_renderBus.reset(new AudioBus(outputConfig.desired_channels, numberOfFrames, true));
_renderBus->setSampleRate(authoritativeDeviceSampleRateAtRuntime);
}
}
if (inputConfig.desired_channels)
{
if (!_inputBus || _inputBus->length() < numberOfFrames)
{
_inputBus.reset(new AudioBus(inputConfig.desired_channels, numberOfFrames, true));
_inputBus->setSampleRate(authoritativeDeviceSampleRateAtRuntime);
}
}
// copy the input buffer
if (inputConfig.desired_channels)
{
if (kInterleaved)
{
for (uint32_t i = 0; i < inputConfig.desired_channels; ++i)
{
AudioChannel * channel = _inputBus->channel(i);
float * src = &fltInputBuffer[i];
VectorMath::vclip(src, 1, &kLowThreshold, &kHighThreshold, channel->mutableData(), inputConfig.desired_channels, numberOfFrames);
}
}
else
{
for (uint32_t i = 0; i < inputConfig.desired_channels; ++i)
{
AudioChannel * channel = _inputBus->channel(i);
float * src = &fltInputBuffer[i * numberOfFrames];
VectorMath::vclip(src, 1, &kLowThreshold, &kHighThreshold, channel->mutableData(), 1, numberOfFrames);
}
}
}
// Update sampling info
const int32_t index = 1 - (samplingInfo.current_sample_frame & 1);
const uint64_t t = samplingInfo.current_sample_frame & ~1;
samplingInfo.sampling_rate = authoritativeDeviceSampleRateAtRuntime;
samplingInfo.current_sample_frame = t + numberOfFrames + index;
samplingInfo.current_time = samplingInfo.current_sample_frame / static_cast<double>(samplingInfo.sampling_rate);
samplingInfo.epoch[index] = std::chrono::high_resolution_clock::now();
// Pull on the graph
_callback.render(_inputBus.get(), _renderBus.get(), numberOfFrames, samplingInfo);
// Then deliver the rendered audio back to rtaudio, ready for the next callback
if (outputConfig.desired_channels)
{
// Clamp values at 0db (i.e., [-1.0, 1.0]) and also copy result to the DAC output buffer
if (kInterleaved)
{
for (uint32_t i = 0; i < outputConfig.desired_channels; ++i)
{
AudioChannel * channel = _renderBus->channel(i);
float * dst = &fltOutputBuffer[i];
VectorMath::vclip(channel->data(), 1, &kLowThreshold, &kHighThreshold, dst, outputConfig.desired_channels, numberOfFrames);
}
}
else
{
for (uint32_t i = 0; i < outputConfig.desired_channels; ++i)
{
AudioChannel * channel = _renderBus->channel(i);
float * dst = &fltOutputBuffer[i * numberOfFrames];
VectorMath::vclip(channel->data(), 1, &kLowThreshold, &kHighThreshold, dst, 1, numberOfFrames);
}
}
}
}
// RtAudio trampoline: invoked on the audio thread once per hardware buffer.
// Pre-zeroes the output (so any unrendered region is silence) and forwards
// the buffers to AudioDevice_RtAudio::render via the userData pointer.
int rt_audio_callback(void * outputBuffer, void * inputBuffer, unsigned int nBufferFrames, double streamTime, RtAudioStreamStatus status, void * userData)
{
    AudioDevice_RtAudio * self = reinterpret_cast<AudioDevice_RtAudio *>(userData);
    float * fltOutputBuffer = reinterpret_cast<float *>(outputBuffer);

    // Fix: guard the memset — it was previously unconditional, which writes
    // through a null pointer for an input-only stream (no output buffer) and
    // computes a zero-length fill as desired_channels * frames regardless.
    if (fltOutputBuffer && self->outputConfig.desired_channels > 0)
        memset(fltOutputBuffer, 0, nBufferFrames * self->outputConfig.desired_channels * sizeof(float));

    self->render(nBufferFrames, fltOutputBuffer, inputBuffer);
    return 0;
}
} // namespace lab

View File

@ -0,0 +1,48 @@
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (C) 2020, The LabSound Authors. All rights reserved.
#pragma once
#ifndef labsound_audiodevice_rtaudio_hpp
#define labsound_audiodevice_rtaudio_hpp
#include "LabSound/core/AudioBus.h"
#include "LabSound/core/AudioHardwareDeviceNode.h"
#include "LabSound/core/AudioNode.h"
#include "RtAudio.h"
namespace lab
{
// RtAudio-backed implementation of AudioDevice. Owns the RtAudio stream
// context, the scratch render/input busses, and the graph callback it pulls.
class AudioDevice_RtAudio : public AudioDevice
{
    AudioDeviceRenderCallback & _callback;

    // Scratch busses reused across callbacks; grown on demand by render().
    std::unique_ptr<AudioBus> _renderBus;
    std::unique_ptr<AudioBus> _inputBus;

    RtAudio rtaudio_ctx;
    SamplingInfo samplingInfo;

public:
    AudioDevice_RtAudio(AudioDeviceRenderCallback &, const AudioStreamConfig & outputConfig, const AudioStreamConfig & inputConfig);
    virtual ~AudioDevice_RtAudio();

    AudioStreamConfig outputConfig;
    AudioStreamConfig inputConfig;

    // Sample rate the stream is driven at; assigned during construction from
    // the output configuration, 0 beforehand.
    float authoritativeDeviceSampleRateAtRuntime{0.f};

    // AudioDevice Interface
    // Called from the realtime audio thread via rt_audio_callback.
    void render(int numberOfFrames, void * outputBuffer, void * inputBuffer);
    virtual void start() override final;
    virtual void stop() override final;
    virtual bool isRunning() const override final;
};

// RtAudio C-style callback: forwards each hardware buffer to
// AudioDevice_RtAudio::render through the userData pointer.
int rt_audio_callback(void * outputBuffer, void * inputBuffer, unsigned int nBufferFrames, double streamTime, RtAudioStreamStatus status, void * userData);
} // namespace lab
#endif // labsound_audiodevice_rtaudio_hpp

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,33 @@
RtAudio provides a common API (Application Programming Interface)
for realtime audio input/output across Linux (native ALSA, Jack,
and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
(DirectSound, ASIO and WASAPI) operating systems.
RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
RtAudio: realtime audio i/o C++ classes
Copyright (c) 2001-2014 Gary P. Scavone
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
Any person wishing to distribute modifications to the Software is
asked to send the modifications to the original developer so that
they can be incorporated into the canonical version. This is,
however, not a binding provision of this license.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Some files were not shown because too many files have changed in this diff Show More