Audio Pipeline

Author: Johannes Schriewer, 2024-02-28 21:26:01 +01:00
parent 7d7f851de8
commit ccbd21e64f
35 changed files with 1458 additions and 0 deletions

audio/audio.c (new file, 150 lines)
#include <stdlib.h>
#include <stdio.h>
#include <stddef.h>
#include "audio.h"
#include "audio_internal.h"
AudioPipelineStatus audio_pipeline_start(AudioPipeline *pipeline) {
AudioPipelineStatus result = pipeline->source->start(pipeline->source);
pipeline->status = result;
if (pipeline->statusCallback != NULL) {
pipeline->statusCallback(pipeline, result);
}
return result;
}
AudioPipelineStatus audio_pipeline_reset(AudioPipeline *pipeline) {
AudioPipelineStatus result = pipeline->source->reset(pipeline->source);
pipeline->status = result;
if (pipeline->statusCallback != NULL) {
pipeline->statusCallback(pipeline, result);
}
return result;
}
AudioPipelineStatus audio_pipeline_stop(AudioPipeline *pipeline) {
AudioPipelineStatus result = pipeline->source->stop(pipeline->source);
pipeline->status = result;
if (pipeline->statusCallback != NULL) {
pipeline->statusCallback(pipeline, result);
}
return result;
}
AudioPipeline *audio_pipeline_assemble(AudioPipelineElement *source, ...) {
va_list list, list_backup;
AudioPipelineElement *item, *last;
AudioPipeline *pipeline = malloc(sizeof(AudioPipeline));
pipeline->start = audio_pipeline_start;
pipeline->reset = audio_pipeline_reset;
pipeline->stop = audio_pipeline_stop;
pipeline->source = source;
pipeline->status = PipelineStopped;
pipeline->statusCallback = NULL;
va_start(list, source);
va_copy(list_backup, list);
if (source == NULL) {
fprintf(stderr, "ERROR: Source did not initialize, giving up!\n");
goto fail;
}
source->pipeline = pipeline;
last = source;
item = va_arg(list, AudioPipelineElement *);
while (item != NULL) {
if (item->type == AudioElementSource) {
fprintf(stderr, "ERROR: You cannot have a source in the middle of the pipeline, offending element: %s\n", item->describe(item));
goto fail;
}
AudioPipelineStatus result = item->link(item, last);
if (result != PipelineStopped) {
fprintf(stderr, "ERROR: Could not link audio pipeline elements %s and %s\n", last->describe(last), item->describe(item));
goto fail;
}
item->pipeline = pipeline;
last = item;
if (item->type == AudioElementSink) {
break;
}
item = va_arg(list, AudioPipelineElement *);
}
if (last->type != AudioElementSink) {
fprintf(stderr, "ERROR: Pipeline has no sink!\n");
goto fail;
}
pipeline->sink = last;
va_end(list);
va_end(list_backup);
return pipeline;
fail:
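// Failure cleanup: walk the saved copy of the argument list and destroy every
// element the caller handed in, then destroy the source and release the pipeline.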
va_end(list);
item = va_arg(list_backup, AudioPipelineElement *);
while (item != NULL) {
item->destroy(item);
item = va_arg(list_backup, AudioPipelineElement *);
}
if (pipeline->source != NULL) {
pipeline->source->destroy(pipeline->source);
}
free(pipeline);
va_end(list_backup);
return NULL;
}
void audio_pipeline_destroy(AudioPipeline *pipeline) {
pipeline->stop(pipeline);
AudioPipelineElement *item = pipeline->source->next;
while (item != NULL) {
AudioPipelineElement *next = item->next;
item->destroy(item);
item = next;
}
pipeline->source->destroy(pipeline->source);
free(pipeline);
}
AudioBuffer *alloc_audio_buffer(uint32_t size) {
AudioBuffer *buffer = malloc(sizeof(AudioBuffer));
if (buffer == NULL) {
return NULL;
}
buffer->buf_size = size;
if (size > 0) {
buffer->data = calloc(1, size);
if (buffer->data == NULL) {
free(buffer);
return NULL;
}
} else {
buffer->data = NULL;
}
return buffer;
}
void free_audio_buffer(AudioBuffer *buffer) {
if (buffer == NULL) {
return;
}
if (buffer->data != NULL) {
free(buffer->data);
}
free(buffer);
}

audio/audio_decoder_mp3.c (new file, 179 lines)
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include "audio.h"
#include "audio_internal.h"
#include "audio_demuxer_mp3.h"
#include "deps/mp3/stream.h"
#include "deps/mp3/frame.h"
#include "deps/mp3/synth.h"
typedef struct mad_stream MADStream;
typedef struct mad_frame MADFrame;
typedef struct mad_synth MADSynth;
typedef struct _DecoderMP3Context {
MADStream *stream;
MADFrame *frame;
MADSynth *synth;
AudioBuffer *outputBuffer;
} DecoderMP3Context;
static inline int16_t scale(mad_fixed_t sample) {
/* round */
sample += (1L << (MAD_F_FRACBITS - 16));
/* clip */
if (sample >= MAD_F_ONE)
sample = MAD_F_ONE - 1;
else if (sample < -MAD_F_ONE)
sample = -MAD_F_ONE;
/* quantize */
return sample >> (MAD_F_FRACBITS + 1 - 16);
}
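/*
 * Push one demuxed MP3 frame through libmad: decode it, synthesize PCM, re-link the
 * downstream element if the channel count or sample rate changed, then interleave the
 * samples as signed 16-bit little-endian and pass them on. Recoverable stream errors
 * only skip the frame; unrecoverable ones put the pipeline into the error state.
 */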
AudioPipelineStatus decoder_mp3_push(AudioPipelineElement *self, AudioBuffer *buffer) {
DecoderMP3Context *context = (DecoderMP3Context *)self->ctx;
int result = 0;
mad_stream_buffer(context->stream, buffer->data, buffer->buf_size);
result = mad_frame_decode(context->frame, context->stream);
if (result == -1) {
fprintf(stderr, "ERROR: frame decoding error 0x%04x (%s)\n",
context->stream->error, mad_stream_errorstr(context->stream));
if (!MAD_RECOVERABLE(context->stream->error)) {
mad_frame_mute(context->frame);
return PipelineError;
}
return PipelineRunning;
}
mad_synth_frame(context->synth, context->frame);
bool relink_needed = false;
if ((self->channels == 0) || (self->channels != context->synth->pcm.channels)) {
self->channels = context->synth->pcm.channels;
relink_needed = true;
}
if ((self->sample_rate == 0) || (self->sample_rate != context->synth->pcm.samplerate)) {
self->sample_rate = context->synth->pcm.samplerate;
relink_needed = true;
}
if (relink_needed) {
AudioPipelineStatus result = self->next->link(self->next, self);
if (result != PipelineStopped) {
fprintf(stderr, "ERROR: Re-link failed\n");
return PipelineError;
}
}
uint16_t numSamples = context->synth->pcm.length;
if ((context->outputBuffer != NULL) && (context->outputBuffer->buf_size < numSamples * self->channels * 2)) {
// realloc buffer
free_audio_buffer(context->outputBuffer);
context->outputBuffer = NULL;
}
if (context->outputBuffer == NULL) {
// allocate output buffer
context->outputBuffer = alloc_audio_buffer(numSamples * self->channels * 2);
if (context->outputBuffer == NULL) {
fprintf(stderr, "ERROR: Cannot allocate output buffer of size %d!\n", numSamples * self->channels * 2);
return PipelineError;
}
}
// Scale samples to 16 bit
mad_fixed_t const *left = context->synth->pcm.samples[0];
mad_fixed_t const *right = context->synth->pcm.samples[1];
uint32_t bufferPos = 0;
while (numSamples--) {
signed int sample;
/* output sample(s) in 16-bit signed little-endian PCM */
sample = scale(*left++);
context->outputBuffer->data[bufferPos++] = (sample >> 0) & 0xff;
context->outputBuffer->data[bufferPos++] = (sample >> 8) & 0xff;
if (context->synth->pcm.channels == 2) {
sample = scale(*right++);
context->outputBuffer->data[bufferPos++] = (sample >> 0) & 0xff;
context->outputBuffer->data[bufferPos++] = (sample >> 8) & 0xff;
}
}
// run next element of the pipeline
uint32_t buf_sz = context->outputBuffer->buf_size;
context->outputBuffer->buf_size = bufferPos;
AudioPipelineStatus ret = self->next->push(self->next, context->outputBuffer);
if ((ret != PipelineRunning) && (ret != PipelineBuffering)) {
return ret;
}
context->outputBuffer->buf_size = buf_sz;
return PipelineRunning;
}
AudioPipelineStatus decoder_mp3_link(AudioPipelineElement *self, AudioPipelineElement *source) {
if ((source->sample_rate != 0) || (source->channels != 0) || (source->bits_per_sample != 0) || (!((source->type == AudioElementSource) || (source->type == AudioElementDemuxer)))) {
fprintf(stderr, "ERROR: MP3 decoder can only link to a data source, not %s!\n", source->describe(source));
return PipelineError;
}
source->next = self;
return PipelineStopped;
}
char *decoder_mp3_describe(AudioPipelineElement *self) {
return "libmad MP3 decoder";
}
void decoder_mp3_destroy(AudioPipelineElement *self) {
DecoderMP3Context *context = (DecoderMP3Context *)self->ctx;
if (context->outputBuffer) {
free_audio_buffer(context->outputBuffer);
}
mad_synth_finish(context->synth);
mad_frame_finish(context->frame);
mad_stream_finish(context->stream);
free(context->synth);
free(context->frame);
free(context->stream);
free(context);
free(self);
}
AudioPipelineElement *audio_decoder_mp3(void) {
AudioPipelineElement *self = calloc(1, sizeof(AudioPipelineElement));
DecoderMP3Context *context = calloc(1, sizeof(DecoderMP3Context));
context->stream = malloc(sizeof(MADStream));
context->frame = malloc(sizeof(MADFrame));
context->synth = malloc(sizeof(MADSynth));
mad_stream_init(context->stream);
mad_frame_init(context->frame);
mad_synth_init(context->synth);
mad_stream_options(context->stream, 0);
self->bits_per_sample = 16;
self->ctx = context;
self->describe = decoder_mp3_describe;
self->start = filter_start_nop;
self->reset = filter_reset_nop;
self->stop = filter_stop_nop;
self->push = decoder_mp3_push;
self->link = decoder_mp3_link;
self->destroy = decoder_mp3_destroy;
self->type = AudioElementDecoder;
return self;
}

audio/audio_demuxer_mp3.c (new file, 261 lines)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include "audio.h"
#include "audio_internal.h"
#include "audio_demuxer_mp3.h"
typedef struct _DemuxerMP3Context {
uint32_t outputBufferPosition;
uint32_t missingBytesInLastPacket;
AudioBuffer *outputBuffer;
uint8_t headerData[4];
uint8_t headerIndex;
} DemuxerMP3Context;
static uint32_t const bitrateTable[5][16] = {
/* MPEG-1 */
{ 0, 32000, 64000, 96000, 128000, 160000, 192000, 224000, /* Layer I */
256000, 288000, 320000, 352000, 384000, 416000, 448000, 0 },
{ 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer II */
128000, 160000, 192000, 224000, 256000, 320000, 384000, 0 },
{ 0, 32000, 40000, 48000, 56000, 64000, 80000, 96000, /* Layer III */
112000, 128000, 160000, 192000, 224000, 256000, 320000, 0 },
/* MPEG-2 LSF */
{ 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer I */
128000, 144000, 160000, 176000, 192000, 224000, 256000, 0 },
{ 0, 8000, 16000, 24000, 32000, 40000, 48000, 56000, /* Layers II & III */
64000, 80000, 96000, 112000, 128000, 144000, 160000, 0 }
};
static uint32_t const sampleRateTable[3][3] = {
/* MPEG-1 */
{ 44100, 48000, 32000 },
/* MPEG-2 */
{ 22050, 24000, 16000 },
/* MPEG-2.5 */
{ 11025, 12000, 8000 }
};
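/*
 * Frame header layout as consumed below: data[0] and the top three bits of data[1]
 * hold the sync pattern (already matched by the scanner in demuxer_mp3_push); the
 * rest of data[1] holds the MPEG version, layer and CRC-protection bits; data[2]
 * holds the bitrate index, sample-rate index, padding and private bits; data[3]
 * holds the channel mode, mode extension, copyright, original and emphasis bits.
 */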
MP3Header demuxer_mp3_decode_header(uint8_t data[4]) {
MP3Header header = { 0 };
header.version = (data[1] >> 3) & 0x03;
header.layer = (data[1] >> 1) & 0x03;
header.has_crc = (data[1] & 0x01) == 0;
header.bitrateIndex = (data[2] >> 4) & 0x0f;
header.sampleRateIndex = (data[2] >> 2) & 0x03;
header.has_padding = (data[2] >> 1) & 0x01;
header.is_private = data[2] & 0x01;
header.channelMode = (data[3] >> 6) & 0x03;
header.jointStereoModeExtension = (data[3] >> 4) & 0x03;
header.has_copyright = (data[3] >> 3) & 0x01;
header.is_original = (data[3] >> 2) & 0x01;
header.emphasis = data[3] & 0x03;
header.valid = (header.version != MPEGVersionReserved) && (header.layer != MPEGLayerReserved) && (header.bitrateIndex != 0x0f) && (header.sampleRateIndex != 0x03);
if (header.valid) {
if (header.version == MPEGVersion1) {
switch (header.layer) {
case MPEGLayer1:
header.bitrate = bitrateTable[0][header.bitrateIndex];
break;
case MPEGLayer2:
header.bitrate = bitrateTable[1][header.bitrateIndex];
break;
case MPEGLayer3:
header.bitrate = bitrateTable[2][header.bitrateIndex];
break;
case MPEGLayerReserved:
break;
}
}
if ((header.version == MPEGVersion2) || (header.version == MPEGVersion2_5)) {
switch (header.layer) {
case MPEGLayer1:
header.bitrate = bitrateTable[3][header.bitrateIndex];
break;
case MPEGLayer2:
case MPEGLayer3:
header.bitrate = bitrateTable[4][header.bitrateIndex];
break;
case MPEGLayerReserved:
break;
}
}
switch (header.version) {
case MPEGVersion1:
header.sampleRate = sampleRateTable[0][header.sampleRateIndex];
break;
case MPEGVersion2:
header.sampleRate = sampleRateTable[1][header.sampleRateIndex];
break;
case MPEGVersion2_5:
header.sampleRate = sampleRateTable[2][header.sampleRateIndex];
break;
case MPEGVersionReserved:
break;
}
header.packetLength = 144 * ((float)header.bitrate / (float)header.sampleRate);
if (header.has_padding) {
if (header.layer == MPEGLayer1) {
header.packetLength += 4;
} else {
header.packetLength += 1;
}
}
if (header.has_crc) {
header.packetLength += 2;
}
header.packetLength -= 4; // Header
//fprintf(stderr, "INFO: Packet length: %d\n", header.packetLength);
if ((header.packetLength > 2016) || (header.packetLength < 96)) {
header.valid = false;
}
} else {
fprintf(stderr, "Invalid header!\n");
}
return header;
}
static inline AudioPipelineStatus demuxer_mp3_emit(AudioPipelineElement *self) {
DemuxerMP3Context *context = (DemuxerMP3Context *)self->ctx;
//fprintf(stderr, "INFO: Emitting packet of size %d\n", context->outputBufferPosition);
uint32_t buf_sz = context->outputBuffer->buf_size;
context->outputBuffer->buf_size = context->outputBufferPosition;
AudioPipelineStatus result = self->next->push(self->next, context->outputBuffer);
context->outputBuffer->buf_size = buf_sz;
context->outputBufferPosition = 0;
return result;
}
AudioPipelineStatus demuxer_mp3_push(AudioPipelineElement *self, AudioBuffer *buffer) {
DemuxerMP3Context *context = (DemuxerMP3Context *)self->ctx;
uint32_t start = 0;
bool sync = true;
if (context->missingBytesInLastPacket) {
// FIXME: what if the buffer is too small
if (buffer->buf_size < context->missingBytesInLastPacket) {
uint32_t remaining_bytes = (context->missingBytesInLastPacket - buffer->buf_size);
//fprintf(stderr, "INFO: Last buffer was too short, copying %d bytes, %d remaining \n", buffer->buf_size, remaining_bytes);
memcpy(context->outputBuffer->data + context->outputBufferPosition, buffer->data, buffer->buf_size);
context->outputBufferPosition += buffer->buf_size;
context->missingBytesInLastPacket = remaining_bytes;
return PipelineBuffering;
}
//fprintf(stderr, "INFO: Last buffer was too short, copying %d bytes\n", context->missingBytesInLastPacket);
memcpy(context->outputBuffer->data + context->outputBufferPosition, buffer->data, context->missingBytesInLastPacket);
context->outputBufferPosition += context->missingBytesInLastPacket;
demuxer_mp3_emit(self);
start = context->missingBytesInLastPacket;
context->missingBytesInLastPacket = 0;
}
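// Byte-wise scan for the next frame sync: collect four candidate header bytes
// (0xff followed by a byte >= 0xe0), then validate them with demuxer_mp3_decode_header.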
for (uint32_t i = start; i < buffer->buf_size; i++) {
switch (context->headerIndex) {
case 0:
if (buffer->data[i] != 0xff) {
if (sync) {
fprintf(stderr, "WARN: Sync lost at %d\n", i);
sync = false;
}
continue;
}
context->headerData[context->headerIndex++] = buffer->data[i];
continue;
case 1:
if (buffer->data[i] < 0xe0) {
context->headerIndex = 0;
continue;
}
context->headerData[context->headerIndex++] = buffer->data[i];
continue;
case 2:
case 3:
context->headerData[context->headerIndex++] = buffer->data[i];
continue;
default:
break;
}
context->headerIndex = 0;
// sync marker found, try to decode the header
MP3Header header = demuxer_mp3_decode_header(context->headerData);
if (!header.valid) {
continue;
}
sync = true;
if (i + header.packetLength < buffer->buf_size) {
// FIXME: realloc output Buffer size if too small
//fprintf(stderr, "INFO: Found frame sync at %d, copying %d bytes\n", i, header.packetLength);
memcpy(context->outputBuffer->data + context->outputBufferPosition, context->headerData, 4);
context->outputBufferPosition += 4;
memcpy(context->outputBuffer->data + context->outputBufferPosition, buffer->data + i, header.packetLength);
i += header.packetLength - 1;
context->outputBufferPosition += header.packetLength;
demuxer_mp3_emit(self);
} else {
uint32_t remaining_bytes = (buffer->buf_size - i);
//fprintf(stderr, "INFO: Found frame sync at %d, buffer too short copying %d bytes\n", i, remaining_bytes);
memcpy(context->outputBuffer->data + context->outputBufferPosition, context->headerData, 4);
context->outputBufferPosition += 4;
memcpy(context->outputBuffer->data + context->outputBufferPosition, buffer->data + i, remaining_bytes);
context->outputBufferPosition += remaining_bytes;
context->missingBytesInLastPacket = header.packetLength - remaining_bytes;
return PipelineBuffering;
}
}
return PipelineRunning;
}
AudioPipelineStatus demuxer_mp3_link(AudioPipelineElement *self, AudioPipelineElement *source) {
if ((source->sample_rate != 0) || (source->channels != 0) || (source->bits_per_sample != 0) || (source->type != AudioElementSource)) {
fprintf(stderr, "ERROR: MP3 demuxer can only link to a data source, not %s!\n", source->describe(source));
return PipelineError;
}
source->next = self;
return PipelineStopped;
}
char *demuxer_mp3_describe(AudioPipelineElement *self) {
return "mp3 demuxer";
}
void demuxer_mp3_destroy(AudioPipelineElement *self) {
DemuxerMP3Context *context = (DemuxerMP3Context *)self->ctx;
if (context->outputBuffer) {
free_audio_buffer(context->outputBuffer); // plain free() would leak the data pointer
}
free(context);
free(self);
}
AudioPipelineElement *audio_demuxer_mp3(void) {
AudioPipelineElement *self = calloc(1, sizeof(AudioPipelineElement));
DemuxerMP3Context *context = calloc(1, sizeof(DemuxerMP3Context));
context->outputBuffer = alloc_audio_buffer(2048);
self->ctx = context;
self->describe = demuxer_mp3_describe;
self->start = filter_start_nop;
self->reset = filter_reset_nop;
self->stop = filter_stop_nop;
self->push = demuxer_mp3_push;
self->link = demuxer_mp3_link;
self->destroy = demuxer_mp3_destroy;
self->type = AudioElementDemuxer;
return self;
}

speexdsp resampler filter (new file, 149 lines)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "audio.h"
#include "audio_internal.h"
#include "deps/resampler/speex_resampler.h"
typedef struct _FilterResamplerContext {
uint32_t source_sample_rate;
AudioBuffer *outputBuffer;
SpeexResamplerState *st;
} FilterResamplerContext;
AudioPipelineStatus filter_resample_push(AudioPipelineElement *self, AudioBuffer *buffer) {
FilterResamplerContext *context = (FilterResamplerContext *)self->ctx;
if (self->sample_rate == context->source_sample_rate) {
// Short circuit if we do not have to resample
return self->next->push(self->next, buffer);
}
if (context->st == NULL) {
fprintf(stderr, "ERROR: Pipeline not linked!\n");
return PipelineError;
}
// 64-bit math with rounding up avoids integer-division truncation (e.g. 48000 / 44100 would otherwise yield 1)
uint32_t buf_sz = (uint32_t)(((uint64_t)buffer->buf_size * self->sample_rate + context->source_sample_rate - 1) / context->source_sample_rate);
// Realloc if buffer size mismatch
if ((context->outputBuffer != NULL) && (context->outputBuffer->buf_size < buf_sz)) {
free_audio_buffer(context->outputBuffer);
context->outputBuffer = NULL;
}
if (context->outputBuffer == NULL) {
// at first push allocate appropriately sized output buffer
context->outputBuffer = alloc_audio_buffer(buf_sz);
if (context->outputBuffer == NULL) {
fprintf(stderr, "ERROR: No memory for output buffer of size %d\n", buf_sz);
return PipelineError;
}
}
// calculate sample counts that fit into the buffers
spx_uint32_t in_sz = (buffer->buf_size / self->channels / 2);
spx_uint32_t out_sz = (context->outputBuffer->buf_size / self->channels / 2);
// run the resampler on the buffer
int result = speex_resampler_process_interleaved_int(
context->st,
(spx_int16_t *)buffer->data,
&in_sz,
(spx_int16_t *)context->outputBuffer->data,
&out_sz
);
if (result != RESAMPLER_ERR_SUCCESS) {
fprintf(stderr, "ERROR: Error while resampling: %d\n", result);
return PipelineError;
}
if (in_sz != (buffer->buf_size / self->channels / 2)) {
fprintf(stderr, "WARN: Processed %d samples of %d!\n", in_sz, (buffer->buf_size / self->channels / 2));
}
if (out_sz != (context->outputBuffer->buf_size / self->channels / 2)) {
fprintf(stderr, "WARN: Output only %d samples of %d!\n", out_sz, (context->outputBuffer->buf_size / self->channels / 2));
}
// run next element of the pipeline
return self->next->push(self->next, context->outputBuffer);
}
AudioPipelineStatus filter_resample_link(AudioPipelineElement *self, AudioPipelineElement *source) {
FilterResamplerContext *context = (FilterResamplerContext *)self->ctx;
context->source_sample_rate = source->sample_rate;
self->bits_per_sample = source->bits_per_sample;
self->channels = source->channels;
if (self->sample_rate == source->sample_rate) {
// Sample rates match, this filter is a no-op
source->next = self;
return PipelineStopped;
}
if (source->bits_per_sample != 16) {
fprintf(stderr, "ERROR: Can only resample 16bits/sample, got: %d\n", source->bits_per_sample);
return PipelineError;
}
if (context->st != NULL) {
free_audio_buffer(context->outputBuffer);
context->outputBuffer = NULL;
speex_resampler_destroy(context->st);
fprintf(stderr, "INFO: Re-linking, source has: %d Hz, we have %d Hz\n", source->sample_rate, self->sample_rate);
}
if (source->sample_rate > 0) {
int err = 0;
context->st = speex_resampler_init(source->channels, source->sample_rate, self->sample_rate, 4, &err);
if (context->st == NULL) {
fprintf(stderr, "ERROR: Error initializing resampler: %d\n", err);
return PipelineError;
}
}
source->next = self;
if (self->next != NULL) {
// relinking after samplerate change
return self->next->link(self->next, self);
}
return PipelineStopped;
}
char *filter_resample_describe(AudioPipelineElement *self) {
return "speexdsp resampler";
}
void filter_resample_destroy(AudioPipelineElement *self) {
FilterResamplerContext *context = (FilterResamplerContext *)self->ctx;
if (context->st != NULL) {
speex_resampler_destroy(context->st);
}
if (context->outputBuffer) {
free_audio_buffer(context->outputBuffer); // plain free() would leak the sample data
}
free(context);
free(self);
}
AudioPipelineElement *audio_filter_resample(uint32_t output_sample_rate) {
AudioPipelineElement *self = calloc(1, sizeof(AudioPipelineElement));
FilterResamplerContext *context = calloc(1, sizeof(FilterResamplerContext));
self->sample_rate = output_sample_rate;
self->ctx = context;
self->describe = filter_resample_describe;
self->start = filter_start_nop;
self->reset = filter_reset_nop;
self->stop = filter_stop_nop;
self->push = filter_resample_push;
self->link = filter_resample_link;
self->destroy = filter_resample_destroy;
self->type = AudioElementFilter;
return self;
}

audio/audio_internal.c (new file, 34 lines)
#include "audio.h"
#include "audio_internal.h"
AudioPipelineStatus sink_nop(AudioPipelineElement *self) {
return PipelineStopped;
}
AudioPipelineStatus source_start_nop(AudioPipelineElement *self) {
return self->next->start(self->next);
}
AudioPipelineStatus source_reset_nop(AudioPipelineElement *self) {
return self->next->reset(self->next);
}
AudioPipelineStatus source_stop_nop(AudioPipelineElement *self) {
return self->next->stop(self->next);
}
AudioPipelineStatus source_nop(AudioPipelineElement *self) {
return PipelineStopped;
}
AudioPipelineStatus filter_start_nop(AudioPipelineElement *self) {
return self->next->start(self->next);
}
AudioPipelineStatus filter_reset_nop(AudioPipelineElement *self) {
return self->next->reset(self->next);
}
AudioPipelineStatus filter_stop_nop(AudioPipelineElement *self) {
return self->next->stop(self->next);
}

audio/audio_sink_file.c (new file, 70 lines)
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include "audio.h"
#include "audio_internal.h"
typedef struct _SinkFileContext {
FILE *fp;
} SinkFileContext;
AudioPipelineStatus sink_file_push(AudioPipelineElement *self, AudioBuffer *buffer) {
SinkFileContext *context = (SinkFileContext *)self->ctx;
size_t bytes = fwrite(buffer->data, 1, buffer->buf_size, context->fp);
return (bytes != buffer->buf_size) ? PipelineError : PipelineRunning;
}
AudioPipelineStatus sink_file_link(AudioPipelineElement *self, AudioPipelineElement *source) {
SinkFileContext *context = (SinkFileContext *)self->ctx;
if (context->fp != NULL) {
fseek(context->fp, 0, SEEK_SET);
ftruncate(fileno(context->fp), 0);
}
source->next = self;
return PipelineStopped;
}
char *sink_file_describe(AudioPipelineElement *self) {
return "file sink";
}
void sink_file_destroy(AudioPipelineElement *self) {
SinkFileContext *context = (SinkFileContext *)self->ctx;
fclose(context->fp);
free(self->ctx);
free(self);
}
AudioPipelineElement *audio_sink_file(char *filename) {
AudioPipelineElement *self = calloc(1, sizeof(AudioPipelineElement));
SinkFileContext *context = calloc(1, sizeof(SinkFileContext));
context->fp = fopen(filename, "wb");
if (context->fp == NULL) {
fprintf(stderr, "ERROR: Cannot open file %s, errno=%d", filename, errno);
free(context);
free(self);
return NULL;
}
self->ctx = context;
self->describe = sink_file_describe;
self->start = sink_nop;
self->reset = sink_nop;
self->stop = sink_nop;
self->push = sink_file_push;
self->link = sink_file_link;
self->destroy = sink_file_destroy;
self->type = AudioElementSink;
return self;
}

audio/audio_sink_libao.c (new file, 80 lines)
#include <stdlib.h>
#include <string.h>
#include <ao/ao.h>
#include "audio.h"
#include "audio_internal.h"
typedef struct _SinkLibaoContext {
ao_device *device;
ao_sample_format format;
} SinkLibaoContext;
AudioPipelineStatus sink_libao_push(AudioPipelineElement *self, AudioBuffer *buffer) {
SinkLibaoContext *context = (SinkLibaoContext *)self->ctx;
if (context->device == NULL) {
int default_driver = ao_default_driver_id();
context->device = ao_open_live(default_driver, &context->format, NULL);
if (context->device == NULL) {
fprintf(stderr, "Error opening device.\n");
return PipelineError;
}
}
int result = ao_play(context->device, (char *)buffer->data, buffer->buf_size);
return (result == 0) ? PipelineError : PipelineRunning;
}
AudioPipelineStatus sink_libao_link(AudioPipelineElement *self, AudioPipelineElement *source) {
SinkLibaoContext *context = (SinkLibaoContext *)self->ctx;
if (context->device != NULL) {
ao_close(context->device);
context->device = NULL;
}
memset(&context->format, 0, sizeof(ao_sample_format));
context->format.bits = source->bits_per_sample;
context->format.channels = source->channels;
context->format.rate = source->sample_rate;
context->format.byte_format = AO_FMT_LITTLE;
source->next = self;
return PipelineStopped;
}
char *sink_libao_describe(AudioPipelineElement *self) {
return "libao sink";
}
void sink_libao_destroy(AudioPipelineElement *self) {
SinkLibaoContext *context = (SinkLibaoContext *)self->ctx;
ao_close(context->device);
ao_shutdown();
free(self->ctx);
free(self);
}
AudioPipelineElement *audio_sink_libao(void) {
AudioPipelineElement *self = calloc(1, sizeof(AudioPipelineElement));
SinkLibaoContext *context = calloc(1, sizeof(SinkLibaoContext));
ao_initialize();
self->ctx = context;
self->describe = sink_libao_describe;
self->start = sink_nop;
self->reset = sink_nop;
self->stop = sink_nop;
self->push = sink_libao_push;
self->link = sink_libao_link;
self->destroy = sink_libao_destroy;
self->type = AudioElementSink;
return self;
}

audio/audio_source_file.c (new file, 103 lines)
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "audio.h"
#include "audio_internal.h"
typedef struct _SourceFileContext {
AudioBuffer *buffer;
FILE *fp;
} SourceFileContext;
AudioPipelineStatus source_file_start(AudioPipelineElement *self) {
SourceFileContext *context = (SourceFileContext *)self->ctx;
while(!feof(context->fp)) {
// fprintf(stderr, "INFO: File offset %d, reading %d\n", ftell(context->fp), context->buffer->buf_size);
size_t bytes = fread(context->buffer->data, 1, context->buffer->buf_size, context->fp);
if (bytes > 0) {
uint32_t buf_size = context->buffer->buf_size;
context->buffer->buf_size = (uint32_t)bytes;
AudioPipelineStatus result = self->next->push(self->next, context->buffer);
if ((result != PipelineRunning) && (result != PipelineBuffering)) {
self->pipeline->status = result;
if (self->pipeline->statusCallback != NULL) {
self->pipeline->statusCallback(self->pipeline, result);
}
return result;
}
context->buffer->buf_size = buf_size;
}
}
return PipelineFinished;
}
AudioPipelineStatus source_file_reset(AudioPipelineElement *self) {
SourceFileContext *context = (SourceFileContext *)self->ctx;
fseek(context->fp, 0, SEEK_SET);
return PipelineStopped;
}
char *source_file_describe(AudioPipelineElement *self) {
return "file source";
}
void source_file_destroy(AudioPipelineElement *self) {
SourceFileContext *context = (SourceFileContext *)self->ctx;
free_audio_buffer(context->buffer);
fclose(context->fp);
free(self->ctx);
free(self);
}
AudioPipelineElement *audio_source_file(char *filename, uint32_t block_size) {
AudioPipelineElement *self = calloc(1, sizeof(AudioPipelineElement));
if (self == NULL) {
return NULL;
}
SourceFileContext *context = calloc(1, sizeof(SourceFileContext));
if (context == NULL) {
free(self);
return NULL;
}
context->buffer = alloc_audio_buffer(block_size);
if (context->buffer == NULL) {
fprintf(stderr, "ERROR: Not enough memory for buffer of size %d!\n", block_size);
free(context);
free(self);
return NULL;
}
context->fp = fopen(filename, "rb");
if (context->fp == NULL) {
fprintf(stderr, "ERROR: Could not open file %s\n", filename);
free_audio_buffer(context->buffer);
free(context);
free(self);
return NULL;
}
self->bits_per_sample = 0;
self->channels = 0;
self->sample_rate = 0;
self->ctx = context;
self->describe = source_file_describe;
self->start = source_file_start;
self->reset = source_file_reset;
self->stop = source_stop_nop;
self->push = NULL;
self->link = NULL;
self->destroy = source_file_destroy;
self->type = AudioElementSource;
return self;
}

test-tone source (new file, 103 lines)
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "audio.h"
#include "audio_internal.h"
typedef struct _SourceTesttoneContext {
AudioBuffer *buffer;
} SourceTesttoneContext;
AudioPipelineStatus source_testtone_start(AudioPipelineElement *self) {
SourceTesttoneContext *context = (SourceTesttoneContext *)self->ctx;
int sample_size = (self->bits_per_sample / 8) * self->channels;
int bytes_per_sample = (self->bits_per_sample / 8);
float max_value = pow(2, self->bits_per_sample) / 2 - 1;
AudioPipelineStatus result = self->next->start(self->next);
if (result != PipelineStopped) {
return result;
}
for (int freq = 440; freq < 1440; freq += 100) {
for (int i = 0; i < self->sample_rate / 8; i++) {
float volume = 1.0f - ((float)i / ((float)self->sample_rate / 8.0f));
int sample = (int)(0.75f * max_value * sinf(2 * M_PI * freq * ((float)i/self->sample_rate))) * volume;
for(int c = 0; c < self->channels; c++) {
int offset = (sample_size * i) + (bytes_per_sample * c);
context->buffer->data[offset] = sample & 0xff;
if (bytes_per_sample > 1) {
context->buffer->data[offset + 1] = (sample >> 8) & 0xff;
}
if (bytes_per_sample > 2) {
context->buffer->data[offset + 2] = (sample >> 16) & 0xff;
}
if (bytes_per_sample > 3) {
context->buffer->data[offset + 3] = (sample >> 24) & 0xff;
}
}
}
result = self->next->push(self->next, context->buffer);
if (result != PipelineRunning) {
self->pipeline->status = result;
if (self->pipeline->statusCallback != NULL) {
self->pipeline->statusCallback(self->pipeline, result);
}
return result;
}
}
return PipelineFinished;
}
char *source_testtone_describe(AudioPipelineElement *self) {
return "testtone source";
}
void source_testtone_destroy(AudioPipelineElement *self) {
SourceTesttoneContext *context = (SourceTesttoneContext *)self->ctx;
free_audio_buffer(context->buffer);
free(self->ctx);
free(self);
}
AudioPipelineElement *audio_source_testtone(uint16_t sample_rate, uint8_t channels, uint8_t bits_per_sample) {
AudioPipelineElement *self = calloc(1, sizeof(AudioPipelineElement));
if (self == NULL) {
return NULL;
}
SourceTesttoneContext *context = calloc(1, sizeof(SourceTesttoneContext));
if (context == NULL) {
free(self);
return NULL;
}
context->buffer = alloc_audio_buffer(sample_rate / 8 * channels * (bits_per_sample / 8));
if (context->buffer == NULL) {
free(context);
free(self);
return NULL;
}
self->bits_per_sample = bits_per_sample;
self->channels = channels;
self->sample_rate = sample_rate;
self->ctx = context;
self->describe = source_testtone_describe;
self->start = source_testtone_start;
self->reset = source_reset_nop;
self->stop = source_stop_nop;
self->push = NULL;
self->link = NULL;
self->destroy = source_testtone_destroy;
self->type = AudioElementSource;
return self;
}

audio/include/audio.h (new file, 108 lines)
#ifndef AUDIOPIPELINE_AUDIO_H__INCLUDED
#define AUDIOPIPELINE_AUDIO_H__INCLUDED
#include <stdarg.h>
#include <stdint.h>
/**
* Audio buffer that is passed from one pipeline element to the next. The contents
* of `data` are opaque to the pipeline: they may be compressed data or interleaved
* sample data.
*/
typedef struct _AudioBuffer {
uint32_t buf_size;
uint8_t *data;
// FIXME: Data type
} AudioBuffer;
/**
* Type of pipeline element
*/
typedef enum _AudioPipelineElementType {
AudioElementSource, /** Audio source, has to be the first item in a pipeline */
AudioElementDemuxer, /** Format demuxer, yields sample data from a stream */
AudioElementDecoder, /** Audio decoder, decodes sample data from a compressed input buffer */
AudioElementEncoder, /** Audio encoder, takes sample data and compresses them */
AudioElementFilter, /** Audio filter, takes sample data, transforms the samples and yields sample data */
AudioElementMuxer, /** Takes compressed buffers and muxes them to an output format which can be saved */
AudioElementSink /** Audio sink, takes a buffer and outputs it somewhere, probably a file or soundcard */
} AudioPipelineElementType;
/**
* Status of the audio pipeline
*/
typedef enum _AudioPipelineStatus {
PipelineStopped, /** Pipeline in stopped state, no processing is happening yet */
PipelineRunning, /** Pipeline in running state, samples will be produced, transformed and consumed*/
PipelinePaused, /** Pipeline in paused mode, everything is initialized but no processing is happening */
PipelineBuffering, /** Pipeline is pre-loading data */
PipelineError, /** Pipeline is in error state, something happened that prevents other states */
PipelineFinished /** Pipeline has finished processing data */
} AudioPipelineStatus;
/** An audio pipeline element; the data it contains is opaque to the user */
typedef struct _AudioPipelineElement AudioPipelineElement;
/**
* The audio pipeline, created by calling `audio_pipeline_assemble`; it is used through the
* function pointers it provides.
*/
typedef struct _AudioPipeline {
/** Start processing audio */
AudioPipelineStatus (*start)(struct _AudioPipeline *pipeline);
/** Stop processing audio and reset internal state */
AudioPipelineStatus (*reset)(struct _AudioPipeline *pipeline);
/** Stop processing audio without resetting internal state; you can call start on the pipeline
to continue processing */
AudioPipelineStatus (*stop)(struct _AudioPipeline *pipeline);
/** Status callback will be called if the state of the pipeline changes */
void (*statusCallback)(struct _AudioPipeline *self, AudioPipelineStatus status);
/** The source element of the pipeline, some sources may allow changing parameters */
struct _AudioPipelineElement *source;
/** The sink element of the pipeline */
struct _AudioPipelineElement *sink;
/** Pipeline status (same as is provided via status callback) */
AudioPipelineStatus status;
} AudioPipeline;
/**
* Assemble a pipeline from components; the argument list must end with a sink followed by NULL.
*
* @param source The source element
* @param ... Further elements to add to the pipeline, terminated by a NULL element
*
* @returns Initialized `AudioPipeline` or `NULL` on Error.
*/
AudioPipeline *audio_pipeline_assemble(AudioPipelineElement *source, ...);
/**
* Destroy the given pipeline. This releases all associated memory; do not use the pipeline afterwards.
*
* @param pipeline The pipeline to deallocate
*/
void audio_pipeline_destroy(AudioPipeline *pipeline);
/**
* Safely allocate an audio buffer; returns NULL on out-of-memory conditions.
*
* @param size Size in bytes of the buffer data. If set to 0, the data pointer will not be allocated and stays at `NULL`
*
* @returns Initialized `AudioBuffer`
*/
AudioBuffer *alloc_audio_buffer(uint32_t size);
/**
* Free an audio buffer and all of its contents, if any have been allocated.
*
* @param buffer The audio buffer to deallocate
*/
void free_audio_buffer(AudioBuffer *buffer);
#endif /* AUDIOPIPELINE_AUDIO_H__INCLUDED */
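A minimal usage sketch (not part of this commit) showing how the elements above assemble into an MP3 playback chain. The element header names are inferred from their include guards, and the input file name, block size, and 48 kHz target rate are illustrative assumptions:

#include <stdio.h>
#include "audio.h"
#include "audio_source_file.h"
#include "audio_demuxer_mp3.h"
#include "audio_decoder_mp3.h"
#include "audio_filter_resample.h"
#include "audio_sink_libao.h"

static void on_status(AudioPipeline *pipeline, AudioPipelineStatus status) {
    fprintf(stderr, "pipeline status changed: %d\n", status);
}

int main(void) {
    // Source -> demuxer -> decoder -> resampler -> sink, terminated by NULL.
    AudioPipeline *pipeline = audio_pipeline_assemble(
        audio_source_file("music.mp3", 4096),   // read the file in 4 kB blocks
        audio_demuxer_mp3(),
        audio_decoder_mp3(),
        audio_filter_resample(48000),            // no-op if the stream is already 48 kHz
        audio_sink_libao(),
        NULL
    );
    if (pipeline == NULL) {
        return 1;
    }
    pipeline->statusCallback = on_status;        // optional status notifications
    pipeline->start(pipeline);                   // the file source runs synchronously until EOF
    audio_pipeline_destroy(pipeline);
    return 0;
}

Since the file source pushes data synchronously, start() only returns once the whole file has been fed through the chain (or an error occurred).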

audio_decoder_mp3.h (new file, 14 lines)
#ifndef AUDIOPIPELINE_AUDIO_DECODER_MP3_H__INCLUDED
#define AUDIOPIPELINE_AUDIO_DECODER_MP3_H__INCLUDED
#include "audio.h"
/**
* Create an MP3 decoder element (backed by libmad)
*
* @returns Initialized `AudioPipelineElement` that can be used in call to `audio_pipeline_assemble`
*/
AudioPipelineElement *audio_decoder_mp3(void);
#endif /* AUDIOPIPELINE_AUDIO_DECODER_MP3_H__INCLUDED */

audio_demuxer_mp3.h (new file, 75 lines)
#ifndef AUDIOPIPELINE_AUDIO_DEMUXER_MP3_H__INCLUDED
#define AUDIOPIPELINE_AUDIO_DEMUXER_MP3_H__INCLUDED
#include <stdbool.h>
#include "audio.h"
/**
* Creates a demuxer element for MP3 files
*
* This demuxer searches for MP3 sync markers, decodes the header to calculate the frame length,
* and then forwards the MP3 data to the next element in the chain in packets of 10 frames.
* The maximum size of data buffered before calling the decoder is around 14 kB.
* The theoretical maximum decoded data from this packet is 11520 samples which could be
* up to 45kB of data and between 0.96 and 1.44 seconds of audio playback time.
*
* Formula: FrameSize = (144 * BitRate / SampleRate) + PaddingBit
*
* @returns Initialized `AudioPipelineElement` that can be used in call to `audio_pipeline_assemble`
*/
AudioPipelineElement *audio_demuxer_mp3(void);
typedef enum {
MPEGVersion2_5 = 0,
MPEGVersionReserved = 1,
MPEGVersion2 = 2,
MPEGVersion1 = 3
} MPEGVersion;
typedef enum {
MPEGLayerReserved = 0,
MPEGLayer3 = 1,
MPEGLayer2 = 2,
MPEGLayer1 = 3
} MPEGLayer;
typedef enum {
MPEGChannelModeStereo = 0,
MPEGChannelModeJointStereo = 1,
MPEGChannelModeDualChannel = 2,
MPEGChannelModeMono = 3
} MPEGChannelMode;
typedef enum {
MPEGEmphasisNone = 0,
MPEGEmphasis50_15 = 1,
MPEGEmphasisReserved = 2,
MPEGEmphasisCCITJ17 = 3
} MPEGEmphasis;
typedef struct _MP3Header {
MPEGVersion version;
MPEGLayer layer;
uint8_t bitrateIndex;
uint8_t sampleRateIndex;
MPEGChannelMode channelMode;
uint8_t jointStereoModeExtension;
MPEGEmphasis emphasis;
bool has_crc;
bool has_padding;
bool has_copyright;
bool is_original;
bool is_private;
bool valid;
uint32_t bitrate;
uint32_t sampleRate;
uint16_t packetLength;
} MP3Header;
MP3Header demuxer_mp3_decode_header(uint8_t data[4]);
#endif /* AUDIOPIPELINE_AUDIO_DEMUXER_MP3_H__INCLUDED */
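A small sketch (not part of the commit) of the frame-length formula above, calling demuxer_mp3_decode_header on a hypothetical header for a 128 kbit/s, 44.1 kHz MPEG-1 Layer III frame without CRC or padding:

#include <stdio.h>
#include "audio_demuxer_mp3.h"

int main(void) {
    // 0xFF 0xFB 0x90 0x64: sync, MPEG-1 Layer III, no CRC, 128 kbit/s, 44100 Hz, joint stereo
    uint8_t hdr[4] = { 0xFF, 0xFB, 0x90, 0x64 };
    MP3Header h = demuxer_mp3_decode_header(hdr);
    // FrameSize = 144 * 128000 / 44100 = 417 bytes; the demuxer subtracts the
    // 4 header bytes it has already consumed, so packetLength comes out as 413.
    printf("valid=%d bitrate=%u sampleRate=%u packetLength=%u\n",
           h.valid, h.bitrate, h.sampleRate, h.packetLength);
    return 0;
}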

audio_filter_resample.h (new file, 17 lines)
#ifndef AUDIOPIPELINE_AUDIO_FILTER_RESAMPLE_H__INCLUDED
#define AUDIOPIPELINE_AUDIO_FILTER_RESAMPLE_H__INCLUDED
#include "audio.h"
/**
* Creates a filter element which resamples the input audio to the output sample rate.
* This is a no-op if input and output sample rates match.
*
* @param output_sample_rate The sample rate in Hz to resample to
*
* @returns Initialized `AudioPipelineElement` that can be used in call to `audio_pipeline_assemble`
*/
AudioPipelineElement *audio_filter_resample(uint32_t output_sample_rate);
#endif /* AUDIOPIPELINE_AUDIO_FILTER_RESAMPLE_H__INCLUDED */

audio_internal.h (new file, 44 lines)
#ifndef AUDIOPIPELINE_AUDIO_INTERNAL_H__INCLUDED
#define AUDIOPIPELINE_AUDIO_INTERNAL_H__INCLUDED
#include "audio.h"
struct _AudioPipelineElement {
void *ctx; /* Internal scratchspace/context */
AudioPipelineStatus (*start)(struct _AudioPipelineElement *self);
AudioPipelineStatus (*reset)(struct _AudioPipelineElement *self);
AudioPipelineStatus (*stop)(struct _AudioPipelineElement *self);
AudioPipelineStatus (*push)(struct _AudioPipelineElement *self, AudioBuffer *buffer);
AudioPipelineStatus (*link)(struct _AudioPipelineElement *self, struct _AudioPipelineElement *source);
char *(*describe)(struct _AudioPipelineElement *self);
void (*destroy)(struct _AudioPipelineElement *self);
AudioPipelineElementType type;
/* output format */
char format[4];
uint8_t channels;
uint8_t bits_per_sample;
uint32_t sample_rate;
/* Set in pipeline assembly method */
struct _AudioPipelineElement *next;
AudioPipeline *pipeline;
};
// Some NOP functions to be used by all elements to avoid creating a lot of stub functions
AudioPipelineStatus sink_nop(AudioPipelineElement *self);
AudioPipelineStatus source_start_nop(AudioPipelineElement *self);
AudioPipelineStatus source_reset_nop(AudioPipelineElement *self);
AudioPipelineStatus source_stop_nop(AudioPipelineElement *self);
AudioPipelineStatus source_nop(AudioPipelineElement *self);
AudioPipelineStatus filter_start_nop(AudioPipelineElement *self);
AudioPipelineStatus filter_reset_nop(AudioPipelineElement *self);
AudioPipelineStatus filter_stop_nop(AudioPipelineElement *self);
#endif /* AUDIOPIPELINE_AUDIO_INTERNAL_H__INCLUDED */
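The struct above is the whole element contract. Below is a hedged sketch (not from this commit) of a minimal custom filter built on it, reusing the filter_*_nop helpers declared above; the filter_gain_* names are invented for illustration:

#include <stdlib.h>
#include <stdio.h>
#include "audio.h"
#include "audio_internal.h"

// Halve the volume of 16-bit interleaved samples, then pass the buffer on unchanged.
static AudioPipelineStatus filter_gain_push(AudioPipelineElement *self, AudioBuffer *buffer) {
    int16_t *samples = (int16_t *)buffer->data;
    for (uint32_t i = 0; i < buffer->buf_size / 2; i++) {
        samples[i] /= 2;
    }
    return self->next->push(self->next, buffer);
}

static AudioPipelineStatus filter_gain_link(AudioPipelineElement *self, AudioPipelineElement *source) {
    if (source->bits_per_sample != 16) {
        fprintf(stderr, "ERROR: Gain filter only handles 16 bits/sample\n");
        return PipelineError;
    }
    // Advertise the same output format as the source and hook ourselves into the chain.
    self->bits_per_sample = source->bits_per_sample;
    self->channels = source->channels;
    self->sample_rate = source->sample_rate;
    source->next = self;
    if (self->next != NULL) {
        return self->next->link(self->next, self); // propagate re-links downstream
    }
    return PipelineStopped;
}

static char *filter_gain_describe(AudioPipelineElement *self) {
    return "gain filter (example)";
}

static void filter_gain_destroy(AudioPipelineElement *self) {
    free(self);
}

AudioPipelineElement *audio_filter_gain_example(void) {
    AudioPipelineElement *self = calloc(1, sizeof(AudioPipelineElement));
    self->describe = filter_gain_describe;
    self->start = filter_start_nop;   // start/reset/stop just forward to the next element
    self->reset = filter_reset_nop;
    self->stop = filter_stop_nop;
    self->push = filter_gain_push;
    self->link = filter_gain_link;
    self->destroy = filter_gain_destroy;
    self->type = AudioElementFilter;
    return self;
}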

audio_sink_file.h (new file, 14 lines)
#ifndef AUDIOPIPELINE_AUDIO_SINK_FILE_H__INCLUDED
#define AUDIOPIPELINE_AUDIO_SINK_FILE_H__INCLUDED
#include "audio.h"
/**
* Create a file sink. This just writes the raw buffer data to a file; if you want to write a
* specialized format you'll need a muxer in front of it.
*
* @param filename File to write to
*
* @returns Initialized `AudioPipelineElement` that can be used in call to `audio_pipeline_assemble`
*/
AudioPipelineElement *audio_sink_file(char *filename);
#endif /* AUDIOPIPELINE_AUDIO_SINK_FILE_H__INCLUDED */

audio_sink_libao.h (new file, 13 lines)
#ifndef AUDIOPIPELINE_AUDIO_SINK_LIBAO_H__INCLUDED
#define AUDIOPIPELINE_AUDIO_SINK_LIBAO_H__INCLUDED
#include "audio.h"
/**
* Create a libao audio sink; it uses the default output driver (usually the soundcard).
*
* @returns Initialized `AudioPipelineElement` that can be used in call to `audio_pipeline_assemble`
*/
AudioPipelineElement *audio_sink_libao(void);
#endif /* AUDIOPIPELINE_AUDIO_SINK_LIBAO_H__INCLUDED */

audio_source_file.h (new file, 15 lines)
#ifndef AUDIOPIPELINE_AUDIO_SOURCE_FILE_H__INCLUDED
#define AUDIOPIPELINE_AUDIO_SOURCE_FILE_H__INCLUDED
#include "audio.h"
/**
* Create a file audio source from the provided file name
*
* @param filename File to open
* @param block_size Size in bytes of the read buffer that is pushed downstream per read
*
* @returns Initialized `AudioPipelineElement` that can be used in call to `audio_pipeline_assemble`
*/
AudioPipelineElement *audio_source_file(char *filename, uint32_t block_size);
#endif /* AUDIOPIPELINE_AUDIO_SOURCE_FILE_H__INCLUDED */

audio_source_testtone.h (new file, 17 lines)
#ifndef AUDIOPIPELINE_AUDIO_SOURCE_TESTTONE_H__INCLUDED
#define AUDIOPIPELINE_AUDIO_SOURCE_TESTTONE_H__INCLUDED
#include "audio.h"
/**
* Create a test-tone audio source with the provided settings
*
* @param sample_rate Sample rate to generate in Hz
* @param channels Number of channels
* @param bits_per_sample Number of bits per sample, one of 8, 16, 24, 32
*
* @returns Initialized `AudioPipelineElement` that can be used in call to `audio_pipeline_assemble`
*/
AudioPipelineElement *audio_source_testtone(uint16_t sample_rate, uint8_t channels, uint8_t bits_per_sample);
#endif /* AUDIOPIPELINE_AUDIO_SOURCE_TESTTONE_H__INCLUDED */
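A short sketch (not part of the commit) that writes the test-tone sweep as raw PCM to a file; the sink header name is inferred from its include guard, and the output file name and format parameters are illustrative:

#include "audio.h"
#include "audio_source_testtone.h"
#include "audio_sink_file.h"

int main(void) {
    // 44.1 kHz, stereo, 16-bit test tone sweep written as raw PCM
    AudioPipeline *pipeline = audio_pipeline_assemble(
        audio_source_testtone(44100, 2, 16),
        audio_sink_file("sweep.raw"),
        NULL
    );
    if (pipeline == NULL) {
        return 1;
    }
    pipeline->start(pipeline);
    audio_pipeline_destroy(pipeline);
    return 0;
}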

audio_muxer_wav.h (new file, 12 lines)
//
// audio_muxer_wav.h
// AudioLib
//
// Created by Johannes Schriewer on 23.02.24.
//
#ifndef audio_muxer_wav_h
#define audio_muxer_wav_h
#endif /* audio_muxer_wav_h */
