
Re #3815: Audio refactoring (#3926)

This commit is contained in:
Damjan Adamic 2016-10-19 22:56:25 +02:00 committed by Bertrand Songis
parent a19f4c78bf
commit 44133b595d
12 changed files with 935 additions and 875 deletions

View file

@ -245,14 +245,13 @@ void getSystemAudioFile(char * filename, int index)
void referenceSystemAudioFiles()
{
static_assert(sizeof(audioFilenames)==AU_SPECIAL_SOUND_FIRST*sizeof(char *), "Invalid audioFilenames size");
char path[AUDIO_FILENAME_MAXLEN+1];
FILINFO fno;
DIR dir;
sdAvailableSystemAudioFiles.reset();
assert(sizeof(audioFilenames)==AU_SPECIAL_SOUND_FIRST*sizeof(char *));
char * filename = strAppendSystemAudioPath(path);
*(filename-1) = '\0';
@ -465,7 +464,6 @@ void playModelEvent(uint8_t category, uint8_t index, event_t event)
}
}
void playModelName()
{
char filename[AUDIO_FILENAME_MAXLEN+1];
@ -483,18 +481,18 @@ void playModelName()
const int16_t alawTable[256] = { -5504, -5248, -6016, -5760, -4480, -4224, -4992, -4736, -7552, -7296, -8064, -7808, -6528, -6272, -7040, -6784, -2752, -2624, -3008, -2880, -2240, -2112, -2496, -2368, -3776, -3648, -4032, -3904, -3264, -3136, -3520, -3392, -22016, -20992, -24064, -23040, -17920, -16896, -19968, -18944, -30208, -29184, -32256, -31232, -26112, -25088, -28160, -27136, -11008, -10496, -12032, -11520, -8960, -8448, -9984, -9472, -15104, -14592, -16128, -15616, -13056, -12544, -14080, -13568, -344, -328, -376, -360, -280, -264, -312, -296, -472, -456, -504, -488, -408, -392, -440, -424, -88, -72, -120, -104, -24, -8, -56, -40, -216, -200, -248, -232, -152, -136, -184, -168, -1376, -1312, -1504, -1440, -1120, -1056, -1248, -1184, -1888, -1824, -2016, -1952, -1632, -1568, -1760, -1696, -688, -656, -752, -720, -560, -528, -624, -592, -944, -912, -1008, -976, -816, -784, -880, -848, 5504, 5248, 6016, 5760, 4480, 4224, 4992, 4736, 7552, 7296, 8064, 7808, 6528, 6272, 7040, 6784, 2752, 2624, 3008, 2880, 2240, 2112, 2496, 2368, 3776, 3648, 4032, 3904, 3264, 3136, 3520, 3392, 22016, 20992, 24064, 23040, 17920, 16896, 19968, 18944, 30208, 29184, 32256, 31232, 26112, 25088, 28160, 27136, 11008, 10496, 12032, 11520, 8960, 8448, 9984, 9472, 15104, 14592, 16128, 15616, 13056, 12544, 14080, 13568, 344, 328, 376, 360, 280, 264, 312, 296, 472, 456, 504, 488, 408, 392, 440, 424, 88, 72, 120, 104, 24, 8, 56, 40, 216, 200, 248, 232, 152, 136, 184, 168, 1376, 1312, 1504, 1440, 1120, 1056, 1248, 1184, 1888, 1824, 2016, 1952, 1632, 1568, 1760, 1696, 688, 656, 752, 720, 560, 528, 624, 592, 944, 912, 1008, 976, 816, 784, 880, 848 };
const int16_t ulawTable[256] = { -32124, -31100, -30076, -29052, -28028, -27004, -25980, -24956, -23932, -22908, -21884, -20860, -19836, -18812, -17788, -16764, -15996, -15484, -14972, -14460, -13948, -13436, -12924, -12412, -11900, -11388, -10876, -10364, -9852, -9340, -8828, -8316, -7932, -7676, -7420, -7164, -6908, -6652, -6396, -6140, -5884, -5628, -5372, -5116, -4860, -4604, -4348, -4092, -3900, -3772, -3644, -3516, -3388, -3260, -3132, -3004, -2876, -2748, -2620, -2492, -2364, -2236, -2108, -1980, -1884, -1820, -1756, -1692, -1628, -1564, -1500, -1436, -1372, -1308, -1244, -1180, -1116, -1052, -988, -924, -876, -844, -812, -780, -748, -716, -684, -652, -620, -588, -556, -524, -492, -460, -428, -396, -372, -356, -340, -324, -308, -292, -276, -260, -244, -228, -212, -196, -180, -164, -148, -132, -120, -112, -104, -96, -88, -80, -72, -64, -56, -48, -40, -32, -24, -16, -8, 0, 32124, 31100, 30076, 29052, 28028, 27004, 25980, 24956, 23932, 22908, 21884, 20860, 19836, 18812, 17788, 16764, 15996, 15484, 14972, 14460, 13948, 13436, 12924, 12412, 11900, 11388, 10876, 10364, 9852, 9340, 8828, 8316, 7932, 7676, 7420, 7164, 6908, 6652, 6396, 6140, 5884, 5628, 5372, 5116, 4860, 4604, 4348, 4092, 3900, 3772, 3644, 3516, 3388, 3260, 3132, 3004, 2876, 2748, 2620, 2492, 2364, 2236, 2108, 1980, 1884, 1820, 1756, 1692, 1628, 1564, 1500, 1436, 1372, 1308, 1244, 1180, 1116, 1052, 988, 924, 876, 844, 812, 780, 748, 716, 684, 652, 620, 588, 556, 524, 492, 460, 428, 396, 372, 356, 340, 324, 308, 292, 276, 260, 244, 228, 212, 196, 180, 164, 148, 132, 120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0 };
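The two lookup tables above are the standard G.711 decode tables: each 8-bit A-law or µ-law companded sample maps directly to a signed 16-bit PCM value. A minimal sketch of how such a table is used to expand companded samples (this helper is illustrative, not part of the commit):
// Illustrative helper: expand 8-bit A-law samples to 16-bit PCM via the table above.
static void alawExpand(const uint8_t * src, int16_t * dst, unsigned int count)
{
  for (unsigned int i = 0; i < count; i++) {
    dst[i] = alawTable[src[i]];   // one table lookup per companded byte
  }
}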
AudioQueue audioQueue __DMA;
AudioQueue audioQueue __DMA; // to place it in the RAM section on Horus, to have file buffers in RAM for DMA access
AudioBuffer audioBuffers[AUDIO_BUFFER_COUNT] __DMA;
AudioQueue::AudioQueue()
: buffersFifo(),
_started(false),
normalContext(),
backgroundContext(),
priorityContext(),
varioContext(),
fragmentsFifo()
{
memset(this, 0, sizeof(AudioQueue));
memset(audioBuffers, 0, sizeof(audioBuffers));
}
void AudioQueue::start()
{
state = 1;
}
#define CODEC_ID_PCM_S16LE 1
@ -524,7 +522,7 @@ void audioTask(void * pdata)
}
#endif
void mixSample(audio_data_t * result, int sample, unsigned int fade)
inline void mixSample(audio_data_t * result, int sample, unsigned int fade)
{
*result = limit(AUDIO_DATA_MIN, *result + ((sample >> fade) >> (16-AUDIO_BITS_PER_SAMPLE)), AUDIO_DATA_MAX);
}
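mixSample() attenuates the incoming sample by the fade shift, rescales it from 16 bits to AUDIO_BITS_PER_SAMPLE and adds it into the buffer with saturation via limit(). A standalone sketch of the same clamped-mix idea, assuming 16-bit output samples (so no rescaling step), not taken from this commit:
#include <cstdint>
// Illustrative saturating mix of one sample into an accumulator.
static inline int16_t mixClamped(int16_t acc, int32_t sample, unsigned int fade)
{
  int32_t v = acc + (sample >> fade);   // attenuate the incoming sample, then add
  if (v > INT16_MAX) v = INT16_MAX;     // clamp instead of wrapping around
  if (v < INT16_MIN) v = INT16_MIN;
  return (int16_t)v;
}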
@ -580,8 +578,8 @@ int WavContext::mixBuffer(AudioBuffer *buffer, int volume, unsigned int fade)
}
}
read = 0;
if (result == FR_OK) {
read = 0;
result = f_read(&state.file, wavBuffer, state.readSize, &read);
if (result == FR_OK) {
if (read > state.size) {
@ -622,7 +620,10 @@ int WavContext::mixBuffer(AudioBuffer *buffer, int volume, unsigned int fade)
}
}
return -result;
if (result != FR_OK) {
clear();
}
return 0;
}
#else
int WavContext::mixBuffer(AudioBuffer *buffer, int volume, unsigned int fade)
@ -732,8 +733,8 @@ void AudioQueue::wakeup()
audioConsumeCurrentBuffer();
DEBUG_TIMER_STOP(debugTimerAudioConsume);
AudioBuffer * buffer = getEmptyBuffer();
if (buffer) {
AudioBuffer * buffer;
while ((buffer = buffersFifo.getEmptyBuffer()) != 0) {
int result;
unsigned int fade = 0;
int size = 0;
@ -751,32 +752,16 @@ void AudioQueue::wakeup()
}
// mix the normal context (tones and wavs)
if (normalContext.fragment.type == FRAGMENT_TONE) {
result = normalContext.tone.mixBuffer(buffer, g_eeGeneral.beepVolume, fade);
}
else if (normalContext.fragment.type == FRAGMENT_FILE) {
result = normalContext.wav.mixBuffer(buffer, g_eeGeneral.wavVolume, fade);
if (result < 0) {
normalContext.wav.clear();
}
}
else {
result = 0;
if (normalContext.isEmpty() && !fragmentsFifo.empty()) {
CoEnterMutexSection(audioMutex);
normalContext.setFragment(fragmentsFifo.get());
CoLeaveMutexSection(audioMutex);
}
result = normalContext.mixBuffer(buffer, g_eeGeneral.beepVolume, g_eeGeneral.wavVolume, fade);
if (result > 0) {
size = max(size, result);
fade += 1;
}
else {
CoEnterMutexSection(audioMutex);
if (ridx != widx) {
normalContext.tone.setFragment(fragments[ridx]);
if (!fragments[ridx].repeat--) {
ridx = (ridx + 1) % AUDIO_QUEUE_LENGTH;
}
}
CoLeaveMutexSection(audioMutex);
}
// mix the vario context
result = varioContext.mixBuffer(buffer, g_eeGeneral.varioVolume, fade);
@ -795,9 +780,7 @@ void AudioQueue::wakeup()
// push the buffer if needed
if (size > 0) {
audioDisableIrq();
// TRACE("pushing buffer %d\n", bufferWIdx);
bufferWIdx = nextBufferIdx(bufferWIdx);
// TRACE("pushing buffer %p", buffer);
buffer->size = size;
#if defined(SOFTWARE_VOLUME)
for(uint32_t i=0; i<buffer->size; ++i) {
@ -805,11 +788,15 @@ void AudioQueue::wakeup()
buffer->data[i] = (int16_t) (((tmpSample * currentSpeakerVolume) / VOLUME_LEVEL_MAX) + AUDIO_DATA_SILENCE);
}
#endif
DEBUG_TIMER_START(debugTimerAudioPush);
audioPushBuffer(buffer);
DEBUG_TIMER_STOP(debugTimerAudioPush);
audioEnableIrq();
buffersFifo.audioPushBuffer();
}
else {
// break the endless loop
break;
}
DEBUG_TIMER_START(debugTimerAudioConsume);
audioConsumeCurrentBuffer();
DEBUG_TIMER_STOP(debugTimerAudioConsume);
}
}
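The refactored wakeup() is now a plain producer loop: it keeps requesting an empty buffer from the FIFO, refills the normal context from the fragment FIFO when it runs dry, mixes the contexts into the buffer, pushes the buffer if anything was written, and breaks as soon as there is nothing left to mix. A condensed restatement of that control flow (mutex handling, vario/background mixing and software volume are elided):
AudioBuffer * buffer;
while ((buffer = buffersFifo.getEmptyBuffer()) != 0) {
  int size = 0;
  unsigned int fade = 0;
  if (normalContext.isEmpty() && !fragmentsFifo.empty()) {
    normalContext.setFragment(fragmentsFifo.get());      // pull the next queued fragment
  }
  int result = normalContext.mixBuffer(buffer, g_eeGeneral.beepVolume, g_eeGeneral.wavVolume, fade);
  if (result > 0) {
    size = max(size, result);
    fade += 1;                                           // later contexts are mixed quieter
  }
  // ... varioContext and backgroundContext are mixed the same way ...
  if (size > 0) {
    buffer->size = size;
    buffersFifo.audioPushBuffer();                       // hand the filled buffer to playback
  }
  else {
    break;                                               // nothing to play, leave the loop
  }
}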
@ -832,17 +819,9 @@ void AudioQueue::pause(uint16_t len)
bool AudioQueue::isPlaying(uint8_t id)
{
if (normalContext.fragment.id == id || (isFunctionActive(FUNCTION_BACKGND_MUSIC) && backgroundContext.fragment.id == id))
return true;
uint8_t i = ridx;
while (i != widx) {
AudioFragment & fragment = fragments[i];
if (fragment.id == id)
return true;
i = (i + 1) % AUDIO_QUEUE_LENGTH;
}
return false;
return normalContext.hasId(id) ||
(isFunctionActive(FUNCTION_BACKGND_MUSIC) && backgroundContext.hasId(id)) ||
fragmentsFifo.hasId(id);
}
void AudioQueue::playTone(uint16_t freq, uint16_t len, uint16_t pause, uint8_t flags, int8_t freqIncr)
@ -858,42 +837,21 @@ void AudioQueue::playTone(uint16_t freq, uint16_t len, uint16_t pause, uint8_t f
}
if (flags & PLAY_BACKGROUND) {
AudioFragment & fragment = varioContext.fragment;
fragment.type = FRAGMENT_TONE;
fragment.tone.freq = freq;
fragment.tone.duration = len;
fragment.tone.pause = pause;
fragment.tone.reset = (flags & PLAY_NOW);
varioContext.setFragment(freq, len, pause, 0, 0, (flags & PLAY_NOW));
}
else {
// adjust frequency and length according to the user preferences
freq += g_eeGeneral.speakerPitch * 15;
len = getToneLength(len);
if (flags & PLAY_NOW) {
AudioFragment & fragment = priorityContext.fragment;
if (fragment.type == FRAGMENT_EMPTY) {
if (priorityContext.isFree()) {
priorityContext.clear();
fragment.type = FRAGMENT_TONE;
fragment.repeat = flags & 0x0f;
fragment.tone.freq = freq;
fragment.tone.duration = len;
fragment.tone.pause = pause;
fragment.tone.freqIncr = freqIncr;
priorityContext.setFragment(freq, len, pause, flags & 0x0f, freqIncr, false);
}
}
else {
uint8_t next_widx = (widx + 1) % AUDIO_QUEUE_LENGTH;
if (next_widx != ridx) {
AudioFragment & fragment = fragments[widx];
fragment.clear();
fragment.type = FRAGMENT_TONE;
fragment.repeat = flags & 0x0f;
fragment.tone.freq = freq;
fragment.tone.duration = len;
fragment.tone.pause = pause;
fragment.tone.freqIncr = freqIncr;
widx = next_widx;
}
fragmentsFifo.push(AudioFragment(freq, len, pause, flags & 0x0f, freqIncr, false));
}
}
@ -930,22 +888,10 @@ void AudioQueue::playFile(const char *filename, uint8_t flags, uint8_t id)
if (flags & PLAY_BACKGROUND) {
backgroundContext.clear();
AudioFragment & fragment = backgroundContext.fragment;
fragment.type = FRAGMENT_FILE;
strcpy(fragment.file, filename);
fragment.id = id;
backgroundContext.setFragment(filename, 0, id);
}
else {
uint8_t next_widx = (widx + 1) % AUDIO_QUEUE_LENGTH;
if (next_widx != ridx) {
AudioFragment & fragment = fragments[widx];
fragment.clear();
fragment.type = FRAGMENT_FILE;
strcpy(fragment.file, filename);
fragment.repeat = flags & 0x0f;
fragment.id = id;
widx = next_widx;
}
fragmentsFifo.push(AudioFragment(filename, flags & 0x0f, id));
}
CoLeaveMutexSection(audioMutex);
@ -962,10 +908,7 @@ void AudioQueue::stopPlay(uint8_t id)
#endif
// For the moment it's only needed to stop the background music
if (backgroundContext.fragment.id == id) {
backgroundContext.fragment.type = FRAGMENT_EMPTY;
backgroundContext.fragment.id = 0;
}
backgroundContext.stop(id);
}
void AudioQueue::stopSD()
@ -979,19 +922,17 @@ void AudioQueue::stopSD()
void AudioQueue::stopAll()
{
flush();
CoEnterMutexSection(audioMutex);
widx = ridx; // clean the queue
priorityContext.clear();
normalContext.fragment.clear();
varioContext.clear();
backgroundContext.clear();
normalContext.clear();
CoLeaveMutexSection(audioMutex);
}
void AudioQueue::flush()
{
CoEnterMutexSection(audioMutex);
widx = ridx; // clean the queue
fragmentsFifo.clear();
varioContext.clear();
backgroundContext.clear();
CoLeaveMutexSection(audioMutex);
@ -1012,7 +953,6 @@ void audioKeyPress()
if (g_eeGeneral.beepMode == e_mode_all) {
audioQueue.playTone(BEEP_DEFAULT_FREQ, 40, 20, PLAY_NOW);
}
#if defined(HAPTIC)
if (g_eeGeneral.hapticMode == e_mode_all) {
haptic.play(5, 0, PLAY_NOW);
@ -1035,10 +975,8 @@ void audioKeyError()
void audioTrimPress(int value)
{
value = limit(TRIM_MIN, value, TRIM_MAX);
value <<= 3;
value += 120*16;
if (g_eeGeneral.beepMode >= e_mode_nokeys) {
value = limit(TRIM_MIN, value, TRIM_MAX) * 8 + 120*16;
audioQueue.playTone(value, 40, 20, PLAY_NOW);
}
}

View file

@ -66,16 +66,16 @@ template <unsigned int NUM_BITS> class BitField {
#define AUDIO_FILENAME_MAXLEN (42) // max length (example: /SOUNDS/fr/123456789012/1234567890-off.wav)
#define AUDIO_QUEUE_LENGTH (20)
#define AUDIO_QUEUE_LENGTH (16) // must be a power of 2!
#define AUDIO_SAMPLE_RATE (32000)
#define AUDIO_BUFFER_DURATION (10)
#define AUDIO_BUFFER_SIZE (AUDIO_SAMPLE_RATE*AUDIO_BUFFER_DURATION/1000)
#if defined(SIMU_AUDIO)
#if defined(SIMU) && defined(SIMU_AUDIO)
#define AUDIO_BUFFER_COUNT (10) // simulator needs more buffers for smooth audio
#elif defined(PCBHORUS)
#define AUDIO_BUFFER_COUNT (2)
#define AUDIO_BUFFER_COUNT (2) // smaller than Taranis since there is also a buffer on the ADC chip
#else
#define AUDIO_BUFFER_COUNT (3)
#endif
@ -116,7 +116,6 @@ enum AudioBufferState
struct AudioBuffer {
audio_data_t data[AUDIO_BUFFER_SIZE];
uint16_t size;
uint8_t state;
};
extern AudioBuffer audioBuffers[AUDIO_BUFFER_COUNT];
@ -127,30 +126,65 @@ enum FragmentTypes {
FRAGMENT_FILE,
};
struct AudioFragment {
uint8_t type;
uint8_t id;
uint8_t repeat;
union {
struct {
struct Tone {
uint16_t freq;
uint16_t duration;
uint16_t pause;
int8_t freqIncr;
uint8_t reset;
} tone;
Tone() {};
Tone(uint16_t freq, uint16_t duration, uint16_t pause, int8_t freqIncr, bool reset):
freq(freq),
duration(duration),
pause(pause),
freqIncr(freqIncr),
reset(reset)
{};
};
struct AudioFragment {
uint8_t type;
uint8_t id;
uint8_t repeat;
union {
Tone tone;
char file[AUDIO_FILENAME_MAXLEN+1];
};
void clear()
AudioFragment() { clear(); };
AudioFragment(uint16_t freq, uint16_t duration, uint16_t pause, uint8_t repeat, int8_t freqIncr, bool reset, uint8_t id=0):
type(FRAGMENT_TONE),
id(id),
repeat(repeat),
tone(freq, duration, pause, freqIncr, reset)
{};
AudioFragment(const char * filename, uint8_t repeat, uint8_t id=0):
type(FRAGMENT_FILE),
id(id),
repeat(repeat)
{
memset(this, 0, sizeof(AudioFragment));
strcpy(file, filename);
}
void clear() { memset(this, 0, sizeof(AudioFragment)); };
};
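With the new constructors a fragment is built in a single expression instead of field-by-field assignment on a queue slot. A short usage sketch (the concrete values and the wav path are illustrative only):
// Tone fragment: frequency, duration (ms), pause (ms), repeat, freqIncr, reset flag.
AudioFragment beep(BEEP_DEFAULT_FREQ, 40, 20, 0, 0, false);
// File fragment: wav path, repeat count, id later used by isPlaying()/stopPlay().
AudioFragment track("/SOUNDS/en/hello.wav", 0, 1);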
class ToneContext {
public:
inline void clear() { memset(this, 0, sizeof(ToneContext)); };
bool isFree() const { return fragment.type == FRAGMENT_EMPTY; };
int mixBuffer(AudioBuffer *buffer, int volume, unsigned int fade);
void setFragment(uint16_t freq, uint16_t duration, uint16_t pause, uint8_t repeat, int8_t freqIncr, bool reset, uint8_t id=0)
{
fragment = AudioFragment(freq, duration, pause, repeat, freqIncr, reset, id);
}
private:
AudioFragment fragment;
struct {
@ -162,22 +196,29 @@ class ToneContext {
uint16_t pause;
} state;
inline void setFragment(AudioFragment & fragment)
{
this->fragment = fragment;
memset(&state, 0, sizeof(state));
}
inline void clear()
{
memset(this, 0, sizeof(ToneContext));
}
int mixBuffer(AudioBuffer *buffer, int volume, unsigned int fade);
};
class WavContext {
public:
inline void clear() { fragment.clear(); };
int mixBuffer(AudioBuffer *buffer, int volume, unsigned int fade);
bool hasId(uint8_t id) const { return fragment.id == id; };
void setFragment(const char * filename, uint8_t repeat, uint8_t id)
{
fragment = AudioFragment(filename, repeat, id);
}
void stop(uint8_t id)
{
if (fragment.id == id) {
fragment.clear();
}
}
private:
AudioFragment fragment;
struct {
@ -188,134 +229,227 @@ class WavContext {
uint8_t resampleRatio;
uint16_t readSize;
} state;
inline void clear()
{
fragment.clear();
}
int mixBuffer(AudioBuffer *buffer, int volume, unsigned int fade);
};
class MixedContext {
public:
union {
AudioFragment fragment;
ToneContext tone;
WavContext wav;
};
int mixBuffer(AudioBuffer *buffer, int volume, unsigned int fade);
};
void audioPushBuffer(AudioBuffer * buffer);
class AudioQueue {
friend void audioTask(void* pdata);
#if defined(SIMU_AUDIO)
friend void * audioThread(void *);
#endif
#if defined(CLI)
friend void printAudioVars();
#endif
public:
AudioQueue();
void start();
void playTone(uint16_t freq, uint16_t len, uint16_t pause=0, uint8_t flags=0, int8_t freqIncr=0);
void playFile(const char *filename, uint8_t flags=0, uint8_t id=0);
void stopPlay(uint8_t id);
void stopAll();
void flush();
void pause(uint16_t tLen);
void stopSD();
bool isPlaying(uint8_t id);
bool started()
MixedContext()
{
return state;
clear();
}
bool empty()
void setFragment(const AudioFragment * frag)
{
return ridx == widx;
if (frag) {
fragment = *frag;
}
}
inline AudioBuffer * getNextFilledBuffer()
inline void clear()
{
if (audioBuffers[bufferRIdx].state == AUDIO_BUFFER_PLAYING) {
audioBuffers[bufferRIdx].state = AUDIO_BUFFER_FREE;
bufferRIdx = nextBufferIdx(bufferRIdx);
tone.clear(); // the biggest member of the union
}
uint8_t idx = bufferRIdx;
do {
AudioBuffer * buffer = &audioBuffers[idx];
if (buffer->state == AUDIO_BUFFER_FILLED) {
buffer->state = AUDIO_BUFFER_PLAYING;
bufferRIdx = idx;
return buffer;
}
idx = nextBufferIdx(idx);
} while (idx != bufferWIdx); //this fixes a bug if all buffers are filled
bool isEmpty() const { return fragment.type == FRAGMENT_EMPTY; };
bool isTone() const { return fragment.type == FRAGMENT_TONE; };
bool isFile() const { return fragment.type == FRAGMENT_FILE; };
bool hasId(uint8_t id) const { return fragment.id == id; };
return NULL;
}
bool filledAtleast(int noBuffers)
int mixBuffer(AudioBuffer *buffer, int toneVolume, int wavVolume, unsigned int fade)
{
int count = 0;
for(int n= 0; n<AUDIO_BUFFER_COUNT; ++n) {
if (audioBuffers[n].state == AUDIO_BUFFER_FILLED) {
if (++count >= noBuffers) {
return true;
if (isTone()) return tone.mixBuffer(buffer, toneVolume, fade);
else if (isFile()) return wav.mixBuffer(buffer, wavVolume, fade);
return 0;
}
private:
union {
AudioFragment fragment; // a hack: fragment is used to access the fragment members of tone and wav
ToneContext tone;
WavContext wav;
};
};
class AudioBufferFifo {
#if defined(CLI)
friend void printAudioVars();
#endif
private:
volatile uint8_t readIdx;
volatile uint8_t writeIdx;
volatile bool bufferFull;
// readIdx == writeIdx -> buffer empty
// readIdx == writeIdx + 1 -> buffer full
inline uint8_t nextBufferIdx(uint8_t idx) const
{
return (idx >= AUDIO_BUFFER_COUNT-1 ? 0 : idx+1);
}
bool full() const
{
return bufferFull;
}
bool empty() const
{
return (readIdx == writeIdx) && !bufferFull;
}
uint8_t used() const
{
return bufferFull ? AUDIO_BUFFER_COUNT : writeIdx - readIdx;
}
public:
AudioBufferFifo() : readIdx(0), writeIdx(0), bufferFull(false)
{
memset(audioBuffers, 0, sizeof(audioBuffers));
}
// returns an empty buffer to be filled with data and put back into FIFO with audioPushBuffer()
AudioBuffer * getEmptyBuffer() const
{
return full() ? 0 : &audioBuffers[writeIdx];
}
// puts filled buffer into FIFO
void audioPushBuffer()
{
// AudioBuffer * buffer = &audioBuffers[writeIdx];
audioDisableIrq();
writeIdx = nextBufferIdx(writeIdx);
bufferFull = (writeIdx == readIdx);
audioEnableIrq();
// buffer->state = AUDIO_BUFFER_FILLED;
}
// returns a pointer to the audio buffer to be played
const AudioBuffer * getNextFilledBuffer() const
{
return empty() ? 0 : &audioBuffers[readIdx];
}
// frees the last played buffer
void freeNextFilledBuffer()
{
audioDisableIrq();
readIdx = nextBufferIdx(readIdx);
bufferFull = false;
audioEnableIrq();
}
bool filledAtleast(int noBuffers) const
{
return used() >= noBuffers;
}
};
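AudioBufferFifo replaces the per-buffer state field with a read index, a write index and a full flag. The mixer asks for an empty slot, fills it, then pushes it; the playback side pulls the next filled slot and frees it once it has been played. A hedged sketch of both sides of that contract (fillWithSamples() and startPlayback() are placeholders, not functions from this commit):
// Producer side (the mixer task):
AudioBuffer * buf = audioQueue.buffersFifo.getEmptyBuffer();
if (buf) {
  buf->size = fillWithSamples(buf->data);      // placeholder for the mixing step
  audioQueue.buffersFifo.audioPushBuffer();    // commit the slot as filled
}
// Consumer side (DAC driver / DMA completion):
const AudioBuffer * next = audioQueue.buffersFifo.getNextFilledBuffer();
if (next) {
  startPlayback(next->data, next->size);       // placeholder for the hardware transfer
  // ... and once the transfer has completed:
  audioQueue.buffersFifo.freeNextFilledBuffer();
}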
class AudioFragmentFifo
{
#if defined(CLI)
friend void printAudioVars();
#endif
private:
volatile uint8_t ridx;
volatile uint8_t widx;
AudioFragment fragments[AUDIO_QUEUE_LENGTH];
uint8_t nextIdx(uint8_t idx) const
{
return (idx + 1) & (AUDIO_QUEUE_LENGTH - 1);
}
public:
AudioFragmentFifo() : ridx(0), widx(0), fragments() {};
bool hasId(uint8_t id)
{
uint8_t i = ridx;
while (i != widx) {
AudioFragment & fragment = fragments[i];
if (fragment.id == id) return true;
i = nextIdx(i);
}
return false;
}
protected:
bool empty() const
{
return ridx == widx;
}
bool full() const
{
return ridx == nextIdx(widx);
}
void clear()
{
widx = ridx; // clean the queue
}
const AudioFragment * get()
{
if (!empty()) {
const AudioFragment * result = &fragments[ridx];
if (!fragments[ridx].repeat--) {
// repeat is done, move to the next fragment
ridx = nextIdx(ridx);
}
return result;
}
return 0;
}
void push(const AudioFragment & fragment)
{
if (!full()) {
TRACE("fragment %d at %d", fragment.type, widx);
fragments[widx] = fragment;
widx = nextIdx(widx);
}
}
};
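AudioFragmentFifo relies on AUDIO_QUEUE_LENGTH being a power of two (defined as 16 earlier in this file), so nextIdx() can wrap with a bitwise AND instead of a modulo, and get() only advances ridx once the fragment's repeat count is exhausted. The masking trick in isolation:
// With AUDIO_QUEUE_LENGTH == 16, (idx + 1) & 15 equals (idx + 1) % 16 without a division:
//   idx = 3  -> 4
//   idx = 15 -> 0   (wrap-around)
uint8_t next = (idx + 1) & (AUDIO_QUEUE_LENGTH - 1);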
class AudioQueue {
#if defined(SIMU_AUDIO)
friend void fillAudioBuffer(void *, uint8_t *, int);
#endif
#if defined(CLI)
friend void printAudioVars();
#endif
public:
AudioQueue();
void start() { _started = true; };
void playTone(uint16_t freq, uint16_t len, uint16_t pause=0, uint8_t flags=0, int8_t freqIncr=0);
void playFile(const char *filename, uint8_t flags=0, uint8_t id=0);
void stopPlay(uint8_t id);
void stopAll();
void flush();
void pause(uint16_t tLen);
void stopSD();
bool isPlaying(uint8_t id);
bool isEmpty() const { return fragmentsFifo.empty(); };
void wakeup();
bool started() const { return _started; };
volatile bool state;
uint8_t ridx;
uint8_t widx;
AudioFragment fragments[AUDIO_QUEUE_LENGTH];
AudioBufferFifo buffersFifo;
private:
volatile bool _started;
MixedContext normalContext;
WavContext backgroundContext;
ToneContext priorityContext;
ToneContext varioContext;
uint8_t bufferRIdx;
uint8_t bufferWIdx;
inline uint8_t nextBufferIdx(uint8_t idx)
{
return (idx == AUDIO_BUFFER_COUNT-1 ? 0 : idx+1);
}
inline AudioBuffer * getEmptyBuffer()
{
AudioBuffer * buffer = &audioBuffers[bufferWIdx];
if (buffer->state == AUDIO_BUFFER_FREE)
return buffer;
else
return NULL;
}
AudioFragmentFifo fragmentsFifo;
};
extern AudioQueue audioQueue;

View file

@ -517,22 +517,21 @@ extern OS_MutexID audioMutex;
void printAudioVars()
{
for(int n = 0; n < AUDIO_BUFFER_COUNT; n++) {
serialPrint("Audio Buffer %d: size: %u, state: %u, ", n, (uint32_t)audioBuffers[n].size, (uint32_t)audioBuffers[n].state);
serialPrint("Audio Buffer %d: size: %u, ", n, (uint32_t)audioBuffers[n].size);
dump((uint8_t *)audioBuffers[n].data, 32);
}
serialPrint("fragments:");
for(int n = 0; n < AUDIO_QUEUE_LENGTH; n++) {
serialPrint("%d: type %u: id: %u, repeat: %u, ", n, (uint32_t)audioQueue.fragments[n].type,
(uint32_t)audioQueue.fragments[n].id,
(uint32_t)audioQueue.fragments[n].repeat);
if ( audioQueue.fragments[n].type == FRAGMENT_FILE) {
serialPrint(" file: %s", audioQueue.fragments[n].file);
serialPrint("%d: type %u: id: %u, repeat: %u, ", n, (uint32_t)audioQueue.fragmentsFifo.fragments[n].type,
(uint32_t)audioQueue.fragmentsFifo.fragments[n].id,
(uint32_t)audioQueue.fragmentsFifo.fragments[n].repeat);
if ( audioQueue.fragmentsFifo.fragments[n].type == FRAGMENT_FILE) {
serialPrint(" file: %s", audioQueue.fragmentsFifo.fragments[n].file);
}
}
serialPrint("audioQueue:");
serialPrint(" ridx: %d, widx: %d", audioQueue.ridx, audioQueue.widx);
serialPrint(" bufferRIdx: %d, bufferWIdx: %d", audioQueue.bufferRIdx, audioQueue.bufferWIdx);
serialPrint("FragmentFifo: ridx: %d, widx: %d", audioQueue.fragmentsFifo.ridx, audioQueue.fragmentsFifo.widx);
serialPrint("audioQueue: readIdx: %d, writeIdx: %d, full: %d", audioQueue.buffersFifo.readIdx, audioQueue.buffersFifo.writeIdx, audioQueue.buffersFifo.bufferFull);
serialPrint("normalContext: %u", (uint32_t)audioQueue.normalContext.fragment.type);

View file

@ -227,7 +227,6 @@ const char * debugTimerNames[DEBUG_TIMERS_COUNT] = {
,"Audio int. " // debugTimerAudioIterval
,"Audio dur. " // debugTimerAudioDuration
," A. consume" // debugTimerAudioConsume,
," A push " // debugTimerAudioPush,
};

View file

@ -338,7 +338,6 @@ enum DebugTimers {
debugTimerAudioIterval,
debugTimerAudioDuration,
debugTimerAudioConsume,
debugTimerAudioPush,
DEBUG_TIMERS_COUNT
};

View file

@ -461,7 +461,7 @@ void evalFunctions()
if (isRepeatDelayElapsed(functions, functionsContext, i)) {
if (!IS_PLAYING(PLAY_INDEX)) {
if (CFN_FUNC(cfn) == FUNC_PLAY_SOUND) {
if (audioQueue.empty()) {
if (audioQueue.isEmpty()) {
AUDIO_PLAY(AU_SPECIAL_SOUND_FIRST + CFN_PARAM(cfn));
}
}

View file

@ -364,7 +364,7 @@ uint8_t * currentBuffer = NULL;
uint32_t currentSize = 0;
int16_t newVolume = -1;
void audioSetCurrentBuffer(AudioBuffer * buffer)
void audioSetCurrentBuffer(const AudioBuffer * buffer)
{
currentBuffer = (uint8_t *)buffer->data;
currentSize = buffer->size * 2;
@ -379,34 +379,21 @@ void audioConsumeCurrentBuffer()
newVolume = -1;
}
if (!currentBuffer) {
audioSetCurrentBuffer(audioQueue.buffersFifo.getNextFilledBuffer());
}
if (currentBuffer) {
uint32_t written = audioSpiWriteData(currentBuffer, currentSize);
currentBuffer += written;
currentSize -= written;
if (currentSize == 0) {
AudioBuffer * buffer = audioQueue.getNextFilledBuffer();
if (buffer) {
audioSetCurrentBuffer(buffer);
}
else {
audioQueue.buffersFifo.freeNextFilledBuffer();
currentBuffer = NULL;
currentSize = 0;
}
}
}
}
void audioPushBuffer(AudioBuffer * buffer)
{
if (!currentBuffer) {
buffer->state = AUDIO_BUFFER_PLAYING;
audioSetCurrentBuffer(buffer);
audioConsumeCurrentBuffer();
}
else {
buffer->state = AUDIO_BUFFER_FILLED;
}
}
// adjust this value for a volume level just above the silence
// values is attenuation in dB, higher value - less volume

View file

@ -364,18 +364,11 @@ struct SimulatorAudio {
bool threadRunning;
pthread_t threadPid;
} simuAudio;
void audioPushBuffer(AudioBuffer * buffer)
{
buffer->state = AUDIO_BUFFER_FILLED;
}
#endif
#if defined(PCBHORUS)
void audioConsumeCurrentBuffer()
{
}
#endif
#if defined(MASTER_VOLUME)
void setScaledVolume(uint8_t volume)
@ -395,7 +388,7 @@ int32_t getVolume()
#endif
#if defined(SIMU_AUDIO) && defined(CPUARM)
void copyBuffer(uint8_t * dest, uint16_t * buff, unsigned int samples)
void copyBuffer(uint8_t * dest, const uint16_t * buff, unsigned int samples)
{
for(unsigned int i=0; i<samples; i++) {
int sample = ((int32_t)(uint32_t)(buff[i]) - 0x8000); // conversion from uint16_t
@ -409,22 +402,25 @@ void fillAudioBuffer(void *udata, Uint8 *stream, int len)
SDL_memset(stream, 0, len);
if (simuAudio.leftoverLen) {
copyBuffer(stream, simuAudio.leftoverData, simuAudio.leftoverLen);
len -= simuAudio.leftoverLen*2;
stream += simuAudio.leftoverLen*2;
simuAudio.leftoverLen = 0;
int len1 = min(len/2, simuAudio.leftoverLen);
copyBuffer(stream, simuAudio.leftoverData, len1);
len -= len1*2;
stream += len1*2;
simuAudio.leftoverLen -= len1;
// putchar('l');
if (simuAudio.leftoverLen) return; // buffer fully filled
}
if (audioQueue.filledAtleast(len/(AUDIO_BUFFER_SIZE*2)+1) ) {
if (audioQueue.buffersFifo.filledAtleast(len/(AUDIO_BUFFER_SIZE*2)+1) ) {
while(true) {
AudioBuffer *nextBuffer = audioQueue.getNextFilledBuffer();
const AudioBuffer * nextBuffer = audioQueue.buffersFifo.getNextFilledBuffer();
if (nextBuffer) {
if (len >= nextBuffer->size*2) {
copyBuffer(stream, nextBuffer->data, nextBuffer->size);
stream += nextBuffer->size*2;
len -= nextBuffer->size*2;
// putchar('+');
audioQueue.buffersFifo.freeNextFilledBuffer();
}
else {
//partial
@ -433,6 +429,7 @@ void fillAudioBuffer(void *udata, Uint8 *stream, int len)
memcpy(simuAudio.leftoverData, &nextBuffer->data[len/2], simuAudio.leftoverLen*2);
len = 0;
// putchar('p');
audioQueue.buffersFifo.freeNextFilledBuffer();
break;
}
}
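fillAudioBuffer() now frees each FIFO buffer as soon as it has been copied out and, when the SDL callback wants less data than a buffer holds, keeps the remainder in leftoverData for the next callback. The carry-over pattern in isolation (names are illustrative, not the simulator's actual helpers; memcpy needs <cstring>):
// Copy as much of 'src' as fits into 'out'; stash the remainder for the next call.
unsigned int copyWithLeftover(int16_t * out, unsigned int outLen,
                              const int16_t * src, unsigned int srcLen,
                              int16_t * leftover, unsigned int & leftoverLen)
{
  unsigned int n = (srcLen <= outLen) ? srcLen : outLen;
  memcpy(out, src, n * sizeof(int16_t));
  leftoverLen = srcLen - n;                              // 0 when everything fitted
  memcpy(leftover, src + n, leftoverLen * sizeof(int16_t));
  return n;
}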

View file

@ -19,6 +19,7 @@
*/
#include "opentx.h"
const AudioBuffer * nextBuffer = 0;
const int8_t volumeScale[VOLUME_LEVEL_MAX+1] =
{
@ -93,17 +94,24 @@ void dacInit()
NVIC_EnableIRQ(DACC_IRQn) ;
}
void audioPushBuffer(AudioBuffer *buffer)
void audioConsumeCurrentBuffer()
{
buffer->state = AUDIO_BUFFER_FILLED;
if (nextBuffer == 0) {
nextBuffer = audioQueue.buffersFifo.getNextFilledBuffer();
if (nextBuffer) {
dacStart();
}
}
}
extern "C" void DAC_IRQHandler()
{
uint32_t sr = DACC->DACC_ISR;
if (sr & DACC_ISR_ENDTX) {
AudioBuffer *nextBuffer = audioQueue.getNextFilledBuffer();
if (nextBuffer) audioQueue.buffersFifo.freeNextFilledBuffer();
nextBuffer = audioQueue.buffersFifo.getNextFilledBuffer();
if (nextBuffer) {
// Try the first PDC buffer
if ((DACC->DACC_TCR == 0) && (DACC->DACC_TNCR == 0)) {

View file

@ -23,7 +23,7 @@
void audioInit( void ) ;
void audioEnd( void ) ;
#define audioConsumeCurrentBuffer()
void audioConsumeCurrentBuffer();
#define audioDisableIrq() __disable_irq()
#define audioEnableIrq() __enable_irq()

View file

@ -21,7 +21,7 @@
#include "opentx.h"
#if !defined(SIMU)
bool dacIdle = true;
const AudioBuffer * nextBuffer = 0;
void setSampleRate(uint32_t frequency)
{
@ -77,22 +77,22 @@ void dacInit()
NVIC_SetPriority(AUDIO_DMA_Stream_IRQn, 7);
}
void audioPushBuffer(AudioBuffer * buffer)
void audioConsumeCurrentBuffer()
{
if (dacIdle) {
dacIdle = false;
buffer->state = AUDIO_BUFFER_PLAYING;
if (nextBuffer == 0) {
nextBuffer = audioQueue.buffersFifo.getNextFilledBuffer();
if (nextBuffer) {
AUDIO_DMA_Stream->CR &= ~DMA_SxCR_EN ; // Disable DMA channel
AUDIO_DMA->HIFCR = DMA_HIFCR_CTCIF5 | DMA_HIFCR_CHTIF5 | DMA_HIFCR_CTEIF5 | DMA_HIFCR_CDMEIF5 | DMA_HIFCR_CFEIF5 ; // Write ones to clear bits
AUDIO_DMA_Stream->M0AR = CONVERT_PTR_UINT(buffer->data);
AUDIO_DMA_Stream->NDTR = buffer->size;
AUDIO_DMA_Stream->M0AR = CONVERT_PTR_UINT(nextBuffer->data);
AUDIO_DMA_Stream->NDTR = nextBuffer->size;
AUDIO_DMA_Stream->CR |= DMA_SxCR_EN | DMA_SxCR_TCIE ; // Enable DMA channel and interrupt
DAC->SR = DAC_SR_DMAUDR1 ; // Write 1 to clear flag
DAC->CR |= DAC_CR_EN1 | DAC_CR_DMAEN1 ; // Enable DAC
}
else {
buffer->state = AUDIO_BUFFER_FILLED;
}
}
void dacStart()
@ -140,7 +140,9 @@ extern "C" void AUDIO_DMA_Stream_IRQHandler()
AUDIO_DMA->HIFCR = DMA_HIFCR_CTCIF5 | DMA_HIFCR_CHTIF5 | DMA_HIFCR_CTEIF5 | DMA_HIFCR_CDMEIF5 | DMA_HIFCR_CFEIF5 ; // Write ones to clear flags
AUDIO_DMA_Stream->CR &= ~DMA_SxCR_EN ; // Disable DMA channel
AudioBuffer * nextBuffer = audioQueue.getNextFilledBuffer();
if (nextBuffer) audioQueue.buffersFifo.freeNextFilledBuffer();
nextBuffer = audioQueue.buffersFifo.getNextFilledBuffer();
if (nextBuffer) {
AUDIO_DMA_Stream->M0AR = CONVERT_PTR_UINT(nextBuffer->data);
AUDIO_DMA_Stream->NDTR = nextBuffer->size;
@ -148,8 +150,5 @@ extern "C" void AUDIO_DMA_Stream_IRQHandler()
AUDIO_DMA_Stream->CR |= DMA_SxCR_EN | DMA_SxCR_TCIE ; // Enable DMA channel
DAC->SR = DAC_SR_DMAUDR1; // Write 1 to clear flag
}
else {
dacIdle = true;
}
}
#endif // #if !defined(SIMU)
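Both ARM drivers now follow the same pull model: audioConsumeCurrentBuffer() only starts a transfer when none is in flight, and the completion interrupt frees the finished buffer before fetching the next one from the FIFO. A hardware-agnostic outline of that pattern (startTransfer() stands in for the register setup above and is not a real function in this commit):
const AudioBuffer * nextBuffer = 0;
void audioConsumeCurrentBuffer()
{
  if (nextBuffer == 0) {
    nextBuffer = audioQueue.buffersFifo.getNextFilledBuffer();
    if (nextBuffer) {
      startTransfer(nextBuffer->data, nextBuffer->size);  // kick off the DAC/DMA transfer
    }
  }
}
void onTransferComplete()  // called from the DMA / DAC interrupt handler
{
  if (nextBuffer) audioQueue.buffersFifo.freeNextFilledBuffer();
  nextBuffer = audioQueue.buffersFifo.getNextFilledBuffer();
  if (nextBuffer) startTransfer(nextBuffer->data, nextBuffer->size);
}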

View file

@ -459,7 +459,7 @@ void setScaledVolume(uint8_t volume);
void setVolume(uint8_t volume);
int32_t getVolume(void);
#endif
#define audioConsumeCurrentBuffer()
void audioConsumeCurrentBuffer();
#define audioDisableIrq() __disable_irq()
#define audioEnableIrq() __enable_irq()