diff --git a/apps/app_speech_utils.c b/apps/app_speech_utils.c
index dc80dd95f1e683176329bc9c3f8e51336e91b06b..d7efd564b0b5a52fedfa914045811ffde9b7a412 100644
--- a/apps/app_speech_utils.c
+++ b/apps/app_speech_utils.c
@@ -614,39 +614,43 @@ static int speech_background(struct ast_channel *chan, void *data)
 				time(&start);
 				started = 1;
 			}
-			/* Deal with audio frames if present */
-			if (f != NULL && f->frametype == AST_FRAME_VOICE) {
+			/* Write audio frame out to speech engine if no DTMF has been received */
+			if (!strlen(dtmf) && f != NULL && f->frametype == AST_FRAME_VOICE) {
 				ast_speech_write(speech, f->data, f->datalen);
 			}
 			break;
 		case AST_SPEECH_STATE_WAIT:
 			/* Cue up waiting sound if not already playing */
-			if (chan->stream == NULL) {
-				if (speech->processing_sound != NULL) {
-					if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) {
-						speech_streamfile(chan, speech->processing_sound, chan->language);
-					}
-				}
-			} else if (chan->streamid == -1 && chan->timingfunc == NULL) {
-				ast_stopstream(chan);
-				if (speech->processing_sound != NULL) {
-					if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) {
-						speech_streamfile(chan, speech->processing_sound, chan->language);
-					}
-				}
-			}
+			if (!strlen(dtmf)) {
+				if (chan->stream == NULL) {
+					if (speech->processing_sound != NULL) {
+						if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) {
+							speech_streamfile(chan, speech->processing_sound, chan->language);
+						}
+					}
+				} else if (chan->streamid == -1 && chan->timingfunc == NULL) {
+					ast_stopstream(chan);
+					if (speech->processing_sound != NULL) {
+						if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) {
+							speech_streamfile(chan, speech->processing_sound, chan->language);
+						}
+					}
+				}
+			}
 			break;
 		case AST_SPEECH_STATE_DONE:
-			/* Copy to speech structure the results, if available */
-			speech->results = ast_speech_results_get(speech);
-			/* Now that we are done... let's switch back to not ready state */
+			/* Now that we are done... let's switch back to not ready state */
 			ast_speech_change_state(speech, AST_SPEECH_STATE_NOT_READY);
-			/* Break out of our background too */
-			done = 1;
-			/* Stop audio playback */
-			if (chan->stream != NULL) {
-				ast_stopstream(chan);
-			}
+			if (!strlen(dtmf)) {
+				/* Copy to speech structure the results, if available */
+				speech->results = ast_speech_results_get(speech);
+				/* Break out of our background too */
+				done = 1;
+				/* Stop audio playback */
+				if (chan->stream != NULL) {
+					ast_stopstream(chan);
+				}
+			}
 			break;
 		default:
 			break;
@@ -688,7 +692,7 @@ static int speech_background(struct ast_channel *chan, void *data)
 		}
 	}
 
-	if (strlen(dtmf) > 0 && speech->results == NULL) {
+	if (strlen(dtmf)) {
 		/* We sort of make a results entry */
 		speech->results = ast_calloc(1, sizeof(*speech->results));
 		if (speech->results != NULL) {