diff --git a/bridges/bridge_softmix.c b/bridges/bridge_softmix.c
index eb476932f10062d3257a3b1f68a1d1fd3d9114fa..4350905fd49df0278d65871e3367282b2654887e 100644
--- a/bridges/bridge_softmix.c
+++ b/bridges/bridge_softmix.c
@@ -429,15 +429,20 @@ static enum ast_bridge_write_result softmix_bridge_write(struct ast_bridge *brid
 		bridge_channel->tech_args.silence_threshold :
 		DEFAULT_SOFTMIX_SILENCE_THRESHOLD;
 	char update_talking = -1; /* if this is set to 0 or 1, tell the bridge that the channel has started or stopped talking. */
+	int res = AST_BRIDGE_WRITE_SUCCESS;
 
 	/* Only accept audio frames, all others are unsupported */
 	if (frame->frametype == AST_FRAME_DTMF_END || frame->frametype == AST_FRAME_DTMF_BEGIN) {
 		softmix_pass_dtmf(bridge, bridge_channel, frame);
-		return AST_BRIDGE_WRITE_SUCCESS;
+		goto no_audio;
 	} else if (frame->frametype != AST_FRAME_VOICE) {
-		return AST_BRIDGE_WRITE_UNSUPPORTED;
+		res = AST_BRIDGE_WRITE_UNSUPPORTED;
+		goto no_audio;
+	} else if (frame->datalen == 0) {
+		goto no_audio;
 	}
+
 	/* If we made it here, we are going to write the frame into the conference */
 	ast_mutex_lock(&sc->lock);
 	ast_dsp_silence(sc->dsp, frame, &totalsilence);
 
@@ -480,7 +485,20 @@ static enum ast_bridge_write_result softmix_bridge_write(struct ast_bridge *brid
 		ast_bridge_notify_talking(bridge, bridge_channel, update_talking);
 	}
 
-	return AST_BRIDGE_WRITE_SUCCESS;
+	return res;
+
+no_audio:
+	/* Even though the frame is not being written into the conference because it is not audio,
+	 * we should use this opportunity to check to see if a frame is ready to be written out from
+	 * the conference to the channel. */
+	ast_mutex_lock(&sc->lock);
+	if (sc->have_frame) {
+		ast_write(bridge_channel->chan, &sc->write_frame);
+		sc->have_frame = 0;
+	}
+	ast_mutex_unlock(&sc->lock);
+
+	return res;
 }
 /*!
 \brief Function called when the channel's thread is poked */
diff --git a/funcs/func_jitterbuffer.c b/funcs/func_jitterbuffer.c
index e613e6a23b2b8f434d5bca9079f4209c4080fb9b..dd1de604ad13db3889eb0eb8e52bf0e122cd3b45 100644
--- a/funcs/func_jitterbuffer.c
+++ b/funcs/func_jitterbuffer.c
@@ -203,6 +203,7 @@ static struct ast_frame *hook_event_cb(struct ast_channel *chan, struct ast_fram
 	struct jb_framedata *framedata = data;
 	struct timeval now_tv;
 	unsigned long now;
+	int putframe = 0; /* signifies if audio frame was placed into the buffer or not */
 
 	switch (event) {
 	case AST_FRAMEHOOK_EVENT_READ:
@@ -249,15 +250,31 @@ static struct ast_frame *hook_event_cb(struct ast_channel *chan, struct ast_fram
 		if (res == AST_JB_IMPL_OK) {
 			frame = &ast_null_frame;
 		}
+		putframe = 1;
 	}
 
 	if (frame->frametype == AST_FRAME_NULL) {
 		int res;
 		long next = framedata->jb_impl->next(framedata->jb_obj);
 
+		/* If now is earlier than the next expected output frame
+		 * from the jitterbuffer we may choose to pass on retrieving
+		 * a frame during this read iteration. The only exception
+		 * to this rule is when an audio frame is placed into the buffer
+		 * and the time for the next frame to come out of the buffer is
+		 * at least within the timer_interval of the next output frame. By
+		 * doing this we are able to feed off the timing of the input frames
+		 * and only rely on our jitterbuffer timer when frames are dropped.
+		 * During testing, this hybrid form of timing gave more reliable results. */
 		if (now < next) {
-			return frame;
+			long int diff = next - now;
+			if (!putframe) {
+				return frame;
+			} else if (diff >= framedata->timer_interval) {
+				return frame;
+			}
 		}
+
 		res = framedata->jb_impl->get(framedata->jb_obj, &frame, now, framedata->timer_interval);
 		switch (res) {
 		case AST_JB_IMPL_OK: