static int generator_force(const void *data)
{
/* Called if generator doesn't have data */
void *tmp;
int res;
int (*generate)(struct ast_channel *chan, void *tmp, int datalen, int samples) = NULL;
struct ast_channel *chan = (struct ast_channel *)data;
ast_channel_lock(chan);
tmp = ast_channel_generatordata(chan);
ast_channel_generatordata_set(chan, NULL);
if (ast_channel_generator(chan))
generate = ast_channel_generator(chan)->generate;
ast_channel_unlock(chan);
if (!tmp || !generate) {
return 0;
}
res = generate(chan, tmp, 0, ast_format_get_sample_rate(ast_channel_writeformat(chan)) / 50);
ast_channel_lock(chan);
if (ast_channel_generator(chan) && generate == ast_channel_generator(chan)->generate) {
ast_channel_generatordata_set(chan, tmp);
}
ast_channel_unlock(chan);
if (res) {
ast_debug(1, "Auto-deactivating generator\n");
ast_deactivate_generator(chan);
}
return 0;
}
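/*! \brief Install a generator on a channel: release any previous generator's data, allocate the new one's, and drive it from the channel timer via generator_force(). */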
int ast_activate_generator(struct ast_channel *chan, struct ast_generator *gen, void *params)
{
int res = 0;
void *generatordata = NULL;

ast_channel_lock(chan);
if (ast_channel_generatordata(chan)) {
struct ast_generator *generator_old = ast_channel_generator(chan);
if (generator_old && generator_old->release) {
generator_old->release(chan, ast_channel_generatordata(chan));
}
}
if (gen->alloc && !(generatordata = gen->alloc(chan, params))) {
res = -1;
}
ast_channel_generatordata_set(chan, generatordata);
if (!res) {
ast_settimeout(chan, 50, generator_force, chan);
ast_channel_generator_set(chan, gen);
}
ast_channel_unlock(chan);

return res;
}
/*! \brief Wait for x amount of time on a file descriptor to have input. */
int ast_waitfor_n_fd(int *fds, int n, int *ms, int *exception)
{
int winner = -1;

ast_waitfor_nandfds(NULL, 0, fds, n, exception, &winner, ms);
return winner;
}
/*! \brief Wait for x amount of time on a file descriptor to have input. */
#ifdef HAVE_EPOLL
static struct ast_channel *ast_waitfor_nandfds_classic(struct ast_channel **c, int n, int *fds, int nfds,
int *exception, int *outfd, int *ms)
#else
struct ast_channel *ast_waitfor_nandfds(struct ast_channel **c, int n, int *fds, int nfds,
int *exception, int *outfd, int *ms)
#endif
{
struct timeval start = { 0, 0 };
struct ast_channel *winner = NULL;
int res = 0, sz;
struct pollfd *pfds = NULL;
long rms;
int x, y, max;
struct timeval now = { 0, 0 };
struct timeval whentohangup = { 0, 0 }, diff;
struct fdmap {
int chan;
int fdno;
} *fdmap = NULL;
if ((sz = n * AST_MAX_FDS + nfds)) {
pfds = ast_alloca(sizeof(*pfds) * sz);
fdmap = ast_alloca(sizeof(*fdmap) * sz);
} else {
/* nothing to allocate and no FDs to check */
return NULL;
}
for (x = 0; x < n; x++) {
ast_channel_lock(c[x]);
if (!ast_tvzero(*ast_channel_whentohangup(c[x]))) {
if (ast_tvzero(whentohangup))
now = ast_tvnow();
diff = ast_tvsub(*ast_channel_whentohangup(c[x]), now);
if (diff.tv_sec < 0 || ast_tvzero(diff)) {
ast_test_suite_event_notify("HANGUP_TIME", "Channel: %s", ast_channel_name(c[x]));
ast_channel_softhangup_internal_flag_add(c[x], AST_SOFTHANGUP_TIMEOUT);
ast_channel_unlock(c[x]);
return c[x];
}
if (ast_tvzero(whentohangup) || ast_tvcmp(diff, whentohangup) < 0)
whentohangup = diff;
}
ast_channel_unlock(c[x]);
}
/* Wait full interval */
rms = *ms;
/* INT_MAX, not LONG_MAX, because it matters on 64-bit */
if (!ast_tvzero(whentohangup) && whentohangup.tv_sec < INT_MAX / 1000) {
rms = whentohangup.tv_sec * 1000 + whentohangup.tv_usec / 1000; /* timeout in milliseconds */
if (*ms >= 0 && *ms < rms) { /* original *ms still smaller */
rms = *ms;
}
} else if (!ast_tvzero(whentohangup) && rms < 0) {
/* Tiny corner case... call would need to last >24 days */
rms = INT_MAX;
}
/*
* Build the pollfd array, putting the channels' fds first,
* followed by individual fds. Order is important because
* individual fd's must have priority over channel fds.
*/
max = 0;
for (x = 0; x < n; x++) {
for (y = 0; y < AST_MAX_FDS; y++) {
fdmap[max].fdno = y; /* fd y is linked to this pfds */
fdmap[max].chan = x; /* channel x is linked to this pfds */
max += ast_add_fd(&pfds[max], ast_channel_fd(c[x], y));
}
ast_channel_lock(c[x]);
CHECK_BLOCKING(c[x]);
ast_channel_unlock(c[x]);
}
for (x = 0; x < nfds; x++) {
fdmap[max].chan = -1;
max += ast_add_fd(&pfds[max], fds[x]);
}
start = ast_tvnow();
if (sizeof(int) == 4) { /* XXX fix timeout > 600000 on linux x86-32 */
do {
int kbrms = rms;
if (kbrms > 600000) {
kbrms = 600000;
}
res = ast_poll(pfds, max, kbrms);
if (!res) {
rms -= kbrms;
}
} while (!res && (rms > 0));
} else {
res = ast_poll(pfds, max, rms);
}
for (x = 0; x < n; x++) {
ast_channel_lock(c[x]);
ast_clear_flag(ast_channel_flags(c[x]), AST_FLAG_BLOCKING);
ast_channel_unlock(c[x]);
}
if (res < 0) { /* Simulate a timeout if we were interrupted */
*ms = -1;
return NULL;
}
if (!ast_tvzero(whentohangup)) { /* if we have a timeout, check who expired */
now = ast_tvnow();
for (x = 0; x < n; x++) {
if (!ast_tvzero(*ast_channel_whentohangup(c[x])) && ast_tvcmp(*ast_channel_whentohangup(c[x]), now) <= 0) {
ast_test_suite_event_notify("HANGUP_TIME", "Channel: %s", ast_channel_name(c[x]));
ast_channel_softhangup_internal_flag_add(c[x], AST_SOFTHANGUP_TIMEOUT);
if (winner == NULL) {
winner = c[x];
}
}
}
}
if (res == 0) { /* no fd ready, reset timeout and done */
*ms = 0; /* XXX use 0 since we may not have an exact timeout. */
return winner;
}
/*
* Then check if any channel or fd has a pending event.
* Remember to check channels first and fds last, as they
* must have priority on setting 'winner'
*/
for (x = 0; x < max; x++) {
res = pfds[x].revents;
if (!res) {
continue;
}
if (fdmap[x].chan >= 0) { /* this is a channel */
winner = c[fdmap[x].chan]; /* override previous winners */
ast_channel_lock(winner);
if (res & POLLPRI) {
ast_set_flag(ast_channel_flags(winner), AST_FLAG_EXCEPTION);
} else {
ast_clear_flag(ast_channel_flags(winner), AST_FLAG_EXCEPTION);
}
ast_channel_fdno_set(winner, fdmap[x].fdno);
ast_channel_unlock(winner);
} else { /* this is an fd */
if (outfd) {
*outfd = pfds[x].fd;
}
if (exception) {
*exception = (res & POLLPRI) ? -1 : 0;
}
winner = NULL;
}
}
if (*ms > 0) {
*ms -= ast_tvdiff_ms(ast_tvnow(), start);
if (*ms < 0) {
*ms = 0;
}
}
return winner;
}
#ifdef HAVE_EPOLL
static struct ast_channel *ast_waitfor_nandfds_simple(struct ast_channel *chan, int *ms)
{
struct timeval start = { 0 , 0 };
int res = 0;
struct epoll_event ev[1];
long diff, rms = *ms;
struct ast_channel *winner = NULL;
struct ast_epoll_data *aed = NULL;
ast_channel_lock(chan);

/* Figure out their timeout */
if (!ast_tvzero(*ast_channel_whentohangup(chan))) {
if ((diff = ast_tvdiff_ms(*ast_channel_whentohangup(chan), ast_tvnow())) < 0) {
/* They should already be hungup! */
ast_channel_softhangup_internal_flag_add(chan, AST_SOFTHANGUP_TIMEOUT);
ast_channel_unlock(chan);
return NULL;
}
/* If this value is smaller then the current one... make it priority */
if (rms > diff) {
rms = diff;
}
}
ast_channel_unlock(chan);
/* Time to make this channel block... */
CHECK_BLOCKING(chan);
start = ast_tvnow();
/* We don't have to add any file descriptors... they are already added, we just have to wait! */
res = epoll_wait(ast_channel_epfd(chan), ev, 1, rms);
/* Stop blocking */
ast_clear_flag(ast_channel_flags(chan), AST_FLAG_BLOCKING);
/* Simulate a timeout if we were interrupted */
if (res < 0) {
*ms = -1;
return NULL;
}
/* If this channel has a timeout see if it expired */
if (!ast_tvzero(*ast_channel_whentohangup(chan))) {
if (ast_tvdiff_ms(ast_tvnow(), *ast_channel_whentohangup(chan)) >= 0) {
ast_channel_softhangup_internal_flag_add(chan, AST_SOFTHANGUP_TIMEOUT);
winner = chan;
}
}
/* No fd ready, reset timeout and be done for now */
if (!res) {
*ms = 0;
return winner;
}
/* See what events are pending */
aed = ev[0].data.ptr;
ast_channel_fdno_set(chan, aed->which);
if (ev[0].events & EPOLLPRI) {
ast_set_flag(ast_channel_flags(chan), AST_FLAG_EXCEPTION);
} else {
ast_clear_flag(ast_channel_flags(chan), AST_FLAG_EXCEPTION);
}
if (*ms > 0) {
*ms -= ast_tvdiff_ms(ast_tvnow(), start);
if (*ms < 0) {
*ms = 0;
}
}
return chan;
}
static struct ast_channel *ast_waitfor_nandfds_complex(struct ast_channel **c, int n, int *ms)
{
struct timeval start = { 0 , 0 };
int res = 0, i;
struct epoll_event ev[25] = { { 0, } };
struct timeval now = { 0, 0 };
long whentohangup = 0, diff = 0, rms = *ms;
struct ast_channel *winner = NULL;
for (i = 0; i < n; i++) {
ast_channel_lock(c[i]);
if (!ast_tvzero(*ast_channel_whentohangup(c[i]))) {
now = ast_tvnow();
if ((diff = ast_tvdiff_ms(*ast_channel_whentohangup(c[i]), now)) < 0) {
ast_channel_softhangup_internal_flag_add(c[i], AST_SOFTHANGUP_TIMEOUT);
ast_channel_unlock(c[i]);
return c[i];
}
if (!whentohangup || diff < whentohangup) {
whentohangup = diff;
}
}
ast_channel_unlock(c[i]);
CHECK_BLOCKING(c[i]);
}
rms = *ms;
if (whentohangup) {
rms = whentohangup;
if (*ms >= 0 && *ms < rms) {
rms = *ms;
}
}
start = ast_tvnow();
res = epoll_wait(ast_channel_epfd(c[0]), ev, 25, rms);
for (i = 0; i < n; i++) {
ast_clear_flag(ast_channel_flags(c[i]), AST_FLAG_BLOCKING);
}
if (res < 0) {
*ms = -1;
return NULL;
}
if (whentohangup) {
now = ast_tvnow();
for (i = 0; i < n; i++) {
if (!ast_tvzero(*ast_channel_whentohangup(c[i])) && ast_tvdiff_ms(now, *ast_channel_whentohangup(c[i])) >= 0) {
ast_channel_softhangup_internal_flag_add(c[i], AST_SOFTHANGUP_TIMEOUT);
winner = c[i];
}
}
}
if (!res) {
*ms = 0;
return winner;
}
for (i = 0; i < res; i++) {
struct ast_epoll_data *aed = ev[i].data.ptr;
if (!aed || !ev[i].events) {
continue;
}
winner = aed->chan;
if (ev[i].events & EPOLLPRI) {
ast_set_flag(ast_channel_flags(winner), AST_FLAG_EXCEPTION);
} else {
ast_clear_flag(ast_channel_flags(winner), AST_FLAG_EXCEPTION);
}
ast_channel_fdno_set(winner, aed->which);
}
if (*ms > 0) {
*ms -= ast_tvdiff_ms(ast_tvnow(), start);
if (*ms < 0) {
*ms = 0;
}
}
return winner;
}
struct ast_channel *ast_waitfor_nandfds(struct ast_channel **c, int n, int *fds, int nfds,
int *exception, int *outfd, int *ms)
{
/* If no epoll file descriptor is available resort to classic nandfds */
if (!n || nfds || ast_channel_epfd(c[0]) == -1) {
return ast_waitfor_nandfds_classic(c, n, fds, nfds, exception, outfd, ms);
} else if (!nfds && n == 1) {
return ast_waitfor_nandfds_simple(c[0], ms);
} else {
return ast_waitfor_nandfds_complex(c, n, ms);
}
}
#endif
struct ast_channel *ast_waitfor_n(struct ast_channel **c, int n, int *ms)
{
return ast_waitfor_nandfds(c, n, NULL, 0, NULL, NULL, ms);
}
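/*! \brief Wait on a single channel. A negative ms means wait indefinitely; this is done by polling in 100000 ms slices until something happens. */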
int ast_waitfor(struct ast_channel *c, int ms)
{
if (ms < 0) {
do {
ms = 100000;
ast_waitfor_nandfds(&c, 1, NULL, 0, NULL, NULL, &ms);
} while (!ms);
} else {
ast_waitfor_nandfds(&c, 1, NULL, 0, NULL, NULL, &ms);
}
return ms;
}
int ast_waitfordigit(struct ast_channel *c, int ms)
{
return ast_waitfordigit_full(c, ms, -1, -1);
}
int ast_settimeout(struct ast_channel *c, unsigned int rate, int (*func)(const void *data), void *data)
{
return ast_settimeout_full(c, rate, func, data, 0);
}
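/*!
* \brief Set the channel timer rate and callback.
* A NULL func clears the timer (rate and data are zeroed); a requested rate
* above the timing module's maximum is capped. When is_ao2_obj is set, a
* reference to data is held while it is installed as the timing data.
*/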
int ast_settimeout_full(struct ast_channel *c, unsigned int rate, int (*func)(const void *data), void *data, unsigned int is_ao2_obj)
{
int res;
unsigned int real_rate = rate, max_rate;

ast_channel_lock(c);

if (ast_channel_timingfd(c) == -1) {
ast_channel_unlock(c);
return -1;
}
if (!func) {
rate = 0;
data = NULL;
}
if (rate && rate > (max_rate = ast_timer_get_max_rate(ast_channel_timer(c)))) {
real_rate = max_rate;
}
ast_debug(1, "Scheduling timer at (%u requested / %u actual) timer ticks per second\n", rate, real_rate);
res = ast_timer_set_rate(ast_channel_timer(c), real_rate);
if (ast_channel_timingdata(c) && ast_test_flag(ast_channel_flags(c), AST_FLAG_TIMINGDATA_IS_AO2_OBJ)) {
ao2_ref(ast_channel_timingdata(c), -1);
}
ast_channel_timingfunc_set(c, func);
ast_channel_timingdata_set(c, data);
if (data && is_ao2_obj) {
ao2_ref(data, 1);
ast_set_flag(ast_channel_flags(c), AST_FLAG_TIMINGDATA_IS_AO2_OBJ);
} else {
ast_clear_flag(ast_channel_flags(c), AST_FLAG_TIMINGDATA_IS_AO2_OBJ);
}
if (func == NULL && rate == 0 && ast_channel_fdno(c) == AST_TIMING_FD) {
/* Clearing the timing func and setting the rate to 0
* means that we don't want to be reading from the timingfd
* any more. Setting c->fdno to -1 means we won't have any
* errant reads from the timingfd, meaning we won't potentially
* miss any important frames.
*/
ast_channel_fdno_set(c, -1);
}

ast_channel_unlock(c);

return res;
}
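/*!
* \brief Wait for a DTMF digit on a channel.
* Returns the digit when a DTMF end frame arrives, 1 if cmdfd became
* readable, 0 on timeout, and -1 on hangup or error. Voice frames are
* written to audiofd when it is a valid descriptor.
*/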
int ast_waitfordigit_full(struct ast_channel *c, int timeout_ms, int audiofd, int cmdfd)
{
struct timeval start = ast_tvnow();
int ms;

/* Stop if we're a zombie or need a soft hangup */
if (ast_test_flag(ast_channel_flags(c), AST_FLAG_ZOMBIE) || ast_check_hangup(c))
return -1;

/* Only look for the end of DTMF, don't bother with the beginning and don't emulate things */
ast_channel_set_flag(c, AST_FLAG_END_DTMF_ONLY);
/* Wait for a digit, no more than timeout_ms milliseconds total.
* Or, wait indefinitely if timeout_ms is <0.
*/
while ((ms = ast_remaining_ms(start, timeout_ms))) {
struct ast_channel *rchan;
int outfd = -1;

errno = 0;
/* While ast_waitfor_nandfds tries to help by reducing the timeout by how much was waited,
* it is unhelpful if it waited less than a millisecond.
*/
rchan = ast_waitfor_nandfds(&c, 1, &cmdfd, (cmdfd > -1) ? 1 : 0, NULL, &outfd, &ms);
if (!rchan && outfd < 0 && ms) {
if (errno == 0 || errno == EINTR)
continue;
ast_log(LOG_WARNING, "Wait failed (%s)\n", strerror(errno));
ast_channel_clear_flag(c, AST_FLAG_END_DTMF_ONLY);
return -1;
} else if (outfd > -1) {
/* The FD we were watching has something waiting */
ast_log(LOG_WARNING, "The FD we were waiting for has something waiting. Waitfordigit returning numeric 1\n");
ast_channel_clear_flag(c, AST_FLAG_END_DTMF_ONLY);
return 1;
} else if (rchan) {
int res;
struct ast_frame *f = ast_read(c);
if (!f) {
ast_channel_clear_flag(c, AST_FLAG_END_DTMF_ONLY);
return -1;
}

switch (f->frametype) {
case AST_FRAME_DTMF_BEGIN:
break;
case AST_FRAME_DTMF_END:
res = f->subclass.integer;
ast_channel_clear_flag(c, AST_FLAG_END_DTMF_ONLY);
return res;
case AST_FRAME_CONTROL:
switch (f->subclass.integer) {
case AST_CONTROL_HANGUP:
ast_frfree(f);
ast_channel_clear_flag(c, AST_FLAG_END_DTMF_ONLY);
return -1;
case AST_CONTROL_STREAM_STOP:
case AST_CONTROL_STREAM_SUSPEND:
case AST_CONTROL_STREAM_RESTART:
case AST_CONTROL_STREAM_REVERSE:
case AST_CONTROL_STREAM_FORWARD:
/* Fall-through and treat as if it were a DTMF signal. Items
* that perform stream control will handle this. */
res = f->subclass.integer;
ast_frfree(f);
ast_channel_clear_flag(c, AST_FLAG_END_DTMF_ONLY);
return res;
case AST_CONTROL_PVT_CAUSE_CODE:
case AST_CONTROL_RINGING:
case AST_CONTROL_ANSWER:
case AST_CONTROL_SRCCHANGE:
case AST_CONTROL_CONNECTED_LINE:
case AST_CONTROL_REDIRECTING:
case AST_CONTROL_HOLD:
case AST_CONTROL_UNHOLD:
/* Unimportant */
break;
default:
ast_log(LOG_WARNING, "Unexpected control subclass '%d'\n", f->subclass.integer);
case AST_FRAME_VOICE:
/* Write audio if appropriate */
if (audiofd > -1) {
if (write(audiofd, f->data.ptr, f->datalen) < 0) {
ast_log(LOG_WARNING, "write() failed: %s\n", strerror(errno));
}
}
default:
/* Ignore */
break;
}
ast_frfree(f);
}
}

ast_channel_clear_flag(c, AST_FLAG_END_DTMF_ONLY);
return 0; /* Time is up */
}
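/*! \brief Direction of a DTMF digit relative to this channel. */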
enum DtmfDirection {
DTMF_RECEIVED,
DTMF_SENT
};
static const char *dtmf_direction_to_string(enum DtmfDirection direction)
{
switch (direction) {
case DTMF_RECEIVED:
return "Received";
case DTMF_SENT:
return "Sent";
}
return "?";
}
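/*! \brief Publish a DTMF begin blob (digit and direction) on the channel. */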
static void send_dtmf_begin_event(struct ast_channel *chan,
enum DtmfDirection direction, const char digit)
{
RAII_VAR(struct ast_json *, blob, NULL, ast_json_unref);
char digit_str[] = { digit, '\0' };
blob = ast_json_pack("{ s: s, s: s }",
"digit", digit_str,
"direction", dtmf_direction_to_string(direction));
if (!blob) {
return;
}
ast_channel_publish_cached_blob(chan, ast_channel_dtmf_begin_type(), blob);
}
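/*! \brief Publish a DTMF end blob (digit, direction, and duration in ms) on the channel. */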
static void send_dtmf_end_event(struct ast_channel *chan,
enum DtmfDirection direction, const char digit, long duration_ms)
{
RAII_VAR(struct ast_json *, blob, NULL, ast_json_unref);
char digit_str[] = { digit, '\0' };
blob = ast_json_pack("{ s: s, s: s, s: i }",
"digit", digit_str,
"direction", dtmf_direction_to_string(direction),
"duration_ms", duration_ms);
if (!blob) {
return;
}
ast_channel_publish_cached_blob(chan, ast_channel_dtmf_end_type(), blob);
}
static void ast_read_generator_actions(struct ast_channel *chan, struct ast_frame *f)
{
struct ast_generator *generator;
void *gendata;
int res;
int samples;
generator = ast_channel_generator(chan);
if (!generator
|| !generator->generate
|| f->frametype != AST_FRAME_VOICE
|| !ast_channel_generatordata(chan)
|| ast_channel_timingfunc(chan)) {
return;
}
/*
* We must generate frames in phase locked mode since
* we have no internal timer available.
*/
if (ast_format_cmp(f->subclass.format, ast_channel_writeformat(chan)) == AST_FORMAT_CMP_NOT_EQUAL) {
float factor;
factor = ((float) ast_format_get_sample_rate(ast_channel_writeformat(chan))) / ((float) ast_format_get_sample_rate(f->subclass.format));
samples = (int) (((float) f->samples) * factor);
} else {
samples = f->samples;
}
gendata = ast_channel_generatordata(chan);
ast_channel_generatordata_set(chan, NULL); /* reset, to let writes go through */
/*
* This unlock is here based on two assumptions that hold true at
* this point in the code. 1) this function is only called from
* within __ast_read() and 2) all generators call ast_write() in
* their generate callback.
*
* The reason this is added is so that when ast_write is called,
* the lock that occurs there will not recursively lock the
* channel. Doing this will allow deadlock avoidance to work in
* deeper functions.
*/
ast_channel_unlock(chan);
res = generator->generate(chan, gendata, f->datalen, samples);
ast_channel_lock(chan);
if (generator == ast_channel_generator(chan)) {
ast_channel_generatordata_set(chan, gendata);

if (res) {
ast_debug(1, "Auto-deactivating generator\n");
ast_deactivate_generator(chan);
}
}
}
static inline void queue_dtmf_readq(struct ast_channel *chan, struct ast_frame *f)
{
struct ast_frame *fr = ast_channel_dtmff(chan);
fr->frametype = AST_FRAME_DTMF_END;
fr->subclass.integer = f->subclass.integer;
fr->len = f->len;
/* The only time this function will be called is for a frame that just came
* out of the channel driver. So, we want to stick it on the tail of the
* readq. */
ast_queue_frame(chan, fr);
}
/*!
* \brief Determine whether or not we should ignore DTMF in the readq
*/
static inline int should_skip_dtmf(struct ast_channel *chan)
{
if (ast_test_flag(ast_channel_flags(chan), AST_FLAG_DEFER_DTMF | AST_FLAG_EMULATE_DTMF)) {
/* We're in the middle of emulating a digit, or DTMF has been
* explicitly deferred. Skip this digit, then. */
return 1;
}
if (!ast_tvzero(*ast_channel_dtmf_tv(chan)) &&
ast_tvdiff_ms(ast_tvnow(), *ast_channel_dtmf_tv(chan)) < AST_MIN_DTMF_GAP) {
/* We're not in the middle of a digit, but it hasn't been long enough
* since the last digit, so we'll have to skip DTMF for now. */
return 1;
}
return 0;
}
/*!
* \brief calculates the number of samples to jump forward with in a monitor stream.
* \note When using ast_seekstream() with the read and write streams of a monitor,
* the number of samples to seek forward must be of the same sample rate as the stream
* or else the jump will not be calculated correctly.
*
* \retval number of samples to seek forward after rate conversion.
*/
static inline int calc_monitor_jump(int samples, int sample_rate, int seek_rate)
{
int diff = sample_rate - seek_rate;
if (diff > 0) {
samples = samples / (float) (sample_rate / seek_rate);
} else if (diff < 0) {
samples = samples * (float) (seek_rate / sample_rate);
}
return samples;
}
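/*!
* \brief Pull the next frame for a channel: services the timing, generator,
* jitterbuffer and alert-pipe descriptors, drains the read queue, or reads
* directly from the channel driver.
*/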
static struct ast_frame *__ast_read(struct ast_channel *chan, int dropaudio)
{
struct ast_frame *f = NULL; /* the return value */
int prestate;
int cause = 0;

/* this function is very long so make sure there is only one return
* point at the end (there are only two exceptions to this).
*/

ast_channel_lock(chan);

/* Stop if we're a zombie or need a soft hangup */
if (ast_test_flag(ast_channel_flags(chan), AST_FLAG_ZOMBIE) || ast_check_hangup(chan)) {
if (ast_channel_generator(chan))
ast_deactivate_generator(chan);
/*
* It is possible for chan->_softhangup to be set and there
* still be control frames that need to be read. Instead of
* just going to 'done' in the case of ast_check_hangup(), we
* need to queue the end-of-Q frame so that it can mark the end
* of the read queue. If there are frames to be read,
* ast_queue_control() will be called repeatedly, but will only
* queue the first end-of-Q frame.
*/
if (ast_channel_softhangup_internal_flag(chan)) {
ast_queue_control(chan, AST_CONTROL_END_OF_Q);
} else {
goto done;
}
}

/*
* The ast_waitfor() code records which of the channel's file
* descriptors reported that data is available. In theory,
* ast_read() should only be called after ast_waitfor() reports
* that a channel has data available for reading. However,
* there still may be some edge cases throughout the code where
* ast_read() is called improperly. This can potentially cause
* problems, so if this is a developer build, make a lot of
* noise if this happens so that it can be addressed.
*
* One of the potential problems is blocking on a dead channel.
*/
if (ast_channel_fdno(chan) == -1) {
ast_log(LOG_ERROR,
"ast_read() on chan '%s' called with no recorded file descriptor.\n",
ast_channel_name(chan));
}

prestate = ast_channel_state(chan);
if (ast_channel_timingfd(chan) > -1 && ast_channel_fdno(chan) == AST_TIMING_FD) {
enum ast_timer_event res;
ast_clear_flag(ast_channel_flags(chan), AST_FLAG_EXCEPTION);
res = ast_timer_get_event(ast_channel_timer(chan));
switch (res) {
case AST_TIMING_EVENT_EXPIRED:
if (ast_timer_ack(ast_channel_timer(chan), 1) < 0) {
ast_log(LOG_ERROR, "Failed to acknoweldge timer in ast_read\n");
goto done;
}
if (ast_channel_timingfunc(chan)) {
/* save a copy of func/data before unlocking the channel */
ast_timing_func_t func = ast_channel_timingfunc(chan);
void *data = ast_channel_timingdata(chan);
int got_ref = 0;
if (data && ast_test_flag(ast_channel_flags(chan), AST_FLAG_TIMINGDATA_IS_AO2_OBJ)) {
ao2_ref(data, 1);
got_ref = 1;
}
ast_channel_fdno_set(chan, -1);
ast_channel_unlock(chan);
func(data);
if (got_ref) {
ao2_ref(data, -1);
}
} else {
ast_timer_set_rate(ast_channel_timer(chan), 0);
ast_channel_fdno_set(chan, -1);
ast_channel_unlock(chan);
}
/* cannot 'goto done' because the channel is already unlocked */
return &ast_null_frame;
case AST_TIMING_EVENT_CONTINUOUS:
if (AST_LIST_EMPTY(ast_channel_readq(chan)) ||
!AST_LIST_NEXT(AST_LIST_FIRST(ast_channel_readq(chan)), frame_list)) {
ast_timer_disable_continuous(ast_channel_timer(chan));
}
break;
}
} else if (ast_channel_fd_isset(chan, AST_GENERATOR_FD) && ast_channel_fdno(chan) == AST_GENERATOR_FD) {
/* if the AST_GENERATOR_FD is set, call the generator with args
* set to -1 so it can do whatever it needs to.
*/
void *tmp = ast_channel_generatordata(chan);
ast_channel_generatordata_set(chan, NULL); /* reset to let ast_write get through */
ast_channel_generator(chan)->generate(chan, tmp, -1, -1);
ast_channel_generatordata_set(chan, tmp);
f = &ast_null_frame;
ast_channel_fdno_set(chan, -1);
goto done;
} else if (ast_channel_fd_isset(chan, AST_JITTERBUFFER_FD) && ast_channel_fdno(chan) == AST_JITTERBUFFER_FD) {
ast_clear_flag(ast_channel_flags(chan), AST_FLAG_EXCEPTION);
}

/* Read and ignore anything on the alertpipe, but read only
one sizeof(blah) per frame that we send from it */
if (ast_channel_internal_alert_read(chan) == AST_ALERT_READ_FATAL) {
f = &ast_null_frame;
goto done;
}
if (!AST_LIST_EMPTY(ast_channel_readq(chan))) {
int skip_dtmf = should_skip_dtmf(chan);
AST_LIST_TRAVERSE_SAFE_BEGIN(ast_channel_readq(chan), f, frame_list) {
/* We have to be picky about which frame we pull off of the readq because
* there are cases where we want to leave DTMF frames on the queue until
* some later time. */
if ( (f->frametype == AST_FRAME_DTMF_BEGIN || f->frametype == AST_FRAME_DTMF_END) && skip_dtmf) {
continue;
}
AST_LIST_REMOVE_CURRENT(frame_list);
break;
}
AST_LIST_TRAVERSE_SAFE_END;
if (!f) {
/* There were no acceptable frames on the readq. */
f = &ast_null_frame;
ast_channel_alert_write(chan);
}

/* Interpret hangup and end-of-Q frames to return NULL */
/* XXX why not the same for frames from the channel ? */
if (f->frametype == AST_FRAME_CONTROL) {
switch (f->subclass.integer) {
case AST_CONTROL_HANGUP:
ast_channel_softhangup_internal_flag_add(chan, AST_SOFTHANGUP_DEV);
cause = f->data.uint32;
/* Fall through */
case AST_CONTROL_END_OF_Q:
ast_frfree(f);
f = NULL;
break;
default:
break;
}
}
} else {
ast_channel_blocker_set(chan, pthread_self());
if (ast_test_flag(ast_channel_flags(chan), AST_FLAG_EXCEPTION)) {
if (ast_channel_tech(chan)->exception)
f = ast_channel_tech(chan)->exception(chan);
ast_log(LOG_WARNING, "Exception flag set on '%s', but no exception handler\n", ast_channel_name(chan));
f = &ast_null_frame;
ast_clear_flag(ast_channel_flags(chan), AST_FLAG_EXCEPTION);
} else if (ast_channel_tech(chan) && ast_channel_tech(chan)->read)
f = ast_channel_tech(chan)->read(chan);
ast_log(LOG_WARNING, "No read routine on channel %s\n", ast_channel_name(chan));
/* Perform the framehook read event here. After the frame enters the framehook list
* there is no telling what will happen, <insert mad scientist laugh here>!!! */
f = ast_framehook_list_read_event(ast_channel_framehooks(chan), f);
/*
* Reset the recorded file descriptor that triggered this read so that we can
* easily detect when ast_read() is called without properly using ast_waitfor().
*/
ast_channel_fdno_set(chan, -1);
if (f) {
struct ast_frame *readq_tail = AST_LIST_LAST(ast_channel_readq(chan));
struct ast_control_read_action_payload *read_action_payload;
struct ast_party_connected_line connected;
int hooked = 0;
/* if the channel driver returned more than one frame, stuff the excess
into the readq for the next ast_read call
*/
if (AST_LIST_NEXT(f, frame_list)) {
ast_queue_frame(chan, AST_LIST_NEXT(f, frame_list));
ast_frfree(AST_LIST_NEXT(f, frame_list));
AST_LIST_NEXT(f, frame_list) = NULL;
}
switch (f->frametype) {
case AST_FRAME_CONTROL:
if (f->subclass.integer == AST_CONTROL_ANSWER) {
if (prestate == AST_STATE_UP && ast_channel_is_bridged(chan)) {
ast_debug(1, "Dropping duplicate answer!\n");