int res;
ast_bridge_lock(bridge);
/* Try to find the channel that we want to kick. */
if (!(bridge_channel = bridge_find_channel(bridge, chan))) {
ast_bridge_unlock(bridge);
return -1;
}
res = ast_bridge_channel_queue_callback(bridge_channel, 0, kick_it, NULL, 0);
ast_bridge_unlock(bridge);
return res;
}
/*!
* \internal
* \brief Point the bridge_channel to a new bridge.
* \since 12.0.0
*
* \param bridge_channel What is to point to a new bridge.
* \param new_bridge Where the bridge channel should point.
*
* \return Nothing
*/
static void bridge_channel_change_bridge(struct ast_bridge_channel *bridge_channel, struct ast_bridge *new_bridge)
{
struct ast_bridge *old_bridge;
ao2_ref(new_bridge, +1);
ast_bridge_channel_lock(bridge_channel);
ast_channel_lock(bridge_channel->chan);
old_bridge = bridge_channel->bridge;
bridge_channel->bridge = new_bridge;
ast_channel_internal_bridge_set(bridge_channel->chan, new_bridge);
ast_channel_unlock(bridge_channel->chan);
ast_bridge_channel_unlock(bridge_channel);
ao2_ref(old_bridge, -1);
}
static void bridge_channel_moving(struct ast_bridge_channel *bridge_channel, struct ast_bridge *src, struct ast_bridge *dst)
{
struct ast_bridge_features *features = bridge_channel->features;
struct ast_bridge_hook *hook;
struct ao2_iterator iter;
/* Run any moving hooks. */
iter = ao2_iterator_init(features->other_hooks, 0);
for (; (hook = ao2_iterator_next(&iter)); ao2_ref(hook, -1)) {
int remove_me;
ast_bridge_move_indicate_callback move_cb;
if (hook->type != AST_BRIDGE_HOOK_TYPE_MOVE) {
continue;
}
move_cb = (ast_bridge_move_indicate_callback) hook->callback;
remove_me = move_cb(bridge_channel, hook->hook_pvt, src, dst);
if (remove_me) {
ast_debug(1, "Move detection hook %p is being removed from %p(%s)\n",
hook, bridge_channel, ast_channel_name(bridge_channel->chan));
ao2_unlink(features->other_hooks, hook);
}
}
ao2_iterator_destroy(&iter);
}
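/*
 * Illustrative sketch only (not part of the original file): a move
 * indication callback shaped after the invocation in
 * bridge_channel_moving() above.  The parameter list is inferred from
 * the ast_bridge_move_indicate_callback cast and call; returning
 * nonzero asks for the hook to be removed once it has fired.
 */
#if 0	/* example only */
static int example_move_cb(struct ast_bridge_channel *bridge_channel,
	void *hook_pvt, struct ast_bridge *src, struct ast_bridge *dst)
{
	ast_debug(1, "%s moved from bridge %s to bridge %s\n",
		ast_channel_name(bridge_channel->chan), src->uniqueid, dst->uniqueid);
	return 0;	/* keep the hook installed */
}
#endif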
void bridge_do_merge(struct ast_bridge *dst_bridge, struct ast_bridge *src_bridge, struct ast_bridge_channel **kick_me, unsigned int num_kick,
unsigned int optimized)
{
struct ast_bridge_channel *bridge_channel;
unsigned int idx;
ast_debug(1, "Merging bridge %s into bridge %s\n",
src_bridge->uniqueid, dst_bridge->uniqueid);
ast_bridge_publish_merge(dst_bridge, src_bridge);
/*
* Move channels from src_bridge over to dst_bridge.
*
* We must use AST_LIST_TRAVERSE_SAFE_BEGIN() because
* bridge_channel_internal_pull() alters the list we are traversing.
*/
AST_LIST_TRAVERSE_SAFE_BEGIN(&src_bridge->channels, bridge_channel, entry) {
if (bridge_channel->state != BRIDGE_CHANNEL_STATE_WAIT) {
/*
* The channel is already leaving; let it leave normally because
* pulling it may delete hooks that should run for this channel.
*/
continue;
}
if (ast_test_flag(&bridge_channel->features->feature_flags,
AST_BRIDGE_CHANNEL_FLAG_IMMOVABLE)) {
continue;
}
if (kick_me) {
for (idx = 0; idx < num_kick; ++idx) {
if (bridge_channel == kick_me[idx]) {
ast_bridge_channel_leave_bridge(bridge_channel,
BRIDGE_CHANNEL_STATE_END_NO_DISSOLVE, AST_CAUSE_NORMAL_CLEARING);
break;
}
}
}
bridge_channel_internal_pull(bridge_channel);
if (bridge_channel->state != BRIDGE_CHANNEL_STATE_WAIT) {
/*
* The channel died as a result of being pulled or it was
* kicked. Leave it pointing to the original bridge.
*/
continue;
}
bridge_channel_moving(bridge_channel, bridge_channel->bridge, dst_bridge);
/* Point to new bridge.*/
bridge_channel_change_bridge(bridge_channel, dst_bridge);
if (bridge_channel_internal_push(bridge_channel)) {
ast_bridge_features_remove(bridge_channel->features,
AST_BRIDGE_HOOK_REMOVE_ON_PULL);
ast_bridge_channel_leave_bridge(bridge_channel,
BRIDGE_CHANNEL_STATE_END_NO_DISSOLVE, bridge_channel->bridge->cause);
}
}
AST_LIST_TRAVERSE_SAFE_END;
if (kick_me) {
/*
* Now we can kick any channels in the dst_bridge without
* potentially dissolving the bridge.
*/
for (idx = 0; idx < num_kick; ++idx) {
bridge_channel = kick_me[idx];
ast_bridge_channel_lock(bridge_channel);
if (bridge_channel->state == BRIDGE_CHANNEL_STATE_WAIT) {
ast_bridge_channel_leave_bridge_nolock(bridge_channel,
BRIDGE_CHANNEL_STATE_END_NO_DISSOLVE, AST_CAUSE_NORMAL_CLEARING);
bridge_channel_internal_pull(bridge_channel);
}
ast_bridge_channel_unlock(bridge_channel);
}
}
bridge_reconfigured(dst_bridge, !optimized);
bridge_reconfigured(src_bridge, !optimized);
ast_debug(1, "Merged bridge %s into bridge %s\n",
src_bridge->uniqueid, dst_bridge->uniqueid);
}
struct merge_direction {
/*! Destination merge bridge. */
struct ast_bridge *dest;
/*! Source merge bridge. */
struct ast_bridge *src;
};
/*!
* \internal
* \brief Determine which bridge should merge into the other.
* \since 12.0.0
*
* \param bridge1 A bridge for merging
* \param bridge2 A bridge for merging
*
* \note The two bridges are assumed already locked.
*
* \return Which bridge merges into which, or NULL bridges if they cannot be merged.
*/
static struct merge_direction bridge_merge_determine_direction(struct ast_bridge *bridge1, struct ast_bridge *bridge2)
{
struct merge_direction merge = { NULL, NULL };
int bridge1_priority;
int bridge2_priority;
if (!ast_test_flag(&bridge1->feature_flags,
AST_BRIDGE_FLAG_MERGE_INHIBIT_TO | AST_BRIDGE_FLAG_MERGE_INHIBIT_FROM)
&& !ast_test_flag(&bridge2->feature_flags,
AST_BRIDGE_FLAG_MERGE_INHIBIT_TO | AST_BRIDGE_FLAG_MERGE_INHIBIT_FROM)) {
/*
* Can merge either way. Merge to the higher priority merge
* bridge. Otherwise merge to the larger bridge.
*/
bridge1_priority = bridge1->v_table->get_merge_priority(bridge1);
bridge2_priority = bridge2->v_table->get_merge_priority(bridge2);
if (bridge2_priority < bridge1_priority) {
merge.dest = bridge1;
merge.src = bridge2;
} else if (bridge1_priority < bridge2_priority) {
merge.dest = bridge2;
merge.src = bridge1;
} else {
/* Merge to the larger bridge. */
if (bridge2->num_channels <= bridge1->num_channels) {
merge.dest = bridge1;
merge.src = bridge2;
} else {
merge.dest = bridge2;
merge.src = bridge1;
}
}
} else if (!ast_test_flag(&bridge1->feature_flags, AST_BRIDGE_FLAG_MERGE_INHIBIT_TO)
&& !ast_test_flag(&bridge2->feature_flags, AST_BRIDGE_FLAG_MERGE_INHIBIT_FROM)) {
/* Can merge only one way. */
merge.dest = bridge1;
merge.src = bridge2;
} else if (!ast_test_flag(&bridge2->feature_flags, AST_BRIDGE_FLAG_MERGE_INHIBIT_TO)
&& !ast_test_flag(&bridge1->feature_flags, AST_BRIDGE_FLAG_MERGE_INHIBIT_FROM)) {
/* Can merge only one way. */
merge.dest = bridge2;
merge.src = bridge1;
}
return merge;
}
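/*
 * Worked example (illustrative): with no merge-inhibit flags set and
 * merge priorities of, say, 3 for bridge1 and 1 for bridge2, bridge2
 * merges into bridge1.  With equal priorities the smaller bridge merges
 * into the larger one, and a tie in size keeps bridge1 as the destination.
 */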
/*!
* \internal
* \brief Merge two bridges together
* \since 12.0.0
*
* \param dst_bridge Destination bridge of merge.
* \param src_bridge Source bridge of merge.
* \param merge_best_direction TRUE if the caller does not care which bridge merges into the other.
* \param kick_me Array of channels to kick from the bridges.
* \param num_kick Number of channels in the kick_me array.
*
* \note The dst_bridge and src_bridge are assumed already locked.
*
* \retval 0 on success
* \retval -1 on failure
*/
static int bridge_merge_locked(struct ast_bridge *dst_bridge, struct ast_bridge *src_bridge, int merge_best_direction, struct ast_channel **kick_me, unsigned int num_kick)
{
struct merge_direction merge;
struct ast_bridge_channel **kick_them = NULL;
/* Sanity check. */
ast_assert(dst_bridge && src_bridge && dst_bridge != src_bridge && (!num_kick || kick_me));
if (dst_bridge->dissolved || src_bridge->dissolved) {
ast_debug(1, "Can't merge bridges %s and %s, at least one bridge is dissolved.\n",
src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
if (ast_test_flag(&dst_bridge->feature_flags, AST_BRIDGE_FLAG_MASQUERADE_ONLY)
|| ast_test_flag(&src_bridge->feature_flags, AST_BRIDGE_FLAG_MASQUERADE_ONLY)) {
ast_debug(1, "Can't merge bridges %s and %s, masquerade only.\n",
src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
if (dst_bridge->inhibit_merge || src_bridge->inhibit_merge) {
ast_debug(1, "Can't merge bridges %s and %s, merging temporarily inhibited.\n",
src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
if (merge_best_direction) {
merge = bridge_merge_determine_direction(dst_bridge, src_bridge);
} else {
merge.dest = dst_bridge;
merge.src = src_bridge;
}
if (!merge.dest
|| ast_test_flag(&merge.dest->feature_flags, AST_BRIDGE_FLAG_MERGE_INHIBIT_TO)
|| ast_test_flag(&merge.src->feature_flags, AST_BRIDGE_FLAG_MERGE_INHIBIT_FROM)) {
ast_debug(1, "Can't merge bridges %s and %s, merging inhibited.\n",
src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
if (merge.src->num_channels < 2) {
/*
* For a two party bridge, a channel may be temporarily removed
* from the source bridge or the initial bridge members have not
* joined yet.
*/
ast_debug(1, "Can't merge bridge %s into bridge %s, not enough channels in source bridge.\n",
merge.src->uniqueid, merge.dest->uniqueid);
return -1;
}
if (2 + num_kick < merge.dest->num_channels + merge.src->num_channels
&& !(merge.dest->technology->capabilities & AST_BRIDGE_CAPABILITY_MULTIMIX)
&& (!ast_test_flag(&merge.dest->feature_flags, AST_BRIDGE_FLAG_SMART)
|| !(merge.dest->allowed_capabilities & AST_BRIDGE_CAPABILITY_MULTIMIX))) {
ast_debug(1, "Can't merge bridge %s into bridge %s, multimix is needed and it cannot be acquired.\n",
merge.src->uniqueid, merge.dest->uniqueid);
return -1;
}
if (num_kick) {
unsigned int num_to_kick = 0;
unsigned int idx;
kick_them = ast_alloca(num_kick * sizeof(*kick_them));
for (idx = 0; idx < num_kick; ++idx) {
kick_them[num_to_kick] = bridge_find_channel(merge.src, kick_me[idx]);
if (!kick_them[num_to_kick]) {
kick_them[num_to_kick] = bridge_find_channel(merge.dest, kick_me[idx]);
}
if (kick_them[num_to_kick]) {
++num_to_kick;
}
}
if (num_to_kick != num_kick) {
ast_debug(1, "Can't merge bridge %s into bridge %s, at least one kicked channel is not in either bridge.\n",
merge.src->uniqueid, merge.dest->uniqueid);
return -1;
}
}
bridge_do_merge(merge.dest, merge.src, kick_them, num_kick, 0);
return 0;
}
int ast_bridge_merge(struct ast_bridge *dst_bridge, struct ast_bridge *src_bridge, int merge_best_direction, struct ast_channel **kick_me, unsigned int num_kick)
{
int res;
/* Sanity check. */
ast_assert(dst_bridge && src_bridge);
ast_bridge_lock_both(dst_bridge, src_bridge);
res = bridge_merge_locked(dst_bridge, src_bridge, merge_best_direction, kick_me, num_kick);
ast_bridge_unlock(src_bridge);
ast_bridge_unlock(dst_bridge);
return res;
}
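/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * holding two bridge references can request a best-direction merge with
 * no channels kicked.
 */
#if 0	/* example only */
static void example_merge(struct ast_bridge *bridge_a, struct ast_bridge *bridge_b)
{
	if (ast_bridge_merge(bridge_a, bridge_b, 1, NULL, 0)) {
		ast_log(LOG_WARNING, "Could not merge bridges %s and %s\n",
			bridge_a->uniqueid, bridge_b->uniqueid);
	}
}
#endif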
int bridge_do_move(struct ast_bridge *dst_bridge, struct ast_bridge_channel *bridge_channel, int attempt_recovery,
unsigned int optimized)
{
struct ast_bridge *orig_bridge;
int was_in_bridge;
int res = 0;
if (bridge_channel->swap) {
ast_debug(1, "Moving %p(%s) into bridge %s swapping with %s\n",
bridge_channel, ast_channel_name(bridge_channel->chan), dst_bridge->uniqueid,
ast_channel_name(bridge_channel->swap));
} else {
ast_debug(1, "Moving %p(%s) into bridge %s\n",
bridge_channel, ast_channel_name(bridge_channel->chan), dst_bridge->uniqueid);
}
orig_bridge = bridge_channel->bridge;
was_in_bridge = bridge_channel->in_bridge;
bridge_channel_internal_pull(bridge_channel);
if (bridge_channel->state != BRIDGE_CHANNEL_STATE_WAIT) {
/*
* The channel died as a result of being pulled. Leave it
* pointing to the original bridge.
*
* Clear out the swap channel pointer. A ref is not held
* by bridge_channel->swap at this point.
*/
bridge_channel->swap = NULL;
bridge_reconfigured(orig_bridge, 0);
return -1;
}
/* Point to new bridge.*/
ao2_ref(orig_bridge, +1);/* Keep a ref in case the push fails. */
bridge_channel_change_bridge(bridge_channel, dst_bridge);
bridge_channel_moving(bridge_channel, orig_bridge, dst_bridge);
if (bridge_channel_internal_push_full(bridge_channel, optimized)) {
/* Try to put the channel back into the original bridge. */
ast_bridge_features_remove(bridge_channel->features,
AST_BRIDGE_HOOK_REMOVE_ON_PULL);
if (attempt_recovery && was_in_bridge) {
/* Point back to original bridge. */
bridge_channel_change_bridge(bridge_channel, orig_bridge);
if (bridge_channel_internal_push(bridge_channel)) {
ast_bridge_features_remove(bridge_channel->features,
AST_BRIDGE_HOOK_REMOVE_ON_PULL);
ast_bridge_channel_leave_bridge(bridge_channel,
BRIDGE_CHANNEL_STATE_END_NO_DISSOLVE, bridge_channel->bridge->cause);
}
} else {
ast_bridge_channel_leave_bridge(bridge_channel,
BRIDGE_CHANNEL_STATE_END_NO_DISSOLVE, bridge_channel->bridge->cause);
bridge_channel_settle_owed_events(orig_bridge, bridge_channel);
}
res = -1;
} else if (!optimized) {
bridge_channel_settle_owed_events(orig_bridge, bridge_channel);
}
bridge_reconfigured(dst_bridge, !optimized);
bridge_reconfigured(orig_bridge, !optimized);
ao2_ref(orig_bridge, -1);
return res;
}
/*!
* \internal
* \brief Move a channel from one bridge to another.
* \since 12.0.0
*
* \param dst_bridge Destination bridge of bridge channel move.
* \param src_bridge Source bridge of bridge channel move.
* \param chan Channel to move.
* \param swap Channel to replace in dst_bridge.
* \param attempt_recovery TRUE if a failed move attempts to push the channel back into the original bridge.
*
* \note The dst_bridge and src_bridge are assumed already locked.
*
* \retval 0 on success.
* \retval -1 on failure.
*/
static int bridge_move_locked(struct ast_bridge *dst_bridge, struct ast_bridge *src_bridge, struct ast_channel *chan, struct ast_channel *swap, int attempt_recovery)
{
struct ast_bridge_channel *bridge_channel;
if (dst_bridge->dissolved || src_bridge->dissolved) {
ast_debug(1, "Can't move channel %s from bridge %s into bridge %s, at least one bridge is dissolved.\n",
ast_channel_name(chan), src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
if (ast_test_flag(&dst_bridge->feature_flags, AST_BRIDGE_FLAG_MASQUERADE_ONLY)
|| ast_test_flag(&src_bridge->feature_flags, AST_BRIDGE_FLAG_MASQUERADE_ONLY)) {
ast_debug(1, "Can't move channel %s from bridge %s into bridge %s, masquerade only.\n",
ast_channel_name(chan), src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
if (dst_bridge->inhibit_merge || src_bridge->inhibit_merge) {
ast_debug(1, "Can't move channel %s from bridge %s into bridge %s, temporarily inhibited.\n",
ast_channel_name(chan), src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
bridge_channel = bridge_find_channel(src_bridge, chan);
if (!bridge_channel) {
ast_debug(1, "Can't move channel %s from bridge %s into bridge %s, channel not in bridge.\n",
ast_channel_name(chan), src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
if (bridge_channel->state != BRIDGE_CHANNEL_STATE_WAIT) {
ast_debug(1, "Can't move channel %s from bridge %s into bridge %s, channel leaving bridge.\n",
ast_channel_name(chan), src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
if (ast_test_flag(&bridge_channel->features->feature_flags,
AST_BRIDGE_CHANNEL_FLAG_IMMOVABLE)) {
ast_debug(1, "Can't move channel %s from bridge %s into bridge %s, channel immovable.\n",
ast_channel_name(chan), src_bridge->uniqueid, dst_bridge->uniqueid);
return -1;
}
if (swap) {
struct ast_bridge_channel *bridge_channel_swap;
bridge_channel_swap = bridge_find_channel(dst_bridge, swap);
if (!bridge_channel_swap) {
ast_debug(1, "Can't move channel %s from bridge %s into bridge %s, swap channel %s not in bridge.\n",
ast_channel_name(chan), src_bridge->uniqueid, dst_bridge->uniqueid,
ast_channel_name(swap));
return -1;
}
if (bridge_channel_swap->state != BRIDGE_CHANNEL_STATE_WAIT) {
ast_debug(1, "Can't move channel %s from bridge %s into bridge %s, swap channel %s leaving bridge.\n",
ast_channel_name(chan), src_bridge->uniqueid, dst_bridge->uniqueid,
ast_channel_name(swap));
return -1;
}
}
bridge_channel->swap = swap;
return bridge_do_move(dst_bridge, bridge_channel, attempt_recovery, 0);
}
int ast_bridge_move(struct ast_bridge *dst_bridge, struct ast_bridge *src_bridge, struct ast_channel *chan, struct ast_channel *swap, int attempt_recovery)
{
int res;
ast_bridge_lock_both(dst_bridge, src_bridge);
res = bridge_move_locked(dst_bridge, src_bridge, chan, swap, attempt_recovery);
ast_bridge_unlock(src_bridge);
ast_bridge_unlock(dst_bridge);
return res;
}
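/*
 * Usage sketch (illustrative, not part of the original file): move a
 * channel between bridges with no swap channel and recovery enabled, so
 * a failed push tries to return the channel to its original bridge.
 */
#if 0	/* example only */
static void example_move(struct ast_bridge *dst, struct ast_bridge *src, struct ast_channel *chan)
{
	if (ast_bridge_move(dst, src, chan, NULL, 1)) {
		ast_log(LOG_WARNING, "Could not move %s into bridge %s\n",
			ast_channel_name(chan), dst->uniqueid);
	}
}
#endif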
int ast_bridge_add_channel(struct ast_bridge *bridge, struct ast_channel *chan,
struct ast_bridge_features *features, int play_tone, const char *xfersound)
{
RAII_VAR(struct ast_bridge *, chan_bridge, NULL, ao2_cleanup);
RAII_VAR(struct ast_channel *, yanked_chan, NULL, ao2_cleanup);
ast_channel_lock(chan);
chan_bridge = ast_channel_get_bridge(chan);
ast_channel_unlock(chan);
if (chan_bridge) {
struct ast_bridge_channel *bridge_channel;
/* The channel is in a bridge so it is not getting any new features. */
ast_bridge_features_destroy(features);
ast_bridge_lock_both(bridge, chan_bridge);
bridge_channel = bridge_find_channel(chan_bridge, chan);
if (bridge_move_locked(bridge, chan_bridge, chan, NULL, 1)) {
ast_bridge_unlock(chan_bridge);
ast_bridge_unlock(bridge);
return -1;
}
/*
* bridge_move_locked() will implicitly ensure that
* bridge_channel is not NULL.
*/
ast_assert(bridge_channel != NULL);
/*
* Additional checks if the channel we just stole dissolves the
* original bridge.
*/
bridge_dissolve_check_stolen(chan_bridge, bridge_channel);
ast_bridge_unlock(chan_bridge);
ast_bridge_unlock(bridge);
} else {
/* Slightly less easy case. We need to yank channel A from
* where he currently is and impart him into our bridge.
*/
yanked_chan = ast_channel_yank(chan);
if (!yanked_chan) {
ast_log(LOG_WARNING, "Could not gain control of channel %s\n", ast_channel_name(chan));
ast_bridge_features_destroy(features);
return -1;
}
if (ast_channel_state(yanked_chan) != AST_STATE_UP) {
ast_answer(yanked_chan);
}
ast_channel_ref(yanked_chan);
if (ast_bridge_impart(bridge, yanked_chan, NULL, features,
AST_BRIDGE_IMPART_CHAN_INDEPENDENT)) {
/* It is possible for us to yank a channel and have some other
* thread start a PBX on the channel after we yanked it. In particular,
* this can theoretically happen on the ;2 of a Local channel if we
* yank it prior to the ;1 being answered. Make sure that it isn't
* executing a PBX before hanging it up.
*/
if (ast_channel_pbx(yanked_chan)) {
ast_channel_unref(yanked_chan);
} else {
ast_hangup(yanked_chan);
}
return -1;
}
}
if (play_tone && !ast_strlen_zero(xfersound)) {
struct ast_channel *play_chan = yanked_chan ?: chan;
RAII_VAR(struct ast_bridge_channel *, play_bridge_channel, NULL, ao2_cleanup);
ast_channel_lock(play_chan);
play_bridge_channel = ast_channel_get_bridge_channel(play_chan);
ast_channel_unlock(play_chan);
if (!play_bridge_channel) {
ast_log(LOG_WARNING, "Unable to play tone for channel %s. No longer in a bridge.\n",
ast_channel_name(play_chan));
} else {
ast_bridge_channel_queue_playfile(play_bridge_channel, NULL, xfersound, NULL);
}
}
return 0;
}
static int bridge_allows_optimization(struct ast_bridge *bridge)
{
return !(bridge->inhibit_merge
|| bridge->dissolved
|| ast_test_flag(&bridge->feature_flags, AST_BRIDGE_FLAG_MASQUERADE_ONLY));
}
/*!
* \internal
* \brief Lock the unreal channel stack for chan and prequalify it.
* \since 12.0.0
*
* \param chan Unreal channel writing a frame into the channel driver.
*
* \note It is assumed that chan is already locked.
*
* \retval bridge on success with bridge and bridge_channel locked.
* \retval NULL if cannot do optimization now.
*/
static struct ast_bridge *optimize_lock_chan_stack(struct ast_channel *chan)
{
struct ast_bridge *bridge;
struct ast_bridge_channel *bridge_channel;
if (!AST_LIST_EMPTY(ast_channel_readq(chan))) {
return NULL;
}
if (ast_test_flag(ast_channel_flags(chan), AST_FLAG_EMULATE_DTMF)) {
return NULL;
}
if (ast_channel_has_audio_frame_or_monitor(chan)) {
/* Channel has an active monitor, audiohook, or framehook. */
return NULL;
}
bridge_channel = ast_channel_internal_bridge_channel(chan);
if (!bridge_channel || ast_bridge_channel_trylock(bridge_channel)) {
return NULL;
}
bridge = bridge_channel->bridge;
if (bridge_channel->activity != BRIDGE_CHANNEL_THREAD_SIMPLE
|| bridge_channel->state != BRIDGE_CHANNEL_STATE_WAIT
|| ast_bridge_trylock(bridge)) {
ast_bridge_channel_unlock(bridge_channel);
return NULL;
}
if (!bridge_channel_internal_allows_optimization(bridge_channel) ||
!bridge_allows_optimization(bridge)) {
ast_bridge_unlock(bridge);
ast_bridge_channel_unlock(bridge_channel);
return NULL;
}
return bridge;
}
/*!
* \internal
* \brief Lock the unreal channel stack for peer and prequalify it.
* \since 12.0.0
*
* \param peer Other unreal channel in the pair.
*
* \retval bridge on success with bridge, bridge_channel, and peer locked.
* \retval NULL if cannot do optimization now.
*/
static struct ast_bridge *optimize_lock_peer_stack(struct ast_channel *peer)
{
struct ast_bridge *bridge;
struct ast_bridge_channel *bridge_channel;
if (ast_channel_trylock(peer)) {
return NULL;
}
if (!AST_LIST_EMPTY(ast_channel_readq(peer))) {
ast_channel_unlock(peer);
return NULL;
}
if (ast_test_flag(ast_channel_flags(peer), AST_FLAG_EMULATE_DTMF)) {
ast_channel_unlock(peer);
return NULL;
}
if (ast_channel_has_audio_frame_or_monitor(peer)) {
/* Peer has an active monitor, audiohook, or framehook. */
ast_channel_unlock(peer);
return NULL;
}
bridge_channel = ast_channel_internal_bridge_channel(peer);
if (!bridge_channel || ast_bridge_channel_trylock(bridge_channel)) {
ast_channel_unlock(peer);
return NULL;
}
bridge = bridge_channel->bridge;
if (bridge_channel->activity != BRIDGE_CHANNEL_THREAD_IDLE
|| bridge_channel->state != BRIDGE_CHANNEL_STATE_WAIT
|| ast_bridge_trylock(bridge)) {
ast_bridge_channel_unlock(bridge_channel);
ast_channel_unlock(peer);
return NULL;
}
if (!bridge_allows_optimization(bridge) ||
!bridge_channel_internal_allows_optimization(bridge_channel)) {
ast_bridge_unlock(bridge);
ast_bridge_channel_unlock(bridge_channel);
ast_channel_unlock(peer);
return NULL;
}
return bridge;
}
/*!
* \internal
* \brief Indicates allowability of a swap optimization
*/
enum bridge_allow_swap {
/*! Bridges cannot allow for a swap optimization to occur */
SWAP_PROHIBITED,
/*! Bridge swap optimization can occur into the chan_bridge */
SWAP_TO_CHAN_BRIDGE,
/*! Bridge swap optimization can occur into the peer_bridge */
SWAP_TO_PEER_BRIDGE,
};
/*!
* \internal
* \brief Determine if two bridges allow for swap optimization to occur
*
* \param chan_bridge First bridge being tested
* \param peer_bridge Second bridge being tested
* \return Allowability of swap optimization
*/
static enum bridge_allow_swap bridges_allow_swap_optimization(struct ast_bridge *chan_bridge,
struct ast_bridge *peer_bridge)
{
int chan_priority;
int peer_priority;
if (!ast_test_flag(&chan_bridge->feature_flags,
AST_BRIDGE_FLAG_SWAP_INHIBIT_TO | AST_BRIDGE_FLAG_SWAP_INHIBIT_FROM |
AST_BRIDGE_FLAG_TRANSFER_BRIDGE_ONLY)
&& !ast_test_flag(&peer_bridge->feature_flags,
AST_BRIDGE_FLAG_SWAP_INHIBIT_TO | AST_BRIDGE_FLAG_SWAP_INHIBIT_FROM |
AST_BRIDGE_FLAG_TRANSFER_BRIDGE_ONLY)) {
/*
* Can swap either way. Swap to the higher priority merge
* bridge.
*/
chan_priority = chan_bridge->v_table->get_merge_priority(chan_bridge);
peer_priority = peer_bridge->v_table->get_merge_priority(peer_bridge);
if (chan_bridge->num_channels == 2
&& chan_priority <= peer_priority) {
return SWAP_TO_PEER_BRIDGE;
} else if (peer_bridge->num_channels == 2
&& peer_priority <= chan_priority) {
return SWAP_TO_CHAN_BRIDGE;
}
} else if (chan_bridge->num_channels == 2
&& !ast_test_flag(&chan_bridge->feature_flags, AST_BRIDGE_FLAG_SWAP_INHIBIT_FROM | AST_BRIDGE_FLAG_TRANSFER_BRIDGE_ONLY)
&& !ast_test_flag(&peer_bridge->feature_flags, AST_BRIDGE_FLAG_SWAP_INHIBIT_TO)) {
/* Can swap optimize only one way. */
return SWAP_TO_PEER_BRIDGE;
} else if (peer_bridge->num_channels == 2
&& !ast_test_flag(&peer_bridge->feature_flags, AST_BRIDGE_FLAG_SWAP_INHIBIT_FROM | AST_BRIDGE_FLAG_TRANSFER_BRIDGE_ONLY)
&& !ast_test_flag(&chan_bridge->feature_flags, AST_BRIDGE_FLAG_SWAP_INHIBIT_TO)) {
/* Can swap optimize only one way. */
return SWAP_TO_CHAN_BRIDGE;
}
return SWAP_PROHIBITED;
}
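/*
 * Worked example (illustrative): when neither bridge carries a swap-inhibit
 * or transfer-only flag, chan_bridge holds exactly two channels, and its
 * merge priority is not higher than peer_bridge's, the result is
 * SWAP_TO_PEER_BRIDGE.  The mirror case, with peer_bridge as the two-party
 * bridge of lower or equal priority, yields SWAP_TO_CHAN_BRIDGE.
 */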
/*!
* \internal
* \brief Check and attempt to swap optimize out the unreal channels.
* \since 12.0.0
*
* \param chan_bridge
* \param chan_bridge_channel
* \param peer_bridge
* \param peer_bridge_channel
* \param pvt Unreal data containing callbacks to call if the optimization actually
* happens
*
* \retval 1 if unreal channels failed to optimize out.
* \retval 0 if unreal channels were not optimized out.
* \retval -1 if unreal channels were optimized out.
*/
static int try_swap_optimize_out(struct ast_bridge *chan_bridge,
struct ast_bridge_channel *chan_bridge_channel, struct ast_bridge *peer_bridge,
struct ast_bridge_channel *peer_bridge_channel,
struct ast_unreal_pvt *pvt)
{
struct ast_bridge *dst_bridge;
struct ast_bridge_channel *dst_bridge_channel;
struct ast_bridge_channel *src_bridge_channel;
struct ast_bridge_channel *other;
int res = 1;
switch (bridges_allow_swap_optimization(chan_bridge, peer_bridge)) {
case SWAP_TO_CHAN_BRIDGE:
dst_bridge = chan_bridge;
dst_bridge_channel = chan_bridge_channel;
src_bridge_channel = peer_bridge_channel;
break;
case SWAP_TO_PEER_BRIDGE:
dst_bridge = peer_bridge;
dst_bridge_channel = peer_bridge_channel;
src_bridge_channel = chan_bridge_channel;
break;
case SWAP_PROHIBITED:
default:
return 0;
}
other = ast_bridge_channel_peer(src_bridge_channel);
if (other && other->state == BRIDGE_CHANNEL_STATE_WAIT) {
unsigned int id;
if (ast_channel_trylock(other->chan)) {
return 1;
}
id = ast_atomic_fetchadd_int((int *) &optimization_id, +1);
ast_verb(4, "Move-swap optimizing %s <-- %s.\n",
ast_channel_name(dst_bridge_channel->chan),
ast_channel_name(other->chan));
if (pvt && !ast_test_flag(pvt, AST_UNREAL_OPTIMIZE_BEGUN) && pvt->callbacks
&& pvt->callbacks->optimization_started) {
pvt->callbacks->optimization_started(pvt, other->chan,
dst_bridge_channel->chan == pvt->owner ? AST_UNREAL_OWNER : AST_UNREAL_CHAN,
id);
ast_set_flag(pvt, AST_UNREAL_OPTIMIZE_BEGUN);
}
other->swap = dst_bridge_channel->chan;
if (!bridge_do_move(dst_bridge, other, 1, 1)) {
ast_bridge_channel_leave_bridge(src_bridge_channel,
BRIDGE_CHANNEL_STATE_END_NO_DISSOLVE, AST_CAUSE_NORMAL_CLEARING);
res = -1;
}
if (pvt && pvt->callbacks && pvt->callbacks->optimization_finished) {
pvt->callbacks->optimization_finished(pvt, res == 1, id);
}
ast_channel_unlock(other->chan);
}
return res;
}
/*!
* \internal
* \brief Indicates allowability of a merge optimization
*/
enum bridge_allow_merge {
/*! Bridge properties prohibit merge optimization */
MERGE_PROHIBITED,
/*! Merge optimization cannot occur because the source bridge has too few channels */
MERGE_NOT_ENOUGH_CHANNELS,
/*! Merge optimization cannot occur because multimix capability could not be requested */
MERGE_NO_MULTIMIX,
/*! Merge optimization allowed between bridges */
MERGE_ALLOWED,
};
/*!
* \internal
* \brief Determines allowability of a merge optimization
*
* \note The merge output parameter is undefined if MERGE_PROHIBITED is returned. For success
* and other failure returns, a merge direction was determined, and the parameter is safe to
* access.
*
* \param chan_bridge First bridge being tested
* \param peer_bridge Second bridge being tested
* \param num_kick_channels The number of channels to remove from the bridges during merging
* \param[out] merge Indicates the recommended direction for the bridge merge
*/
static enum bridge_allow_merge bridges_allow_merge_optimization(struct ast_bridge *chan_bridge,
struct ast_bridge *peer_bridge, int num_kick_channels, struct merge_direction *merge)
{
*merge = bridge_merge_determine_direction(chan_bridge, peer_bridge);
if (!merge->dest) {
return MERGE_PROHIBITED;
}
if (merge->src->num_channels < 2) {
return MERGE_NOT_ENOUGH_CHANNELS;
} else if ((2 + num_kick_channels) < merge->dest->num_channels + merge->src->num_channels
&& !(merge->dest->technology->capabilities & AST_BRIDGE_CAPABILITY_MULTIMIX)
&& (!ast_test_flag(&merge->dest->feature_flags, AST_BRIDGE_FLAG_SMART)
|| !(merge->dest->allowed_capabilities & AST_BRIDGE_CAPABILITY_MULTIMIX))) {
return MERGE_NO_MULTIMIX;
}
return MERGE_ALLOWED;
}
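/*
 * Worked example (illustrative): merging two two-party bridges while
 * kicking both unreal channels compares 2 + 2 channels against the
 * allowance of 2 + num_kick_channels = 4, so the multimix capability
 * check is skipped and MERGE_ALLOWED is returned (absent inhibit flags).
 */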
/*!
* \internal
* \brief Check and attempt to merge optimize out the unreal channels.
* \since 12.0.0
*
* \param chan_bridge
* \param chan_bridge_channel
* \param peer_bridge
* \param peer_bridge_channel
* \param pvt Unreal data containing callbacks to call if the optimization actually
* happens
*
* \retval 0 if unreal channels were not optimized out.
* \retval -1 if unreal channels were optimized out.
*/
static int try_merge_optimize_out(struct ast_bridge *chan_bridge,
struct ast_bridge_channel *chan_bridge_channel, struct ast_bridge *peer_bridge,
struct ast_bridge_channel *peer_bridge_channel,
struct ast_unreal_pvt *pvt)
{
struct merge_direction merge;
struct ast_bridge_channel *kick_me[] = {
chan_bridge_channel,
peer_bridge_channel,
};
unsigned int id;
switch (bridges_allow_merge_optimization(chan_bridge, peer_bridge, ARRAY_LEN(kick_me), &merge)) {
case MERGE_ALLOWED:
break;
case MERGE_PROHIBITED:
return 0;
case MERGE_NOT_ENOUGH_CHANNELS:
ast_debug(4, "Can't optimize %s -- %s out, not enough channels in bridge %s.\n",
ast_channel_name(chan_bridge_channel->chan),
ast_channel_name(peer_bridge_channel->chan),
merge.src->uniqueid);
return 0;
case MERGE_NO_MULTIMIX:
ast_debug(4, "Can't optimize %s -- %s out, multimix is needed and it cannot be acquired.\n",
ast_channel_name(chan_bridge_channel->chan),
ast_channel_name(peer_bridge_channel->chan));
return 0;
}
ast_verb(4, "Merge optimizing %s -- %s out.\n",
ast_channel_name(chan_bridge_channel->chan),
ast_channel_name(peer_bridge_channel->chan));
id = ast_atomic_fetchadd_int((int *) &optimization_id, +1);
if (pvt && !ast_test_flag(pvt, AST_UNREAL_OPTIMIZE_BEGUN) && pvt->callbacks
&& pvt->callbacks->optimization_started) {
pvt->callbacks->optimization_started(pvt, NULL,
merge.dest == ast_channel_internal_bridge(pvt->owner) ? AST_UNREAL_OWNER : AST_UNREAL_CHAN,
id);
ast_set_flag(pvt, AST_UNREAL_OPTIMIZE_BEGUN);
}
bridge_do_merge(merge.dest, merge.src, kick_me, ARRAY_LEN(kick_me), 1);
if (pvt && pvt->callbacks && pvt->callbacks->optimization_finished) {
pvt->callbacks->optimization_finished(pvt, 1, id);
}
return -1;
}
int ast_bridge_unreal_optimize_out(struct ast_channel *chan, struct ast_channel *peer, struct ast_unreal_pvt *pvt)
{
struct ast_bridge *chan_bridge;
struct ast_bridge *peer_bridge;
struct ast_bridge_channel *chan_bridge_channel;
struct ast_bridge_channel *peer_bridge_channel;
int res = 0;
chan_bridge = optimize_lock_chan_stack(chan);
if (!chan_bridge) {
return res;
}
chan_bridge_channel = ast_channel_internal_bridge_channel(chan);
peer_bridge = optimize_lock_peer_stack(peer);
if (peer_bridge) {
peer_bridge_channel = ast_channel_internal_bridge_channel(peer);
res = try_swap_optimize_out(chan_bridge, chan_bridge_channel,
peer_bridge, peer_bridge_channel, pvt);
if (!res) {
res = try_merge_optimize_out(chan_bridge, chan_bridge_channel,
peer_bridge, peer_bridge_channel, pvt);
} else if (0 < res) {
res = 0;
}
/* Release peer locks. */
ast_bridge_unlock(peer_bridge);
ast_bridge_channel_unlock(peer_bridge_channel);
ast_channel_unlock(peer);
}
/* Release chan locks. */
ast_bridge_unlock(chan_bridge);
ast_bridge_channel_unlock(chan_bridge_channel);
return res;
}
enum ast_bridge_optimization ast_bridges_allow_optimization(struct ast_bridge *chan_bridge,
struct ast_bridge *peer_bridge)
{
struct merge_direction merge;
if (!bridge_allows_optimization(chan_bridge) || !bridge_allows_optimization(peer_bridge)) {
return AST_BRIDGE_OPTIMIZE_PROHIBITED;
}
switch (bridges_allow_swap_optimization(chan_bridge, peer_bridge)) {
case SWAP_TO_CHAN_BRIDGE:
return AST_BRIDGE_OPTIMIZE_SWAP_TO_CHAN_BRIDGE;
case SWAP_TO_PEER_BRIDGE:
return AST_BRIDGE_OPTIMIZE_SWAP_TO_PEER_BRIDGE;
case SWAP_PROHIBITED:
default:
break;
}
/* Two channels will be kicked from the bridges, the unreal;1 and unreal;2 channels */
if (bridges_allow_merge_optimization(chan_bridge, peer_bridge, 2, &merge) != MERGE_ALLOWED) {
return AST_BRIDGE_OPTIMIZE_PROHIBITED;