From 025ea9914b4f367fa9c463054475e408f1f1c0ab Mon Sep 17 00:00:00 2001
From: Yalu Zhang <yalu.zhang@iopsys.eu>
Date: Thu, 27 Jun 2024 10:21:22 +0000
Subject: [PATCH] Fix a crash when an internal call between two DECT handsets
 is terminated

Use a shared ubus context from the pool instead of connecting and tearing down
a new ubus connection in endpt_get_rtp_stats() (see the illustrative sketch
below).

- Do the same for endpt_signal()
- Restructure audio_packet_handler(): validate the RTP packet up front and only
  queue frames to the owner channel while the call is active
- Demote the "unknown RTP payload type" and "unknown asterisk format" warnings
  to debug level
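
The shared context is assumed to come from a small connection pool that is
created once and reused by all callers. The actual get_shared_context()
implementation is not part of this patch; the sketch below is only a rough,
hypothetical illustration of the intended pattern (the lock name, the lazy
single-connection "pool" and the error handling are assumptions, not the real
code):

    #include <libubus.h>            /* struct ubus_context, ubus_connect() */
    #include "asterisk.h"
    #include "asterisk/lock.h"      /* AST_MUTEX_DEFINE_STATIC, ast_mutex_*() */
    #include "asterisk/logger.h"    /* ast_log() */

    static struct ubus_context *shared_ctx; /* hypothetical single-entry "pool" */
    AST_MUTEX_DEFINE_STATIC(shared_ctx_lock);

    /* Return a long-lived ubus context, connecting lazily on first use.
     * 'caller' is only used for diagnostics, mirroring how the patch passes
     * __FUNCTION__ to get_shared_context(). */
    static struct ubus_context *get_shared_context(const char *caller)
    {
        struct ubus_context *ctx;

        ast_mutex_lock(&shared_ctx_lock);
        if (!shared_ctx) {
            shared_ctx = ubus_connect(NULL);
            if (!shared_ctx) {
                ast_log(LOG_ERROR, "%s(): ubus_connect() failed\n", caller);
            }
        }
        ctx = shared_ctx;
        ast_mutex_unlock(&shared_ctx_lock);
        return ctx;
    }

With this pattern, callers such as chan_voicemngr_send_ubus_event() and
endpt_get_rtp_stats() no longer perform a ubus_connect()/ast_ubus_free_context()
round trip per request on the call-termination path.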
---
 src/channels/chan_voicemngr.c | 150 ++++++++++++++--------------------
 1 file changed, 63 insertions(+), 87 deletions(-)

diff --git a/src/channels/chan_voicemngr.c b/src/channels/chan_voicemngr.c
index 188067f..9f20edd 100644
--- a/src/channels/chan_voicemngr.c
+++ b/src/channels/chan_voicemngr.c
@@ -687,29 +687,20 @@ static void endpt_signal(int line, char *signal, char *state, char *data) {
 static int chan_voicemngr_send_ubus_event(char *ev_name, int line)
 {
 	struct blob_buf blob;
-	struct ubus_context *ubusctx;
 	int res = 0;
 
-	ubusctx = ubus_connect(NULL);
-	if (!ubusctx) {
-		return -1;
-	}
-
 	memset(&blob, 0, sizeof(blob));
 	if(blob_buf_init(&blob, 0)) {
-		ast_ubus_free_context(ubusctx);
 		return -1;
 	}
 
 	blobmsg_add_u32(&blob, "id", line);
 	blobmsg_add_string(&blob, "event", ev_name);
-
-	if (ubus_send_event(ubusctx, broadcast_path, blob.head) != UBUS_STATUS_OK) {
+	if (ubus_send_event(get_shared_context(__FUNCTION__), broadcast_path, blob.head) != UBUS_STATUS_OK) {
 		ast_log(LOG_NOTICE,"Error sending ubus message %s\n", ev_name);
 		res = -1;
 	}
 
-	ast_ubus_free_context(ubusctx);
 	blob_buf_free(&blob);
 
 	return res;
@@ -1754,7 +1745,7 @@ static int chan_voicemngr_classify_rtp_packet(int payload_type) {
 		if (voicemngr_codecs[i].rtp_payload_type == payload_type)
 			return CHAN_VOICEMNGR_AUDIO;
 
-	ast_log(LOG_WARNING, "Unknown RTP payload_type %d\n", payload_type);
+	ast_debug(3, "Unknown RTP payload_type %d\n", payload_type);
 	return CHAN_VOICEMNGR_UNKNOWN;
 }
 
@@ -1764,7 +1755,7 @@ static int map_ast_codec_id_to_rtp(const struct ast_format *astcodec)
 		if (ast_format_cmp(astcodec, *voicemngr_codecs[i].ast_format) == AST_FORMAT_CMP_EQUAL)
 			return voicemngr_codecs[i].rtp_payload_type;
 
-	ast_log(LOG_WARNING, "Unknown asterisk format(%s), return PCMA\n", ast_format_get_name(astcodec));
+	ast_debug(3, "Unknown asterisk format(%s), return PCMA\n", ast_format_get_name(astcodec));
 	return RTP_PT_PCMA;
 }
 
@@ -3257,16 +3248,22 @@ static struct ast_format *map_rtpname_to_format(char* name) {
 	return ast_format_ulaw;
 }
 
-/* Handle audio packets from voicemngr. */
-static void audio_packet_handler(pe_packet_t *p) {
+/* Handle audio packets from voicemngr */
+static void audio_packet_handler(pe_packet_t *p)
+{
 	struct chan_voicemngr_subchannel *sub;
-	int packet_type  = CHAN_VOICEMNGR_UNKNOWN, drop_frame = 0;
+	int packet_type = CHAN_VOICEMNGR_UNKNOWN, drop_frame = 0;
 	audio_packet_t *ap = (audio_packet_t *)p->data;
 	uint8_t payload_type = ap->rtp[1];
 	struct ast_frame frame = { .src = "TELCHAN", };
 	struct chan_voicemngr_pvt *pvt;
 	int sip_client_id = -1;
-	unsigned int*   packet_buf32 = (unsigned int*)ap->rtp;
+	unsigned int *packet_buf32 = (unsigned int*)ap->rtp;
+
+	if (!(ap->rtp_size && (ap->rtp[0] & 0x80))) {
+		ast_debug(3, "Invalid packet size or RTP version\n");
+		return;
+	}
 
 	// Clear the RTP marker bit
 	if ((payload_type & RTP_MARKER_BIT) && (payload_type < RTCP_SR || payload_type > RTCP_XR))
@@ -3279,77 +3276,66 @@ static void audio_packet_handler(pe_packet_t *p) {
 		endpt_connection(ap->line, ap->connection_id, "destroy"); // Request line close
 		return;
 	}
+
 	if (payload_type == sub->dtmf_pt) {
 		packet_type = CHAN_VOICEMNGR_AUDIO;
 	} else {
 		packet_type = chan_voicemngr_classify_rtp_packet(payload_type);
 	}
-	//pvt_lock(sub->parent, "chan_voicemngr monitor packets");
-	//ast_mutex_lock(&sub->parent->lock);
-	struct ast_channel *owner = NULL;
-	if (sub->owner) {
-		ast_channel_ref(sub->owner);
-		owner = sub->owner;
-	}
 
-	// We seem to get packets from DSP even if connection is muted (perhaps muting only affects packet callback).
-	// Drop packets if subchannel is on hold. Handle rtp packet according to classification.
-	if (sub->channel_state != ONHOLD && packet_type == CHAN_VOICEMNGR_AUDIO && (ap->rtp[0] & 0x80) && ap->rtp_size) {
-		frame.frametype = AST_FRAME_VOICE;
-		frame.offset = 0;
-		frame.data.ptr = ap->rtp + 12;
-		frame.datalen = ap->rtp_size - 12;
-		if (payload_type == RTP_PT_CN) {
-			frame.frametype = AST_FRAME_CNG;
-			frame.subclass.integer = ap->rtp[12];
-		} else if (sub->channel_state == OFFHOOK
-		 || sub->channel_state == DIALING){
-			drop_frame=1;
-		} else if(payload_type == sub->dtmf_pt) {
-			frame.frametype = AST_FRAME_DTMF_BYPASS;
-			frame.subclass.integer = ap->rtp[1]; //payload_type
-			ast_debug(3, "DTMF rtp_event for digit: %d, with payload_type: %d \n", ap->rtp[12], payload_type);
-			if (sub->conference_initiator == 1) {
-				drop_frame=1;
-			}
-		} else {
-			struct ast_format *format = map_rtptype_to_format(payload_type);
-			if (format) {
-				frame.subclass.format = format;
-				frame.samples = ast_codec_samples_count(&frame);
+	enum chan_voicemngr_channel_state sub_state = sub->channel_state;
+	enum ast_channel_state owner_state = sub->owner ? ast_channel_state(sub->owner) : AST_STATE_DOWN;
+	if ((sub_state == INCALL || sub_state == CALLWAITING || sub_state == TRANSFERING) &&
+		(owner_state == AST_STATE_UP || owner_state == AST_STATE_RING)) {
+		if (packet_type == CHAN_VOICEMNGR_AUDIO) {
+			frame.frametype = AST_FRAME_VOICE;
+			frame.offset = 0;
+			frame.data.ptr = ap->rtp + 12;
+			frame.datalen = ap->rtp_size - 12;
+			if (payload_type == RTP_PT_CN) {
+				frame.frametype = AST_FRAME_CNG;
+				frame.subclass.integer = ap->rtp[12];
+			} else if(payload_type == sub->dtmf_pt) {
+				frame.frametype = AST_FRAME_DTMF_BYPASS;
+				frame.subclass.integer = ap->rtp[1]; //payload_type
+				ast_debug(3, "DTMF rtp_event for digit: %d, with payload_type: %d \n", ap->rtp[12], payload_type);
+				if (sub->conference_initiator == 1) {
+					drop_frame = 1;
+				}
+			} else {
+				struct ast_format *format = map_rtptype_to_format(payload_type);
+				if (format) {
+					frame.subclass.format = format;
+					frame.samples = ast_codec_samples_count(&frame);
+				}
 			}
-		}
 
-		sip_client_id = chan_voicemngr_get_sip_client_id(sub);
-		if (sip_client_id >= 0 && sip_client_id < MAX_SIP_CLIENTS) {
-			line_stats[sip_client_id].txpkts++;
-			line_stats[sip_client_id].txbytes += frame.datalen;
+			sip_client_id = chan_voicemngr_get_sip_client_id(sub);
+			if (sip_client_id >= 0 && sip_client_id < MAX_SIP_CLIENTS) {
+				line_stats[sip_client_id].txpkts++;
+				line_stats[sip_client_id].txbytes += frame.datalen;
+			} else {
+				ast_debug(9, "Wrong sip client id: %d\n", sip_client_id);
+			}
+			// write header values into frame so asterisk uses the same RTP header as DSP
+			frame.seqno = (ntohl(packet_buf32[0]) & 0xffff);
+			frame.ts =  ntohl(packet_buf32[1]);
+			frame.ssrc = ntohl(packet_buf32[2]);
+		} else if (packet_type == CHAN_VOICEMNGR_RTCP_SR || packet_type == CHAN_VOICEMNGR_RTCP_RR) {
+			frame.frametype = AST_FRAME_RTCP;
+			frame.data.ptr = ap->rtp;
+			frame.datalen = ap->rtp_size;
+			frame.subclass.integer = (packet_type == CHAN_VOICEMNGR_RTCP_SR ? RTCP_SR : RTCP_RR);
+			chan_voicemngr_process_outgoing_rtcp_packet(sub, ap->rtp, ap->rtp_size);
 		} else {
-			ast_debug(9, "Wrong sip client id: %d\n", sip_client_id);
-		}
-		// write header values into frame so asterisk uses the same RTP header as DSP
-		frame.seqno = (ntohl(packet_buf32[0]) & 0xffff);
-		frame.ts =  ntohl(packet_buf32[1]);
-		frame.ssrc = ntohl(packet_buf32[2]);
-	} else if (packet_type == CHAN_VOICEMNGR_RTCP_SR || packet_type == CHAN_VOICEMNGR_RTCP_RR) {
-		frame.frametype = AST_FRAME_RTCP;
-		frame.data.ptr = ap->rtp;
-		frame.datalen = ap->rtp_size;
-		frame.subclass.integer = (packet_type == CHAN_VOICEMNGR_RTCP_SR ? RTCP_SR : RTCP_RR);
-		chan_voicemngr_process_outgoing_rtcp_packet(sub, ap->rtp, ap->rtp_size);
-	} else {
-		//ast_debug(5, "Dropping RTP frame of type %d.\n", packet_type);
-		drop_frame=1;
-		//pvt_unlock(sub->parent);
-	}
-	//ast_mutex_unlock(&sub->parent->lock);
-	//pvt_unlock(sub->parent);
+			ast_debug(5, "Drop RTP frame of type %d.\n", packet_type);
+		}
 
-	if (owner) {
-		if (!drop_frame && (ast_channel_state(owner) == AST_STATE_UP || ast_channel_state(owner) == AST_STATE_RING)) {
-			ast_queue_frame(owner, &frame);
+		if (packet_type != CHAN_VOICEMNGR_UNKNOWN && !drop_frame && sub->owner) {
+			ast_channel_ref(sub->owner);
+			ast_queue_frame(sub->owner, &frame);
+			ast_channel_unref(sub->owner);
 		}
-		ast_channel_unref(owner);
 	}
 }
 
@@ -5038,7 +5024,6 @@ static void ubus_call_answer_rtp_stats(struct ubus_request *req, int type, struc
 
 static int endpt_get_rtp_stats(struct chan_voicemngr_subchannel *sub) {
 	int line = sub->parent->line_id;
-	struct ubus_context *local_ctx;
 	struct blob_buf bb;
 	int ret;
 
@@ -5053,12 +5038,6 @@ static int endpt_get_rtp_stats(struct chan_voicemngr_subchannel *sub) {
 		return -1;
 	}
 
-	local_ctx = ubus_connect(NULL);
-	if (!local_ctx) {
-		ast_log(LOG_ERROR, "%s(): ubus_connect() failed\n", __func__);
-		return -1;
-	}
-
 	memset(&bb, 0, sizeof(bb));
 	if(blob_buf_init(&bb, 0)) {
 		return -1;
@@ -5066,12 +5045,9 @@ static int endpt_get_rtp_stats(struct chan_voicemngr_subchannel *sub) {
 
 	blobmsg_add_u32(&bb, "line", line);
 	blobmsg_add_u8(&bb, "reset", 1); // always reset RTP stats after get them
-
 	ast_log(LOG_DEBUG, "thread %d: ubus call endpt rtp_stats \"{'line':%d,'reset':true}\"", ast_get_tid(), line);
-	ret = ubus_invoke(local_ctx, endpt_id, "rtp_stats", bb.head, ubus_call_answer_rtp_stats, sub, 500);
-
+	ret = ubus_invoke(get_shared_context(__FUNCTION__), endpt_id, "rtp_stats", bb.head, ubus_call_answer_rtp_stats, sub, 500);
 	blob_buf_free(&bb);
-	ast_ubus_free_context(local_ctx);
 
 	if (ret != UBUS_STATUS_OK) {
 		ast_log(LOG_DEBUG, "ubus_invoke for rtp_stats failed with return value %d\n", ret);
-- 
GitLab