From 27a076f2f6c6007c0ba41d2868a803c4d841e815 Mon Sep 17 00:00:00 2001
From: nanang <nanang@localhost>
Date: Tue, 23 Apr 2019 08:42:45 +0000
Subject: [PATCH] Fixed #2191: - Stricter double timer entry scheduling
prevention. - Integrate group lock in SIP transport, e.g: for add/dec ref,
for timer scheduling.
---
pjlib/include/pj/timer.h | 2 +-
pjlib/src/pj/timer.c | 11 +++++++-
pjsip/include/pjsip/sip_endpoint.h | 39 +++++++++++++++++++++++++++++
pjsip/include/pjsip/sip_transport.h | 2 ++
pjsip/src/pjsip/sip_endpoint.c | 36 ++++++++++++++++++++++++++
pjsip/src/pjsip/sip_transport.c | 36 +++++++++++++++++++++-----
pjsip/src/pjsip/sip_transport_tcp.c | 10 +++++---
pjsip/src/pjsip/sip_transport_tls.c | 14 ++++++++---
pjsip/src/pjsip/sip_transport_udp.c | 2 ++
9 files changed, 137 insertions(+), 15 deletions(-)
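
Taken together, the hunks below make each transport publish its group lock through the new pjsip_transport.grp_lock field, so the transport manager and the endpoint's timer heap can take references on it. A condensed sketch of that ownership pattern, assuming a hypothetical my_transport wrapper and my_on_destroy handler (neither name is part of the patch):

    #include <pjsip.h>

    struct my_transport {
        pjsip_transport base;       /* embedded common transport header   */
        pj_grp_lock_t  *grp_lock;   /* controls the transport's lifetime  */
    };

    /* Invoked by the group lock once the last reference is released. */
    static void my_on_destroy(void *arg)
    {
        struct my_transport *tp = (struct my_transport*)arg;
        /* ... free sockets, pools, and other resources owned by tp ... */
        PJ_UNUSED_ARG(tp);
    }

    static pj_status_t my_create(pj_pool_t *pool, struct my_transport *tp)
    {
        pj_status_t status;

        status = pj_grp_lock_create(pool, NULL, &tp->grp_lock);
        if (status != PJ_SUCCESS)
            return status;

        /* The transport holds one reference and registers its cleanup
         * handler, as the TCP/TLS/UDP hunks below do. */
        pj_grp_lock_add_ref(tp->grp_lock);
        pj_grp_lock_add_handler(tp->grp_lock, pool, tp, &my_on_destroy);

        /* New in this patch: expose the lock on the common base so that
         * pjsip_transport_add_ref()/dec_ref() and the idle timer use it. */
        tp->base.grp_lock = tp->grp_lock;

        return PJ_SUCCESS;
    }
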
diff --git a/pjlib/include/pj/timer.h b/pjlib/include/pj/timer.h
index df6155a81..14857b872 100644
--- a/pjlib/include/pj/timer.h
+++ b/pjlib/include/pj/timer.h
@@ -252,9 +252,9 @@ PJ_DECL(pj_status_t) pj_timer_heap_schedule( pj_timer_heap_t *ht,
*
* @param ht The timer heap.
* @param entry The entry to be registered.
+ * @param delay The interval to expire.
* @param id_val The value to be set to the "id" field of the timer entry
* once the timer is scheduled.
- * @param delay The interval to expire.
* @param grp_lock The group lock.
*
* @return PJ_SUCCESS, or the appropriate error code.
diff --git a/pjlib/src/pj/timer.c b/pjlib/src/pj/timer.c
index f0a2cbbc9..cbdd9791f 100644
--- a/pjlib/src/pj/timer.c
+++ b/pjlib/src/pj/timer.c
@@ -502,7 +502,7 @@ static pj_status_t schedule_w_grp_lock(pj_timer_heap_t *ht,
PJ_ASSERT_RETURN(entry->cb != NULL, PJ_EINVAL);
/* Prevent same entry from being scheduled more than once */
- PJ_ASSERT_RETURN(entry->_timer_id < 1, PJ_EINVALIDOP);
+ //PJ_ASSERT_RETURN(entry->_timer_id < 1, PJ_EINVALIDOP);
#if PJ_TIMER_DEBUG
entry->src_file = src_file;
@@ -512,6 +512,15 @@ static pj_status_t schedule_w_grp_lock(pj_timer_heap_t *ht,
PJ_TIME_VAL_ADD(expires, *delay);
lock_timer_heap(ht);
+
+ /* Prevent same entry from being scheduled more than once */
+ if (pj_timer_entry_running(entry)) {
+ unlock_timer_heap(ht);
+ PJ_LOG(3,(THIS_FILE, "Bug! Rescheduling outstanding entry (%p)",
+ entry));
+ return PJ_EINVALIDOP;
+ }
+
status = schedule_entry(ht, entry, &expires);
if (status == PJ_SUCCESS) {
if (set_id)
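
With the assertion above commented out, double scheduling is now caught at run time while the heap lock is held: an entry that pj_timer_entry_running() reports as queued is rejected with PJ_EINVALIDOP plus a log line instead of tripping an assert. Callers that may legitimately re-arm a timer are expected to cancel it first, roughly as in this sketch (my_heap, my_entry and the 5-second delay are illustrative):

    #include <pjlib.h>

    static pj_status_t rearm_timer(pj_timer_heap_t *my_heap,
                                   pj_timer_entry *my_entry,
                                   pj_grp_lock_t *grp_lock)
    {
        pj_time_val delay = {5, 0};     /* illustrative 5s delay */

        /* Cancel a pending run first; scheduling a queued entry would now
         * fail with PJ_EINVALIDOP rather than assert. */
        if (pj_timer_entry_running(my_entry))
            pj_timer_heap_cancel(my_heap, my_entry);

        return pj_timer_heap_schedule_w_grp_lock(my_heap, my_entry, &delay,
                                                 PJ_TRUE, grp_lock);
    }
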
diff --git a/pjsip/include/pjsip/sip_endpoint.h b/pjsip/include/pjsip/sip_endpoint.h
index 99683fbe1..ee967f8d9 100644
--- a/pjsip/include/pjsip/sip_endpoint.h
+++ b/pjsip/include/pjsip/sip_endpoint.h
@@ -138,6 +138,7 @@ PJ_DECL(pj_status_t) pjsip_endpt_handle_events( pjsip_endpoint *endpt,
PJ_DECL(pj_status_t) pjsip_endpt_handle_events2(pjsip_endpoint *endpt,
const pj_time_val *max_timeout,
unsigned *count);
+
/**
* Schedule timer to endpoint's timer heap. Application must poll the endpoint
* periodically (by calling #pjsip_endpt_handle_events) to ensure that the
@@ -166,6 +167,44 @@ PJ_DECL(pj_status_t) pjsip_endpt_schedule_timer( pjsip_endpoint *endpt,
const pj_time_val *delay );
#endif
+/**
+ * Schedule timer to endpoint's timer heap with group lock. Application must
+ * poll the endpoint periodically (by calling #pjsip_endpt_handle_events) to
+ * ensure that the timer events are handled in a timely manner. When the
+ * timeout for the timer has elapsed, the callback specified in the entry
+ * argument will be called. This function, like all other endpoint functions,
+ * is thread safe.
+ *
+ * @param endpt The endpoint.
+ * @param entry The timer entry.
+ * @param delay The relative delay of the timer.
+ * @param id_val The value to be set to the "id" field of the timer entry
+ * once the timer is scheduled.
+ * @param grp_lock The group lock.
+ * @return PJ_SUCCESS (zero) if successful.
+ */
+#if PJ_TIMER_DEBUG
+#define pjsip_endpt_schedule_timer_w_grp_lock(ept,ent,d,id,gl) \
+ pjsip_endpt_schedule_timer_w_grp_lock_dbg(ept,ent,d,id,gl,\
+ __FILE__, __LINE__)
+
+PJ_DECL(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock_dbg(
+ pjsip_endpoint *endpt,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock,
+ const char *src_file,
+ int src_line);
+#else
+PJ_DECL(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock(
+ pjsip_endpoint *endpt,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock );
+#endif
+
/**
* Cancel the previously registered timer.
* This function, like all other endpoint functions, is thread safe.
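
The new wrapper is meant to be used like pjsip_endpt_schedule_timer(), with the extra group-lock argument keeping the owning object alive while the entry is queued and while its callback runs. A usage sketch under that assumption (my_idle_cb, user_data and the 2-second delay are illustrative, not taken from the patch):

    #include <pjsip.h>

    static pj_status_t start_idle_timer(pjsip_endpoint *endpt,
                                        pj_timer_entry *entry,
                                        void *user_data,
                                        pj_timer_heap_callback *my_idle_cb,
                                        pj_grp_lock_t *grp_lock)
    {
        pj_time_val delay = {2, 0};     /* illustrative delay */

        pj_timer_entry_init(entry, PJ_FALSE, user_data, my_idle_cb);

        /* The timer heap keeps a reference on grp_lock for as long as the
         * entry is scheduled, so user_data cannot be destroyed under the
         * callback. */
        return pjsip_endpt_schedule_timer_w_grp_lock(endpt, entry, &delay,
                                                     PJ_TRUE, grp_lock);
    }
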
diff --git a/pjsip/include/pjsip/sip_transport.h b/pjsip/include/pjsip/sip_transport.h
index addc8d521..d1ff3618b 100644
--- a/pjsip/include/pjsip/sip_transport.h
+++ b/pjsip/include/pjsip/sip_transport.h
@@ -810,6 +810,8 @@ struct pjsip_transport
pj_pool_t *pool; /**< Pool used by transport. */
pj_atomic_t *ref_cnt; /**< Reference counter. */
pj_lock_t *lock; /**< Lock object. */
+ pj_grp_lock_t *grp_lock; /**< Group lock for sync with
+ ioqueue and timer. */
pj_bool_t tracing; /**< Tracing enabled? */
pj_bool_t is_shutdown; /**< Being shutdown? */
pj_bool_t is_destroying; /**< Destroy in progress? */
diff --git a/pjsip/src/pjsip/sip_endpoint.c b/pjsip/src/pjsip/sip_endpoint.c
index d810781d5..71bc761c2 100644
--- a/pjsip/src/pjsip/sip_endpoint.c
+++ b/pjsip/src/pjsip/sip_endpoint.c
@@ -802,6 +802,42 @@ PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer( pjsip_endpoint *endpt,
}
#endif
+/*
+ * Schedule timer with group lock.
+ */
+#if PJ_TIMER_DEBUG
+PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock_dbg(
+ pjsip_endpoint *endpt,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock,
+ const char *src_file,
+ int src_line)
+{
+ PJ_LOG(6, (THIS_FILE, "pjsip_endpt_schedule_timer_w_grp_lock"
+ "(entry=%p, delay=%u.%u, grp_lock=%p)",
+ entry, delay->sec, delay->msec, grp_lock));
+ return pj_timer_heap_schedule_w_grp_lock_dbg(endpt->timer_heap, entry,
+ delay, id_val, grp_lock,
+ src_file, src_line);
+}
+#else
+PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock(
+ pjsip_endpoint *endpt,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock )
+{
+ PJ_LOG(6, (THIS_FILE, "pjsip_endpt_schedule_timer_w_grp_lock"
+ "(entry=%p, delay=%u.%u, grp_lock=%p)",
+ entry, delay->sec, delay->msec, grp_lock));
+ return pj_timer_heap_schedule_w_grp_lock( endpt->timer_heap, entry,
+ delay, id_val, grp_lock );
+}
+#endif
+
/*
* Cancel the previously registered timer.
*/
diff --git a/pjsip/src/pjsip/sip_transport.c b/pjsip/src/pjsip/sip_transport.c
index 67e235a39..529604399 100644
--- a/pjsip/src/pjsip/sip_transport.c
+++ b/pjsip/src/pjsip/sip_transport.c
@@ -1012,6 +1012,9 @@ static void transport_idle_callback(pj_timer_heap_t *timer_heap,
PJ_UNUSED_ARG(timer_heap);
+ if (entry->id == PJ_FALSE)
+ return;
+
entry->id = PJ_FALSE;
pjsip_transport_destroy(tp);
}
@@ -1049,6 +1052,10 @@ PJ_DEF(pj_status_t) pjsip_transport_add_ref( pjsip_transport *tp )
PJ_ASSERT_RETURN(tp != NULL, PJ_EINVAL);
+ /* Add ref transport group lock, if any */
+ if (tp->grp_lock)
+ pj_grp_lock_add_ref(tp->grp_lock);
+
/* Cache some vars for checking transport validity later */
tpmgr = tp->tpmgr;
key_len = sizeof(tp->key.type) + tp->addr_len;
@@ -1063,8 +1070,8 @@ PJ_DEF(pj_status_t) pjsip_transport_add_ref( pjsip_transport *tp )
pj_atomic_get(tp->ref_cnt) == 1)
{
if (tp->idle_timer.id != PJ_FALSE) {
- pjsip_endpt_cancel_timer(tp->tpmgr->endpt, &tp->idle_timer);
tp->idle_timer.id = PJ_FALSE;
+ pjsip_endpt_cancel_timer(tp->tpmgr->endpt, &tp->idle_timer);
}
}
pj_lock_release(tpmgr->lock);
@@ -1114,14 +1121,23 @@ PJ_DEF(pj_status_t) pjsip_transport_dec_ref( pjsip_transport *tp )
delay.msec = 0;
}
- pj_assert(tp->idle_timer.id == 0);
- tp->idle_timer.id = PJ_TRUE;
- pjsip_endpt_schedule_timer(tp->tpmgr->endpt, &tp->idle_timer,
- &delay);
+ /* Avoid double timer entry scheduling */
+ if (pj_timer_entry_running(&tp->idle_timer))
+ pjsip_endpt_cancel_timer(tp->tpmgr->endpt, &tp->idle_timer);
+
+ pjsip_endpt_schedule_timer_w_grp_lock(tp->tpmgr->endpt,
+ &tp->idle_timer,
+ &delay,
+ PJ_TRUE,
+ tp->grp_lock);
}
pj_lock_release(tpmgr->lock);
}
+ /* Dec ref transport group lock, if any */
+ if (tp->grp_lock)
+ pj_grp_lock_dec_ref(tp->grp_lock);
+
return PJ_SUCCESS;
}
@@ -1168,6 +1184,10 @@ PJ_DEF(pj_status_t) pjsip_transport_register( pjsip_tpmgr *mgr,
/* Register new entry */
pj_hash_set(tp->pool, mgr->table, &tp->key, key_len, hval, tp);
+ /* Add ref transport group lock, if any */
+ if (tp->grp_lock)
+ pj_grp_lock_add_ref(tp->grp_lock);
+
pj_lock_release(mgr->lock);
TRACE_((THIS_FILE,"Transport %s registered: type=%s, remote=%s:%d",
@@ -1199,8 +1219,8 @@ static pj_status_t destroy_transport( pjsip_tpmgr *mgr,
*/
//pj_assert(tp->idle_timer.id == PJ_FALSE);
if (tp->idle_timer.id != PJ_FALSE) {
- pjsip_endpt_cancel_timer(mgr->endpt, &tp->idle_timer);
tp->idle_timer.id = PJ_FALSE;
+ pjsip_endpt_cancel_timer(mgr->endpt, &tp->idle_timer);
}
/*
@@ -1226,6 +1246,10 @@ static pj_status_t destroy_transport( pjsip_tpmgr *mgr,
pj_lock_release(mgr->lock);
pj_lock_release(tp->lock);
+ /* Dec ref transport group lock, if any */
+ if (tp->grp_lock)
+ pj_grp_lock_dec_ref(tp->grp_lock);
+
/* Destroy. */
return tp->destroy(tp);
}
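
Two details in the hunks above are easy to miss: the idle-timer id flag is now cleared before pjsip_endpt_cancel_timer() is called, and transport_idle_callback() returns early when the flag is already clear, so a callback that races with the cancellation becomes a no-op; and dec_ref now cancels a still-running idle timer before re-arming it, matching the stricter check added in timer.c. The cancellation side in isolation, as a sketch (stop_idle_timer is an illustrative name):

    #include <pjsip.h>

    /* Cancel an idle timer so that a concurrently dispatched callback sees
     * id == PJ_FALSE and returns without doing any work. */
    static void stop_idle_timer(pjsip_endpoint *endpt, pj_timer_entry *entry)
    {
        if (entry->id != PJ_FALSE) {
            /* Clear the flag first, then cancel, mirroring the patched
             * pjsip_transport_add_ref() and destroy_transport(). */
            entry->id = PJ_FALSE;
            pjsip_endpt_cancel_timer(endpt, entry);
        }
    }
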
diff --git a/pjsip/src/pjsip/sip_transport_tcp.c b/pjsip/src/pjsip/sip_transport_tcp.c
index fe327459e..374bf461b 100644
--- a/pjsip/src/pjsip/sip_transport_tcp.c
+++ b/pjsip/src/pjsip/sip_transport_tcp.c
@@ -692,6 +692,8 @@ static pj_status_t tcp_create( struct tcp_listener *listener,
pj_grp_lock_add_ref(tcp->grp_lock);
pj_grp_lock_add_handler(tcp->grp_lock, pool, tcp, &tcp_on_destroy);
+ tcp->base.grp_lock = tcp->grp_lock;
+
/* Create active socket */
pj_activesock_cfg_default(&asock_cfg);
asock_cfg.async_cnt = 1;
@@ -746,7 +748,11 @@ static pj_status_t tcp_create( struct tcp_listener *listener,
return PJ_SUCCESS;
on_error:
- tcp_destroy(&tcp->base, status);
+ if (tcp->grp_lock && pj_grp_lock_get_ref(tcp->grp_lock))
+ tcp_destroy(&tcp->base, status);
+ else
+ tcp_on_destroy(tcp);
+
return status;
}
@@ -867,8 +873,6 @@ static pj_status_t tcp_destroy(pjsip_transport *transport,
tcp->grp_lock = NULL;
pj_grp_lock_dec_ref(grp_lock);
/* Transport may have been deleted at this point */
- } else {
- tcp_on_destroy(tcp);
}
return PJ_SUCCESS;
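
The error path above now distinguishes two cases: once the group lock holds a reference, cleanup has to go through tcp_destroy() so the reference is dropped and tcp_on_destroy() runs as the lock's handler; before that point the handler is invoked directly. The branch condition in generic form, reusing the my_transport/my_on_destroy names from the sketch at the top of the patch (tcp_destroy()/tls_destroy() do more bookkeeping than this):

    /* Error-path cleanup: only route through the group lock if it already
     * holds a reference; otherwise run the destroy handler by hand. */
    static void my_cleanup_on_error(struct my_transport *tp)
    {
        if (tp->grp_lock && pj_grp_lock_get_ref(tp->grp_lock) > 0)
            pj_grp_lock_dec_ref(tp->grp_lock);  /* handler fires on last ref */
        else
            my_on_destroy(tp);
    }
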
diff --git a/pjsip/src/pjsip/sip_transport_tls.c b/pjsip/src/pjsip/sip_transport_tls.c
index d3afae5e9..dd3a4d639 100644
--- a/pjsip/src/pjsip/sip_transport_tls.c
+++ b/pjsip/src/pjsip/sip_transport_tls.c
@@ -165,6 +165,10 @@ static pj_status_t tls_create(struct tls_listener *listener,
struct tls_transport **p_tls);
+/* Clean up TLS resources */
+static void tls_on_destroy(void *arg);
+
+
static void tls_perror(const char *sender, const char *title,
pj_status_t status)
{
@@ -893,7 +897,11 @@ static pj_status_t tls_create( struct tls_listener *listener,
return PJ_SUCCESS;
on_error:
- tls_destroy(&tls->base, status);
+ if (tls->grp_lock && pj_grp_lock_get_ref(tls->grp_lock))
+ tls_destroy(&tls->base, status);
+ else
+ tls_on_destroy(tls);
+
return status;
}
@@ -1048,8 +1056,6 @@ static pj_status_t tls_destroy(pjsip_transport *transport,
tls->grp_lock = NULL;
pj_grp_lock_dec_ref(grp_lock);
/* Transport may have been deleted at this point */
- } else {
- tls_on_destroy(tls);
}
return PJ_SUCCESS;
@@ -1235,7 +1241,7 @@ static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
pj_ssl_sock_set_user_data(tls->ssock, tls);
/* Set up the group lock */
- tls->grp_lock = glock;
+ tls->grp_lock = tls->base.grp_lock = glock;
pj_grp_lock_add_ref(tls->grp_lock);
pj_grp_lock_add_handler(tls->grp_lock, pool, tls, &tls_on_destroy);
diff --git a/pjsip/src/pjsip/sip_transport_udp.c b/pjsip/src/pjsip/sip_transport_udp.c
index dbda474cf..b82d519c9 100644
--- a/pjsip/src/pjsip/sip_transport_udp.c
+++ b/pjsip/src/pjsip/sip_transport_udp.c
@@ -691,6 +691,8 @@ static pj_status_t register_to_ioqueue(struct udp_transport *tp)
pj_grp_lock_add_ref(tp->grp_lock);
pj_grp_lock_add_handler(tp->grp_lock, tp->base.pool, tp,
&udp_on_destroy);
+
+ tp->base.grp_lock = tp->grp_lock;
}
/* Register to ioqueue. */
--
2.20.1