/*
 * NOTE(review): the tokens "Newer" / "Older" here are diff-viewer column
 * labels that leaked into this extract, not source code.  Neutralized into
 * a comment so they cannot be misread as identifiers.
 */
/*
 * NOTE(review): fragment — the enclosing function header (presumably
 * "void lws_context_destroy2(struct lws_context *context)") and the
 * declarations of vh, vh1, n and nu sit above the visible extract.
 * Second stage of context teardown: runs after the event loop has
 * stopped servicing, frees per-thread / per-vhost / SSL / platform
 * allocations, then hands off to lws_context_destroy3().
 */
lwsl_info("%s: ctx %p\n", __func__, context);
/* hold the context lock across the whole second-stage teardown */
lws_context_lock(context, "context destroy 2"); /* ------ context { */
context->being_destroyed2 = 1;
/* release the pollfd array owned by service thread 0 */
if (context->pt[0].fds)
lws_free_set_NULL(context->pt[0].fds);
/*
 * free all the per-vhost allocations
 */
vh = context->vhost_list;
while (vh) {
/*
 * NOTE(review): this loop only walks the vhost list — no per-vhost
 * destroy/free call is visible before advancing vh.  Upstream calls a
 * vhost destroy helper here; looks like a line dropped by extraction,
 * confirm against the original file.
 */
vh1 = vh->vhost_next;
vh = vh1;
}
/* remove ourselves from the pending destruction list */
while (context->vhost_pending_destruction_list)
/* removes itself from list */
__lws_vhost_destroy2(context->vhost_pending_destruction_list);
lws_stats_log_dump(context);
/* tear down TLS and platform-specific state */
lws_ssl_context_destroy(context);
lws_plat_context_late_destroy(context);
#if defined(LWS_WITH_PEER_LIMITS)
/* drain each peer-hash bucket, freeing every struct lws_peer, then
 * free the table itself */
for (nu = 0; nu < context->pl_hash_elements; nu++) {
/* NOTE(review): the list-head argument of lws_start_foreach_llp
 * (presumably &context->pl_hash_table[nu]) appears truncated in this
 * extract — confirm against upstream */
lws_start_foreach_llp(struct lws_peer **, peer,
struct lws_peer *df = *peer;
*peer = df->next;
lws_free(df);
continue;
} lws_end_foreach_llp(peer, next);
}
lws_free(context->pl_hash_table);
#endif
/* caller-supplied baggage is released with plain free(), per the
 * external_baggage_free_on_destroy contract */
if (context->external_baggage_free_on_destroy)
free(context->external_baggage_free_on_destroy);
lws_check_deferred_free(context, 0, 1);
#if LWS_MAX_SMP > 1
/* NOTE(review): empty conditional block — upstream destroys the
 * per-thread lock(s) here; body appears lost in this extract */
#endif
/*
 * If the event lib needs a second async stage (returns nonzero), flag
 * that the final teardown must wait until its internal loops have
 * stopped, and bail out — lws_context_destroy() finishes it later.
 */
if (context->event_loop_ops->destroy_context2)
if (context->event_loop_ops->destroy_context2(context)) {
context->finalize_destroy_after_internal_loops_stopped = 1;
return;
}
/*
 * With our own (non-foreign) event loop, defer the final free while
 * any service thread is still inside lws_service()
 */
if (!context->pt[0].event_loop_foreign)
for (n = 0; n < context->count_threads; n++)
if (context->pt[n].inside_service)
return;
lws_context_unlock(context); /* } context ------------------- */
/* final stage: free the context object itself */
lws_context_destroy3(context);
}
/*
* Begin the context takedown
*/
/*
 * lws_context_destroy() — entry point for context teardown (stage 1).
 * Re-entrant by design: it may be called again after an async event-lib
 * close completes, and routes to the correct later stage via the
 * finalize_/being_destroyed* flags.
 *
 * NOTE(review): this extract is damaged — the return type (upstream:
 * void) and opening brace are missing here, and several statements
 * below are visibly truncated; each spot is flagged inline.  Confirm
 * all of them against the upstream file before relying on this text.
 */
lws_context_destroy(struct lws_context *context)
volatile struct lws_foreign_thread_pollfd *ftp, *next;
volatile struct lws_context_per_thread *vpt;
struct lws_context_per_thread *pt;
int n, m;
/* NOTE(review): dangling condition — upstream has "return;" as the
 * body of this NULL guard; the line appears dropped by extraction */
if (!context)
/* event-lib loops have now stopped: run the deferred stage-2 hook,
 * then the final free */
if (context->finalize_destroy_after_internal_loops_stopped) {
if (context->event_loop_ops->destroy_context2)
context->event_loop_ops->destroy_context2(context);
lws_context_destroy3(context);
return;
}
/* NOTE(review): upstream guards this whole section with
 * "if (context->being_destroyed1) { ... }"; that outer condition is
 * not visible in this extract */
if (!context->being_destroyed2) {
lws_context_destroy2(context);
return;
}
/* NOTE(review): truncated call — upstream continues
 * "__func__, context);" on the next line */
lwsl_info("%s: ctx %p: already being destroyed\n",
lws_context_destroy3(context);
/* NOTE(review): upstream has "return;" and a closing "}" here before
 * the first-time-through path below begins */
lwsl_info("%s: ctx %p\n", __func__, context);
m = context->count_threads;
context->being_destroyed = 1;
context->requested_kill = 1;
/* NOTE(review): "struct lws wsi;" — the fake wsi used to carry the
 * context through protocol callbacks — is declared upstream but its
 * declaration is missing from this extract */
memset(&wsi, 0, sizeof(wsi));
wsi.context = context;
#ifdef LWS_LATENCY
if (context->worst_latency_info[0])
lwsl_notice("Worst latency: %s\n", context->worst_latency_info);
#endif
/* NOTE(review): upstream wraps the section below in
 * "while (m--) { pt = &context->pt[m]; ... }"; the loop header and
 * pt assignment are missing here, so pt is read unassigned as shown */
vpt = (volatile struct lws_context_per_thread *)pt;
/* free any pollfd-change requests queued by foreign threads */
ftp = vpt->foreign_pfd_list;
while (ftp) {
next = ftp->next;
lws_free((void *)ftp);
ftp = next;
}
vpt->foreign_pfd_list = NULL;
/* close every wsi still live on this service thread */
for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
/* NOTE(review): upstream derives wsi here via
 * "struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);" with a
 * NULL check — those lines are missing from this extract */
if (wsi->event_pipe)
lws_destroy_event_pipe(wsi);
else
lws_close_free_wsi(wsi,
LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
"ctx destroy"
/* no protocol close */);
/* NOTE(review): the closing "}" of the for loop above appears to be
 * missing before this mutex teardown */
lws_pt_mutex_destroy(pt);
/*
 * inform all the protocols that they are done and will have no more
 * callbacks.
 *
 * We can't free things until after the event loop shuts down.
 */
/* NOTE(review): the comment above was unterminated in the extract
 * (missing * /); terminator restored so the code below is not
 * swallowed.  The section below is also garbled: upstream assigns
 * "wsi.vhost = ..." under the protocol_init_done guard and iterates
 * the vhost list calling a per-vhost destroy stage-1 helper */
if (context->protocol_init_done)
vh = context->vhost_list;
struct lws_vhost *vhn = vh->vhost_next;
lws_plat_context_early_destroy(context);
/*
 * We face two different needs depending if foreign loop or not.
 *
 * 1) If foreign loop, we really want to advance the destroy_context()
 * past here, and block only for libuv-style async close completion.
 *
 * 2a) If poll, and we exited by ourselves and are calling a final
 * destroy_context() outside of any service already, we want to
 * advance all the way in one step.
 *
 * 2b) If poll, and we are reacting to a SIGINT, service thread(s) may
 * be in poll wait or servicing. We can't advance the
 * destroy_context() to the point it's freeing things; we have to
 * leave that for the final destroy_context() after the service
 * thread(s) are finished calling for service.
 */
if (context->event_loop_ops->destroy_context1) {
context->event_loop_ops->destroy_context1(context);
/* NOTE(review): extract ends mid-function — upstream returns after
 * destroy_context1() and only falls through to destroy2() when no
 * stage-1 hook exists; the closing braces are past this window */
lws_context_destroy2(context);