#if defined(LWS_WITH_IPV6)
	if (!lws_check_opt(info->options, LWS_SERVER_OPTION_DISABLE_IPV6))
		lwsl_info("IPV6 compiled in and enabled\n");
	else
		lwsl_info("IPV6 compiled in but disabled\n");
#else
	lwsl_info("IPV6 not compiled in\n");
#endif
lwsl_info(" LWS_DEF_HEADER_LEN : %u\n", LWS_DEF_HEADER_LEN);
lwsl_info(" LWS_MAX_PROTOCOLS : %u\n", LWS_MAX_PROTOCOLS);
lwsl_info(" LWS_MAX_SMP : %u\n", LWS_MAX_SMP);
lwsl_info(" sizeof (*info) : %ld\n", (long)sizeof(*info));
lwsl_info(" LWS_WITH_STATS : on\n");
lwsl_info(" SYSTEM_RANDOM_FILEPATH: '%s'\n", SYSTEM_RANDOM_FILEPATH);
#if defined(LWS_WITH_HTTP2)
lwsl_info(" HTTP2 support : available\n");
#else
if (lws_plat_context_early_init())
return NULL;
context = lws_zalloc(sizeof(struct lws_context), "context");
if (!context) {
lwsl_err("No memory for websocket context\n");
return NULL;
}
#if defined(LWS_WITH_TLS)
#if defined(LWS_WITH_MBEDTLS)
context->tls_ops = &tls_ops_mbedtls;
#else
context->tls_ops = &tls_ops_openssl;
#endif
#endif
if (info->pt_serv_buf_size)
context->pt_serv_buf_size = info->pt_serv_buf_size;
else
context->pt_serv_buf_size = 4096;
#if defined(LWS_ROLE_H2)
	role_ops_h2.init_context(context, info);
#endif
#if LWS_MAX_SMP > 1
pthread_mutex_init(&context->lock, NULL);
#endif
#if defined(LWS_WITH_ESP32)
context->last_free_heap = esp_get_free_heap_size();
#endif
/* default to just the platform fops implementation */
context->fops_platform.LWS_FOP_OPEN = _lws_plat_file_open;
context->fops_platform.LWS_FOP_CLOSE = _lws_plat_file_close;
context->fops_platform.LWS_FOP_SEEK_CUR = _lws_plat_file_seek_cur;
context->fops_platform.LWS_FOP_READ = _lws_plat_file_read;
context->fops_platform.LWS_FOP_WRITE = _lws_plat_file_write;
/*
* arrange a linear linked-list of fops starting from context->fops
*
* platform fops
* [ -> fops_zip (copied into context so .next settable) ]
* [ -> info->fops ]
*/
context->fops = &context->fops_platform;
prev = (struct lws_plat_file_ops *)context->fops;
#if defined(LWS_WITH_ZIP_FOPS)
/* make a soft copy so we can set .next */
context->fops_zip = fops_zip;
prev->next = &context->fops_zip;
prev = (struct lws_plat_file_ops *)prev->next;
#endif
/* if user provided fops, tack them on the end of the list */
if (info->fops)
		prev->next = info->fops;
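	/*
	 * Illustrative sketch (not from the original source): with zip fops
	 * compiled in and user fops supplied in the info, the fallback chain
	 * walked at file-open time ends up as
	 *
	 *   context->fops -> fops_platform -> fops_zip -> info->fops -> NULL
	 */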
context->reject_service_keywords = info->reject_service_keywords;
if (info->external_baggage_free_on_destroy)
context->external_baggage_free_on_destroy =
info->external_baggage_free_on_destroy;
context->pcontext_finalize = info->pcontext;
context->simultaneous_ssl_restriction =
info->simultaneous_ssl_restriction;
if (pid_daemon) {
context->started_with_parent = pid_daemon;
lwsl_info(" Started with daemon pid %d\n", pid_daemon);
#if defined(__ANDROID__)
	n = getrlimit(RLIMIT_NOFILE, &rt);
if (-1 == n) {
lwsl_err("Get RLIMIT_NOFILE failed!\n");
return NULL;
}
context->max_fds = rt.rlim_cur;
#else
context->max_fds = getdtablesize();
#endif
if (info->count_threads)
context->count_threads = info->count_threads;
else
context->count_threads = 1;
if (context->count_threads > LWS_MAX_SMP)
context->count_threads = LWS_MAX_SMP;
context->token_limits = info->token_limits;
context->options = info->options;
/*
* set the context event loops ops struct
*
* after this, all event_loop actions use the generic ops
*/
#if defined(LWS_WITH_POLL)
context->event_loop_ops = &event_loop_ops_poll;
#endif
if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
#if defined(LWS_WITH_LIBUV)
context->event_loop_ops = &event_loop_ops_uv;
#else
		goto fail_event_libs;
#endif
if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBEV))
#if defined(LWS_WITH_LIBEV)
context->event_loop_ops = &event_loop_ops_ev;
#else
		goto fail_event_libs;
#endif
if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBEVENT))
#if defined(LWS_WITH_LIBEVENT)
context->event_loop_ops = &event_loop_ops_event;
#else
		goto fail_event_libs;
#endif
if (!context->event_loop_ops)
goto fail_event_libs;
lwsl_info("Using event loop: %s\n", context->event_loop_ops->name);
#if defined(LWS_WITH_TLS)
if (info->alpn)
context->tls.alpn_default = info->alpn;
	else {
		char *p = context->tls.alpn_discovered, first = 1;

		LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
			if (ar->alpn) {
				if (!first)
					*p++ = ',';
				p += lws_snprintf(p,
					context->tls.alpn_discovered +
					sizeof(context->tls.alpn_discovered) -
					2 - p, "%s", ar->alpn);
				first = 0;
			}
		} LWS_FOR_EVERY_AVAILABLE_ROLE_END;

		context->tls.alpn_default = context->tls.alpn_discovered;
	}

	lwsl_info("Default ALPN advertisement: %s\n",
		  context->tls.alpn_default);
#endif
if (info->timeout_secs)
context->timeout_secs = info->timeout_secs;
else
context->timeout_secs = AWAITING_TIMEOUT;
context->ws_ping_pong_interval = info->ws_ping_pong_interval;
lwsl_info(" default timeout (secs): %u\n", context->timeout_secs);
if (info->max_http_header_data)
context->max_http_header_data = info->max_http_header_data;
else
if (info->max_http_header_data2)
context->max_http_header_data =
info->max_http_header_data2;
else
context->max_http_header_data = LWS_DEF_HEADER_LEN;
if (info->max_http_header_pool)
context->max_http_header_pool = info->max_http_header_pool;
else
context->max_http_header_pool = context->max_fds;
if (info->fd_limit_per_thread)
context->fd_limit_per_thread = info->fd_limit_per_thread;
else
context->fd_limit_per_thread = context->max_fds /
context->count_threads;
/*
* Allocate the per-thread storage for scratchpad buffers,
* and header data pool
*/
for (n = 0; n < context->count_threads; n++) {
context->pt[n].serv_buf = lws_malloc(context->pt_serv_buf_size,
"pt_serv_buf");
if (!context->pt[n].serv_buf) {
lwsl_err("OOM\n");
return NULL;
}
context->pt[n].context = context;
context->pt[n].tid = n;
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
context->pt[n].http.ah_list = NULL;
		context->pt[n].http.ah_pool_length = 0;
#endif
		lws_pt_mutex_init(&context->pt[n]);
	}
lwsl_info(" Threads: %d each %d fds\n", context->count_threads,
if (!info->ka_interval && info->ka_time > 0) {
lwsl_err("info->ka_interval can't be 0 if ka_time used\n");
return NULL;
}
#if defined(LWS_WITH_PEER_LIMITS)
/* scale the peer hash table according to the max fds for the process,
* so that the max list depth averages 16. Eg, 1024 fd -> 64,
* 102400 fd -> 6400
*/
context->pl_hash_elements =
(context->count_threads * context->fd_limit_per_thread) / 16;
context->pl_hash_table = lws_zalloc(sizeof(struct lws_peer *) *
context->pl_hash_elements, "peer limits hash table");
context->ip_limit_ah = info->ip_limit_ah;
context->ip_limit_wsi = info->ip_limit_wsi;
#endif
	lwsl_info(" mem: context:         %5lu B (%ld ctx + (%ld thr x %d))\n",
		  (long)sizeof(struct lws_context) +
		  (context->count_threads * context->pt_serv_buf_size),
		  (long)sizeof(struct lws_context),
		  (long)context->count_threads,
		  context->pt_serv_buf_size);
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
lwsl_info(" mem: http hdr rsvd: %5lu B (%u thr x (%u + %lu) x %u))\n",
(long)(context->max_http_header_data +
context->max_http_header_pool * context->count_threads,
context->count_threads,
(long)sizeof(struct allocated_headers),
n = sizeof(struct lws_pollfd) * context->count_threads *
context->fd_limit_per_thread;
lwsl_err("OOM allocating %d fds\n", context->max_fds);
if (info->server_string) {
context->server_string = info->server_string;
context->server_string_len = (short)
				strlen(context->server_string);
	}
#if LWS_MAX_SMP > 1
	/* each thread serves his own chunk of fds */
for (n = 1; n < (int)info->count_threads; n++)
context->pt[n].fds = context->pt[n - 1].fds +
context->fd_limit_per_thread;
#endif
	if (lws_plat_init(context, info))
		goto bail;
if (context->event_loop_ops->init_context)
if (context->event_loop_ops->init_context(context, info))
goto bail;
if (context->event_loop_ops->init_pt)
for (n = 0; n < context->count_threads; n++) {
void *lp = NULL;
if (info->foreign_loops)
lp = info->foreign_loops[n];
if (context->event_loop_ops->init_pt(context, lp, n))
goto bail;
}
if (lws_create_event_pipes(context))
goto bail;
/*
* if he's not saying he'll make his own vhosts later then act
* compatibly and make a default vhost using the data in the info
*/
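	/*
	 * Usage sketch (illustrative): with LWS_SERVER_OPTION_EXPLICIT_VHOSTS
	 * set, no vhost exists until the application creates each one itself
	 * after the context is up, eg,
	 *
	 *	info.options |= LWS_SERVER_OPTION_EXPLICIT_VHOSTS;
	 *	context = lws_create_context(&info);
	 *	info.port = 443;
	 *	if (!lws_create_vhost(context, &info))
	 *		lwsl_err("vhost creation failed\n");
	 */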
if (!lws_check_opt(info->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
		if (!lws_create_vhost(context, info)) {
			lwsl_err("Failed to create default vhost\n");
for (n = 0; n < context->count_threads; n++)
lws_free_set_NULL(context->pt[n].serv_buf);
#if defined(LWS_WITH_PEER_LIMITS)
lws_free_set_NULL(context->pl_hash_table);
#endif
lws_free_set_NULL(context->pt[0].fds);
lws_plat_context_late_destroy(context);
			lws_free_set_NULL(context);
			return NULL;
		}
lws_context_init_extensions(info, context);
lwsl_info(" mem: per-conn: %5lu bytes + protocol rx buf\n",
(unsigned long)sizeof(struct lws));
strcpy(context->canonical_hostname, "unknown");
lws_server_get_canonical_hostname(context, info);
#if defined(LWS_HAVE_SYS_CAPABILITY_H) && defined(LWS_HAVE_LIBCAP)
memcpy(context->caps, info->caps, sizeof(context->caps));
context->count_caps = info->count_caps;
#endif
/*
* drop any root privs for this process
* to listen on port < 1023 we would have needed root, but now we are
* listening, we don't want the power for anything else
*/
if (!lws_check_opt(info->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
lws_plat_drop_app_privileges(info);
/* expedite post-context init (eg, protocols) */
lws_cancel_service(context);
#if defined(LWS_WITH_SELFTESTS)
lws_jws_selftest();
#endif
return context;
bail:
lws_context_destroy(context);
return NULL;
fail_event_libs:
lwsl_err("Requested event library support not configured, available:\n");
{
const struct lws_event_loop_ops **elops = available_event_libs;
while (*elops) {
lwsl_err(" - %s\n", (*elops)->name);
elops++;
}
}
lws_free(context);
return NULL;
}
LWS_VISIBLE LWS_EXTERN void
lws_context_deprecate(struct lws_context *context, lws_reload_func cb)
{
struct lws_vhost *vh = context->vhost_list, *vh1;
struct lws *wsi;
/*
* "deprecation" means disable the context from accepting any new
* connections and free up listen sockets to be used by a replacement
* context.
*
* Otherwise the deprecated context remains operational, until its
* number of connected sockets falls to zero, when it is deleted.
*/
/* for each vhost, close his listen socket */
while (vh) {
wsi = vh->lserv_wsi;
if (wsi) {
wsi->socket_is_permanently_unusable = 1;
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "ctx deprecate");
wsi->context->deprecation_pending_listen_close_count++;
/*
* other vhosts can share the listen port, they
* point to the same wsi. So zap those too.
*/
vh1 = context->vhost_list;
while (vh1) {
if (vh1->lserv_wsi == wsi)
vh1->lserv_wsi = NULL;
vh1 = vh1->vhost_next;
}
}
vh = vh->vhost_next;
}
context->deprecated = 1;
context->deprecation_cb = cb;
}
LWS_VISIBLE LWS_EXTERN int
lws_context_is_deprecated(struct lws_context *context)
{
return context->deprecated;
}
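/*
 * Usage sketch (illustrative): an application doing a seamless restart might
 * deprecate the old context so its listen sockets are freed up for rebinding,
 * then bring up the replacement context while the old one drains.
 * "reload_cb" is a hypothetical application callback passed as the
 * lws_reload_func.
 *
 *	lws_context_deprecate(old_context, reload_cb);
 *	new_context = lws_create_context(&info);
 */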
void
lws_vhost_destroy1(struct lws_vhost *vh)
{
const struct lws_protocols *protocol = NULL;
struct lws_context_per_thread *pt;
int n, m = vh->context->count_threads;
struct lws_context *context = vh->context;
struct lws wsi;
lwsl_info("%s\n", __func__);
if (vh->being_destroyed)
return;
vh->being_destroyed = 1;
/*
* Are there other vhosts that are piggybacking on our listen socket?
* If so we need to hand the listen socket off to one of the others
* so it will remain open. If not, leave it attached to the closing
* vhost and it will get closed.
*/
if (vh->lserv_wsi)
lws_start_foreach_ll(struct lws_vhost *, v,
context->vhost_list) {
if (v != vh &&
!v->being_destroyed &&
v->listen_port == vh->listen_port &&
((!v->iface && !vh->iface) ||
(v->iface && vh->iface &&
!strcmp(v->iface, vh->iface)))) {
/*
* this can only be a listen wsi, which is
* restricted... it has no protocol or other
* bindings or states. So we can simply
* swap it to a vhost that has the same
* iface + port, but is not closing.
*/
assert(v->lserv_wsi == NULL);
v->lserv_wsi = vh->lserv_wsi;
vh->lserv_wsi = NULL;
if (v->lserv_wsi)
v->lserv_wsi->vhost = v;
lwsl_notice("%s: listen skt from %s to %s\n",
__func__, vh->name, v->name);
break;
}
} lws_end_foreach_ll(v, vhost_next);
/*
 * Forcibly close every wsi associated with this vhost. That will
* include the listen socket if it is still associated with the closing
* vhost.
*/
while (m--) {
pt = &context->pt[m];
for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
if (!wsi)
continue;
if (wsi->vhost != vh)
continue;
lws_close_free_wsi(wsi,
LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
"vh destroy"
/*
* destroy any pending timed events
*/
while (vh->timed_vh_protocol_list)
lws_timed_callback_remove(vh, vh->timed_vh_protocol_list);
/*
* let the protocols destroy the per-vhost protocol objects
*/
memset(&wsi, 0, sizeof(wsi));
wsi.context = vh->context;
wsi.vhost = vh;
protocol = vh->protocols;
if (protocol && vh->created_vhost_protocols) {
n = 0;
while (n < vh->count_protocols) {
wsi.protocol = protocol;
protocol->callback(&wsi, LWS_CALLBACK_PROTOCOL_DESTROY,
NULL, NULL, 0);
protocol++;
n++;
}
}
/*
* remove vhost from context list of vhosts
*/
lws_start_foreach_llp(struct lws_vhost **, pv, context->vhost_list) {
if (*pv == vh) {
*pv = vh->vhost_next;
break;
}
} lws_end_foreach_llp(pv, vhost_next);
/* add ourselves to the pending destruction list */
vh->vhost_next = vh->context->vhost_pending_destruction_list;
vh->context->vhost_pending_destruction_list = vh;
}
static void
lws_vhost_destroy2(struct lws_vhost *vh)
{
const struct lws_protocols *protocol = NULL;
struct lws_context *context = vh->context;
struct lws_deferred_free *df;
int n;
lwsl_info("%s: %p\n", __func__, vh);
/* if we are still on deferred free list, remove ourselves */
lws_start_foreach_llp(struct lws_deferred_free **, pdf,
context->deferred_free_list) {
if ((*pdf)->payload == vh) {
df = *pdf;
*pdf = df->next;
lws_free(df);
break;
}
} lws_end_foreach_llp(pdf, next);
/* remove ourselves from the pending destruction list */
lws_start_foreach_llp(struct lws_vhost **, pv,
context->vhost_pending_destruction_list) {
if ((*pv) == vh) {
*pv = (*pv)->vhost_next;
break;
}
} lws_end_foreach_llp(pv, vhost_next);
/*
* Free all the allocations associated with the vhost
*/
protocol = vh->protocols;
if (protocol) {
n = 0;
while (n < vh->count_protocols) {
if (vh->protocol_vh_privs &&
vh->protocol_vh_privs[n]) {
lws_free(vh->protocol_vh_privs[n]);
vh->protocol_vh_privs[n] = NULL;
}
protocol++;
n++;
}
}
if (vh->protocol_vh_privs)
lws_free(vh->protocol_vh_privs);
lws_ssl_SSL_CTX_destroy(vh);
lws_free(vh->same_vh_protocol_list);
if (context->plugin_list ||
(context->options & LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
lws_free((void *)vh->protocols);
LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar)
if (ar->destroy_vhost)
ar->destroy_vhost(vh);
LWS_FOR_EVERY_AVAILABLE_ROLE_END;
#ifdef LWS_WITH_ACCESS_LOG
if (vh->log_fd != (int)LWS_INVALID_FILE)
close(vh->log_fd);
#endif
#if defined (LWS_WITH_TLS)
lws_free_set_NULL(vh->tls.alloc_cert_path);
#endif
#if LWS_MAX_SMP > 1
pthread_mutex_destroy(&vh->lock);
#endif
#if defined(LWS_WITH_UNIX_SOCK)
if (LWS_UNIX_SOCK_ENABLED(context)) {
n = unlink(vh->iface);
if (n)
lwsl_info("Closing unix socket %s: errno %d\n",
vh->iface, errno);
}
#endif
/*
* although async event callbacks may still come for wsi handles with
 * pending close in the case of an async event library like libuv,
* they do not refer to the vhost. So it's safe to free.
*/
lwsl_info(" %s: Freeing vhost %p\n", __func__, vh);
}
int
lws_check_deferred_free(struct lws_context *context, int force)
{
struct lws_deferred_free *df;
time_t now = lws_now_secs();
lws_start_foreach_llp(struct lws_deferred_free **, pdf,
context->deferred_free_list) {
if (force ||
lws_compare_time_t(context, now, (*pdf)->deadline) > 5) {
df = *pdf;
*pdf = df->next;
/* finalize vh destruction */
lws_vhost_destroy2(df->payload);
lws_free(df);
continue; /* after deletion we already point to next */
}
} lws_end_foreach_llp(pdf, next);
return 0;
}
LWS_VISIBLE void
lws_vhost_destroy(struct lws_vhost *vh)
{
struct lws_deferred_free *df = lws_malloc(sizeof(*df), "deferred free");
if (!df)
return;
lws_vhost_destroy1(vh);
/* part 2 is deferred to allow all the handle closes to complete */
df->next = vh->context->deferred_free_list;
df->deadline = lws_now_secs();
df->payload = vh;
vh->context->deferred_free_list = df;
}
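/*
 * Illustrative sketch: from the caller's side the vhost destroy is
 * fire-and-forget; part 2 happens later during normal service, once the
 * deadline stamped above is more than 5s stale (see
 * lws_check_deferred_free() above), eg,
 *
 *	lws_vhost_destroy(vh);		 // part 1 + queue deferred free
 *	while (!done)
 *		lws_service(context, 0); // part 2 runs during service
 */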
/*
* When using an event loop, the context destruction is in three separate
* parts. This is to cover both internal and foreign event loops cleanly.
*
* - lws_context_destroy() simply starts a soft close of all wsi and
* related allocations. The event loop continues.
*
* As the closes complete in the event loop, reference counting is used
* to determine when everything is closed. It then calls
* lws_context_destroy2().
*
* - lws_context_destroy2() cleans up the rest of the higher-level logical
* lws pieces like vhosts. If the loop was foreign, it then proceeds to
 * lws_context_destroy3(). If the loop is internal, it stops the
* internal loops and waits for lws_context_destroy() to be called again
* outside the event loop (since we cannot destroy the loop from
* within the loop). That will cause lws_context_destroy3() to run
* directly.
*
* - lws_context_destroy3() destroys any internal event loops and then
* destroys the context itself, setting what was info.pcontext to NULL.
*/
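/*
 * Usage sketch (illustrative) of the scheme above for a context using the
 * internal poll loop:
 *
 *	lws_context_destroy(context);	// part 1: soft-close all wsi
 *	// ... service threads return from the event loop ...
 *	lws_context_destroy(context);	// completes parts 2 and 3
 *
 * With a foreign loop, the second call is unnecessary: as the closes
 * complete, the library advances through parts 2 and 3 itself.
 */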
/*
* destroy the actual context itself
*/
static void
lws_context_destroy3(struct lws_context *context)
{
struct lws_context **pcontext_finalize = context->pcontext_finalize;
lws_free(context);
lwsl_info("%s: ctx %p freed\n", __func__, context);
if (pcontext_finalize)
*pcontext_finalize = NULL;
}
/*
* really start destroying things
*/
void
lws_context_destroy2(struct lws_context *context)
{
struct lws_vhost *vh = NULL, *vh1;
#if defined(LWS_WITH_PEER_LIMITS)
uint32_t n;
#endif
lwsl_info("%s: ctx %p\n", __func__, context);
context->being_destroyed2 = 1;
if (context->pt[0].fds)
lws_free_set_NULL(context->pt[0].fds);
/*
* free all the per-vhost allocations
*/
vh = context->vhost_list;
while (vh) {
vh1 = vh->vhost_next;
lws_vhost_destroy2(vh);
vh = vh1;
}
	/* destroy any vhosts still on the pending destruction list */
while (context->vhost_pending_destruction_list)
/* removes itself from list */
lws_vhost_destroy2(context->vhost_pending_destruction_list);
lws_stats_log_dump(context);
lws_ssl_context_destroy(context);
lws_plat_context_late_destroy(context);
#if defined(LWS_WITH_PEER_LIMITS)
for (n = 0; n < context->pl_hash_elements; n++) {
lws_start_foreach_llp(struct lws_peer **, peer,
context->pl_hash_table[n]) {
struct lws_peer *df = *peer;
*peer = df->next;
lws_free(df);
continue;
} lws_end_foreach_llp(peer, next);
}
lws_free(context->pl_hash_table);
#endif
if (context->external_baggage_free_on_destroy)
free(context->external_baggage_free_on_destroy);
lws_check_deferred_free(context, 1);
#if LWS_MAX_SMP > 1
pthread_mutex_destroy(&context->lock);
#endif
if (context->event_loop_ops->destroy_context2)
if (context->event_loop_ops->destroy_context2(context)) {
context->finalize_destroy_after_internal_loops_stopped = 1;
return;
}
lws_context_destroy3(context);
}
/*
* Begin the context takedown
*/
LWS_VISIBLE void
lws_context_destroy(struct lws_context *context)
{
volatile struct lws_foreign_thread_pollfd *ftp, *next;
volatile struct lws_context_per_thread *vpt;
struct lws_context_per_thread *pt;
	struct lws_vhost *vh = NULL;
	struct lws wsi;
	int n, m;
	if (!context)
		return;
if (context->finalize_destroy_after_internal_loops_stopped) {
if (context->event_loop_ops->destroy_context2)
context->event_loop_ops->destroy_context2(context);
lws_context_destroy3(context);
return;
}
	if (context->being_destroyed1) {
		if (!context->being_destroyed2) {
			lws_context_destroy2(context);

			return;
		}
		lwsl_info("%s: ctx %p: already being destroyed\n",
			  __func__, context);

		lws_context_destroy3(context);
		return;
	}

	lwsl_info("%s: ctx %p\n", __func__, context);
m = context->count_threads;
context->being_destroyed = 1;
	context->being_destroyed1 = 1;
	context->requested_kill = 1;
memset(&wsi, 0, sizeof(wsi));
wsi.context = context;
#ifdef LWS_LATENCY
if (context->worst_latency_info[0])
lwsl_notice("Worst latency: %s\n", context->worst_latency_info);
#endif
	while (m--) {
		pt = &context->pt[m];
		vpt = (volatile struct lws_context_per_thread *)pt;
ftp = vpt->foreign_pfd_list;
while (ftp) {
next = ftp->next;
lws_free((void *)ftp);
ftp = next;
}
vpt->foreign_pfd_list = NULL;
		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi)
				continue;
if (wsi->event_pipe)
lws_destroy_event_pipe(wsi);
else
lws_close_free_wsi(wsi,
LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
"ctx destroy"
/* no protocol close */);
		}

		lws_pt_mutex_destroy(pt);
	}
for (n = 0; n < context->count_threads; n++) {
pt = &context->pt[n];
if (context->event_loop_ops->destroy_pt)
context->event_loop_ops->destroy_pt(context, n);
lws_free_set_NULL(context->pt[n].serv_buf);
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
while (pt->http.ah_list)
_lws_destroy_ah(pt, pt->http.ah_list);
#endif
}
/*
* inform all the protocols that they are done and will have no more
* callbacks.
*
	 * We can't free things until after the event loop shuts down.
	 */
	if (context->protocol_init_done)
		vh = context->vhost_list;

	while (vh) {
		struct lws_vhost *vhn = vh->vhost_next;
		lws_vhost_destroy1(vh);
		vh = vhn;
	}

	lws_plat_context_early_destroy(context);
/*
* We face two different needs depending if foreign loop or not.
*
* 1) If foreign loop, we really want to advance the destroy_context()
* past here, and block only for libuv-style async close completion.
*
* 2a) If poll, and we exited by ourselves and are calling a final
* destroy_context() outside of any service already, we want to
* advance all the way in one step.
*
* 2b) If poll, and we are reacting to a SIGINT, service thread(s) may
* be in poll wait or servicing. We can't advance the
* destroy_context() to the point it's freeing things; we have to
* leave that for the final destroy_context() after the service
* thread(s) are finished calling for service.
*/
if (context->event_loop_ops->destroy_context1) {
		context->event_loop_ops->destroy_context1(context);

		return;
	}
if (!context->pt[0].event_loop_foreign)
for (n = 0; n < context->count_threads; n++)
if (context->pt[n].inside_service)
return;
	lws_context_destroy2(context);
}