1
0
mirror of https://github.com/godotengine/godot.git synced 2025-11-22 15:06:45 +00:00

Update libwebsockets to 3.1 (plus UWP patch)

This commit is contained in:
Fabio Alessandrelli
2019-03-06 01:07:13 +01:00
parent f43ee4aff8
commit 90210c4862
140 changed files with 16943 additions and 12437 deletions

View File

@@ -0,0 +1,414 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010-2018 Andy Green <andy@warmcat.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation:
* version 2.1 of the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
#include "core/private.h"
/*
 * Pick the service thread (tsi) currently tracking the fewest fds, so a
 * new connection lands on the least-loaded pt.  A pt whose fds_count has
 * reached fd_limit_per_thread - 1 is full and is skipped.
 *
 * Returns the chosen tsi index, or -1 if every pt is full.
 */
static int
lws_get_idlest_tsi(struct lws_context *context)
{
	unsigned int best_count = ~0u;
	int i, best = -1;

	for (i = 0; i < context->count_threads; i++) {
		unsigned int c = (unsigned int)context->pt[i].fds_count;

		/* this pt is already at capacity: not a candidate */
		if (c == context->fd_limit_per_thread - 1)
			continue;

		if (c < best_count) {
			best_count = c;
			best = i;
		}
	}

	return best;
}
/*
 * Allocate and minimally initialize a fresh server-side wsi on vhost.
 *
 * fixed_tsi: service thread index to bind to, or a negative value to let
 * us pick the least-loaded thread via lws_get_idlest_tsi().
 *
 * Returns the zeroed, vhost-bound wsi (protocol not yet selected), or
 * NULL if no pt has space or allocation failed.
 */
struct lws *
lws_create_new_server_wsi(struct lws_vhost *vhost, int fixed_tsi)
{
	struct lws *wsi;
	int tsi = fixed_tsi;

	if (tsi < 0)
		tsi = lws_get_idlest_tsi(vhost->context);

	if (tsi < 0) {
		lwsl_err("no space for new conn\n");
		return NULL;
	}

	wsi = lws_zalloc(sizeof(struct lws), "new server wsi");
	if (!wsi) {
		lwsl_err("Out of memory for new connection\n");
		return NULL;
	}

	wsi->tsi = tsi;
	lwsl_debug("new wsi %p joining vhost %s, tsi %d\n", wsi,
		   vhost->name, wsi->tsi);

	lws_vhost_bind_wsi(vhost, wsi);
	wsi->context = vhost->context;
	wsi->pending_timeout = NO_PENDING_TIMEOUT;
	wsi->rxflow_change_to = LWS_RXFLOW_ALLOW;

	/* initialize the instance struct */

	lwsi_set_state(wsi, LRS_UNCONNECTED);
	wsi->hdr_parsing_completed = 0;

#ifdef LWS_WITH_TLS
	wsi->tls.use_ssl = LWS_SSL_ENABLED(vhost);
#endif

	/*
	 * An unestablished connection's protocol pointer is set to the
	 * start of the vhost's supported list, so the handshake can look
	 * for a matching one; the real binding can only happen once the
	 * protocol is known.
	 */
	wsi->protocol = vhost->protocols;
	wsi->user_space = NULL;
	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi->position_in_fds_table = LWS_NO_FDS_POS;

	vhost->context->count_wsi_allocated++;

	/*
	 * outermost create notification for wsi: no user_space yet,
	 * because no protocol selection has happened
	 */
	vhost->protocols[0].callback(wsi, LWS_CALLBACK_WSI_CREATE, NULL,
				     NULL, 0);

	return wsi;
}
/* if not a socket, it's a raw, non-ssl file descriptor */

/*
 * Adopt an externally-created descriptor (socket or plain fd, per "type")
 * into vhost vh, creating and wiring up a new wsi for it.
 *
 * vh_prot_name: optional protocol to bind immediately (else protocols[0]).
 * parent:       optional parent wsi; the new wsi joins its child list and
 *               inherits its tsi.
 *
 * Returns the adopted wsi, or NULL on failure.  On failure, fd.sockfd is
 * closed here when type includes LWS_ADOPT_SOCKET (and unconditionally on
 * the "bail" path, see note below); on the "fail" path a fully-inserted
 * wsi is torn down via lws_close_free_wsi() instead.
 */
LWS_VISIBLE struct lws *
lws_adopt_descriptor_vhost(struct lws_vhost *vh, lws_adoption_type type,
			   lws_sock_file_fd_type fd, const char *vh_prot_name,
			   struct lws *parent)
{
	struct lws_context *context = vh->context;
	struct lws *new_wsi;
	struct lws_context_per_thread *pt;
	int n;

#if defined(LWS_WITH_PEER_LIMITS)
	struct lws_peer *peer = NULL;

	/* enforce the per-IP wsi cap before allocating anything */
	if (type & LWS_ADOPT_SOCKET) {
		peer = lws_get_or_create_peer(vh, fd.sockfd);

		if (peer && context->ip_limit_wsi &&
		    peer->count_wsi >= context->ip_limit_wsi) {
			lwsl_notice("Peer reached wsi limit %d\n",
					context->ip_limit_wsi);
			lws_stats_atomic_bump(context, &context->pt[0],
					      LWSSTATS_C_PEER_LIMIT_WSI_DENIED,
					      1);
			return NULL;
		}
	}
#endif
	/* children ride on the parent's service thread */
	n = -1;
	if (parent)
		n = parent->tsi;
	new_wsi = lws_create_new_server_wsi(vh, n);
	if (!new_wsi) {
		if (type & LWS_ADOPT_SOCKET)
			compatible_close(fd.sockfd);
		return NULL;
	}
#if defined(LWS_WITH_PEER_LIMITS)
	if (peer)
		lws_peer_add_wsi(context, peer, new_wsi);
#endif
	pt = &context->pt[(int)new_wsi->tsi];
	lws_stats_atomic_bump(context, pt, LWSSTATS_C_CONNECTIONS, 1);

	if (parent) {
		new_wsi->parent = parent;
		new_wsi->sibling_list = parent->child_list;
		parent->child_list = new_wsi;
	}

	new_wsi->desc = fd;

	if (vh_prot_name) {
		new_wsi->protocol = lws_vhost_name_to_protocol(new_wsi->vhost,
							       vh_prot_name);
		if (!new_wsi->protocol) {
			lwsl_err("Protocol %s not enabled on vhost %s\n",
				 vh_prot_name, new_wsi->vhost->name);
			goto bail;
		}
		if (lws_ensure_user_space(new_wsi)) {
			lwsl_notice("OOM trying to get user_space\n");
			goto bail;
		}
	}

	/* SSL only makes sense on a socket adoption with TLS built for vh */
	if (!LWS_SSL_ENABLED(new_wsi->vhost) || !(type & LWS_ADOPT_SOCKET))
		type &= ~LWS_ADOPT_ALLOW_SSL;

	if (lws_role_call_adoption_bind(new_wsi, type, vh_prot_name)) {
		lwsl_err("Unable to find a role that can adopt descriptor\n");
		goto bail;
	}

	/*
	 * A new connection was accepted. Give the user a chance to
	 * set properties of the newly created wsi. There's no protocol
	 * selected yet so we issue this to the vhosts's default protocol,
	 * itself by default protocols[0]
	 */
	n = LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED;
	if (!(type & LWS_ADOPT_HTTP)) {
		if (!(type & LWS_ADOPT_SOCKET))
			n = LWS_CALLBACK_RAW_ADOPT_FILE;
		else
			n = LWS_CALLBACK_RAW_ADOPT;
	}

	lwsl_debug("new wsi wsistate 0x%x\n", new_wsi->wsistate);

	if (context->event_loop_ops->accept)
		if (context->event_loop_ops->accept(new_wsi))
			goto fail;

	if (!(type & LWS_ADOPT_ALLOW_SSL)) {
		lws_pt_lock(pt, __func__);
		if (__insert_wsi_socket_into_fds(context, new_wsi)) {
			lws_pt_unlock(pt);
			lwsl_err("%s: fail inserting socket\n", __func__);
			goto fail;
		}
		lws_pt_unlock(pt);
	} else
		/* ssl path does its own fds insertion during negotiation */
		if (lws_server_socket_service_ssl(new_wsi, fd.sockfd)) {
			lwsl_info("%s: fail ssl negotiation\n", __func__);
			goto fail;
		}

	/*
	 * by deferring callback to this point, after insertion to fds,
	 * lws_callback_on_writable() can work from the callback
	 */
	if ((new_wsi->protocol->callback)(new_wsi, n, new_wsi->user_space,
					  NULL, 0))
		goto fail;

	/* role may need to do something after all adoption completed */
	lws_role_call_adoption_bind(new_wsi, type | _LWS_ADOPT_FINISH,
				    vh_prot_name);

	lws_cancel_service_pt(new_wsi);

	return new_wsi;

fail:
	/* wsi is live in the fds / event loop: full close path needed */
	if (type & LWS_ADOPT_SOCKET)
		lws_close_free_wsi(new_wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "adopt skt fail");

	return NULL;

bail:
	/* wsi never reached the fds table: unwind manually */
	lwsl_notice("%s: exiting on bail\n", __func__);
	if (parent)
		parent->child_list = new_wsi->sibling_list;
	if (new_wsi->user_space)
		lws_free(new_wsi->user_space);

	vh->context->count_wsi_allocated--;

	lws_vhost_unbind_wsi(new_wsi);
	lws_free(new_wsi);
	/*
	 * NOTE(review): this closes fd.sockfd even when type did not include
	 * LWS_ADOPT_SOCKET (raw file adoption) — confirm that is intended
	 * for the file-descriptor case.
	 */
	compatible_close(fd.sockfd);

	return NULL;
}
/*
 * Adopt an accepted TCP socket onto vhost vh as an http(s)-capable
 * connection (socket + http + ssl-permitted adoption flags).
 */
LWS_VISIBLE struct lws *
lws_adopt_socket_vhost(struct lws_vhost *vh, lws_sockfd_type accept_fd)
{
	lws_adoption_type t = LWS_ADOPT_SOCKET | LWS_ADOPT_HTTP |
			      LWS_ADOPT_ALLOW_SSL;
	lws_sock_file_fd_type u;

	u.sockfd = accept_fd;

	return lws_adopt_descriptor_vhost(vh, t, u, NULL, NULL);
}
/*
 * Context-level convenience wrapper: adopt accept_fd onto the context's
 * first (default) vhost.
 */
LWS_VISIBLE struct lws *
lws_adopt_socket(struct lws_context *context, lws_sockfd_type accept_fd)
{
	struct lws_vhost *vh = context->vhost_list;

	return lws_adopt_socket_vhost(vh, accept_fd);
}
/* Common read-buffer adoption for lws_adopt_*_readbuf */

/*
 * Attach pre-read data (readbuf/len) to an already-adopted wsi and, if an
 * ah is available, service it immediately by faking a POLLIN event.
 *
 * Returns wsi on success (possibly with handling deferred until an ah can
 * be attached), or NULL if wsi was NULL or was closed during handling.
 * On buflist append failure the wsi is closed here.
 */
static struct lws*
adopt_socket_readbuf(struct lws *wsi, const char *readbuf, size_t len)
{
	struct lws_context_per_thread *pt;
	struct lws_pollfd *pfd;
	int n;

	if (!wsi)
		return NULL;

	/* nothing buffered to deal with: adoption already complete */
	if (!readbuf || len == 0)
		return wsi;

	/* not in the fds table (eg, closed during adoption): nothing to do */
	if (wsi->position_in_fds_table == LWS_NO_FDS_POS)
		return wsi;

	pt = &wsi->context->pt[(int)wsi->tsi];

	/* stash the pre-read bytes on the wsi's buffer list */
	n = lws_buflist_append_segment(&wsi->buflist, (const uint8_t *)readbuf,
				       len);
	if (n < 0)
		goto bail;
	/* n > 0 means the buflist went from empty to non-empty: track it */
	if (n)
		lws_dll_lws_add_front(&wsi->dll_buflist, &pt->dll_head_buflist);

	/*
	 * we can't process the initial read data until we can attach an ah.
	 *
	 * if one is available, get it and place the data in his ah rxbuf...
	 * wsi with ah that have pending rxbuf get auto-POLLIN service.
	 *
	 * no autoservice because we didn't get a chance to attach the
	 * readbuf data to wsi or ah yet, and we will do it next if we get
	 * the ah.
	 */
	if (wsi->http.ah || !lws_header_table_attach(wsi, 0)) {
		lwsl_notice("%s: calling service on readbuf ah\n", __func__);

		/*
		 * unlike a normal connect, we have the headers already
		 * (or the first part of them anyway).
		 * libuv won't come back and service us without a network
		 * event, so we need to do the header service right here.
		 */
		pfd = &pt->fds[wsi->position_in_fds_table];
		pfd->revents |= LWS_POLLIN;
		lwsl_err("%s: calling service\n", __func__);
		if (lws_service_fd_tsi(wsi->context, pfd, wsi->tsi))
			/* service closed us */
			return NULL;

		return wsi;
	}
	/* no ah yet: the buffered data will be handled when one attaches */
	lwsl_err("%s: deferring handling ah\n", __func__);

	return wsi;

bail:
	lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
			   "adopt skt readbuf fail");

	return NULL;
}
/*
 * Create a UDP socket for "port" (optionally bound locally when flags has
 * LWS_CAUDP_BIND) and adopt it onto vhost as a raw UDP wsi, optionally
 * bound to protocol_name / parent_wsi.
 *
 * Returns the adopted wsi, or NULL on getaddrinfo/socket/bind/adoption
 * failure.  The label fallthrough at the bottom is the cleanup ladder:
 * bail2 closes the socket only when no wsi owns it, bail1 frees the
 * addrinfo list, bail just returns.
 */
LWS_EXTERN struct lws *
lws_create_adopt_udp(struct lws_vhost *vhost, int port, int flags,
		     const char *protocol_name, struct lws *parent_wsi)
{
	lws_sock_file_fd_type sock;
	struct addrinfo h, *r, *rp;
	struct lws *wsi = NULL;
	char buf[16];
	int n;

	memset(&h, 0, sizeof(h));
	h.ai_family = AF_UNSPEC;	/* Allow IPv4 or IPv6 */
	h.ai_socktype = SOCK_DGRAM;
	h.ai_protocol = IPPROTO_UDP;
	h.ai_flags = AI_PASSIVE | AI_ADDRCONFIG;

	/* getaddrinfo wants the port as a decimal string */
	lws_snprintf(buf, sizeof(buf), "%u", port);
	n = getaddrinfo(NULL, buf, &h, &r);
	if (n) {
		lwsl_info("%s: getaddrinfo error: %s\n", __func__,
			  gai_strerror(n));
		goto bail;
	}

	/* take the first result for which a socket can be created */
	for (rp = r; rp; rp = rp->ai_next) {
		sock.sockfd = socket(rp->ai_family, rp->ai_socktype,
				     rp->ai_protocol);
		if (sock.sockfd != LWS_SOCK_INVALID)
			break;
	}
	if (!rp) {
		lwsl_err("%s: unable to create INET socket\n", __func__);
		goto bail1;
	}

	if ((flags & LWS_CAUDP_BIND) && bind(sock.sockfd, rp->ai_addr,
#if defined(_WIN32)
			/* winsock bind() takes an int addrlen */
			(int)rp->ai_addrlen
#else
			rp->ai_addrlen
#endif
	   ) == -1) {
		lwsl_err("%s: bind failed\n", __func__);
		goto bail2;
	}

	wsi = lws_adopt_descriptor_vhost(vhost, LWS_ADOPT_RAW_SOCKET_UDP, sock,
					 protocol_name, parent_wsi);
	if (!wsi)
		lwsl_err("%s: udp adoption failed\n", __func__);

bail2:
	/*
	 * NOTE(review): if lws_adopt_descriptor_vhost() already closed the
	 * fd on its own failure path (it does for socket-type adoption),
	 * this may be a double close — confirm LWS_ADOPT_RAW_SOCKET_UDP's
	 * failure semantics.
	 */
	if (!wsi)
		compatible_close((int)sock.sockfd);
bail1:
	freeaddrinfo(r);

bail:
	return wsi;
}
/*
 * Adopt accept_fd onto the context's default vhost, seeding it with data
 * already read from the socket (readbuf/len).
 */
LWS_VISIBLE struct lws *
lws_adopt_socket_readbuf(struct lws_context *context, lws_sockfd_type accept_fd,
			 const char *readbuf, size_t len)
{
	struct lws *wsi = lws_adopt_socket(context, accept_fd);

	return adopt_socket_readbuf(wsi, readbuf, len);
}
/*
 * As lws_adopt_socket_readbuf(), but adopting onto an explicit vhost
 * rather than the context default.
 */
LWS_VISIBLE struct lws *
lws_adopt_socket_vhost_readbuf(struct lws_vhost *vhost,
			       lws_sockfd_type accept_fd,
			       const char *readbuf, size_t len)
{
	struct lws *wsi = lws_adopt_socket_vhost(vhost, accept_fd);

	return adopt_socket_readbuf(wsi, readbuf, len);
}

View File

@@ -0,0 +1,92 @@
#include "core/private.h"
#if defined(LWS_PLAT_OPTEE)
#define TEE_USER_MEM_HINT_NO_FILL_ZERO 0x80000000
/*
 * Weak fallback so the library links outside a real OP-TEE TA runtime;
 * a genuine TEE_Malloc overrides this.  The stub always fails.
 */
void *__attribute__((weak))
TEE_Malloc(uint32_t size, uint32_t hint)
{
	(void)size;
	(void)hint;

	return NULL;
}
/*
 * Weak fallback for the OP-TEE reallocator; overridden by the real TA
 * runtime implementation.  The stub always fails.
 */
void *__attribute__((weak))
TEE_Realloc(void *buffer, uint32_t newSize)
{
	(void)buffer;
	(void)newSize;

	return NULL;
}
/*
 * Weak fallback for the OP-TEE free; overridden by the real TA runtime.
 * The stub discards the pointer without doing anything.
 */
void __attribute__((weak))
TEE_Free(void *buffer)
{
	(void)buffer;
}
/*
 * lws allocator shim mapped onto the TEE allocator.  "reason" is a debug
 * aid used by the logging allocator on other platforms; ignored here.
 */
void *lws_realloc(void *ptr, size_t size, const char *reason)
{
	(void)reason;

	return TEE_Realloc(ptr, size);
}
/*
 * lws malloc shim onto the TEE allocator; requests unzeroed memory
 * (zeroing, when wanted, is done by lws_zalloc()).  "reason" ignored.
 */
void *lws_malloc(size_t size, const char *reason)
{
	(void)reason;

	return TEE_Malloc(size, TEE_USER_MEM_HINT_NO_FILL_ZERO);
}
/* lws free shim: releases p via the TEE allocator */
void lws_free(void *p)
{
	TEE_Free(p);
}
/*
 * Allocate size bytes via the TEE allocator and zero them before
 * returning.  Returns NULL on allocation failure.  "reason" ignored.
 */
void *lws_zalloc(size_t size, const char *reason)
{
	void *block = TEE_Malloc(size, TEE_USER_MEM_HINT_NO_FILL_ZERO);

	(void)reason;

	if (!block)
		return NULL;

	memset(block, 0, size);

	return block;
}
/*
 * On OP-TEE the allocator is fixed to the TEE functions, so a custom
 * allocator cannot be installed: this is a deliberate no-op.
 */
void lws_set_allocator(void *(*cb)(void *ptr, size_t size, const char *reason))
{
	(void)cb;
}
#else
/*
 * Default allocator backing lws_realloc()/lws_zalloc() on non-OPTEE
 * builds.  Follows realloc() semantics: size == 0 frees ptr (if any) and
 * returns NULL; otherwise returns the (possibly moved) allocation, or
 * NULL on failure leaving the original block intact.  "reason" is only a
 * logging aid.
 */
static void *_realloc(void *ptr, size_t size, const char *reason)
{
	if (size) {
#if defined(LWS_WITH_ESP32)
		lwsl_notice("%s: size %lu: %s (free heap %d)\n", __func__,
			    (unsigned long)size, reason,
			    (unsigned int)esp_get_free_heap_size() - (int)size);
#else
		lwsl_debug("%s: size %lu: %s\n", __func__,
			   (unsigned long)size, reason);
#endif
		/*
		 * This function lives in the !LWS_PLAT_OPTEE branch of the
		 * file, so the former inner LWS_PLAT_OPTEE check here could
		 * never be true: call realloc() directly.
		 */
		return realloc(ptr, size);
	}

	/* free(NULL) is a no-op per the C standard; no guard needed */
	free(ptr);

	return NULL;
}
/* current allocator hook; applications may replace it via
 * lws_set_allocator() */
void *(*_lws_realloc)(void *ptr, size_t size, const char *reason) = _realloc;

/* public realloc entry point: always dispatches through the hook */
void *lws_realloc(void *ptr, size_t size, const char *reason)
{
	void *(*hook)(void *, size_t, const char *) = _lws_realloc;

	return hook(ptr, size, reason);
}
/*
 * Allocate size bytes through the current allocator hook and zero them.
 * Returns NULL on allocation failure.
 */
void *lws_zalloc(size_t size, const char *reason)
{
	void *block = _lws_realloc(NULL, size, reason);

	if (!block)
		return NULL;

	memset(block, 0, size);

	return block;
}
/*
 * Install a custom allocator: cb replaces the default realloc-style hook
 * used by lws_realloc()/lws_zalloc().  cb must follow realloc semantics
 * (size 0 frees; returns NULL on failure).
 */
void lws_set_allocator(void *(*cb)(void *ptr, size_t size, const char *reason))
{
	_lws_realloc = cb;
}
#endif

View File

@@ -0,0 +1,287 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010-2018 Andy Green <andy@warmcat.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation:
* version 2.1 of the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
#include "core/private.h"
/*
 * Free every string held in wsi's client connect stash, then the stash
 * itself, NULLing each pointer as it goes.  Safe to call with a NULL wsi
 * or a wsi that has no stash.
 */
void
lws_client_stash_destroy(struct lws *wsi)
{
	if (!wsi)
		return;

	if (!wsi->stash)
		return;

	lws_free_set_NULL(wsi->stash->address);
	lws_free_set_NULL(wsi->stash->path);
	lws_free_set_NULL(wsi->stash->host);
	lws_free_set_NULL(wsi->stash->origin);
	lws_free_set_NULL(wsi->stash->protocol);
	lws_free_set_NULL(wsi->stash->method);
	lws_free_set_NULL(wsi->stash->iface);
	lws_free_set_NULL(wsi->stash->alpn);

	lws_free_set_NULL(wsi->stash);
}
/*
 * Create a client wsi from the caller-supplied connect_info and begin the
 * (asynchronous) connection process.
 *
 * i: connect parameters; string members are copied into wsi->stash so the
 *    caller's storage may go out of scope after this returns.
 *
 * Returns the new wsi (also stored in *i->pwsi when given), or NULL on
 * failure with *i->pwsi cleared.
 *
 * Fix vs original: the "PHASE 5" external-user_space block appeared twice
 * verbatim; the second copy could never trigger (wsi->user_space already
 * set by the first) and has been removed.
 */
LWS_VISIBLE struct lws *
lws_client_connect_via_info(const struct lws_client_connect_info *i)
{
	struct lws *wsi, *safe = NULL;
	const struct lws_protocols *p;
	const char *local = i->protocol;
#if LWS_MAX_SMP > 1
	int n, tid;
#endif

	if (i->context->requested_kill)
		return NULL;

	if (!i->context->protocol_init_done)
		lws_protocol_init(i->context);
	/*
	 * If we have .local_protocol_name, use it to select the local protocol
	 * handler to bind to. Otherwise use .protocol if http[s].
	 */
	if (i->local_protocol_name)
		local = i->local_protocol_name;

	/* PHASE 1: create a bare wsi */

	wsi = lws_zalloc(sizeof(struct lws), "client wsi");
	if (wsi == NULL)
		goto bail;

	wsi->context = i->context;
	wsi->desc.sockfd = LWS_SOCK_INVALID;

	/* bind to the explicit vhost if given, else the context default */
	wsi->vhost = NULL;
	if (!i->vhost)
		lws_vhost_bind_wsi(i->context->vhost_list, wsi);
	else
		lws_vhost_bind_wsi(i->vhost, wsi);

	if (!wsi->vhost) {
		lwsl_err("%s: No vhost in the context\n", __func__);
		goto bail;
	}

	/*
	 * PHASE 2: if SMP, bind the client to whatever tsi the current thread
	 * represents
	 */
#if LWS_MAX_SMP > 1
	tid = wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_GET_THREAD_ID,
						NULL, NULL, 0);

	lws_context_lock(i->context, "client find tsi");

	for (n = 0; n < i->context->count_threads; n++)
		if (i->context->pt[n].service_tid == tid) {
			lwsl_info("%s: client binds to caller tsi %d\n",
				  __func__, n);
			wsi->tsi = n;
			break;
		}

	/*
	 * this binding is sort of provisional, since when we try to insert
	 * into the pt fds, there may be no space and it will fail
	 */

	lws_context_unlock(i->context);
#endif

	/*
	 * PHASE 3: Choose an initial role for the wsi and do role-specific
	 * init
	 *
	 * Note the initial role may not reflect the final role, eg,
	 * we may want ws, but first we have to go through h1 to get that
	 */
	lws_role_call_client_bind(wsi, i);

	/*
	 * PHASE 4: fill up the wsi with stuff from the connect_info as far as
	 * it can go.  It's uncertain because not only is our connection
	 * going to complete asynchronously, we might have bound to h1 and not
	 * even be able to get ahold of an ah immediately.
	 */

	wsi->user_space = NULL;
	wsi->pending_timeout = NO_PENDING_TIMEOUT;
	wsi->position_in_fds_table = LWS_NO_FDS_POS;
	wsi->c_port = i->port;

	wsi->protocol = &wsi->vhost->protocols[0];
	wsi->client_pipeline = !!(i->ssl_connection & LCCSCF_PIPELINE);

	/*
	 * PHASE 5: handle external user_space now, generic alloc is done in
	 * role finalization
	 */
	if (!wsi->user_space && i->userdata) {
		wsi->user_space_externally_allocated = 1;
		wsi->user_space = i->userdata;
	}

	if (local) {
		lwsl_info("%s: protocol binding to %s\n", __func__, local);
		p = lws_vhost_name_to_protocol(wsi->vhost, local);
		if (p)
			lws_bind_protocol(wsi, p, __func__);
	}

#if defined(LWS_WITH_TLS)
	wsi->tls.use_ssl = i->ssl_connection;
#else
	if (i->ssl_connection & LCCSCF_USE_SSL) {
		lwsl_err("%s: lws not configured for tls\n", __func__);
		goto bail;
	}
#endif

	/*
	 * PHASE 6: stash the things from connect_info that we can't process
	 * right now, eg, if http binding, without an ah.  If h1 and no ah, we
	 * will go on the ah waiting list and process those things later (after
	 * the connect_info and maybe the things pointed to have gone out of
	 * scope)
	 *
	 * However these things are stashed in a generic way at this point,
	 * with no relationship to http or ah
	 */

	wsi->stash = lws_zalloc(sizeof(*wsi->stash), "client stash");
	if (!wsi->stash) {
		lwsl_err("%s: OOM\n", __func__);
		goto bail1;
	}

	wsi->stash->address = lws_strdup(i->address);
	wsi->stash->path = lws_strdup(i->path);
	wsi->stash->host = lws_strdup(i->host);

	if (!wsi->stash->address || !wsi->stash->path || !wsi->stash->host)
		goto bail1;

	/* the remaining stash members are optional */

	if (i->origin) {
		wsi->stash->origin = lws_strdup(i->origin);
		if (!wsi->stash->origin)
			goto bail1;
	}
	if (i->protocol) {
		wsi->stash->protocol = lws_strdup(i->protocol);
		if (!wsi->stash->protocol)
			goto bail1;
	}
	if (i->method) {
		wsi->stash->method = lws_strdup(i->method);
		if (!wsi->stash->method)
			goto bail1;
	}
	if (i->iface) {
		wsi->stash->iface = lws_strdup(i->iface);
		if (!wsi->stash->iface)
			goto bail1;
	}
	if (i->alpn) {
		wsi->stash->alpn = lws_strdup(i->alpn);
		if (!wsi->stash->alpn)
			goto bail1;
	}

	/*
	 * at this point user callbacks like
	 * LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER will be interested to
	 * know the parent... eg for proxying we can grab extra headers from
	 * the parent's incoming ah and add them to the child client handshake
	 */

	if (i->parent_wsi) {
		lwsl_info("%s: created child %p of parent %p\n", __func__,
			  wsi, i->parent_wsi);
		wsi->parent = i->parent_wsi;
		/* remember the old list head so we can unpick on failure */
		safe = wsi->sibling_list = i->parent_wsi->child_list;
		i->parent_wsi->child_list = wsi;
	}

	/*
	 * PHASE 7: Do any role-specific finalization processing.  We can
	 * still see important info things via wsi->stash
	 */

	if (wsi->role_ops->client_bind) {
		int n = wsi->role_ops->client_bind(wsi, NULL);

		if (n && i->parent_wsi) {
			/* unpick from parent */
			i->parent_wsi->child_list = safe;
		}

		if (n < 0)
			/* we didn't survive, wsi is freed */
			goto bail2;

		if (n)
			/* something else failed, wsi needs freeing */
			goto bail;
	}

	/* let the caller's optional wsi storage have the wsi we created */
	if (i->pwsi)
		*i->pwsi = wsi;

#if defined(LWS_WITH_HUBBUB)
	if (i->uri_replace_to)
		wsi->http.rw = lws_rewrite_create(wsi, html_parser_cb,
						  i->uri_replace_from,
						  i->uri_replace_to);
#endif

	return wsi;

bail1:
	lws_client_stash_destroy(wsi);

bail:
	lws_free(wsi);
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
bail2:
#endif
	if (i->pwsi)
		*i->pwsi = NULL;

	return NULL;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,611 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010-2018 Andy Green <andy@warmcat.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation:
* version 2.1 of the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
#include "core/private.h"
#if defined(LWS_WITH_HTTP_PROXY)
/*
 * Copy one header (identified by "index") from wsi "par" to the outgoing
 * header block of "wsi", using temp/temp_len as scratch for the value.
 *
 * Returns 0 when the header is absent (nothing to do) or copied OK, and
 * -1 when the value didn't fit in temp or appending the header failed.
 */
static int
proxy_header(struct lws *wsi, struct lws *par, unsigned char *temp,
	     int temp_len, int index, unsigned char **p, unsigned char *end)
{
	int hlen = lws_hdr_total_length(par, index);

	/* header not present on the source connection: silently skip */
	if (hlen < 1) {
		lwsl_debug("%s: no index %d:\n", __func__, index);

		return 0;
	}

	if (lws_hdr_copy(par, (char *)temp, temp_len, index) < 0)
		return -1;

	lwsl_debug("%s: index %d: %s\n", __func__, index, (char *)temp);

	return lws_add_http_header_by_token(wsi, index, temp, hlen, p, end) ?
		-1 : 0;
}
/*
 * Terminate the outgoing stream on wsi exactly once: an empty FINAL frame
 * on an h2 substream, or the "0\r\n\r\n" chunked-transfer terminator on
 * h1.  Idempotent via http.did_stream_close.
 *
 * Returns 0 on success (or if already closed), -1 on write failure.
 */
static int
stream_close(struct lws *wsi)
{
	char buf[LWS_PRE + 6], *out = buf + LWS_PRE;

	if (wsi->http.did_stream_close)
		return 0;

	wsi->http.did_stream_close = 1;

	if (wsi->http2_substream) {
		/* h2: a zero-length FINAL write ends the stream */
		if (lws_write(wsi, (unsigned char *)buf + LWS_PRE, 0,
			      LWS_WRITE_HTTP_FINAL) < 0) {
			lwsl_info("%s: COMPL_CLIENT_HTTP: h2 fin wr failed\n",
				  __func__);

			return -1;
		}

		return 0;
	}

	/* h1: send the zero-size chunk that terminates chunked encoding */
	memcpy(out, "0\x0d\x0a\x0d\x0a", 5);

	if (lws_write(wsi, (unsigned char *)buf + LWS_PRE, 5,
		      LWS_WRITE_HTTP_FINAL) < 0) {
		lwsl_err("%s: COMPL_CLIENT_HTTP: "
			 "h2 final write failed\n", __func__);

		return -1;
	}

	return 0;
}
#endif
/*
 * Default protocol callback for http-serving protocols: handles 404s,
 * transaction completion, CGI stdin/stdout/stderr plumbing and http
 * proxying on behalf of protocols that don't handle these reasons
 * themselves.  Returns 0 to continue, nonzero to close the connection.
 */
LWS_VISIBLE int
lws_callback_http_dummy(struct lws *wsi, enum lws_callback_reasons reason,
			void *user, void *in, size_t len)
{
	struct lws_ssl_info *si;
#ifdef LWS_WITH_CGI
	struct lws_cgi_args *args;
#endif
#if defined(LWS_WITH_CGI) || defined(LWS_WITH_HTTP_PROXY)
	char buf[8192];
	int n;
#endif
#if defined(LWS_WITH_HTTP_PROXY)
	unsigned char **p, *end;
	struct lws *parent;
#endif

	switch (reason) {
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	case LWS_CALLBACK_HTTP:
		/*
		 * note: with LWS_NO_SERVER the trailing return -1 below is
		 * the whole body of this case
		 */
#ifndef LWS_NO_SERVER
		if (lws_return_http_status(wsi, HTTP_STATUS_NOT_FOUND, NULL))
			return -1;

		if (lws_http_transaction_completed(wsi))
#endif
			return -1;
		break;
#if !defined(LWS_NO_SERVER)
	case LWS_CALLBACK_HTTP_BODY_COMPLETION:
	case LWS_CALLBACK_HTTP_FILE_COMPLETION:
		if (lws_http_transaction_completed(wsi))
			return -1;
		break;
#endif

	case LWS_CALLBACK_HTTP_WRITEABLE:
#ifdef LWS_WITH_CGI
		/* pending CGI stdout (headers or body) to forward? */
		if (wsi->reason_bf & (LWS_CB_REASON_AUX_BF__CGI_HEADERS |
				      LWS_CB_REASON_AUX_BF__CGI)) {
			n = lws_cgi_write_split_stdout_headers(wsi);
			if (n < 0) {
				lwsl_debug("AUX_BF__CGI forcing close\n");
				return -1;
			}
			/* n == 0: stdout drained, let POLLIN resume */
			if (!n)
				lws_rx_flow_control(
					wsi->http.cgi->stdwsi[LWS_STDOUT], 1);

			if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__CGI_HEADERS)
				wsi->reason_bf &=
					~LWS_CB_REASON_AUX_BF__CGI_HEADERS;
			else
				wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__CGI;

			if (wsi->http.cgi && wsi->http.cgi->cgi_transaction_over)
				return -1;
			break;
		}

		/* CGI finished: emit the terminating chunk / FINAL frame */
		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__CGI_CHUNK_END) {
			if (!wsi->http2_substream) {
				memcpy(buf + LWS_PRE, "0\x0d\x0a\x0d\x0a", 5);
				lwsl_debug("writing chunk term and exiting\n");
				n = lws_write(wsi, (unsigned char *)buf +
					      LWS_PRE, 5, LWS_WRITE_HTTP);
			} else
				n = lws_write(wsi, (unsigned char *)buf +
					      LWS_PRE, 0,
					      LWS_WRITE_HTTP_FINAL);

			/* always close after sending it */
			return -1;
		}
#endif
#if defined(LWS_WITH_HTTP_PROXY)
		/* stashed onward-client response headers ready to go out */
		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY_HEADERS) {

			wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__PROXY_HEADERS;

			lwsl_debug("%s: %p: issuing proxy headers\n",
				   __func__, wsi);
			n = lws_write(wsi, wsi->http.pending_return_headers +
				      LWS_PRE,
				      wsi->http.pending_return_headers_len,
				      LWS_WRITE_HTTP_HEADERS);

			lws_free_set_NULL(wsi->http.pending_return_headers);

			if (n < 0) {
				lwsl_err("%s: EST_CLIENT_HTTP: write failed\n",
					 __func__);
				return -1;
			}

			lws_callback_on_writable(wsi);
			break;
		}

		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY) {
			char *px = buf + LWS_PRE;
			int lenx = sizeof(buf) - LWS_PRE - 32;

			/*
			 * our sink is writeable and our source has something
			 * to read. So read a lump of source material of
			 * suitable size to send or what's available, whichever
			 * is the smaller.
			 */
			wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__PROXY;
			if (!lws_get_child(wsi))
				break;

			/* this causes LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ */
			if (lws_http_client_read(lws_get_child(wsi), &px,
						 &lenx) < 0) {
				lwsl_info("%s: LWS_CB_REASON_AUX_BF__PROXY: "
					  "client closed\n", __func__);
				stream_close(wsi);

				return -1;
			}
			break;
		}

		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY_TRANS_END) {
			lwsl_info("%s: LWS_CB_REASON_AUX_BF__PROXY_TRANS_END\n",
				   __func__);

			wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__PROXY_TRANS_END;

			if (stream_close(wsi))
				return -1;

			if (lws_http_transaction_completed(wsi))
				return -1;
		}
#endif
		break;

#if defined(LWS_WITH_HTTP_PROXY)
	case LWS_CALLBACK_RECEIVE_CLIENT_HTTP:
		/* onward client has rx: ask the parent to drain it */
		assert(lws_get_parent(wsi));
		if (!lws_get_parent(wsi))
			break;
		lws_get_parent(wsi)->reason_bf |= LWS_CB_REASON_AUX_BF__PROXY;
		lws_callback_on_writable(lws_get_parent(wsi));
		break;

	case LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ: {
		char *out = buf + LWS_PRE;

		assert(lws_get_parent(wsi));

		if (wsi->http.proxy_parent_chunked) {
			if (len > sizeof(buf) - LWS_PRE - 16) {
				lwsl_err("oversize buf %d %d\n", (int)len,
						(int)sizeof(buf) - LWS_PRE - 16);
				return -1;
			}

			/*
			 * this only needs dealing with on http/1.1 to allow
			 * pipelining
			 */
			n = lws_snprintf(out, 14, "%X\x0d\x0a", (int)len);
			out += n;
			memcpy(out, in, len);
			out += len;
			*out++ = '\x0d';
			*out++ = '\x0a';

			n = lws_write(lws_get_parent(wsi),
				      (unsigned char *)buf + LWS_PRE,
				      len + n + 2, LWS_WRITE_HTTP);
		} else
			n = lws_write(lws_get_parent(wsi), (unsigned char *)in,
				      len, LWS_WRITE_HTTP);
		if (n < 0)
			return -1;
		break; }

	/* this handles the proxy case... */
	case LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP: {
		/* these shadow the function-scope p / end deliberately */
		unsigned char *start, *p, *end;

		/*
		 * We want to proxy these headers, but we are being called
		 * at the point the onward client was established, which is
		 * unrelated to the state or writability of our proxy
		 * connection.
		 *
		 * Therefore produce the headers using the onward client ah
		 * while we have it, and stick them on the output buflist to be
		 * written on the proxy connection as soon as convenient.
		 */

		parent = lws_get_parent(wsi);

		if (!parent)
			return 0;

		start = p = (unsigned char *)buf + LWS_PRE;
		end = p + sizeof(buf) - LWS_PRE - 256;

		if (lws_add_http_header_status(lws_get_parent(wsi),
				lws_http_client_http_response(wsi), &p, end))
			return 1;

		/*
		 * copy these headers from the client connection to the parent
		 * (the last 256 bytes of buf serve as header scratch space)
		 */

		proxy_header(parent, wsi, end, 256,
			     WSI_TOKEN_HTTP_CONTENT_LENGTH, &p, end);
		proxy_header(parent, wsi, end, 256,
			     WSI_TOKEN_HTTP_CONTENT_TYPE, &p, end);
		proxy_header(parent, wsi, end, 256,
			     WSI_TOKEN_HTTP_ETAG, &p, end);
		proxy_header(parent, wsi, end, 256,
			     WSI_TOKEN_HTTP_ACCEPT_LANGUAGE, &p, end);
		proxy_header(parent, wsi, end, 256,
			     WSI_TOKEN_HTTP_CONTENT_ENCODING, &p, end);
		proxy_header(parent, wsi, end, 256,
			     WSI_TOKEN_HTTP_CACHE_CONTROL, &p, end);

		if (!parent->http2_substream)
			if (lws_add_http_header_by_token(parent,
				WSI_TOKEN_CONNECTION, (unsigned char *)"close",
				5, &p, end))
			return -1;

		/*
		 * We proxy using h1 only atm, and strip any chunking so it
		 * can go back out on h2 just fine.
		 *
		 * However if we are actually going out on h1, we need to add
		 * our own chunking since we still don't know the size.
		 */

		if (!parent->http2_substream &&
		    !lws_hdr_total_length(wsi, WSI_TOKEN_HTTP_CONTENT_LENGTH)) {
			lwsl_debug("downstream parent chunked\n");
			if (lws_add_http_header_by_token(parent,
					WSI_TOKEN_HTTP_TRANSFER_ENCODING,
					(unsigned char *)"chunked", 7, &p, end))
				return -1;

			wsi->http.proxy_parent_chunked = 1;
		}

		if (lws_finalize_http_header(parent, &p, end))
			return 1;

		parent->http.pending_return_headers_len =
			lws_ptr_diff(p, start);
		parent->http.pending_return_headers =
			lws_malloc(parent->http.pending_return_headers_len +
				    LWS_PRE, "return proxy headers");
		if (!parent->http.pending_return_headers)
			return -1;

		memcpy(parent->http.pending_return_headers + LWS_PRE, start,
		       parent->http.pending_return_headers_len);

		parent->reason_bf |= LWS_CB_REASON_AUX_BF__PROXY_HEADERS;

		lwsl_debug("%s: LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP: "
			   "prepared headers\n", __func__);
		lws_callback_on_writable(parent);

		break; }

	case LWS_CALLBACK_COMPLETED_CLIENT_HTTP:
		lwsl_info("%s: COMPLETED_CLIENT_HTTP: %p (parent %p)\n",
					__func__, wsi, lws_get_parent(wsi));
		if (!lws_get_parent(wsi))
			break;
		lws_get_parent(wsi)->reason_bf |=
				LWS_CB_REASON_AUX_BF__PROXY_TRANS_END;
		lws_callback_on_writable(lws_get_parent(wsi));
		break;

	case LWS_CALLBACK_CLOSED_CLIENT_HTTP:
		if (!lws_get_parent(wsi))
			break;
		lwsl_err("%s: LWS_CALLBACK_CLOSED_CLIENT_HTTP\n", __func__);
		lws_set_timeout(lws_get_parent(wsi), LWS_TO_KILL_ASYNC,
				PENDING_TIMEOUT_KILLED_BY_PROXY_CLIENT_CLOSE);
		break;

	case LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER:
		parent = lws_get_parent(wsi);

		if (!parent)
			break;

		/* in/len describe the outgoing header write position */
		p = (unsigned char **)in;
		end = (*p) + len;

		/*
		 * copy these headers from the parent request to the client
		 * connection's request
		 */

		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HOST, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_ETAG, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_IF_MODIFIED_SINCE, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_ACCEPT_LANGUAGE, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_ACCEPT_ENCODING, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_CACHE_CONTROL, p, end);

		buf[0] = '\0';
		lws_get_peer_simple(parent, buf, sizeof(buf));
		if (lws_add_http_header_by_token(wsi, WSI_TOKEN_X_FORWARDED_FOR,
				(unsigned char *)buf, (int)strlen(buf), p, end))
			return -1;

		break;
#endif

#ifdef LWS_WITH_CGI
	/* CGI IO events (POLLIN/OUT) appear here, our default policy is:
	 *
	 *  - POST data goes on subprocess stdin
	 *  - subprocess stdout goes on http via writeable callback
	 *  - subprocess stderr goes to the logs
	 */
	case LWS_CALLBACK_CGI:
		args = (struct lws_cgi_args *)in;
		switch (args->ch) { /* which of stdin/out/err ? */
		case LWS_STDIN:
			/* TBD stdin rx flow control */
			break;
		case LWS_STDOUT:
			/* quench POLLIN on STDOUT until MASTER got writeable */
			lws_rx_flow_control(args->stdwsi[LWS_STDOUT], 0);
			wsi->reason_bf |= LWS_CB_REASON_AUX_BF__CGI;
			/* when writing to MASTER would not block */
			lws_callback_on_writable(wsi);
			break;
		case LWS_STDERR:
			n = lws_get_socket_fd(args->stdwsi[LWS_STDERR]);
			if (n < 0)
				break;
			/* - 2 leaves room for an added '\n' plus the NUL */
			n = read(n, buf, sizeof(buf) - 2);
			if (n > 0) {
				if (buf[n - 1] != '\n')
					buf[n++] = '\n';
				buf[n] = '\0';
				lwsl_notice("CGI-stderr: %s\n", buf);
			}
			break;
		}
		break;

	case LWS_CALLBACK_CGI_TERMINATED:
		lwsl_debug("LWS_CALLBACK_CGI_TERMINATED: %d %" PRIu64 "\n",
			   wsi->http.cgi->explicitly_chunked,
			   (uint64_t)wsi->http.cgi->content_length);
		if (!wsi->http.cgi->explicitly_chunked &&
		    !wsi->http.cgi->content_length) {
			/* send terminating chunk */
			lwsl_debug("LWS_CALLBACK_CGI_TERMINATED: ending\n");
			wsi->reason_bf |= LWS_CB_REASON_AUX_BF__CGI_CHUNK_END;
			lws_callback_on_writable(wsi);
			lws_set_timeout(wsi, PENDING_TIMEOUT_CGI, 3);
			break;
		}
		return -1;

	case LWS_CALLBACK_CGI_STDIN_DATA:  /* POST body for stdin */
		args = (struct lws_cgi_args *)in;
		args->data[args->len] = '\0';
		if (!args->stdwsi[LWS_STDIN])
			return -1;
		n = lws_get_socket_fd(args->stdwsi[LWS_STDIN]);
		if (n < 0)
			return -1;

#if defined(LWS_WITH_ZLIB)
		if (wsi->http.cgi->gzip_inflate) {
			/* gzip handling */

			if (!wsi->http.cgi->gzip_init) {
				lwsl_info("inflating gzip\n");

				memset(&wsi->http.cgi->inflate, 0,
				       sizeof(wsi->http.cgi->inflate));

				/* 16 + 15: zlib flag for gzip-format input */
				if (inflateInit2(&wsi->http.cgi->inflate,
						 16 + 15) != Z_OK) {
					lwsl_err("%s: iniflateInit failed\n",
						 __func__);
					return -1;
				}

				wsi->http.cgi->gzip_init = 1;
			}

			wsi->http.cgi->inflate.next_in = args->data;
			wsi->http.cgi->inflate.avail_in = args->len;

			do {

				wsi->http.cgi->inflate.next_out =
						wsi->http.cgi->inflate_buf;
				wsi->http.cgi->inflate.avail_out =
					sizeof(wsi->http.cgi->inflate_buf);

				n = inflate(&wsi->http.cgi->inflate,
					    Z_SYNC_FLUSH);

				switch (n) {
				case Z_NEED_DICT:
				case Z_STREAM_ERROR:
				case Z_DATA_ERROR:
				case Z_MEM_ERROR:
					inflateEnd(&wsi->http.cgi->inflate);
					wsi->http.cgi->gzip_init = 0;
					lwsl_err("zlib error inflate %d\n", n);
					return -1;
				}

				/* push whatever was inflated to cgi stdin */
				if (wsi->http.cgi->inflate.avail_out !=
					   sizeof(wsi->http.cgi->inflate_buf)) {
					int written;

					written = write(args->stdwsi[LWS_STDIN]->desc.filefd,
						wsi->http.cgi->inflate_buf,
						sizeof(wsi->http.cgi->inflate_buf) -
						wsi->http.cgi->inflate.avail_out);

					if (written != (int)(
						sizeof(wsi->http.cgi->inflate_buf) -
						wsi->http.cgi->inflate.avail_out)) {
						lwsl_notice("LWS_CALLBACK_CGI_STDIN_DATA: "
							    "sent %d only %d went", n, args->len);
					}

					if (n == Z_STREAM_END) {
						lwsl_err("gzip inflate end\n");
						inflateEnd(&wsi->http.cgi->inflate);
						wsi->http.cgi->gzip_init = 0;
						break;
					}

				} else
					break;

				if (wsi->http.cgi->inflate.avail_out)
					break;

			} while (1);

			return args->len;
		}
#endif /* WITH_ZLIB */

		n = write(n, args->data, args->len);
//		lwsl_hexdump_notice(args->data, args->len);
		if (n < args->len)
			lwsl_notice("LWS_CALLBACK_CGI_STDIN_DATA: "
				    "sent %d only %d went", n, args->len);

		/* once all expected POST body went in, retire the stdin wsi */
		if (wsi->http.cgi->post_in_expected && args->stdwsi[LWS_STDIN] &&
		    args->stdwsi[LWS_STDIN]->desc.filefd > 0) {
			wsi->http.cgi->post_in_expected -= n;
			if (!wsi->http.cgi->post_in_expected) {
				struct lws *siwsi = args->stdwsi[LWS_STDIN];

				lwsl_debug("%s: expected POST in end: "
					   "closing stdin wsi %p, fd %d\n",
					   __func__, siwsi, siwsi->desc.sockfd);

				__remove_wsi_socket_from_fds(siwsi);
				lwsi_set_state(siwsi, LRS_DEAD_SOCKET);
				siwsi->socket_is_permanently_unusable = 1;
				lws_remove_child_from_any_parent(siwsi);
				if (wsi->context->event_loop_ops->
							close_handle_manually) {

					wsi->context->event_loop_ops->
						close_handle_manually(siwsi);
					siwsi->told_event_loop_closed = 1;
				} else {
					compatible_close(siwsi->desc.sockfd);
					__lws_free_wsi(siwsi);
				}
				wsi->http.cgi->pipe_fds[LWS_STDIN][1] = -1;

				args->stdwsi[LWS_STDIN] = NULL;
			}
		}

		return n;
#endif /* WITH_CGI */
#endif /* ROLE_ H1 / H2 */

	case LWS_CALLBACK_SSL_INFO:
		si = in;

		/* (void)si: silences unused warning if lwsl_notice is
		 * compiled out at this log level */
		(void)si;
		lwsl_notice("LWS_CALLBACK_SSL_INFO: where: 0x%x, ret: 0x%x\n",
			    si->where, si->ret);
		break;

#if LWS_MAX_SMP > 1
	case LWS_CALLBACK_GET_THREAD_ID:
		return (int)(unsigned long long)pthread_self();
#endif

	default:
		break;
	}

	return 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,323 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010-2018 Andy Green <andy@warmcat.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation:
* version 2.1 of the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
#include "core/private.h"
/*
* notice this returns number of bytes consumed, or -1
*/
/*
 * lws_issue_raw() - write len bytes from buf on the wsi's transport,
 * handling partial sends by queuing the unsent remainder on
 * wsi->buflist_out; any already-queued buflist_out data is drained first.
 *
 * notice this returns number of bytes consumed, or -1
 */
int lws_issue_raw(struct lws *wsi, unsigned char *buf, size_t len)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	size_t real_len = len;	/* amount the caller is "credited" with */
	/*
	 * NOTE(review): n and m are unsigned but lws_ssl_capable_write() can
	 * return negative LWS_SSL_CAPABLE_* enums; the switch below still
	 * matches them via conversion -- confirm upstream intent.
	 */
	unsigned int n, m;

	// lwsl_notice("%s: len %d\n", __func__, (int)len);
	// lwsl_hexdump_level(LLL_NOTICE, buf, len);

	/*
	 * Detect if we got called twice without going through the
	 * event loop to handle pending. Since that guarantees extending any
	 * existing buflist_out it's inefficient.
	 */
	/* NOTE(review): diagnostic deliberately disabled via "0 &&" */
	if (0 && buf && wsi->could_have_pending) {
		lwsl_hexdump_level(LLL_INFO, buf, len);
		lwsl_info("** %p: vh: %s, prot: %s, role %s: "
			  "Inefficient back-to-back write of %lu detected...\n",
			  wsi, wsi->vhost->name, wsi->protocol->name,
			  wsi->role_ops->name,
			  (unsigned long)len);
	}

	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_WRITE, 1);

	/* just ignore sends after we cleared the truncation buffer */
	if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE &&
	    !lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
	    && !wsi->http.comp_ctx.may_have_more
#endif
	    )
		return (int)len;

	if (buf && lws_has_buffered_out(wsi)) {
		lwsl_info("** %p: vh: %s, prot: %s, incr buflist_out by %lu\n",
			  wsi, wsi->vhost->name, wsi->protocol->name,
			  (unsigned long)len);

		/*
		 * already buflist ahead of this, add it on the tail of the
		 * buflist, then ignore it for now and act like we're flushing
		 * the buflist...
		 */
		lws_buflist_append_segment(&wsi->buflist_out, buf, len);

		buf = NULL;
		len = 0;
	}

	if (wsi->buflist_out) {
		/* we have to drain the earliest buflist_out stuff first */
		len = lws_buflist_next_segment_len(&wsi->buflist_out, &buf);
		real_len = len;

		lwsl_debug("%s: draining %d\n", __func__, (int)len);
	}

	/* nothing to send at all */
	if (!len || !buf)
		return 0;

	if (!wsi->http2_substream && !lws_socket_is_valid(wsi->desc.sockfd))
		lwsl_warn("** error invalid sock but expected to send\n");

	/* limit sending */
	if (wsi->protocol->tx_packet_size)
		n = (int)wsi->protocol->tx_packet_size;
	else {
		n = (int)wsi->protocol->rx_buffer_size;
		if (!n)
			n = context->pt_serv_buf_size;
	}
	/* headroom on top of the nominal packet size */
	n += LWS_PRE + 4;
	if (n > len)
		n = (int)len;

	/* nope, send it on the socket directly */
	lws_latency_pre(context, wsi);
	m = lws_ssl_capable_write(wsi, buf, n);
	lws_latency(context, wsi, "send lws_issue_raw", n, n == m);

	lwsl_info("%s: ssl_capable_write (%d) says %d\n", __func__, n, m);

	/* something got written, it can have been truncated now */
	wsi->could_have_pending = 1;

	switch (m) {
	case LWS_SSL_CAPABLE_ERROR:
		/* we're going to close, let close know sends aren't possible */
		wsi->socket_is_permanently_unusable = 1;
		return -1;
	case LWS_SSL_CAPABLE_MORE_SERVICE:
		/*
		 * nothing got sent, not fatal. Retry the whole thing later,
		 * ie, implying treat it was a truncated send so it gets
		 * retried
		 */
		m = 0;
		break;
	}

	/*
	 * we were sending this from buflist_out? Then not sending everything
	 * is a small matter of advancing ourselves only by the amount we did
	 * send in the buflist.
	 */
	if (lws_has_buffered_out(wsi)) {
		if (m) {
			lwsl_info("%p partial adv %d (vs %ld)\n", wsi, m,
				  (long)real_len);
			lws_buflist_use_segment(&wsi->buflist_out, m);
		}

		if (!lws_has_buffered_out(wsi)) {
			lwsl_info("%s: wsi %p: buflist_out flushed\n",
				  __func__, wsi);

			/* whole queue drained: credit the full original len */
			m = (int)real_len;
			if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
				lwsl_info("*%p signalling to close now\n", wsi);
				return -1; /* retry closing now */
			}

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
#if !defined(LWS_WITHOUT_SERVER)
			/* a transaction-completed was parked until flush */
			if (wsi->http.deferred_transaction_completed) {
				lwsl_notice("%s: partial completed, doing "
					    "deferred transaction completed\n",
					    __func__);
				wsi->http.deferred_transaction_completed = 0;
				return lws_http_transaction_completed(wsi) ?
							-1 : (int)real_len;
			}
#endif
#endif
		}
		/* always callback on writeable */
		lws_callback_on_writable(wsi);

		return m;
	}

#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
	if (wsi->http.comp_ctx.may_have_more)
		lws_callback_on_writable(wsi);
#endif

	if (m == real_len)
		/* what we just sent went out cleanly */
		return m;

	/*
	 * We were not able to send everything... and we were not sending from
	 * an existing buflist_out. So we are starting a fresh buflist_out, by
	 * buffering the unsent remainder on it.
	 * (it will get first priority next time the socket is writable).
	 */
	lwsl_debug("%p new partial sent %d from %lu total\n", wsi, m,
		   (unsigned long)real_len);

	lws_buflist_append_segment(&wsi->buflist_out, buf + m, real_len - m);

	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITE_PARTIALS, 1);
	lws_stats_atomic_bump(wsi->context, pt,
			      LWSSTATS_B_PARTIALS_ACCEPTED_PARTS, m);

#if !defined(LWS_WITH_ESP32)
	if (lws_wsi_is_udp(wsi)) {
		/* stash original destination for fulfilling UDP partials */
		wsi->udp->sa_pending = wsi->udp->sa;
		wsi->udp->salen_pending = wsi->udp->salen;
	}
#endif

	/* since something buffered, force it to get another chance to send */
	lws_callback_on_writable(wsi);

	return (int)real_len;
}
/*
 * lws_write() - public write entry point; accounts stats and dispatches
 * to the role's protocol-specific writer, or to lws_issue_raw() for
 * roles with no framing layer of their own.
 */
LWS_VISIBLE int lws_write(struct lws *wsi, unsigned char *buf, size_t len,
			  enum lws_write_protocol wp)
{
	/* per-thread struct, used here only for stats accounting */
	struct lws_context_per_thread *stats_pt =
				&wsi->context->pt[(int)wsi->tsi];

	lws_stats_atomic_bump(wsi->context, stats_pt,
			      LWSSTATS_C_API_LWS_WRITE, 1);

	/* reject lengths that look like a negative int was passed */
	if ((int)len < 0) {
		lwsl_err("%s: suspicious len int %d, ulong %lu\n", __func__,
			 (int)len, (unsigned long)len);
		return -1;
	}

	lws_stats_atomic_bump(wsi->context, stats_pt, LWSSTATS_B_WRITE, len);

#ifdef LWS_WITH_ACCESS_LOG
	wsi->http.access_log.sent += len;
#endif
	if (wsi->vhost)
		wsi->vhost->conn_stats.tx += len;

	assert(wsi->role_ops);

	if (wsi->role_ops->write_role_protocol)
		return wsi->role_ops->write_role_protocol(wsi, buf, len, &wp);

	return lws_issue_raw(wsi, buf, len);
}
/*
 * Plain (non-TLS) read path: recv()/recvfrom() with lws-style result
 * mapping to LWS_SSL_CAPABLE_* codes.
 */
LWS_VISIBLE int
lws_ssl_capable_read_no_ssl(struct lws *wsi, unsigned char *buf, int len)
{
	struct lws_context *ctx = wsi->context;
	struct lws_context_per_thread *pt = &ctx->pt[(int)wsi->tsi];
	int rx = 0;

	lws_stats_atomic_bump(ctx, pt, LWSSTATS_C_API_READ, 1);

	if (!lws_wsi_is_udp(wsi))
		rx = recv(wsi->desc.sockfd, (char *)buf, len, 0);
	else {
#if !defined(LWS_WITH_ESP32)
		wsi->udp->salen = sizeof(wsi->udp->sa);
		rx = recvfrom(wsi->desc.sockfd, (char *)buf, len, 0,
			      &wsi->udp->sa, &wsi->udp->salen);
#endif
	}

	if (rx < 0) {
		/* transient conditions: ask to be serviced again */
		if (LWS_ERRNO == LWS_EAGAIN ||
		    LWS_ERRNO == LWS_EWOULDBLOCK ||
		    LWS_ERRNO == LWS_EINTR)
			return LWS_SSL_CAPABLE_MORE_SERVICE;

		lwsl_info("error on reading from skt : %d\n", LWS_ERRNO);
		return LWS_SSL_CAPABLE_ERROR;
	}

	/* zero-length read on a unix socket is treated as peer closure */
	if (!rx && wsi->unix_skt)
		return LWS_SSL_CAPABLE_ERROR;

	if (wsi->vhost)
		wsi->vhost->conn_stats.rx += rx;
	lws_stats_atomic_bump(ctx, pt, LWSSTATS_B_READ, rx);

	return rx;
}
/*
 * Plain (non-TLS) write path: send()/sendto() mapped to lws-style
 * LWS_SSL_CAPABLE_* results.  For UDP, a pending partial is completed
 * to the destination stashed when the partial was created.
 */
LWS_VISIBLE int
lws_ssl_capable_write_no_ssl(struct lws *wsi, unsigned char *buf, int len)
{
	int sent = 0;

	if (!lws_wsi_is_udp(wsi))
		sent = send(wsi->desc.sockfd, (char *)buf, len, MSG_NOSIGNAL);
	else {
#if !defined(LWS_WITH_ESP32)
		if (lws_has_buffered_out(wsi))
			/* finish the partial to its original destination */
			sent = sendto(wsi->desc.sockfd, (const char *)buf,
				      len, 0, &wsi->udp->sa_pending,
				      wsi->udp->salen_pending);
		else
			sent = sendto(wsi->desc.sockfd, (const char *)buf,
				      len, 0, &wsi->udp->sa, wsi->udp->salen);
#endif
	}
	// lwsl_info("%s: sent len %d result %d", __func__, len, sent);

	if (sent >= 0)
		return sent;

	/*
	 * NB: EAGAIN / EWOULDBLOCK may share a value on some platforms,
	 * so this must remain an if-chain, not a switch
	 */
	if (LWS_ERRNO == LWS_EAGAIN ||
	    LWS_ERRNO == LWS_EWOULDBLOCK ||
	    LWS_ERRNO == LWS_EINTR) {
		if (LWS_ERRNO == LWS_EWOULDBLOCK)
			lws_set_blocking_send(wsi);

		return LWS_SSL_CAPABLE_MORE_SERVICE;
	}

	lwsl_debug("ERROR writing len %d to skt fd %d err %d / errno %d\n",
		   len, wsi->desc.sockfd, sent, LWS_ERRNO);

	return LWS_SSL_CAPABLE_ERROR;
}
/*
 * With no TLS layer there is no internal decrypt buffer to report;
 * the ESP32 build returns a fixed nonzero value instead.
 */
LWS_VISIBLE int
lws_ssl_pending_no_ssl(struct lws *wsi)
{
#if defined(LWS_WITH_ESP32)
	(void)wsi;

	return 100;
#else
	(void)wsi;

	return 0;
#endif
}

View File

@@ -0,0 +1,566 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010-2017 Andy Green <andy@warmcat.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation:
* version 2.1 of the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
#include "core/private.h"
/*
 * _lws_change_pollfd() - apply (events & ~_and) | _or to the wsi's slot
 * in the per-thread pollfd array, notifying the external-poll protocol[0]
 * callback and any event library glue.
 *
 * With the default poll() loop, changes requested from a foreign thread
 * while the service thread is inside poll() are queued on a list and
 * applied by the service thread after poll() returns, instead of being
 * applied directly (BSD poll() can revert concurrent changes).
 *
 * *pa is filled with the fd and the before/after event masks.
 * Returns 0 on success or nothing-to-do, -1 on error.
 */
int
_lws_change_pollfd(struct lws *wsi, int _and, int _or, struct lws_pollargs *pa)
{
#if !defined(LWS_WITH_LIBUV) && !defined(LWS_WITH_LIBEV) && !defined(LWS_WITH_LIBEVENT)
	volatile struct lws_context_per_thread *vpt;
#endif
	struct lws_context_per_thread *pt;
	struct lws_context *context;
	int ret = 0, pa_events = 1;
	struct lws_pollfd *pfd;
	int sampled_tid, tid;

	if (!wsi)
		return 0;

	assert(wsi->position_in_fds_table == LWS_NO_FDS_POS ||
	       wsi->position_in_fds_table >= 0);

	/* not in the fds table at all: nothing to change */
	if (wsi->position_in_fds_table == LWS_NO_FDS_POS)
		return 0;

	if (((volatile struct lws *)wsi)->handling_pollout &&
	    !_and && _or == LWS_POLLOUT) {
		/*
		 * Happening alongside service thread handling POLLOUT.
		 * The danger is when he is finished, he will disable POLLOUT,
		 * countermanding what we changed here.
		 *
		 * Instead of changing the fds, inform the service thread
		 * what happened, and ask it to leave POLLOUT active on exit
		 */
		((volatile struct lws *)wsi)->leave_pollout_active = 1;
		/*
		 * by definition service thread is not in poll wait, so no need
		 * to cancel service
		 */

		lwsl_debug("%s: using leave_pollout_active\n", __func__);

		return 0;
	}

	context = wsi->context;
	pt = &context->pt[(int)wsi->tsi];

	assert(wsi->position_in_fds_table < (int)pt->fds_count);

#if !defined(LWS_WITH_LIBUV) && \
    !defined(LWS_WITH_LIBEV) && \
    !defined(LWS_WITH_LIBEVENT)
	/*
	 * This only applies when we use the default poll() event loop.
	 *
	 * BSD can revert pa->events at any time, when the kernel decides to
	 * exit from poll(). We can't protect against it using locking.
	 *
	 * Therefore we must check first if the service thread is in poll()
	 * wait; if so, we know we must be being called from a foreign thread,
	 * and we must keep a strictly ordered list of changes we made instead
	 * of trying to apply them, since when poll() exits, which may happen
	 * at any time it would revert our changes.
	 *
	 * The plat code will apply them when it leaves the poll() wait
	 * before doing anything else.
	 */

	vpt = (volatile struct lws_context_per_thread *)pt;

	/* spinlock guards the service thread's view of foreign_pfd_list */
	vpt->foreign_spinlock = 1;
	lws_memory_barrier();

	if (vpt->inside_poll) {
		struct lws_foreign_thread_pollfd *ftp, **ftp1;
		/*
		 * We are certainly a foreign thread trying to change events
		 * while the service thread is in the poll() wait.
		 *
		 * Create a list of changes to be applied after poll() exit,
		 * instead of trying to apply them now.
		 */
		ftp = lws_malloc(sizeof(*ftp), "ftp");
		if (!ftp) {
			/* must release the spinlock before bailing */
			vpt->foreign_spinlock = 0;
			lws_memory_barrier();
			ret = -1;
			goto bail;
		}

		ftp->_and = _and;
		ftp->_or = _or;
		ftp->fd_index = wsi->position_in_fds_table;
		ftp->next = NULL;

		/* place at END of list to maintain order */
		ftp1 = (struct lws_foreign_thread_pollfd **)
				&vpt->foreign_pfd_list;
		while (*ftp1)
			ftp1 = &((*ftp1)->next);

		*ftp1 = ftp;
		vpt->foreign_spinlock = 0;
		lws_memory_barrier();

		/* kick the service thread out of poll() to apply the list */
		lws_cancel_service_pt(wsi);

		return 0;
	}

	vpt->foreign_spinlock = 0;
	lws_memory_barrier();
#endif

	pfd = &pt->fds[wsi->position_in_fds_table];
	pa->fd = wsi->desc.sockfd;
	lwsl_debug("%s: wsi %p: fd %d events %d -> %d\n", __func__, wsi,
		   pa->fd, pfd->events, (pfd->events & ~_and) | _or);
	pa->prev_events = pfd->events;
	pa->events = pfd->events = (pfd->events & ~_and) | _or;

	/*
	 * NOTE(review): h2 substreams return here, before the CHANGE_MODE
	 * callback and event-loop io notifications below -- presumably
	 * because they share the parent's network socket; confirm upstream.
	 */
	if (wsi->http2_substream)
		return 0;

	/* external poll support: tell protocol[0] the mode changed */
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi,
					      LWS_CALLBACK_CHANGE_MODE_POLL_FD,
					      wsi->user_space, (void *)pa, 0)) {
		ret = -1;
		goto bail;
	}

	if (context->event_loop_ops->io) {
		/* translate the poll flag deltas into event lib start/stop */
		if (_and & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
					LWS_EV_STOP | LWS_EV_READ);

		if (_or & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
					LWS_EV_START | LWS_EV_READ);

		if (_and & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
					LWS_EV_STOP | LWS_EV_WRITE);

		if (_or & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
					LWS_EV_START | LWS_EV_WRITE);
	}

	/*
	 * if we changed something in this pollfd...
	 *   ... and we're running in a different thread context
	 *     than the service thread...
	 *       ... and the service thread is waiting ...
	 *         then cancel it to force a restart with our changed events
	 */
	pa_events = pa->prev_events != pa->events;

	if (pa_events) {
		if (lws_plat_change_pollfd(context, wsi, pfd)) {
			lwsl_info("%s failed\n", __func__);
			ret = -1;
			goto bail;
		}
		sampled_tid = pt->service_tid;
		if (sampled_tid && wsi->vhost) {
			tid = wsi->vhost->protocols[0].callback(wsi,
				     LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
			if (tid == -1) {
				ret = -1;
				goto bail;
			}
			if (tid != sampled_tid)
				lws_cancel_service_pt(wsi);
		}
	}

bail:
	return ret;
}
#ifndef LWS_NO_SERVER
/*
 * Enable or disable listen sockets on this pt globally...
 * it's modulated according to the pt having space for a new accept.
 */
static void
lws_accept_modulation(struct lws_context *context,
		      struct lws_context_per_thread *pt, int allow)
{
	struct lws_pollargs pa1;
	struct lws_vhost *vh;

	for (vh = context->vhost_list; vh; vh = vh->vhost_next) {
		if (!vh->lserv_wsi)
			continue;

		if (allow)
			_lws_change_pollfd(vh->lserv_wsi,
					   0, LWS_POLLIN, &pa1);
		else
			_lws_change_pollfd(vh->lserv_wsi,
					   LWS_POLLIN, 0, &pa1);
	}
}
#endif
/*
 * __insert_wsi_socket_into_fds() - add wsi's socket to the per-thread
 * pollfd array (armed for POLLIN), register it in the fd->wsi lookup and
 * notify external poll / platform glue.  The leading "__" follows this
 * file's convention that the caller holds the relevant lock (cf.
 * lws_change_pollfd()).
 *
 * Returns 0 on success, 1 when the fd cannot be accommodated, -1 if the
 * external-poll LOCK_POLL callback refused.
 */
int
__insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi)
{
	struct lws_pollargs pa = { wsi->desc.sockfd, LWS_POLLIN, 0 };
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	int ret = 0;

	lwsl_debug("%s: %p: tsi=%d, sock=%d, pos-in-fds=%d\n",
		   __func__, wsi, wsi->tsi, wsi->desc.sockfd, pt->fds_count);

	if ((unsigned int)pt->fds_count >= context->fd_limit_per_thread) {
		/*
		 * fix: report the per-thread fd count that actually tripped
		 * the limit; previously this printed context->max_fds, which
		 * is not what the condition above tests
		 */
		lwsl_err("Too many fds (%d vs %d)\n", pt->fds_count,
			 context->fd_limit_per_thread );
		return 1;
	}

#if !defined(_WIN32)
	/* fds are used directly as indexes into the lookup table */
	if (wsi->desc.sockfd - lws_plat_socket_offset() >= context->max_fds) {
		lwsl_err("Socket fd %d is too high (%d) offset %d\n",
			 wsi->desc.sockfd, context->max_fds,
			 lws_plat_socket_offset());
		return 1;
	}
#endif

	assert(wsi);
	assert(wsi->event_pipe || wsi->vhost);
	assert(lws_socket_is_valid(wsi->desc.sockfd));

	/* external poll support: bracket the table change with LOCK/UNLOCK */
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					      wsi->user_space, (void *) &pa, 1))
		return -1;

	pt->count_conns++;
	insert_wsi(context, wsi);
	wsi->position_in_fds_table = pt->fds_count;

	/* new entries always start out waiting for readability */
	pt->fds[wsi->position_in_fds_table].fd = wsi->desc.sockfd;
	pt->fds[wsi->position_in_fds_table].events = LWS_POLLIN;
	pa.events = pt->fds[pt->fds_count].events;

	/* increments pt->fds_count as a side effect */
	lws_plat_insert_socket_into_fds(context, wsi);

	/* external POLL support via protocol 0 */
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_ADD_POLL_FD,
					      wsi->user_space, (void *) &pa, 0))
		ret = -1;
#ifndef LWS_NO_SERVER
	/* if no more room, defeat accepts on this thread */
	if ((unsigned int)pt->fds_count == context->fd_limit_per_thread - 1)
		lws_accept_modulation(context, pt, 0);
#endif

	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					      wsi->user_space, (void *)&pa, 1))
		ret = -1;

	return ret;
}
int
__remove_wsi_socket_from_fds(struct lws *wsi)
{
struct lws_context *context = wsi->context;
struct lws_pollargs pa = { wsi->desc.sockfd, 0, 0 };
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
struct lws *end_wsi;
int v;
int m, ret = 0;
#if !defined(_WIN32)
if (wsi->desc.sockfd - lws_plat_socket_offset() > context->max_fds) {
lwsl_err("fd %d too high (%d)\n", wsi->desc.sockfd,
context->max_fds);
return 1;
}
#endif
if (wsi->vhost &&
wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
wsi->user_space, (void *)&pa, 1))
return -1;
lws_same_vh_protocol_remove(wsi);
/* the guy who is to be deleted's slot index in pt->fds */
m = wsi->position_in_fds_table;
/* these are the only valid possibilities for position_in_fds_table */
assert(m == LWS_NO_FDS_POS || (m >= 0 &&
(unsigned int)m < pt->fds_count));
if (context->event_loop_ops->io)
context->event_loop_ops->io(wsi,
LWS_EV_STOP | LWS_EV_READ | LWS_EV_WRITE |
LWS_EV_PREPARE_DELETION);
lwsl_debug("%s: wsi=%p, skt=%d, fds pos=%d, end guy pos=%d, endfd=%d\n",
__func__, wsi, wsi->desc.sockfd, wsi->position_in_fds_table,
pt->fds_count, pt->fds[pt->fds_count].fd);
if (m != LWS_NO_FDS_POS) {
/* have the last guy take up the now vacant slot */
pt->fds[m] = pt->fds[pt->fds_count - 1];
/* this decrements pt->fds_count */
lws_plat_delete_socket_from_fds(context, wsi, m);
pt->count_conns--;
v = (int) pt->fds[m].fd;
/* end guy's "position in fds table" is now the deletion
* guy's old one */
end_wsi = wsi_from_fd(context, v);
if (!end_wsi) {
lwsl_err("no wsi for fd %d pos %d, pt->fds_count=%d\n",
(int)pt->fds[m].fd, m, pt->fds_count);
assert(0);
} else
end_wsi->position_in_fds_table = m;
/* deletion guy's lws_lookup entry needs nuking */
delete_from_fd(context, wsi->desc.sockfd);
/* removed wsi has no position any more */
wsi->position_in_fds_table = LWS_NO_FDS_POS;
}
/* remove also from external POLL support via protocol 0 */
if (lws_socket_is_valid(wsi->desc.sockfd) && wsi->vhost &&
wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_DEL_POLL_FD,
wsi->user_space, (void *) &pa, 0))
ret = -1;
#ifndef LWS_NO_SERVER
if (!context->being_destroyed &&
/* if this made some room, accept connects on this thread */
(unsigned int)pt->fds_count < context->fd_limit_per_thread - 1)
lws_accept_modulation(context, pt, 1);
#endif
if (wsi->vhost &&
wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
wsi->user_space, (void *) &pa, 1))
ret = -1;
return ret;
}
/*
 * Lock-held variant of lws_change_pollfd(): wraps _lws_change_pollfd()
 * in the external-poll LOCK_POLL / UNLOCK_POLL bracket.
 */
int
__lws_change_pollfd(struct lws *wsi, int _and, int _or)
{
	struct lws_context *context;
	struct lws_pollargs pa;
	int result;

	/* nothing to do without a pollfd slot or a protocol binding */
	if (!wsi || (!wsi->protocol && !wsi->event_pipe) ||
	    wsi->position_in_fds_table == LWS_NO_FDS_POS)
		return 0;

	context = lws_get_context(wsi);
	if (!context)
		return 1;

	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					      wsi->user_space, (void *)&pa, 0))
		return -1;

	result = _lws_change_pollfd(wsi, _and, _or, &pa);

	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					      wsi->user_space, (void *)&pa, 0))
		result = -1;

	return result;
}
/*
 * Public entry: take the per-thread lock around __lws_change_pollfd().
 */
int
lws_change_pollfd(struct lws *wsi, int _and, int _or)
{
	struct lws_context_per_thread *pt =
				&wsi->context->pt[(int)wsi->tsi];
	int rc;

	lws_pt_lock(pt, __func__);
	rc = __lws_change_pollfd(wsi, _and, _or);
	lws_pt_unlock(pt);

	return rc;
}
/*
 * Request a WRITEABLE callback for this wsi: roles with their own
 * writeable bookkeeping get first shot, then POLLOUT is armed on the
 * underlying network wsi.  Returns 1 on success, 0 when the connection
 * cannot be written anymore, -1 on failure to arm POLLOUT.
 */
LWS_VISIBLE int
lws_callback_on_writable(struct lws *wsi)
{
	struct lws_context_per_thread *pt;

	/* no point asking on a shut-down or dead connection */
	if (lwsi_state(wsi) == LRS_SHUTDOWN ||
	    wsi->socket_is_permanently_unusable)
		return 0;

	pt = &wsi->context->pt[(int)wsi->tsi];

	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITEABLE_CB_REQ, 1);
#if defined(LWS_WITH_STATS)
	if (!wsi->active_writable_req_us) {
		wsi->active_writable_req_us = lws_time_in_microseconds();
		lws_stats_atomic_bump(wsi->context, pt,
				      LWSSTATS_C_WRITEABLE_CB_EFF_REQ, 1);
	}
#endif

	if (wsi->role_ops->callback_on_writable) {
		if (wsi->role_ops->callback_on_writable(wsi))
			return 1;
		/* not handled by the role: arm the network-level wsi */
		wsi = lws_get_network_wsi(wsi);
	}

	if (wsi->position_in_fds_table == LWS_NO_FDS_POS) {
		lwsl_debug("%s: failed to find socket %d\n", __func__,
			   wsi->desc.sockfd);
		return -1;
	}

	if (__lws_change_pollfd(wsi, 0, LWS_POLLOUT))
		return -1;

	return 1;
}
/*
 * stitch protocol choice into the vh protocol linked list
 * We always insert ourselves at the start of the list
 *
 * X <-> B
 * X <-> pAn <-> pB
 *
 * Illegal to attach more than once without detach inbetween
 */
void
lws_same_vh_protocol_insert(struct lws *wsi, int n)
{
	lws_vhost_lock(wsi->vhost);

	/* defensively detach first: double-attach would corrupt the list */
	if (!lws_dll_is_null(&wsi->same_vh_protocol))
		lws_dll_lws_remove(&wsi->same_vh_protocol);

	lws_dll_lws_add_front(&wsi->same_vh_protocol,
			      &wsi->vhost->same_vh_protocol_heads[n]);

	lws_vhost_unlock(wsi->vhost);
}
/* Lock-held detach from the vhost's same-protocol list (no-op if detached) */
void
__lws_same_vh_protocol_remove(struct lws *wsi)
{
	if (lws_dll_is_null(&wsi->same_vh_protocol))
		return;

	lws_dll_lws_remove(&wsi->same_vh_protocol);
}
/* Public detach: take the vhost lock around the lock-held variant */
void
lws_same_vh_protocol_remove(struct lws *wsi)
{
	if (!wsi->vhost)
		return;

	lws_vhost_lock(wsi->vhost);
	__lws_same_vh_protocol_remove(wsi);
	lws_vhost_unlock(wsi->vhost);
}
LWS_VISIBLE int
lws_callback_on_writable_all_protocol_vhost(const struct lws_vhost *vhost,
const struct lws_protocols *protocol)
{
struct lws *wsi;
int n;
if (protocol < vhost->protocols ||
protocol >= (vhost->protocols + vhost->count_protocols)) {
lwsl_err("%s: protocol %p is not from vhost %p (%p - %p)\n",
__func__, protocol, vhost->protocols, vhost,
(vhost->protocols + vhost->count_protocols));
return -1;
}
n = (int)(protocol - vhost->protocols);
lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
vhost->same_vh_protocol_heads[n].next) {
wsi = lws_container_of(d, struct lws, same_vh_protocol);
assert(wsi->protocol == protocol);
lws_callback_on_writable(wsi);
} lws_end_foreach_dll_safe(d, d1);
return 0;
}
/*
 * Ask for writeable callbacks for the given protocol across every vhost
 * in the context.  Each vhost has its own protocols array, so the match
 * is done per-vhost by callback pointer plus name.
 */
LWS_VISIBLE int
lws_callback_on_writable_all_protocol(const struct lws_context *context,
				      const struct lws_protocols *protocol)
{
	struct lws_vhost *vh;
	int i;

	if (!context)
		return 0;

	for (vh = context->vhost_list; vh; vh = vh->vhost_next) {
		for (i = 0; i < vh->count_protocols; i++)
			if (protocol->callback == vh->protocols[i].callback &&
			    !strcmp(protocol->name, vh->protocols[i].name))
				break;

		if (i != vh->count_protocols)
			lws_callback_on_writable_all_protocol_vhost(
						     vh, &vh->protocols[i]);
	}

	return 0;
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff