Diffstat (limited to 'thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api')
-rw-r--r--  thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/api_lib.c    1010
-rw-r--r--  thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/api_msg.c    1947
-rw-r--r--  thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/err.c         115
-rw-r--r--  thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netbuf.c      246
-rw-r--r--  thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netdb.c       413
-rw-r--r--  thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netifapi.c    221
-rw-r--r--  thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/pppapi.c      416
-rw-r--r--  thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/sockets.c    2827
-rw-r--r--  thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/tcpip.c       518
9 files changed, 7713 insertions, 0 deletions
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/api_lib.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/api_lib.c
new file mode 100644
index 0000000..3c1d6a6
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/api_lib.c
@@ -0,0 +1,1010 @@
+/**
+ * @file
+ * Sequential API External module
+ *
+ * @defgroup netconn Netconn API
+ * @ingroup sequential_api
+ * Thread-safe, to be called from non-TCPIP threads only.
+ * TX/RX handling based on @ref netbuf (containing @ref pbuf)
+ * to avoid copying data around.
+ *
+ * @defgroup netconn_common Common functions
+ * @ingroup netconn
+ * For use with TCP and UDP
+ *
+ * @defgroup netconn_tcp TCP only
+ * @ingroup netconn
+ * TCP only functions
+ *
+ * @defgroup netconn_udp UDP only
+ * @ingroup netconn
+ * UDP only functions
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ */
+
+/* This is the part of the API that is linked with
+ the application */
+
+#include "lwip/opt.h"
+
+#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/api.h"
+#include "lwip/memp.h"
+
+#include "lwip/ip.h"
+#include "lwip/raw.h"
+#include "lwip/udp.h"
+#include "lwip/priv/api_msg.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/priv/tcpip_priv.h"
+
+#include <string.h>
+
+#define API_MSG_VAR_REF(name) API_VAR_REF(name)
+#define API_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct api_msg, name)
+#define API_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, ERR_MEM)
+#define API_MSG_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, NULL)
+#define API_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_API_MSG, name)
+
+static err_t netconn_close_shutdown(struct netconn *conn, u8_t how);
+
+/**
+ * Call the lower part of a netconn_* function
+ * This function is then running in the thread context
+ * of tcpip_thread and has exclusive access to lwIP core code.
+ *
+ * @param fn function to call
+ * @param apimsg a struct containing the function to call and its parameters
+ * @return ERR_OK if the function was called, another err_t if not
+ */
+static err_t
+netconn_apimsg(tcpip_callback_fn fn, struct api_msg *apimsg)
+{
+ err_t err;
+
+#ifdef LWIP_DEBUG
+ /* catch functions that don't set err */
+ apimsg->err = ERR_VAL;
+#endif /* LWIP_DEBUG */
+
+#if LWIP_NETCONN_SEM_PER_THREAD
+ apimsg->op_completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+
+ err = tcpip_send_msg_wait_sem(fn, apimsg, LWIP_API_MSG_SEM(apimsg));
+ if (err == ERR_OK) {
+ return apimsg->err;
+ }
+ return err;
+}
+
+/**
+ * Create a new netconn (of a specific type) that has a callback function.
+ * The corresponding pcb is also created.
+ *
+ * @param t the type of 'connection' to create (@see enum netconn_type)
+ * @param proto the IP protocol for RAW IP pcbs
+ * @param callback a function to call on status changes (RX available, TX'ed)
+ * @return a newly allocated struct netconn or
+ * NULL on memory error
+ */
+struct netconn*
+netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, netconn_callback callback)
+{
+ struct netconn *conn;
+ API_MSG_VAR_DECLARE(msg);
+ API_MSG_VAR_ALLOC_RETURN_NULL(msg);
+
+ conn = netconn_alloc(t, callback);
+ if (conn != NULL) {
+ err_t err;
+
+ API_MSG_VAR_REF(msg).msg.n.proto = proto;
+ API_MSG_VAR_REF(msg).conn = conn;
+ err = netconn_apimsg(lwip_netconn_do_newconn, &API_MSG_VAR_REF(msg));
+ if (err != ERR_OK) {
+ LWIP_ASSERT("freeing conn without freeing pcb", conn->pcb.tcp == NULL);
+ LWIP_ASSERT("conn has no recvmbox", sys_mbox_valid(&conn->recvmbox));
+#if LWIP_TCP
+ LWIP_ASSERT("conn->acceptmbox shouldn't exist", !sys_mbox_valid(&conn->acceptmbox));
+#endif /* LWIP_TCP */
+#if !LWIP_NETCONN_SEM_PER_THREAD
+ LWIP_ASSERT("conn has no op_completed", sys_sem_valid(&conn->op_completed));
+ sys_sem_free(&conn->op_completed);
+#endif /* !LWIP_NETCONN_SEM_PER_THREAD */
+ sys_mbox_free(&conn->recvmbox);
+ memp_free(MEMP_NETCONN, conn);
+ API_MSG_VAR_FREE(msg);
+ return NULL;
+ }
+ }
+ API_MSG_VAR_FREE(msg);
+ return conn;
+}
+
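A minimal usage sketch (not part of the SDK sources): opening a UDP netconn through the netconn_new_with_callback() wrapper from lwip/api.h, which expands to the constructor above. The names conn_event and example_open_udp are illustrative; a real callback would typically signal a semaphore or poll set.

#include "lwip/api.h"

/* Illustrative event hook: lwIP calls this on RX/TX events (see API_EVENT). */
static void conn_event(struct netconn *conn, enum netconn_evt evt, u16_t len)
{
  LWIP_UNUSED_ARG(conn);
  LWIP_UNUSED_ARG(evt);
  LWIP_UNUSED_ARG(len);
  /* e.g. signal a semaphore the application task is waiting on */
}

static struct netconn *example_open_udp(void)
{
  /* expands to netconn_new_with_proto_and_callback(NETCONN_UDP, 0, conn_event) */
  struct netconn *conn = netconn_new_with_callback(NETCONN_UDP, conn_event);
  return conn; /* NULL on memory error */
}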
+/**
+ * @ingroup netconn_common
+ * Close a netconn 'connection' and free its resources.
+ * UDP and RAW connections are completely closed; TCP pcbs might still be in a wait state
+ * after this returns.
+ *
+ * @param conn the netconn to delete
+ * @return ERR_OK if the connection was deleted
+ */
+err_t
+netconn_delete(struct netconn *conn)
+{
+ err_t err;
+ API_MSG_VAR_DECLARE(msg);
+
+ /* No ASSERT here because it is possible to get (conn == NULL) if we got an accept error */
+ if (conn == NULL) {
+ return ERR_OK;
+ }
+
+ API_MSG_VAR_ALLOC(msg);
+ API_MSG_VAR_REF(msg).conn = conn;
+#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER
+ /* get the time we started, which is later compared to
+ sys_now() + conn->send_timeout */
+ API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now();
+#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */
+#if LWIP_TCP
+ API_MSG_VAR_REF(msg).msg.sd.polls_left =
+ ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1;
+#endif /* LWIP_TCP */
+#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */
+ err = netconn_apimsg(lwip_netconn_do_delconn, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+
+ if (err != ERR_OK) {
+ return err;
+ }
+
+ netconn_free(conn);
+
+ return ERR_OK;
+}
+
+/**
+ * Get the local or remote IP address and port of a netconn.
+ * For RAW netconns, this returns the protocol instead of a port!
+ *
+ * @param conn the netconn to query
+ * @param addr a pointer to which to save the IP address
+ * @param port a pointer to which to save the port (or protocol for RAW)
+ * @param local 1 to get the local IP address, 0 to get the remote one
+ * @return ERR_CONN for invalid connections
+ * ERR_OK if the information was retrieved
+ */
+err_t
+netconn_getaddr(struct netconn *conn, ip_addr_t *addr, u16_t *port, u8_t local)
+{
+ API_MSG_VAR_DECLARE(msg);
+ err_t err;
+
+ LWIP_ERROR("netconn_getaddr: invalid conn", (conn != NULL), return ERR_ARG;);
+ LWIP_ERROR("netconn_getaddr: invalid addr", (addr != NULL), return ERR_ARG;);
+ LWIP_ERROR("netconn_getaddr: invalid port", (port != NULL), return ERR_ARG;);
+
+ API_MSG_VAR_ALLOC(msg);
+ API_MSG_VAR_REF(msg).conn = conn;
+ API_MSG_VAR_REF(msg).msg.ad.local = local;
+#if LWIP_MPU_COMPATIBLE
+ err = netconn_apimsg(lwip_netconn_do_getaddr, &API_MSG_VAR_REF(msg));
+ *addr = msg->msg.ad.ipaddr;
+ *port = msg->msg.ad.port;
+#else /* LWIP_MPU_COMPATIBLE */
+ msg.msg.ad.ipaddr = addr;
+ msg.msg.ad.port = port;
+ err = netconn_apimsg(lwip_netconn_do_getaddr, &msg);
+#endif /* LWIP_MPU_COMPATIBLE */
+ API_MSG_VAR_FREE(msg);
+
+ return err;
+}
+
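Hedged example of querying the bound address after a bind: the helper name and the printf() logging are illustrative assumptions, not SDK code.

#include <stdio.h>
#include "lwip/api.h"

/* Print the local address and port a netconn is bound to. */
static void example_print_local_addr(struct netconn *conn)
{
  ip_addr_t addr;
  u16_t port;
  if (netconn_getaddr(conn, &addr, &port, 1 /* local */) == ERR_OK) {
    printf("bound to %s:%u\n", ipaddr_ntoa(&addr), (unsigned)port);
  }
}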
+/**
+ * @ingroup netconn_common
+ * Bind a netconn to a specific local IP address and port.
+ * Binding one netconn twice might not always be checked correctly!
+ *
+ * @param conn the netconn to bind
+ * @param addr the local IP address to bind the netconn to
+ * (use IP4_ADDR_ANY/IP6_ADDR_ANY to bind to all addresses)
+ * @param port the local port to bind the netconn to (not used for RAW)
+ * @return ERR_OK if bound, any other err_t on failure
+ */
+err_t
+netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port)
+{
+ API_MSG_VAR_DECLARE(msg);
+ err_t err;
+
+ LWIP_ERROR("netconn_bind: invalid conn", (conn != NULL), return ERR_ARG;);
+
+#if LWIP_IPV4
+ /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */
+ if (addr == NULL) {
+ addr = IP4_ADDR_ANY;
+ }
+#endif /* LWIP_IPV4 */
+
+#if LWIP_IPV4 && LWIP_IPV6
+ /* "Socket API like" dual-stack support: If IP to bind to is IP6_ADDR_ANY,
+ * and NETCONN_FLAG_IPV6_V6ONLY is 0, use IP_ANY_TYPE to bind
+ */
+ if ((netconn_get_ipv6only(conn) == 0) &&
+ ip_addr_cmp(addr, IP6_ADDR_ANY)) {
+ addr = IP_ANY_TYPE;
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+ API_MSG_VAR_ALLOC(msg);
+ API_MSG_VAR_REF(msg).conn = conn;
+ API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr);
+ API_MSG_VAR_REF(msg).msg.bc.port = port;
+ err = netconn_apimsg(lwip_netconn_do_bind, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+
+ return err;
+}
+
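Illustrative sketch of binding to all local addresses; the port number 5683 and the helper name are arbitrary example choices.

#include "lwip/api.h"

/* Bind a netconn to all local addresses on an arbitrary port. */
static err_t example_bind_any(struct netconn *conn)
{
  /* NULL would also work here: netconn_bind() maps it to IP4_ADDR_ANY */
  return netconn_bind(conn, IP_ADDR_ANY, 5683);
}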
+/**
+ * @ingroup netconn_common
+ * Connect a netconn to a specific remote IP address and port.
+ *
+ * @param conn the netconn to connect
+ * @param addr the remote IP address to connect to
+ * @param port the remote port to connect to (not used for RAW)
+ * @return ERR_OK if connected, return value of tcp_/udp_/raw_connect otherwise
+ */
+err_t
+netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port)
+{
+ API_MSG_VAR_DECLARE(msg);
+ err_t err;
+
+ LWIP_ERROR("netconn_connect: invalid conn", (conn != NULL), return ERR_ARG;);
+
+#if LWIP_IPV4
+ /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */
+ if (addr == NULL) {
+ addr = IP4_ADDR_ANY;
+ }
+#endif /* LWIP_IPV4 */
+
+ API_MSG_VAR_ALLOC(msg);
+ API_MSG_VAR_REF(msg).conn = conn;
+ API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr);
+ API_MSG_VAR_REF(msg).msg.bc.port = port;
+ err = netconn_apimsg(lwip_netconn_do_connect, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+
+ return err;
+}
+
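Illustrative sketch of a TCP connect; the address 192.0.2.10 and port 7 are documentation examples, not stack defaults.

#include "lwip/api.h"

/* Connect a TCP netconn to an example remote address and port. */
static err_t example_connect(struct netconn *conn)
{
  ip_addr_t remote;
  if (!ipaddr_aton("192.0.2.10", &remote)) {
    return ERR_ARG;
  }
  return netconn_connect(conn, &remote, 7);
}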
+/**
+ * @ingroup netconn_udp
+ * Disconnect a netconn from its current peer (only valid for UDP netconns).
+ *
+ * @param conn the netconn to disconnect
+ * @return See @ref err_t
+ */
+err_t
+netconn_disconnect(struct netconn *conn)
+{
+ API_MSG_VAR_DECLARE(msg);
+ err_t err;
+
+ LWIP_ERROR("netconn_disconnect: invalid conn", (conn != NULL), return ERR_ARG;);
+
+ API_MSG_VAR_ALLOC(msg);
+ API_MSG_VAR_REF(msg).conn = conn;
+ err = netconn_apimsg(lwip_netconn_do_disconnect, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+
+ return err;
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Set a TCP netconn into listen mode
+ *
+ * @param conn the tcp netconn to set to listen mode
+ * @param backlog the listen backlog, only used if TCP_LISTEN_BACKLOG==1
+ * @return ERR_OK if the netconn was set to listen (UDP and RAW netconns
+ * don't return any error (yet?))
+ */
+err_t
+netconn_listen_with_backlog(struct netconn *conn, u8_t backlog)
+{
+#if LWIP_TCP
+ API_MSG_VAR_DECLARE(msg);
+ err_t err;
+
+ /* This does no harm. If TCP_LISTEN_BACKLOG is off, backlog is unused. */
+ LWIP_UNUSED_ARG(backlog);
+
+ LWIP_ERROR("netconn_listen: invalid conn", (conn != NULL), return ERR_ARG;);
+
+ API_MSG_VAR_ALLOC(msg);
+ API_MSG_VAR_REF(msg).conn = conn;
+#if TCP_LISTEN_BACKLOG
+ API_MSG_VAR_REF(msg).msg.lb.backlog = backlog;
+#endif /* TCP_LISTEN_BACKLOG */
+ err = netconn_apimsg(lwip_netconn_do_listen, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+
+ return err;
+#else /* LWIP_TCP */
+ LWIP_UNUSED_ARG(conn);
+ LWIP_UNUSED_ARG(backlog);
+ return ERR_ARG;
+#endif /* LWIP_TCP */
+}
+
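A condensed sketch of setting up a TCP listener with the function above together with the netconn_new() wrapper from lwip/api.h; port 80 and the backlog of 4 are arbitrary, and error handling is minimal.

#include "lwip/api.h"

/* Create, bind and listen on a TCP netconn; returns NULL on any failure. */
static struct netconn *example_make_listener(void)
{
  struct netconn *conn = netconn_new(NETCONN_TCP);
  if (conn != NULL) {
    if ((netconn_bind(conn, IP_ADDR_ANY, 80) != ERR_OK) ||
        (netconn_listen_with_backlog(conn, 4) != ERR_OK)) {
      netconn_delete(conn);
      conn = NULL;
    }
  }
  return conn;
}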
+/**
+ * @ingroup netconn_tcp
+ * Accept a new connection on a TCP listening netconn.
+ *
+ * @param conn the TCP listen netconn
+ * @param new_conn pointer where the new connection is stored
+ * @return ERR_OK if a new connection has been received or an error
+ * code otherwise
+ */
+err_t
+netconn_accept(struct netconn *conn, struct netconn **new_conn)
+{
+#if LWIP_TCP
+ void *accept_ptr;
+ struct netconn *newconn;
+#if TCP_LISTEN_BACKLOG
+ API_MSG_VAR_DECLARE(msg);
+#endif /* TCP_LISTEN_BACKLOG */
+
+ LWIP_ERROR("netconn_accept: invalid pointer", (new_conn != NULL), return ERR_ARG;);
+ *new_conn = NULL;
+ LWIP_ERROR("netconn_accept: invalid conn", (conn != NULL), return ERR_ARG;);
+
+ if (ERR_IS_FATAL(conn->last_err)) {
+ /* don't recv on fatal errors: this might block the application task
+ waiting on acceptmbox forever! */
+ return conn->last_err;
+ }
+ if (!sys_mbox_valid(&conn->acceptmbox)) {
+ return ERR_CLSD;
+ }
+
+#if TCP_LISTEN_BACKLOG
+ API_MSG_VAR_ALLOC(msg);
+#endif /* TCP_LISTEN_BACKLOG */
+
+#if LWIP_SO_RCVTIMEO
+ if (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, conn->recv_timeout) == SYS_ARCH_TIMEOUT) {
+#if TCP_LISTEN_BACKLOG
+ API_MSG_VAR_FREE(msg);
+#endif /* TCP_LISTEN_BACKLOG */
+ return ERR_TIMEOUT;
+ }
+#else
+ sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0);
+#endif /* LWIP_SO_RCVTIMEO*/
+ newconn = (struct netconn *)accept_ptr;
+ /* Register event with callback */
+ API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0);
+
+ if (accept_ptr == &netconn_aborted) {
+ /* a connection has been aborted: out of pcbs or out of netconns during accept */
+ /* @todo: set netconn error, but this would be fatal and thus block further accepts */
+#if TCP_LISTEN_BACKLOG
+ API_MSG_VAR_FREE(msg);
+#endif /* TCP_LISTEN_BACKLOG */
+ return ERR_ABRT;
+ }
+ if (newconn == NULL) {
+ /* connection has been aborted */
+ /* in this special case, we set the netconn error from application thread, as
+ on a ready-to-accept listening netconn, there should not be anything running
+ in tcpip_thread */
+ NETCONN_SET_SAFE_ERR(conn, ERR_CLSD);
+#if TCP_LISTEN_BACKLOG
+ API_MSG_VAR_FREE(msg);
+#endif /* TCP_LISTEN_BACKLOG */
+ return ERR_CLSD;
+ }
+#if TCP_LISTEN_BACKLOG
+ /* Let the stack know that we have accepted the connection. */
+ API_MSG_VAR_REF(msg).conn = newconn;
+ /* don't care for the return value of lwip_netconn_do_recv */
+ netconn_apimsg(lwip_netconn_do_accepted, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+#endif /* TCP_LISTEN_BACKLOG */
+
+ *new_conn = newconn;
+ /* don't set conn->last_err: it's only ERR_OK, anyway */
+ return ERR_OK;
+#else /* LWIP_TCP */
+ LWIP_UNUSED_ARG(conn);
+ LWIP_UNUSED_ARG(new_conn);
+ return ERR_ARG;
+#endif /* LWIP_TCP */
+}
+
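A typical accept loop built on netconn_accept(); the helper name and the commented-out handle_client() are placeholders for application code.

#include "lwip/api.h"

/* Serve incoming connections until accept reports an error. */
static void example_accept_loop(struct netconn *listener)
{
  struct netconn *client;
  while (netconn_accept(listener, &client) == ERR_OK) {
    /* handle_client(client); -- application-specific work goes here */
    netconn_close(client);
    netconn_delete(client);
  }
  /* the loop ends on e.g. ERR_TIMEOUT, ERR_ABRT or ERR_CLSD */
}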
+/**
+ * @ingroup netconn_common
+ * Receive data: actual implementation that doesn't care whether pbuf or netbuf
+ * is received
+ *
+ * @param conn the netconn from which to receive data
+ * @param new_buf pointer where a new pbuf/netbuf is stored when data has been received
+ * @return ERR_OK if data has been received, an error code otherwise (timeout,
+ * memory error or another error)
+ */
+static err_t
+netconn_recv_data(struct netconn *conn, void **new_buf)
+{
+ void *buf = NULL;
+ u16_t len;
+#if LWIP_TCP
+ API_MSG_VAR_DECLARE(msg);
+#if LWIP_MPU_COMPATIBLE
+ msg = NULL;
+#endif
+#endif /* LWIP_TCP */
+
+ LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;);
+ *new_buf = NULL;
+ LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;);
+#if LWIP_TCP
+#if (LWIP_UDP || LWIP_RAW)
+ if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)
+#endif /* (LWIP_UDP || LWIP_RAW) */
+ {
+ if (!sys_mbox_valid(&conn->recvmbox)) {
+ /* This happens when calling this function after receiving FIN */
+ return sys_mbox_valid(&conn->acceptmbox) ? ERR_CONN : ERR_CLSD;
+ }
+ }
+#endif /* LWIP_TCP */
+ LWIP_ERROR("netconn_recv: invalid recvmbox", sys_mbox_valid(&conn->recvmbox), return ERR_CONN;);
+
+ if (ERR_IS_FATAL(conn->last_err)) {
+ /* don't recv on fatal errors: this might block the application task
+ waiting on recvmbox forever! */
+ /* @todo: this does not allow us to fetch data that has been put into recvmbox
+ before the fatal error occurred - is that a problem? */
+ return conn->last_err;
+ }
+#if LWIP_TCP
+#if (LWIP_UDP || LWIP_RAW)
+ if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)
+#endif /* (LWIP_UDP || LWIP_RAW) */
+ {
+ API_MSG_VAR_ALLOC(msg);
+ }
+#endif /* LWIP_TCP */
+
+#if LWIP_SO_RCVTIMEO
+ if (sys_arch_mbox_fetch(&conn->recvmbox, &buf, conn->recv_timeout) == SYS_ARCH_TIMEOUT) {
+#if LWIP_TCP
+#if (LWIP_UDP || LWIP_RAW)
+ if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)
+#endif /* (LWIP_UDP || LWIP_RAW) */
+ {
+ API_MSG_VAR_FREE(msg);
+ }
+#endif /* LWIP_TCP */
+ return ERR_TIMEOUT;
+ }
+#else
+ sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0);
+#endif /* LWIP_SO_RCVTIMEO*/
+
+#if LWIP_TCP
+#if (LWIP_UDP || LWIP_RAW)
+ if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)
+#endif /* (LWIP_UDP || LWIP_RAW) */
+ {
+ /* Let the stack know that we have taken the data. */
+ /* @todo: Speedup: Don't block and wait for the answer here
+ (to prevent multiple thread-switches). */
+ API_MSG_VAR_REF(msg).conn = conn;
+ if (buf != NULL) {
+ API_MSG_VAR_REF(msg).msg.r.len = ((struct pbuf *)buf)->tot_len;
+ } else {
+ API_MSG_VAR_REF(msg).msg.r.len = 1;
+ }
+
+ /* don't care for the return value of lwip_netconn_do_recv */
+ netconn_apimsg(lwip_netconn_do_recv, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+
+ /* If we are closed, we indicate that we no longer wish to use the socket */
+ if (buf == NULL) {
+ API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0);
+ if (conn->pcb.ip == NULL) {
+ /* race condition: RST during recv */
+ return conn->last_err == ERR_OK ? ERR_RST : conn->last_err;
+ }
+ /* RX side is closed, so deallocate the recvmbox */
+ netconn_close_shutdown(conn, NETCONN_SHUT_RD);
+ /* Don't store ERR_CLSD as conn->err since we are only half-closed */
+ return ERR_CLSD;
+ }
+ len = ((struct pbuf *)buf)->tot_len;
+ }
+#endif /* LWIP_TCP */
+#if LWIP_TCP && (LWIP_UDP || LWIP_RAW)
+ else
+#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */
+#if (LWIP_UDP || LWIP_RAW)
+ {
+ LWIP_ASSERT("buf != NULL", buf != NULL);
+ len = netbuf_len((struct netbuf*)buf);
+ }
+#endif /* (LWIP_UDP || LWIP_RAW) */
+
+#if LWIP_SO_RCVBUF
+ SYS_ARCH_DEC(conn->recv_avail, len);
+#endif /* LWIP_SO_RCVBUF */
+ /* Register event with callback */
+ API_EVENT(conn, NETCONN_EVT_RCVMINUS, len);
+
+ LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_recv_data: received %p, len=%"U16_F"\n", buf, len));
+
+ *new_buf = buf;
+ /* don't set conn->last_err: it's only ERR_OK, anyway */
+ return ERR_OK;
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Receive data (in form of a pbuf) from a TCP netconn
+ *
+ * @param conn the netconn from which to receive data
+ * @param new_buf pointer where a new pbuf is stored when data has been received
+ * @return ERR_OK if data has been received, an error code otherwise (timeout,
+ * memory error or another error)
+ * ERR_ARG if conn is not a TCP netconn
+ */
+err_t
+netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf)
+{
+ LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL) &&
+ NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;);
+
+ return netconn_recv_data(conn, (void **)new_buf);
+}
+
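Sketch of draining a TCP netconn with the zero-copy pbuf variant above; consume() in the comment is a placeholder, not an lwIP function.

#include "lwip/api.h"

/* Read from a TCP netconn until the peer closes or an error occurs. */
static void example_recv_tcp(struct netconn *conn)
{
  struct pbuf *p;
  while (netconn_recv_tcp_pbuf(conn, &p) == ERR_OK) {
    /* p may be a chain of p->tot_len bytes; walk it via p->payload/p->next */
    /* consume(p->payload, p->len); */
    pbuf_free(p);
  }
  /* typically ends with ERR_CLSD once the remote side has closed */
}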
+/**
+ * @ingroup netconn_common
+ * Receive data (in form of a netbuf containing a packet buffer) from a netconn
+ *
+ * @param conn the netconn from which to receive data
+ * @param new_buf pointer where a new netbuf is stored when data has been received
+ * @return ERR_OK if data has been received, an error code otherwise (timeout,
+ * memory error or another error)
+ */
+err_t
+netconn_recv(struct netconn *conn, struct netbuf **new_buf)
+{
+#if LWIP_TCP
+ struct netbuf *buf = NULL;
+ err_t err;
+#endif /* LWIP_TCP */
+
+ LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;);
+ *new_buf = NULL;
+ LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;);
+
+#if LWIP_TCP
+#if (LWIP_UDP || LWIP_RAW)
+ if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)
+#endif /* (LWIP_UDP || LWIP_RAW) */
+ {
+ struct pbuf *p = NULL;
+ /* This is not a listening netconn, since recvmbox is set */
+
+ buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
+ if (buf == NULL) {
+ return ERR_MEM;
+ }
+
+ err = netconn_recv_data(conn, (void **)&p);
+ if (err != ERR_OK) {
+ memp_free(MEMP_NETBUF, buf);
+ return err;
+ }
+ LWIP_ASSERT("p != NULL", p != NULL);
+
+ buf->p = p;
+ buf->ptr = p;
+ buf->port = 0;
+ ip_addr_set_zero(&buf->addr);
+ *new_buf = buf;
+ /* don't set conn->last_err: it's only ERR_OK, anyway */
+ return ERR_OK;
+ }
+#endif /* LWIP_TCP */
+#if LWIP_TCP && (LWIP_UDP || LWIP_RAW)
+ else
+#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */
+ {
+#if (LWIP_UDP || LWIP_RAW)
+ return netconn_recv_data(conn, (void **)new_buf);
+#endif /* (LWIP_UDP || LWIP_RAW) */
+ }
+}
+
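Sketch of receiving a single UDP datagram as a netbuf; the helper name is illustrative and only the first pbuf of the chain is inspected.

#include "lwip/api.h"

/* Receive one datagram and inspect its payload and source. */
static err_t example_recv_udp(struct netconn *conn)
{
  struct netbuf *buf;
  err_t err = netconn_recv(conn, &buf);
  if (err == ERR_OK) {
    void *data;
    u16_t len;
    if (netbuf_data(buf, &data, &len) == ERR_OK) {
      /* source is available via netbuf_fromaddr(buf) / netbuf_fromport(buf) */
    }
    netbuf_delete(buf); /* the receiver owns and must free the netbuf */
  }
  return err;
}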
+/**
+ * @ingroup netconn_udp
+ * Send data (in form of a netbuf) to a specific remote IP address and port.
+ * Only to be used for UDP and RAW netconns (not TCP).
+ *
+ * @param conn the netconn over which to send data
+ * @param buf a netbuf containing the data to send
+ * @param addr the remote IP address to which to send the data
+ * @param port the remote port to which to send the data
+ * @return ERR_OK if data was sent, any other err_t on error
+ */
+err_t
+netconn_sendto(struct netconn *conn, struct netbuf *buf, const ip_addr_t *addr, u16_t port)
+{
+ if (buf != NULL) {
+ ip_addr_set(&buf->addr, addr);
+ buf->port = port;
+ return netconn_send(conn, buf);
+ }
+ return ERR_VAL;
+}
+
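Sketch of a zero-copy UDP send via netbuf_ref() and netconn_sendto(); the destination address, port and payload are made up for the example.

#include "lwip/api.h"

/* Send a short payload without copying it into stack-owned memory. */
static err_t example_sendto(struct netconn *conn)
{
  static const char payload[] = "hello";
  ip_addr_t dst;
  struct netbuf *buf;
  err_t err;

  if (!ipaddr_aton("192.0.2.10", &dst)) {
    return ERR_ARG;
  }
  buf = netbuf_new();
  if (buf == NULL) {
    return ERR_MEM;
  }
  /* reference the application buffer instead of copying it */
  err = netbuf_ref(buf, payload, sizeof(payload) - 1);
  if (err == ERR_OK) {
    err = netconn_sendto(conn, buf, &dst, 9);
  }
  netbuf_delete(buf);
  return err;
}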
+/**
+ * @ingroup netconn_udp
+ * Send data over a UDP or RAW netconn (that is already connected).
+ *
+ * @param conn the UDP or RAW netconn over which to send data
+ * @param buf a netbuf containing the data to send
+ * @return ERR_OK if data was sent, any other err_t on error
+ */
+err_t
+netconn_send(struct netconn *conn, struct netbuf *buf)
+{
+ API_MSG_VAR_DECLARE(msg);
+ err_t err;
+
+ LWIP_ERROR("netconn_send: invalid conn", (conn != NULL), return ERR_ARG;);
+
+ LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_send: sending %"U16_F" bytes\n", buf->p->tot_len));
+
+ API_MSG_VAR_ALLOC(msg);
+ API_MSG_VAR_REF(msg).conn = conn;
+ API_MSG_VAR_REF(msg).msg.b = buf;
+ err = netconn_apimsg(lwip_netconn_do_send, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+
+ return err;
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Send data over a TCP netconn.
+ *
+ * @param conn the TCP netconn over which to send data
+ * @param dataptr pointer to the application buffer that contains the data to send
+ * @param size size of the application data to send
+ * @param apiflags combination of following flags :
+ * - NETCONN_COPY: data will be copied into memory belonging to the stack
+ * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent
+ * - NETCONN_DONTBLOCK: only write the data if all data can be written at once
+ * @param bytes_written pointer to a location that receives the number of written bytes
+ * @return ERR_OK if data was sent, any other err_t on error
+ */
+err_t
+netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size,
+ u8_t apiflags, size_t *bytes_written)
+{
+ API_MSG_VAR_DECLARE(msg);
+ err_t err;
+ u8_t dontblock;
+
+ LWIP_ERROR("netconn_write: invalid conn", (conn != NULL), return ERR_ARG;);
+ LWIP_ERROR("netconn_write: invalid conn->type", (NETCONNTYPE_GROUP(conn->type)== NETCONN_TCP), return ERR_VAL;);
+ if (size == 0) {
+ return ERR_OK;
+ }
+ dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK);
+#if LWIP_SO_SNDTIMEO
+ if (conn->send_timeout != 0) {
+ dontblock = 1;
+ }
+#endif /* LWIP_SO_SNDTIMEO */
+ if (dontblock && !bytes_written) {
+ /* This implies netconn_write() cannot be used for non-blocking send, since
+ it has no way to return the number of bytes written. */
+ return ERR_VAL;
+ }
+
+ API_MSG_VAR_ALLOC(msg);
+ /* a non-blocking write sends as much as can be queued without blocking */
+ API_MSG_VAR_REF(msg).conn = conn;
+ API_MSG_VAR_REF(msg).msg.w.dataptr = dataptr;
+ API_MSG_VAR_REF(msg).msg.w.apiflags = apiflags;
+ API_MSG_VAR_REF(msg).msg.w.len = size;
+#if LWIP_SO_SNDTIMEO
+ if (conn->send_timeout != 0) {
+ /* get the time we started, which is later compared to
+ sys_now() + conn->send_timeout */
+ API_MSG_VAR_REF(msg).msg.w.time_started = sys_now();
+ } else {
+ API_MSG_VAR_REF(msg).msg.w.time_started = 0;
+ }
+#endif /* LWIP_SO_SNDTIMEO */
+
+ /* For locking the core: this _can_ be delayed on low memory/low send buffer,
+ but if it is, this is done inside api_msg.c:do_write(), so we can use the
+ non-blocking version here. */
+ err = netconn_apimsg(lwip_netconn_do_write, &API_MSG_VAR_REF(msg));
+ if ((err == ERR_OK) && (bytes_written != NULL)) {
+ if (dontblock) {
+ /* nonblocking write: maybe the data has been sent partly */
+ *bytes_written = API_MSG_VAR_REF(msg).msg.w.len;
+ } else {
+ /* blocking call succeeded: all data has been sent */
+ *bytes_written = size;
+ }
+ }
+ API_MSG_VAR_FREE(msg);
+
+ return err;
+}
+
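Sketch of writing a complete buffer with netconn_write_partly(), resuming after partial writes. The bare retry on ERR_WOULDBLOCK is a simplification; a real non-blocking application would wait for the NETCONN_EVT_SENDPLUS callback before retrying.

#include "lwip/api.h"

/* Write 'len' bytes, looping until everything has been queued. */
static err_t example_write_all(struct netconn *conn, const void *data, size_t len)
{
  size_t offset = 0;
  while (offset < len) {
    size_t written = 0;
    err_t err = netconn_write_partly(conn, (const u8_t *)data + offset,
                                     len - offset, NETCONN_COPY, &written);
    if (err == ERR_WOULDBLOCK) {
      continue; /* non-blocking netconn: wait for NETCONN_EVT_SENDPLUS instead */
    }
    if (err != ERR_OK) {
      return err;
    }
    offset += written;
  }
  return ERR_OK;
}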
+/**
+ * @ingroup netconn_tcp
+ * Close or shutdown a TCP netconn (doesn't delete it).
+ *
+ * @param conn the TCP netconn to close or shutdown
+ * @param how fully close or only shutdown one side?
+ * @return ERR_OK if the netconn was closed, any other err_t on error
+ */
+static err_t
+netconn_close_shutdown(struct netconn *conn, u8_t how)
+{
+ API_MSG_VAR_DECLARE(msg);
+ err_t err;
+ LWIP_UNUSED_ARG(how);
+
+ LWIP_ERROR("netconn_close: invalid conn", (conn != NULL), return ERR_ARG;);
+
+ API_MSG_VAR_ALLOC(msg);
+ API_MSG_VAR_REF(msg).conn = conn;
+#if LWIP_TCP
+ /* shutting down both ends is the same as closing */
+ API_MSG_VAR_REF(msg).msg.sd.shut = how;
+#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER
+ /* get the time we started, which is later compared to
+ sys_now() + conn->send_timeout */
+ API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now();
+#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */
+ API_MSG_VAR_REF(msg).msg.sd.polls_left =
+ ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1;
+#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */
+#endif /* LWIP_TCP */
+ err = netconn_apimsg(lwip_netconn_do_close, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+
+ return err;
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Close a TCP netconn (doesn't delete it).
+ *
+ * @param conn the TCP netconn to close
+ * @return ERR_OK if the netconn was closed, any other err_t on error
+ */
+err_t
+netconn_close(struct netconn *conn)
+{
+ /* shutting down both ends is the same as closing */
+ return netconn_close_shutdown(conn, NETCONN_SHUT_RDWR);
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Shut down one or both sides of a TCP netconn (doesn't delete it).
+ *
+ * @param conn the TCP netconn to shut down
+ * @param shut_rx shut down the RX side (no more read possible after this)
+ * @param shut_tx shut down the TX side (no more write possible after this)
+ * @return ERR_OK if the netconn was closed, any other err_t on error
+ */
+err_t
+netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx)
+{
+ return netconn_close_shutdown(conn, (shut_rx ? NETCONN_SHUT_RD : 0) | (shut_tx ? NETCONN_SHUT_WR : 0));
+}
+
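Sketch of a graceful TCP teardown combining netconn_shutdown(), a receive drain, netconn_close() and netconn_delete(); the helper name is illustrative.

#include "lwip/api.h"

/* Shut down TX first (sends FIN), drain pending RX data, then close and free. */
static void example_teardown(struct netconn *conn)
{
  struct pbuf *p;
  netconn_shutdown(conn, 0 /* keep RX open */, 1 /* shut TX */);
  while (netconn_recv_tcp_pbuf(conn, &p) == ERR_OK) {
    pbuf_free(p); /* discard whatever is still in flight */
  }
  netconn_close(conn);  /* close the remaining direction */
  netconn_delete(conn); /* free the netconn itself */
}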
+#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD)
+/**
+ * @ingroup netconn_udp
+ * Join multicast groups for UDP netconns.
+ *
+ * @param conn the UDP netconn for which to change multicast addresses
+ * @param multiaddr IP address of the multicast group to join or leave
+ * @param netif_addr the IP address of the network interface on which to send
+ * the igmp message
+ * @param join_or_leave flag whether to send a join- or leave-message
+ * @return ERR_OK if the action was taken, any err_t on error
+ */
+err_t
+netconn_join_leave_group(struct netconn *conn,
+ const ip_addr_t *multiaddr,
+ const ip_addr_t *netif_addr,
+ enum netconn_igmp join_or_leave)
+{
+ API_MSG_VAR_DECLARE(msg);
+ err_t err;
+
+ LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;);
+
+ API_MSG_VAR_ALLOC(msg);
+
+#if LWIP_IPV4
+ /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */
+ if (multiaddr == NULL) {
+ multiaddr = IP4_ADDR_ANY;
+ }
+ if (netif_addr == NULL) {
+ netif_addr = IP4_ADDR_ANY;
+ }
+#endif /* LWIP_IPV4 */
+
+ API_MSG_VAR_REF(msg).conn = conn;
+ API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr);
+ API_MSG_VAR_REF(msg).msg.jl.netif_addr = API_MSG_VAR_REF(netif_addr);
+ API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave;
+ err = netconn_apimsg(lwip_netconn_do_join_leave_group, &API_MSG_VAR_REF(msg));
+ API_MSG_VAR_FREE(msg);
+
+ return err;
+}
+#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */
+
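Sketch of joining an IPv4 multicast group (requires LWIP_IGMP); the group address 239.0.0.1 is illustrative, and passing NULL relies on the IP4_ADDR_ANY substitution handled above.

#include "lwip/api.h"

/* Join a multicast group on a UDP netconn. */
static err_t example_join_group(struct netconn *udp_conn)
{
  ip_addr_t group;
  if (!ipaddr_aton("239.0.0.1", &group)) {
    return ERR_ARG;
  }
  /* NULL interface address is mapped to IP4_ADDR_ANY by the function above */
  return netconn_join_leave_group(udp_conn, &group, NULL, NETCONN_JOIN);
}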
+#if LWIP_DNS
+/**
+ * @ingroup netconn_common
+ * Execute a DNS query; only one IP address is returned
+ *
+ * @param name a string representation of the DNS host name to query
+ * @param addr a preallocated ip_addr_t where to store the resolved IP address
+ * @param dns_addrtype IP address type (IPv4 / IPv6)
+ * @return ERR_OK: resolving succeeded
+ * ERR_MEM: memory error, try again later
+ * ERR_ARG: dns client not initialized or invalid hostname
+ * ERR_VAL: dns server response was invalid
+ */
+#if LWIP_IPV4 && LWIP_IPV6
+err_t
+netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype)
+#else
+err_t
+netconn_gethostbyname(const char *name, ip_addr_t *addr)
+#endif
+{
+ API_VAR_DECLARE(struct dns_api_msg, msg);
+#if !LWIP_MPU_COMPATIBLE
+ sys_sem_t sem;
+#endif /* LWIP_MPU_COMPATIBLE */
+ err_t err;
+ err_t cberr;
+
+ LWIP_ERROR("netconn_gethostbyname: invalid name", (name != NULL), return ERR_ARG;);
+ LWIP_ERROR("netconn_gethostbyname: invalid addr", (addr != NULL), return ERR_ARG;);
+#if LWIP_MPU_COMPATIBLE
+ if (strlen(name) >= DNS_MAX_NAME_LENGTH) {
+ return ERR_ARG;
+ }
+#endif
+
+ API_VAR_ALLOC(struct dns_api_msg, MEMP_DNS_API_MSG, msg, ERR_MEM);
+#if LWIP_MPU_COMPATIBLE
+ strncpy(API_VAR_REF(msg).name, name, DNS_MAX_NAME_LENGTH-1);
+ API_VAR_REF(msg).name[DNS_MAX_NAME_LENGTH-1] = 0;
+#else /* LWIP_MPU_COMPATIBLE */
+ msg.err = &err;
+ msg.sem = &sem;
+ API_VAR_REF(msg).addr = API_VAR_REF(addr);
+ API_VAR_REF(msg).name = name;
+#endif /* LWIP_MPU_COMPATIBLE */
+#if LWIP_IPV4 && LWIP_IPV6
+ API_VAR_REF(msg).dns_addrtype = dns_addrtype;
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+#if LWIP_NETCONN_SEM_PER_THREAD
+ API_VAR_REF(msg).sem = LWIP_NETCONN_THREAD_SEM_GET();
+#else /* LWIP_NETCONN_SEM_PER_THREAD*/
+ err = sys_sem_new(API_EXPR_REF(API_VAR_REF(msg).sem), 0);
+ if (err != ERR_OK) {
+ API_VAR_FREE(MEMP_DNS_API_MSG, msg);
+ return err;
+ }
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+
+ cberr = tcpip_callback(lwip_netconn_do_gethostbyname, &API_VAR_REF(msg));
+ if (cberr != ERR_OK) {
+#if !LWIP_NETCONN_SEM_PER_THREAD
+ sys_sem_free(API_EXPR_REF(API_VAR_REF(msg).sem));
+#endif /* !LWIP_NETCONN_SEM_PER_THREAD */
+ API_VAR_FREE(MEMP_DNS_API_MSG, msg);
+ return cberr;
+ }
+ sys_sem_wait(API_EXPR_REF_SEM(API_VAR_REF(msg).sem));
+#if !LWIP_NETCONN_SEM_PER_THREAD
+ sys_sem_free(API_EXPR_REF(API_VAR_REF(msg).sem));
+#endif /* !LWIP_NETCONN_SEM_PER_THREAD */
+
+#if LWIP_MPU_COMPATIBLE
+ *addr = msg->addr;
+ err = msg->err;
+#endif /* LWIP_MPU_COMPATIBLE */
+
+ API_VAR_FREE(MEMP_DNS_API_MSG, msg);
+ return err;
+}
+#endif /* LWIP_DNS*/
+
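Sketch of a blocking lookup through netconn_gethostbyname(); it assumes LWIP_DNS is enabled and a DNS server is configured, and the host name is an example.

#include "lwip/api.h"

/* Resolve a host name to a single IP address (blocks until done). */
static err_t example_resolve(ip_addr_t *result)
{
  return netconn_gethostbyname("example.org", result);
}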
+#if LWIP_NETCONN_SEM_PER_THREAD
+void
+netconn_thread_init(void)
+{
+ sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET();
+ if ((sem == NULL) || !sys_sem_valid(sem)) {
+ /* call alloc only once */
+ LWIP_NETCONN_THREAD_SEM_ALLOC();
+ LWIP_ASSERT("LWIP_NETCONN_THREAD_SEM_ALLOC() failed", sys_sem_valid(LWIP_NETCONN_THREAD_SEM_GET()));
+ }
+}
+
+void
+netconn_thread_cleanup(void)
+{
+ sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET();
+ if ((sem != NULL) && sys_sem_valid(sem)) {
+ /* call free only once */
+ LWIP_NETCONN_THREAD_SEM_FREE();
+ }
+}
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+
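Sketch of the intended per-thread semaphore usage when LWIP_NETCONN_SEM_PER_THREAD is enabled; the thread name, stack size and priority in the comment are port-specific placeholders.

#include "lwip/api.h"
#include "lwip/sys.h"

/* Thread entry point: allocate the per-thread semaphore once, release it
   before the thread exits. */
static void example_worker(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  netconn_thread_init();
  /* ... use the netconn API from this thread ... */
  netconn_thread_cleanup();
}

/* created e.g. with: sys_thread_new("worker", example_worker, NULL, 1024, 3); */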
+#endif /* LWIP_NETCONN */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/api_msg.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/api_msg.c
new file mode 100644
index 0000000..dd99c1e
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/api_msg.c
@@ -0,0 +1,1947 @@
+/**
+ * @file
+ * Sequential API Internal module
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/priv/api_msg.h"
+
+#include "lwip/ip.h"
+#include "lwip/ip_addr.h"
+#include "lwip/udp.h"
+#include "lwip/tcp.h"
+#include "lwip/raw.h"
+
+#include "lwip/memp.h"
+#include "lwip/igmp.h"
+#include "lwip/dns.h"
+#include "lwip/mld6.h"
+#include "lwip/priv/tcpip_priv.h"
+
+#include <string.h>
+
+/* netconns are polled once per second (e.g. continue write on memory error) */
+#define NETCONN_TCP_POLL_INTERVAL 2
+
+#define SET_NONBLOCKING_CONNECT(conn, val) do { if (val) { \
+ (conn)->flags |= NETCONN_FLAG_IN_NONBLOCKING_CONNECT; \
+} else { \
+ (conn)->flags &= ~ NETCONN_FLAG_IN_NONBLOCKING_CONNECT; }} while(0)
+#define IN_NONBLOCKING_CONNECT(conn) (((conn)->flags & NETCONN_FLAG_IN_NONBLOCKING_CONNECT) != 0)
+
+/* forward declarations */
+#if LWIP_TCP
+#if LWIP_TCPIP_CORE_LOCKING
+#define WRITE_DELAYED , 1
+#define WRITE_DELAYED_PARAM , u8_t delayed
+#else /* LWIP_TCPIP_CORE_LOCKING */
+#define WRITE_DELAYED
+#define WRITE_DELAYED_PARAM
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+static err_t lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM);
+static err_t lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM);
+#endif
+
+#if LWIP_TCPIP_CORE_LOCKING
+#define TCPIP_APIMSG_ACK(m) NETCONN_SET_SAFE_ERR((m)->conn, (m)->err)
+#else /* LWIP_TCPIP_CORE_LOCKING */
+#define TCPIP_APIMSG_ACK(m) do { NETCONN_SET_SAFE_ERR((m)->conn, (m)->err); sys_sem_signal(LWIP_API_MSG_SEM(m)); } while(0)
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+
+#if LWIP_TCP
+u8_t netconn_aborted;
+#endif /* LWIP_TCP */
+
+#if LWIP_RAW
+/**
+ * Receive callback function for RAW netconns.
+ * Doesn't 'eat' the packet, only copies it and sends it to
+ * conn->recvmbox
+ *
+ * @see raw.h (struct raw_pcb.recv) for parameters and return value
+ */
+static u8_t
+recv_raw(void *arg, struct raw_pcb *pcb, struct pbuf *p,
+ const ip_addr_t *addr)
+{
+ struct pbuf *q;
+ struct netbuf *buf;
+ struct netconn *conn;
+
+ LWIP_UNUSED_ARG(addr);
+ conn = (struct netconn *)arg;
+
+ if ((conn != NULL) && sys_mbox_valid(&conn->recvmbox)) {
+#if LWIP_SO_RCVBUF
+ int recv_avail;
+ SYS_ARCH_GET(conn->recv_avail, recv_avail);
+ if ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize) {
+ return 0;
+ }
+#endif /* LWIP_SO_RCVBUF */
+ /* copy the whole packet into new pbufs */
+ q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
+ if (q != NULL) {
+ if (pbuf_copy(q, p) != ERR_OK) {
+ pbuf_free(q);
+ q = NULL;
+ }
+ }
+
+ if (q != NULL) {
+ u16_t len;
+ buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
+ if (buf == NULL) {
+ pbuf_free(q);
+ return 0;
+ }
+
+ buf->p = q;
+ buf->ptr = q;
+ ip_addr_copy(buf->addr, *ip_current_src_addr());
+ buf->port = pcb->protocol;
+
+ len = q->tot_len;
+ if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) {
+ netbuf_delete(buf);
+ return 0;
+ } else {
+#if LWIP_SO_RCVBUF
+ SYS_ARCH_INC(conn->recv_avail, len);
+#endif /* LWIP_SO_RCVBUF */
+ /* Register event with callback */
+ API_EVENT(conn, NETCONN_EVT_RCVPLUS, len);
+ }
+ }
+ }
+
+ return 0; /* do not eat the packet */
+}
+#endif /* LWIP_RAW*/
+
+#if LWIP_UDP
+/**
+ * Receive callback function for UDP netconns.
+ * Posts the packet to conn->recvmbox or deletes it on memory error.
+ *
+ * @see udp.h (struct udp_pcb.recv) for parameters
+ */
+static void
+recv_udp(void *arg, struct udp_pcb *pcb, struct pbuf *p,
+ const ip_addr_t *addr, u16_t port)
+{
+ struct netbuf *buf;
+ struct netconn *conn;
+ u16_t len;
+#if LWIP_SO_RCVBUF
+ int recv_avail;
+#endif /* LWIP_SO_RCVBUF */
+
+ LWIP_UNUSED_ARG(pcb); /* only used for asserts... */
+ LWIP_ASSERT("recv_udp must have a pcb argument", pcb != NULL);
+ LWIP_ASSERT("recv_udp must have an argument", arg != NULL);
+ conn = (struct netconn *)arg;
+ LWIP_ASSERT("recv_udp: recv for wrong pcb!", conn->pcb.udp == pcb);
+
+#if LWIP_SO_RCVBUF
+ SYS_ARCH_GET(conn->recv_avail, recv_avail);
+ if ((conn == NULL) || !sys_mbox_valid(&conn->recvmbox) ||
+ ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize)) {
+#else /* LWIP_SO_RCVBUF */
+ if ((conn == NULL) || !sys_mbox_valid(&conn->recvmbox)) {
+#endif /* LWIP_SO_RCVBUF */
+ pbuf_free(p);
+ return;
+ }
+
+ buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
+ if (buf == NULL) {
+ pbuf_free(p);
+ return;
+ } else {
+ buf->p = p;
+ buf->ptr = p;
+ ip_addr_set(&buf->addr, addr);
+ buf->port = port;
+#if LWIP_NETBUF_RECVINFO
+ {
+ /* get the UDP header - always in the first pbuf, ensured by udp_input */
+ const struct udp_hdr* udphdr = (const struct udp_hdr*)ip_next_header_ptr();
+#if LWIP_CHECKSUM_ON_COPY
+ buf->flags = NETBUF_FLAG_DESTADDR;
+#endif /* LWIP_CHECKSUM_ON_COPY */
+ ip_addr_set(&buf->toaddr, ip_current_dest_addr());
+ buf->toport_chksum = udphdr->dest;
+ }
+#endif /* LWIP_NETBUF_RECVINFO */
+ }
+
+ len = p->tot_len;
+ if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) {
+ netbuf_delete(buf);
+ return;
+ } else {
+#if LWIP_SO_RCVBUF
+ SYS_ARCH_INC(conn->recv_avail, len);
+#endif /* LWIP_SO_RCVBUF */
+ /* Register event with callback */
+ API_EVENT(conn, NETCONN_EVT_RCVPLUS, len);
+ }
+}
+#endif /* LWIP_UDP */
+
+#if LWIP_TCP
+/**
+ * Receive callback function for TCP netconns.
+ * Posts the packet to conn->recvmbox, but doesn't delete it on errors.
+ *
+ * @see tcp.h (struct tcp_pcb.recv) for parameters and return value
+ */
+static err_t
+recv_tcp(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err)
+{
+ struct netconn *conn;
+ u16_t len;
+
+ LWIP_UNUSED_ARG(pcb);
+ LWIP_ASSERT("recv_tcp must have a pcb argument", pcb != NULL);
+ LWIP_ASSERT("recv_tcp must have an argument", arg != NULL);
+ conn = (struct netconn *)arg;
+
+ if (conn == NULL) {
+ return ERR_VAL;
+ }
+ LWIP_ASSERT("recv_tcp: recv for wrong pcb!", conn->pcb.tcp == pcb);
+
+ if (!sys_mbox_valid(&conn->recvmbox)) {
+ /* recvmbox already deleted */
+ if (p != NULL) {
+ tcp_recved(pcb, p->tot_len);
+ pbuf_free(p);
+ }
+ return ERR_OK;
+ }
+ /* Unlike for UDP or RAW pcbs, don't check for available space
+ using recv_avail since that could break the connection
+ (data is already ACKed) */
+
+ /* don't overwrite fatal errors! */
+ if (err != ERR_OK) {
+ NETCONN_SET_SAFE_ERR(conn, err);
+ }
+
+ if (p != NULL) {
+ len = p->tot_len;
+ } else {
+ len = 0;
+ }
+
+ if (sys_mbox_trypost(&conn->recvmbox, p) != ERR_OK) {
+ /* don't deallocate p: it is presented to us later again from tcp_fasttmr! */
+ return ERR_MEM;
+ } else {
+#if LWIP_SO_RCVBUF
+ SYS_ARCH_INC(conn->recv_avail, len);
+#endif /* LWIP_SO_RCVBUF */
+ /* Register event with callback */
+ API_EVENT(conn, NETCONN_EVT_RCVPLUS, len);
+ }
+
+ return ERR_OK;
+}
+
+/**
+ * Poll callback function for TCP netconns.
+ * Wakes up an application thread that waits for a connection to close
+ * or data to be sent. The application thread then takes the
+ * appropriate action to go on.
+ *
+ * Signals the conn->sem.
+ * netconn_close waits for conn->sem if closing failed.
+ *
+ * @see tcp.h (struct tcp_pcb.poll) for parameters and return value
+ */
+static err_t
+poll_tcp(void *arg, struct tcp_pcb *pcb)
+{
+ struct netconn *conn = (struct netconn *)arg;
+
+ LWIP_UNUSED_ARG(pcb);
+ LWIP_ASSERT("conn != NULL", (conn != NULL));
+
+ if (conn->state == NETCONN_WRITE) {
+ lwip_netconn_do_writemore(conn WRITE_DELAYED);
+ } else if (conn->state == NETCONN_CLOSE) {
+#if !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER
+ if (conn->current_msg && conn->current_msg->msg.sd.polls_left) {
+ conn->current_msg->msg.sd.polls_left--;
+ }
+#endif /* !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER */
+ lwip_netconn_do_close_internal(conn WRITE_DELAYED);
+ }
+ /* @todo: implement connect timeout here? */
+
+ /* Did a nonblocking write fail before? Then check available write-space. */
+ if (conn->flags & NETCONN_FLAG_CHECK_WRITESPACE) {
+ /* If the queued byte- or pbuf-count drops below the configured low-water limit,
+ let select mark this pcb as writable again. */
+ if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) &&
+ (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) {
+ conn->flags &= ~NETCONN_FLAG_CHECK_WRITESPACE;
+ API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0);
+ }
+ }
+
+ return ERR_OK;
+}
+
+/**
+ * Sent callback function for TCP netconns.
+ * Signals the conn->sem and calls API_EVENT.
+ * netconn_write waits for conn->sem if send buffer is low.
+ *
+ * @see tcp.h (struct tcp_pcb.sent) for parameters and return value
+ */
+static err_t
+sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len)
+{
+ struct netconn *conn = (struct netconn *)arg;
+
+ LWIP_UNUSED_ARG(pcb);
+ LWIP_ASSERT("conn != NULL", (conn != NULL));
+
+ if (conn) {
+ if (conn->state == NETCONN_WRITE) {
+ lwip_netconn_do_writemore(conn WRITE_DELAYED);
+ } else if (conn->state == NETCONN_CLOSE) {
+ lwip_netconn_do_close_internal(conn WRITE_DELAYED);
+ }
+
+ /* If the queued byte- or pbuf-count drops below the configured low-water limit,
+ let select mark this pcb as writable again. */
+ if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) &&
+ (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) {
+ conn->flags &= ~NETCONN_FLAG_CHECK_WRITESPACE;
+ API_EVENT(conn, NETCONN_EVT_SENDPLUS, len);
+ }
+ }
+
+ return ERR_OK;
+}
+
+/**
+ * Error callback function for TCP netconns.
+ * Signals conn->sem, posts to all conn mboxes and calls API_EVENT.
+ * The application thread has then to decide what to do.
+ *
+ * @see tcp.h (struct tcp_pcb.err) for parameters
+ */
+static void
+err_tcp(void *arg, err_t err)
+{
+ struct netconn *conn;
+ enum netconn_state old_state;
+
+ conn = (struct netconn *)arg;
+ LWIP_ASSERT("conn != NULL", (conn != NULL));
+
+ conn->pcb.tcp = NULL;
+
+ /* reset conn->state now before waking up other threads */
+ old_state = conn->state;
+ conn->state = NETCONN_NONE;
+
+ if (old_state == NETCONN_CLOSE) {
+ /* RST during close: let close return success & dealloc the netconn */
+ err = ERR_OK;
+ NETCONN_SET_SAFE_ERR(conn, ERR_OK);
+ } else {
+ /* no check since this is always fatal! */
+ SYS_ARCH_SET(conn->last_err, err);
+ }
+
+ /* @todo: the type of NETCONN_EVT created should depend on 'old_state' */
+
+ /* Notify the user layer about a connection error. Used to signal select. */
+ API_EVENT(conn, NETCONN_EVT_ERROR, 0);
+ /* Try to release selects pending on 'read' or 'write', too.
+ They will get an error if they actually try to read or write. */
+ API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
+ API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0);
+
+ /* pass NULL-message to recvmbox to wake up pending recv */
+ if (sys_mbox_valid(&conn->recvmbox)) {
+ /* use trypost to prevent deadlock */
+ sys_mbox_trypost(&conn->recvmbox, NULL);
+ }
+ /* pass NULL-message to acceptmbox to wake up pending accept */
+ if (sys_mbox_valid(&conn->acceptmbox)) {
+ /* use trypost to prevent deadlock */
+ sys_mbox_trypost(&conn->acceptmbox, NULL);
+ }
+
+ if ((old_state == NETCONN_WRITE) || (old_state == NETCONN_CLOSE) ||
+ (old_state == NETCONN_CONNECT)) {
+ /* calling lwip_netconn_do_writemore/lwip_netconn_do_close_internal is not necessary
+ since the pcb has already been deleted! */
+ int was_nonblocking_connect = IN_NONBLOCKING_CONNECT(conn);
+ SET_NONBLOCKING_CONNECT(conn, 0);
+
+ if (!was_nonblocking_connect) {
+ sys_sem_t* op_completed_sem;
+ /* set error return code */
+ LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL);
+ conn->current_msg->err = err;
+ op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg);
+ LWIP_ASSERT("inavlid op_completed_sem", sys_sem_valid(op_completed_sem));
+ conn->current_msg = NULL;
+ /* wake up the waiting task */
+ NETCONN_SET_SAFE_ERR(conn, err);
+ sys_sem_signal(op_completed_sem);
+ }
+ } else {
+ LWIP_ASSERT("conn->current_msg == NULL", conn->current_msg == NULL);
+ }
+}
+
+/**
+ * Setup a tcp_pcb with the correct callback function pointers
+ * and their arguments.
+ *
+ * @param conn the TCP netconn to setup
+ */
+static void
+setup_tcp(struct netconn *conn)
+{
+ struct tcp_pcb *pcb;
+
+ pcb = conn->pcb.tcp;
+ tcp_arg(pcb, conn);
+ tcp_recv(pcb, recv_tcp);
+ tcp_sent(pcb, sent_tcp);
+ tcp_poll(pcb, poll_tcp, NETCONN_TCP_POLL_INTERVAL);
+ tcp_err(pcb, err_tcp);
+}
+
+/**
+ * Accept callback function for TCP netconns.
+ * Allocates a new netconn and posts that to conn->acceptmbox.
+ *
+ * @see tcp.h (struct tcp_pcb_listen.accept) for parameters and return value
+ */
+static err_t
+accept_function(void *arg, struct tcp_pcb *newpcb, err_t err)
+{
+ struct netconn *newconn;
+ struct netconn *conn = (struct netconn *)arg;
+
+ LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: newpcb->state: %s\n", tcp_debug_state_str(newpcb->state)));
+
+ if (conn == NULL) {
+ return ERR_VAL;
+ }
+ if (!sys_mbox_valid(&conn->acceptmbox)) {
+ LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: acceptmbox already deleted\n"));
+ return ERR_VAL;
+ }
+
+ if (newpcb == NULL) {
+ /* out-of-pcbs during connect: pass on this error to the application */
+ if (sys_mbox_trypost(&conn->acceptmbox, &netconn_aborted) == ERR_OK) {
+ /* Register event with callback */
+ API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
+ }
+ return ERR_VAL;
+ }
+
+ /* We have to set the callback here even though
+ * the new socket is unknown. newconn->socket is marked as -1. */
+ newconn = netconn_alloc(conn->type, conn->callback);
+ if (newconn == NULL) {
+ /* out of netconns: pass on this error to the application */
+ if (sys_mbox_trypost(&conn->acceptmbox, &netconn_aborted) == ERR_OK) {
+ /* Register event with callback */
+ API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
+ }
+ return ERR_MEM;
+ }
+ newconn->pcb.tcp = newpcb;
+ setup_tcp(newconn);
+ /* no protection: when creating the pcb, the netconn is not yet known
+ to the application thread */
+ newconn->last_err = err;
+
+ /* handle backlog counter */
+ tcp_backlog_delayed(newpcb);
+
+ if (sys_mbox_trypost(&conn->acceptmbox, newconn) != ERR_OK) {
+ /* When returning != ERR_OK, the pcb is aborted in tcp_process(),
+ so do nothing here! */
+ /* remove all references to this netconn from the pcb */
+ struct tcp_pcb* pcb = newconn->pcb.tcp;
+ tcp_arg(pcb, NULL);
+ tcp_recv(pcb, NULL);
+ tcp_sent(pcb, NULL);
+ tcp_poll(pcb, NULL, 0);
+ tcp_err(pcb, NULL);
+ /* remove the reference to the pcb from this netconn */
+ newconn->pcb.tcp = NULL;
+ /* no need to drain since we know the recvmbox is empty. */
+ sys_mbox_free(&newconn->recvmbox);
+ sys_mbox_set_invalid(&newconn->recvmbox);
+ netconn_free(newconn);
+ return ERR_MEM;
+ } else {
+ /* Register event with callback */
+ API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
+ }
+
+ return ERR_OK;
+}
+#endif /* LWIP_TCP */
+
+/**
+ * Create a new pcb of a specific type.
+ * Called from lwip_netconn_do_newconn().
+ *
+ * @param msg the api_msg_msg describing the connection type
+ */
+static void
+pcb_new(struct api_msg *msg)
+{
+ enum lwip_ip_addr_type iptype = IPADDR_TYPE_V4;
+
+ LWIP_ASSERT("pcb_new: pcb already allocated", msg->conn->pcb.tcp == NULL);
+
+#if LWIP_IPV6 && LWIP_IPV4
+ /* IPv6: Dual-stack by default, unless netconn_set_ipv6only() is called */
+ if(NETCONNTYPE_ISIPV6(netconn_type(msg->conn))) {
+ iptype = IPADDR_TYPE_ANY;
+ }
+#endif
+
+ /* Allocate a PCB for this connection */
+ switch(NETCONNTYPE_GROUP(msg->conn->type)) {
+#if LWIP_RAW
+ case NETCONN_RAW:
+ msg->conn->pcb.raw = raw_new_ip_type(iptype, msg->msg.n.proto);
+ if (msg->conn->pcb.raw != NULL) {
+#if LWIP_IPV6
+ /* ICMPv6 packets should always have checksum calculated by the stack as per RFC 3542 chapter 3.1 */
+ if (NETCONNTYPE_ISIPV6(msg->conn->type) && msg->conn->pcb.raw->protocol == IP6_NEXTH_ICMP6) {
+ msg->conn->pcb.raw->chksum_reqd = 1;
+ msg->conn->pcb.raw->chksum_offset = 2;
+ }
+#endif /* LWIP_IPV6 */
+ raw_recv(msg->conn->pcb.raw, recv_raw, msg->conn);
+ }
+ break;
+#endif /* LWIP_RAW */
+#if LWIP_UDP
+ case NETCONN_UDP:
+ msg->conn->pcb.udp = udp_new_ip_type(iptype);
+ if (msg->conn->pcb.udp != NULL) {
+#if LWIP_UDPLITE
+ if (NETCONNTYPE_ISUDPLITE(msg->conn->type)) {
+ udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_UDPLITE);
+ }
+#endif /* LWIP_UDPLITE */
+ if (NETCONNTYPE_ISUDPNOCHKSUM(msg->conn->type)) {
+ udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
+ }
+ udp_recv(msg->conn->pcb.udp, recv_udp, msg->conn);
+ }
+ break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+ case NETCONN_TCP:
+ msg->conn->pcb.tcp = tcp_new_ip_type(iptype);
+ if (msg->conn->pcb.tcp != NULL) {
+ setup_tcp(msg->conn);
+ }
+ break;
+#endif /* LWIP_TCP */
+ default:
+ /* Unsupported netconn type, e.g. protocol disabled */
+ msg->err = ERR_VAL;
+ return;
+ }
+ if (msg->conn->pcb.ip == NULL) {
+ msg->err = ERR_MEM;
+ }
+}
+
+/**
+ * Create a new pcb of a specific type inside a netconn.
+ * Called from netconn_new_with_proto_and_callback.
+ *
+ * @param m the api_msg_msg describing the connection type
+ */
+void
+lwip_netconn_do_newconn(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ msg->err = ERR_OK;
+ if (msg->conn->pcb.tcp == NULL) {
+ pcb_new(msg);
+ }
+ /* Else? This "new" connection already has a PCB allocated. */
+ /* Is this an error condition? Should it be deleted? */
+ /* We currently are just happy and return. */
+
+ TCPIP_APIMSG_ACK(msg);
+}
+
+/**
+ * Create a new netconn (of a specific type) that has a callback function.
+ * The corresponding pcb is NOT created!
+ *
+ * @param t the type of 'connection' to create (@see enum netconn_type)
+ * @param callback a function to call on status changes (RX available, TX'ed)
+ * @return a newly allocated struct netconn or
+ * NULL on memory error
+ */
+struct netconn*
+netconn_alloc(enum netconn_type t, netconn_callback callback)
+{
+ struct netconn *conn;
+ int size;
+
+ conn = (struct netconn *)memp_malloc(MEMP_NETCONN);
+ if (conn == NULL) {
+ return NULL;
+ }
+
+ conn->last_err = ERR_OK;
+ conn->type = t;
+ conn->pcb.tcp = NULL;
+
+ /* If all sizes are the same, every compiler should optimize this switch to nothing */
+ switch(NETCONNTYPE_GROUP(t)) {
+#if LWIP_RAW
+ case NETCONN_RAW:
+ size = DEFAULT_RAW_RECVMBOX_SIZE;
+ break;
+#endif /* LWIP_RAW */
+#if LWIP_UDP
+ case NETCONN_UDP:
+ size = DEFAULT_UDP_RECVMBOX_SIZE;
+ break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+ case NETCONN_TCP:
+ size = DEFAULT_TCP_RECVMBOX_SIZE;
+ break;
+#endif /* LWIP_TCP */
+ default:
+ LWIP_ASSERT("netconn_alloc: undefined netconn_type", 0);
+ goto free_and_return;
+ }
+
+ if (sys_mbox_new(&conn->recvmbox, size) != ERR_OK) {
+ goto free_and_return;
+ }
+#if !LWIP_NETCONN_SEM_PER_THREAD
+ if (sys_sem_new(&conn->op_completed, 0) != ERR_OK) {
+ sys_mbox_free(&conn->recvmbox);
+ goto free_and_return;
+ }
+#endif
+
+#if LWIP_TCP
+ sys_mbox_set_invalid(&conn->acceptmbox);
+#endif
+ conn->state = NETCONN_NONE;
+#if LWIP_SOCKET
+ /* initialize socket to -1 since 0 is a valid socket */
+ conn->socket = -1;
+#endif /* LWIP_SOCKET */
+ conn->callback = callback;
+#if LWIP_TCP
+ conn->current_msg = NULL;
+ conn->write_offset = 0;
+#endif /* LWIP_TCP */
+#if LWIP_SO_SNDTIMEO
+ conn->send_timeout = 0;
+#endif /* LWIP_SO_SNDTIMEO */
+#if LWIP_SO_RCVTIMEO
+ conn->recv_timeout = 0;
+#endif /* LWIP_SO_RCVTIMEO */
+#if LWIP_SO_RCVBUF
+ conn->recv_bufsize = RECV_BUFSIZE_DEFAULT;
+ conn->recv_avail = 0;
+#endif /* LWIP_SO_RCVBUF */
+#if LWIP_SO_LINGER
+ conn->linger = -1;
+#endif /* LWIP_SO_LINGER */
+ conn->flags = 0;
+ return conn;
+free_and_return:
+ memp_free(MEMP_NETCONN, conn);
+ return NULL;
+}
+
+/**
+ * Delete a netconn and all its resources.
+ * The pcb is NOT freed (since we might not be in the right thread context to do this).
+ *
+ * @param conn the netconn to free
+ */
+void
+netconn_free(struct netconn *conn)
+{
+ LWIP_ASSERT("PCB must be deallocated outside this function", conn->pcb.tcp == NULL);
+ LWIP_ASSERT("recvmbox must be deallocated before calling this function",
+ !sys_mbox_valid(&conn->recvmbox));
+#if LWIP_TCP
+ LWIP_ASSERT("acceptmbox must be deallocated before calling this function",
+ !sys_mbox_valid(&conn->acceptmbox));
+#endif /* LWIP_TCP */
+
+#if !LWIP_NETCONN_SEM_PER_THREAD
+ sys_sem_free(&conn->op_completed);
+ sys_sem_set_invalid(&conn->op_completed);
+#endif
+
+ memp_free(MEMP_NETCONN, conn);
+}
+
+/**
+ * Delete recvmbox and acceptmbox of a netconn and free the left-over data in
+ * these mboxes.
+ *
+ * @param conn the netconn whose mboxes are drained and freed
+ */
+static void
+netconn_drain(struct netconn *conn)
+{
+ void *mem;
+#if LWIP_TCP
+ struct pbuf *p;
+#endif /* LWIP_TCP */
+
+ /* This runs in tcpip_thread, so we don't need to lock against rx packets */
+
+ /* Delete and drain the recvmbox. */
+ if (sys_mbox_valid(&conn->recvmbox)) {
+ while (sys_mbox_tryfetch(&conn->recvmbox, &mem) != SYS_MBOX_EMPTY) {
+#if LWIP_TCP
+ if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) {
+ if (mem != NULL) {
+ p = (struct pbuf*)mem;
+ /* pcb might be set to NULL already by err_tcp() */
+ if (conn->pcb.tcp != NULL) {
+ tcp_recved(conn->pcb.tcp, p->tot_len);
+ }
+ pbuf_free(p);
+ }
+ } else
+#endif /* LWIP_TCP */
+ {
+ netbuf_delete((struct netbuf *)mem);
+ }
+ }
+ sys_mbox_free(&conn->recvmbox);
+ sys_mbox_set_invalid(&conn->recvmbox);
+ }
+
+ /* Delete and drain the acceptmbox. */
+#if LWIP_TCP
+ if (sys_mbox_valid(&conn->acceptmbox)) {
+ while (sys_mbox_tryfetch(&conn->acceptmbox, &mem) != SYS_MBOX_EMPTY) {
+ if (mem != &netconn_aborted) {
+ struct netconn *newconn = (struct netconn *)mem;
+ /* Only tcp pcbs have an acceptmbox, so no need to check conn->type */
+ /* pcb might be set to NULL already by err_tcp() */
+ /* drain recvmbox */
+ netconn_drain(newconn);
+ if (newconn->pcb.tcp != NULL) {
+ tcp_abort(newconn->pcb.tcp);
+ newconn->pcb.tcp = NULL;
+ }
+ netconn_free(newconn);
+ }
+ }
+ sys_mbox_free(&conn->acceptmbox);
+ sys_mbox_set_invalid(&conn->acceptmbox);
+ }
+#endif /* LWIP_TCP */
+}
+
+#if LWIP_TCP
+/**
+ * Internal helper function to close a TCP netconn: since this sometimes
+ * doesn't work at the first attempt, this function is called from multiple
+ * places.
+ *
+ * @param conn the TCP netconn to close
+ */
+static err_t
+lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM)
+{
+ err_t err;
+ u8_t shut, shut_rx, shut_tx, close;
+ u8_t close_finished = 0;
+ struct tcp_pcb* tpcb;
+#if LWIP_SO_LINGER
+ u8_t linger_wait_required = 0;
+#endif /* LWIP_SO_LINGER */
+
+ LWIP_ASSERT("invalid conn", (conn != NULL));
+ LWIP_ASSERT("this is for tcp netconns only", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP));
+ LWIP_ASSERT("conn must be in state NETCONN_CLOSE", (conn->state == NETCONN_CLOSE));
+ LWIP_ASSERT("pcb already closed", (conn->pcb.tcp != NULL));
+ LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL);
+
+ tpcb = conn->pcb.tcp;
+ shut = conn->current_msg->msg.sd.shut;
+ shut_rx = shut & NETCONN_SHUT_RD;
+ shut_tx = shut & NETCONN_SHUT_WR;
+ /* shutting down both ends is the same as closing
+ (also if RD or WR side was shut down before already) */
+ if (shut == NETCONN_SHUT_RDWR) {
+ close = 1;
+ } else if (shut_rx &&
+ ((tpcb->state == FIN_WAIT_1) ||
+ (tpcb->state == FIN_WAIT_2) ||
+ (tpcb->state == CLOSING))) {
+ close = 1;
+ } else if (shut_tx && ((tpcb->flags & TF_RXCLOSED) != 0)) {
+ close = 1;
+ } else {
+ close = 0;
+ }
+
+ /* Set back some callback pointers */
+ if (close) {
+ tcp_arg(tpcb, NULL);
+ }
+ if (tpcb->state == LISTEN) {
+ tcp_accept(tpcb, NULL);
+ } else {
+ /* some callbacks have to be reset if tcp_close is not successful */
+ if (shut_rx) {
+ tcp_recv(tpcb, NULL);
+ tcp_accept(tpcb, NULL);
+ }
+ if (shut_tx) {
+ tcp_sent(tpcb, NULL);
+ }
+ if (close) {
+ tcp_poll(tpcb, NULL, 0);
+ tcp_err(tpcb, NULL);
+ }
+ }
+ /* Try to close the connection */
+ if (close) {
+#if LWIP_SO_LINGER
+    /* check linger possibilities before calling tcp_close */
+ err = ERR_OK;
+ /* linger enabled/required at all? (i.e. is there untransmitted data left?) */
+ if ((conn->linger >= 0) && (conn->pcb.tcp->unsent || conn->pcb.tcp->unacked)) {
+ if ((conn->linger == 0)) {
+ /* data left but linger prevents waiting */
+ tcp_abort(tpcb);
+ tpcb = NULL;
+ } else if (conn->linger > 0) {
+ /* data left and linger says we should wait */
+ if (netconn_is_nonblocking(conn)) {
+ /* data left on a nonblocking netconn -> cannot linger */
+ err = ERR_WOULDBLOCK;
+ } else if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >=
+ (conn->linger * 1000)) {
+ /* data left but linger timeout has expired (this happens on further
+             calls to this function through poll_tcp) */
+ tcp_abort(tpcb);
+ tpcb = NULL;
+ } else {
+ /* data left -> need to wait for ACK after successful close */
+ linger_wait_required = 1;
+ }
+ }
+ }
+ if ((err == ERR_OK) && (tpcb != NULL))
+#endif /* LWIP_SO_LINGER */
+ {
+ err = tcp_close(tpcb);
+ }
+ } else {
+ err = tcp_shutdown(tpcb, shut_rx, shut_tx);
+ }
+ if (err == ERR_OK) {
+ close_finished = 1;
+#if LWIP_SO_LINGER
+ if (linger_wait_required) {
+ /* wait for ACK of all unsent/unacked data by just getting called again */
+ close_finished = 0;
+ err = ERR_INPROGRESS;
+ }
+#endif /* LWIP_SO_LINGER */
+ } else {
+ if (err == ERR_MEM) {
+ /* Closing failed because of memory shortage, try again later. Even for
+ nonblocking netconns, we have to wait since no standard socket application
+ is prepared for close failing because of resource shortage.
+ Check the timeout: this is kind of an lwip addition to the standard sockets:
+ we wait for some time when failing to allocate a segment for the FIN */
+#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER
+ s32_t close_timeout = LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT;
+#if LWIP_SO_SNDTIMEO
+ if (conn->send_timeout > 0) {
+ close_timeout = conn->send_timeout;
+ }
+#endif /* LWIP_SO_SNDTIMEO */
+#if LWIP_SO_LINGER
+ if (conn->linger >= 0) {
+ /* use linger timeout (seconds) */
+ close_timeout = conn->linger * 1000U;
+ }
+#endif
+ if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= close_timeout) {
+#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */
+ if (conn->current_msg->msg.sd.polls_left == 0) {
+#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */
+ close_finished = 1;
+ if (close) {
+ /* in this case, we want to RST the connection */
+ tcp_abort(tpcb);
+ err = ERR_OK;
+ }
+ }
+ } else {
+ /* Closing failed for a non-memory error: give up */
+ close_finished = 1;
+ }
+ }
+ if (close_finished) {
+ /* Closing done (succeeded, non-memory error, nonblocking error or timeout) */
+ sys_sem_t* op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg);
+ conn->current_msg->err = err;
+ conn->current_msg = NULL;
+ conn->state = NETCONN_NONE;
+ if (err == ERR_OK) {
+ if (close) {
+ /* Set back some callback pointers as conn is going away */
+ conn->pcb.tcp = NULL;
+ /* Trigger select() in socket layer. Make sure everybody notices activity
+ on the connection, error first! */
+ API_EVENT(conn, NETCONN_EVT_ERROR, 0);
+ }
+ if (shut_rx) {
+ API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
+ }
+ if (shut_tx) {
+ API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0);
+ }
+ }
+ NETCONN_SET_SAFE_ERR(conn, err);
+#if LWIP_TCPIP_CORE_LOCKING
+ if (delayed)
+#endif
+ {
+ /* wake up the application task */
+ sys_sem_signal(op_completed_sem);
+ }
+ return ERR_OK;
+ }
+ if (!close_finished) {
+ /* Closing failed and we want to wait: restore some of the callbacks */
+ /* Closing of listen pcb will never fail! */
+ LWIP_ASSERT("Closing a listen pcb may not fail!", (tpcb->state != LISTEN));
+ if (shut_tx) {
+ tcp_sent(tpcb, sent_tcp);
+ }
+ /* when waiting for close, set up poll interval to 500ms */
+ tcp_poll(tpcb, poll_tcp, 1);
+ tcp_err(tpcb, err_tcp);
+ tcp_arg(tpcb, conn);
+ /* don't restore recv callback: we don't want to receive any more data */
+ }
+ /* If closing didn't succeed, we get called again either
+ from poll_tcp or from sent_tcp */
+ LWIP_ASSERT("err != ERR_OK", err != ERR_OK);
+ return err;
+}
+#endif /* LWIP_TCP */
+
+/**
+ * Delete the pcb inside a netconn.
+ * Called from netconn_delete.
+ *
+ * @param m the api_msg_msg pointing to the connection
+ */
+void
+lwip_netconn_do_delconn(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ enum netconn_state state = msg->conn->state;
+ LWIP_ASSERT("netconn state error", /* this only happens for TCP netconns */
+ (state == NETCONN_NONE) || (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP));
+#if LWIP_NETCONN_FULLDUPLEX
+ /* In full duplex mode, blocking write/connect is aborted with ERR_CLSD */
+ if (state != NETCONN_NONE) {
+ if ((state == NETCONN_WRITE) ||
+ ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) {
+ /* close requested, abort running write/connect */
+ sys_sem_t* op_completed_sem;
+ LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL);
+ op_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg);
+ msg->conn->current_msg->err = ERR_CLSD;
+ msg->conn->current_msg = NULL;
+ msg->conn->write_offset = 0;
+ msg->conn->state = NETCONN_NONE;
+ NETCONN_SET_SAFE_ERR(msg->conn, ERR_CLSD);
+ sys_sem_signal(op_completed_sem);
+ }
+ }
+#else /* LWIP_NETCONN_FULLDUPLEX */
+ if (((state != NETCONN_NONE) &&
+ (state != NETCONN_LISTEN) &&
+ (state != NETCONN_CONNECT)) ||
+ ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) {
+ /* This means either a blocking write or blocking connect is running
+ (nonblocking write returns and sets state to NONE) */
+ msg->err = ERR_INPROGRESS;
+ } else
+#endif /* LWIP_NETCONN_FULLDUPLEX */
+ {
+ LWIP_ASSERT("blocking connect in progress",
+ (state != NETCONN_CONNECT) || IN_NONBLOCKING_CONNECT(msg->conn));
+ msg->err = ERR_OK;
+ /* Drain and delete mboxes */
+ netconn_drain(msg->conn);
+
+ if (msg->conn->pcb.tcp != NULL) {
+
+ switch (NETCONNTYPE_GROUP(msg->conn->type)) {
+#if LWIP_RAW
+ case NETCONN_RAW:
+ raw_remove(msg->conn->pcb.raw);
+ break;
+#endif /* LWIP_RAW */
+#if LWIP_UDP
+ case NETCONN_UDP:
+ msg->conn->pcb.udp->recv_arg = NULL;
+ udp_remove(msg->conn->pcb.udp);
+ break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+ case NETCONN_TCP:
+ LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL &&
+ msg->conn->write_offset == 0);
+ msg->conn->state = NETCONN_CLOSE;
+ msg->msg.sd.shut = NETCONN_SHUT_RDWR;
+ msg->conn->current_msg = msg;
+#if LWIP_TCPIP_CORE_LOCKING
+ if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) {
+ LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE);
+ UNLOCK_TCPIP_CORE();
+ sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0);
+ LOCK_TCPIP_CORE();
+ LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE);
+ }
+#else /* LWIP_TCPIP_CORE_LOCKING */
+ lwip_netconn_do_close_internal(msg->conn);
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+ /* API_EVENT is called inside lwip_netconn_do_close_internal, before releasing
+ the application thread, so we can return at this point! */
+ return;
+#endif /* LWIP_TCP */
+ default:
+ break;
+ }
+ msg->conn->pcb.tcp = NULL;
+ }
+ /* tcp netconns don't come here! */
+
+ /* @todo: this lets select make the socket readable and writable,
+ which is wrong! errfd instead? */
+ API_EVENT(msg->conn, NETCONN_EVT_RCVPLUS, 0);
+ API_EVENT(msg->conn, NETCONN_EVT_SENDPLUS, 0);
+ }
+ if (sys_sem_valid(LWIP_API_MSG_SEM(msg))) {
+ TCPIP_APIMSG_ACK(msg);
+ }
+}
+
+/**
+ * Bind a pcb contained in a netconn
+ * Called from netconn_bind.
+ *
+ * @param m the api_msg_msg pointing to the connection and containing
+ * the IP address and port to bind to
+ */
+void
+lwip_netconn_do_bind(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ if (ERR_IS_FATAL(msg->conn->last_err)) {
+ msg->err = msg->conn->last_err;
+ } else {
+ msg->err = ERR_VAL;
+ if (msg->conn->pcb.tcp != NULL) {
+ switch (NETCONNTYPE_GROUP(msg->conn->type)) {
+#if LWIP_RAW
+ case NETCONN_RAW:
+ msg->err = raw_bind(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr));
+ break;
+#endif /* LWIP_RAW */
+#if LWIP_UDP
+ case NETCONN_UDP:
+ msg->err = udp_bind(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port);
+ break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+ case NETCONN_TCP:
+ msg->err = tcp_bind(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port);
+ break;
+#endif /* LWIP_TCP */
+ default:
+ break;
+ }
+ }
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
+
+#if LWIP_TCP
+/**
+ * TCP callback function if a connection (opened by tcp_connect/lwip_netconn_do_connect) has
+ * been established (or reset by the remote host).
+ *
+ * @see tcp.h (struct tcp_pcb.connected) for parameters and return values
+ */
+static err_t
+lwip_netconn_do_connected(void *arg, struct tcp_pcb *pcb, err_t err)
+{
+ struct netconn *conn;
+ int was_blocking;
+ sys_sem_t* op_completed_sem = NULL;
+
+ LWIP_UNUSED_ARG(pcb);
+
+ conn = (struct netconn *)arg;
+
+ if (conn == NULL) {
+ return ERR_VAL;
+ }
+
+ LWIP_ASSERT("conn->state == NETCONN_CONNECT", conn->state == NETCONN_CONNECT);
+ LWIP_ASSERT("(conn->current_msg != NULL) || conn->in_non_blocking_connect",
+ (conn->current_msg != NULL) || IN_NONBLOCKING_CONNECT(conn));
+
+ if (conn->current_msg != NULL) {
+ conn->current_msg->err = err;
+ op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg);
+ }
+ if ((NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) && (err == ERR_OK)) {
+ setup_tcp(conn);
+ }
+ was_blocking = !IN_NONBLOCKING_CONNECT(conn);
+ SET_NONBLOCKING_CONNECT(conn, 0);
+ LWIP_ASSERT("blocking connect state error",
+ (was_blocking && op_completed_sem != NULL) ||
+ (!was_blocking && op_completed_sem == NULL));
+ conn->current_msg = NULL;
+ conn->state = NETCONN_NONE;
+ NETCONN_SET_SAFE_ERR(conn, ERR_OK);
+ API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0);
+
+ if (was_blocking) {
+ sys_sem_signal(op_completed_sem);
+ }
+ return ERR_OK;
+}
+#endif /* LWIP_TCP */
+
+/**
+ * Connect a pcb contained inside a netconn
+ * Called from netconn_connect.
+ *
+ * @param m the api_msg_msg pointing to the connection and containing
+ * the IP address and port to connect to
+ */
+void
+lwip_netconn_do_connect(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ if (msg->conn->pcb.tcp == NULL) {
+ /* This may happen when calling netconn_connect() a second time */
+ msg->err = ERR_CLSD;
+ } else {
+ switch (NETCONNTYPE_GROUP(msg->conn->type)) {
+#if LWIP_RAW
+ case NETCONN_RAW:
+ msg->err = raw_connect(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr));
+ break;
+#endif /* LWIP_RAW */
+#if LWIP_UDP
+ case NETCONN_UDP:
+ msg->err = udp_connect(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port);
+ break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+ case NETCONN_TCP:
+ /* Prevent connect while doing any other action. */
+ if (msg->conn->state == NETCONN_CONNECT) {
+ msg->err = ERR_ALREADY;
+ } else if (msg->conn->state != NETCONN_NONE) {
+ msg->err = ERR_ISCONN;
+ } else {
+ setup_tcp(msg->conn);
+ msg->err = tcp_connect(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr),
+ msg->msg.bc.port, lwip_netconn_do_connected);
+ if (msg->err == ERR_OK) {
+ u8_t non_blocking = netconn_is_nonblocking(msg->conn);
+ msg->conn->state = NETCONN_CONNECT;
+ SET_NONBLOCKING_CONNECT(msg->conn, non_blocking);
+ if (non_blocking) {
+ msg->err = ERR_INPROGRESS;
+ } else {
+ msg->conn->current_msg = msg;
+ /* sys_sem_signal() is called from lwip_netconn_do_connected (or err_tcp()),
+ when the connection is established! */
+#if LWIP_TCPIP_CORE_LOCKING
+ LWIP_ASSERT("state!", msg->conn->state == NETCONN_CONNECT);
+ UNLOCK_TCPIP_CORE();
+ sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0);
+ LOCK_TCPIP_CORE();
+ LWIP_ASSERT("state!", msg->conn->state != NETCONN_CONNECT);
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+ return;
+ }
+ }
+ }
+ break;
+#endif /* LWIP_TCP */
+ default:
+ LWIP_ERROR("Invalid netconn type", 0, do{ msg->err = ERR_VAL; }while(0));
+ break;
+ }
+ }
+ /* For all other protocols, netconn_connect() calls TCPIP_APIMSG(),
+ so use TCPIP_APIMSG_ACK() here. */
+ TCPIP_APIMSG_ACK(msg);
+}
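+
+/*
+ * Annotation (not part of upstream lwIP): the connect handler above is driven
+ * from the application side through the sequential netconn API in api_lib.c.
+ * A minimal usage sketch, assuming the peer address and port below are
+ * placeholders only:
+ *
+ *   struct netconn *conn = netconn_new(NETCONN_TCP);
+ *   if (conn != NULL) {
+ *     ip_addr_t remote;
+ *     IP_ADDR4(&remote, 192, 168, 0, 10);              // example address only
+ *     if (netconn_connect(conn, &remote, 80) == ERR_OK) {
+ *       // blocking write: lwip_netconn_do_write/_writemore run in tcpip_thread
+ *       netconn_write(conn, "hello", 5, NETCONN_COPY);
+ *       netconn_close(conn);
+ *     }
+ *     netconn_delete(conn);
+ *   }
+ */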
+
+/**
+ * Disconnect a pcb contained inside a netconn
+ * Only used for UDP netconns.
+ * Called from netconn_disconnect.
+ *
+ * @param m the api_msg_msg pointing to the connection to disconnect
+ */
+void
+lwip_netconn_do_disconnect(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+#if LWIP_UDP
+ if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) {
+ udp_disconnect(msg->conn->pcb.udp);
+ msg->err = ERR_OK;
+ } else
+#endif /* LWIP_UDP */
+ {
+ msg->err = ERR_VAL;
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
+
+#if LWIP_TCP
+/**
+ * Set a TCP pcb contained in a netconn into listen mode
+ * Called from netconn_listen.
+ *
+ * @param m the api_msg_msg pointing to the connection
+ */
+void
+lwip_netconn_do_listen(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ if (ERR_IS_FATAL(msg->conn->last_err)) {
+ msg->err = msg->conn->last_err;
+ } else {
+ msg->err = ERR_CONN;
+ if (msg->conn->pcb.tcp != NULL) {
+ if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) {
+ if (msg->conn->state == NETCONN_NONE) {
+ struct tcp_pcb* lpcb;
+ if (msg->conn->pcb.tcp->state != CLOSED) {
+ /* connection is not closed, cannot listen */
+ msg->err = ERR_VAL;
+ } else {
+ err_t err;
+ u8_t backlog;
+#if TCP_LISTEN_BACKLOG
+ backlog = msg->msg.lb.backlog;
+#else /* TCP_LISTEN_BACKLOG */
+ backlog = TCP_DEFAULT_LISTEN_BACKLOG;
+#endif /* TCP_LISTEN_BACKLOG */
+#if LWIP_IPV4 && LWIP_IPV6
+ /* "Socket API like" dual-stack support: If IP to listen to is IP6_ADDR_ANY,
+ * and NETCONN_FLAG_IPV6_V6ONLY is NOT set, use IP_ANY_TYPE to listen
+ */
+ if (ip_addr_cmp(&msg->conn->pcb.ip->local_ip, IP6_ADDR_ANY) &&
+ (netconn_get_ipv6only(msg->conn) == 0)) {
+ /* change PCB type to IPADDR_TYPE_ANY */
+ IP_SET_TYPE_VAL(msg->conn->pcb.tcp->local_ip, IPADDR_TYPE_ANY);
+ IP_SET_TYPE_VAL(msg->conn->pcb.tcp->remote_ip, IPADDR_TYPE_ANY);
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+ lpcb = tcp_listen_with_backlog_and_err(msg->conn->pcb.tcp, backlog, &err);
+
+ if (lpcb == NULL) {
+ /* in this case, the old pcb is still allocated */
+ msg->err = err;
+ } else {
+ /* delete the recvmbox and allocate the acceptmbox */
+ if (sys_mbox_valid(&msg->conn->recvmbox)) {
+ /** @todo: should we drain the recvmbox here? */
+ sys_mbox_free(&msg->conn->recvmbox);
+ sys_mbox_set_invalid(&msg->conn->recvmbox);
+ }
+ msg->err = ERR_OK;
+ if (!sys_mbox_valid(&msg->conn->acceptmbox)) {
+ msg->err = sys_mbox_new(&msg->conn->acceptmbox, DEFAULT_ACCEPTMBOX_SIZE);
+ }
+ if (msg->err == ERR_OK) {
+ msg->conn->state = NETCONN_LISTEN;
+ msg->conn->pcb.tcp = lpcb;
+ tcp_arg(msg->conn->pcb.tcp, msg->conn);
+ tcp_accept(msg->conn->pcb.tcp, accept_function);
+ } else {
+ /* since the old pcb is already deallocated, free lpcb now */
+ tcp_close(lpcb);
+ msg->conn->pcb.tcp = NULL;
+ }
+ }
+ }
+ } else if (msg->conn->state == NETCONN_LISTEN) {
+ /* already listening, allow updating of the backlog */
+ msg->err = ERR_OK;
+ tcp_backlog_set(msg->conn->pcb.tcp, msg->msg.lb.backlog);
+ }
+ } else {
+ msg->err = ERR_ARG;
+ }
+ }
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
+#endif /* LWIP_TCP */
+
+/**
+ * Send some data on a RAW or UDP pcb contained in a netconn
+ * Called from netconn_send
+ *
+ * @param m the api_msg_msg pointing to the connection
+ */
+void
+lwip_netconn_do_send(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ if (ERR_IS_FATAL(msg->conn->last_err)) {
+ msg->err = msg->conn->last_err;
+ } else {
+ msg->err = ERR_CONN;
+ if (msg->conn->pcb.tcp != NULL) {
+ switch (NETCONNTYPE_GROUP(msg->conn->type)) {
+#if LWIP_RAW
+ case NETCONN_RAW:
+ if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) {
+ msg->err = raw_send(msg->conn->pcb.raw, msg->msg.b->p);
+ } else {
+ msg->err = raw_sendto(msg->conn->pcb.raw, msg->msg.b->p, &msg->msg.b->addr);
+ }
+ break;
+#endif
+#if LWIP_UDP
+ case NETCONN_UDP:
+#if LWIP_CHECKSUM_ON_COPY
+ if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) {
+ msg->err = udp_send_chksum(msg->conn->pcb.udp, msg->msg.b->p,
+ msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum);
+ } else {
+ msg->err = udp_sendto_chksum(msg->conn->pcb.udp, msg->msg.b->p,
+ &msg->msg.b->addr, msg->msg.b->port,
+ msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum);
+ }
+#else /* LWIP_CHECKSUM_ON_COPY */
+ if (ip_addr_isany_val(msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) {
+ msg->err = udp_send(msg->conn->pcb.udp, msg->msg.b->p);
+ } else {
+ msg->err = udp_sendto(msg->conn->pcb.udp, msg->msg.b->p, &msg->msg.b->addr, msg->msg.b->port);
+ }
+#endif /* LWIP_CHECKSUM_ON_COPY */
+ break;
+#endif /* LWIP_UDP */
+ default:
+ break;
+ }
+ }
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
+
+#if LWIP_TCP
+/**
+ * Indicate data has been received from a TCP pcb contained in a netconn
+ * Called from netconn_recv
+ *
+ * @param m the api_msg_msg pointing to the connection
+ */
+void
+lwip_netconn_do_recv(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ msg->err = ERR_OK;
+ if (msg->conn->pcb.tcp != NULL) {
+ if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) {
+ u32_t remaining = msg->msg.r.len;
+ do {
+ u16_t recved = (remaining > 0xffff) ? 0xffff : (u16_t)remaining;
+ tcp_recved(msg->conn->pcb.tcp, recved);
+ remaining -= recved;
+ } while (remaining != 0);
+ }
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
+
+#if TCP_LISTEN_BACKLOG
+/** Indicate that a TCP pcb has been accepted
+ * Called from netconn_accept
+ *
+ * @param m the api_msg_msg pointing to the connection
+ */
+void
+lwip_netconn_do_accepted(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ msg->err = ERR_OK;
+ if (msg->conn->pcb.tcp != NULL) {
+ if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) {
+ tcp_backlog_accepted(msg->conn->pcb.tcp);
+ }
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
+#endif /* TCP_LISTEN_BACKLOG */
+
+/**
+ * See if more data needs to be written from a previous call to netconn_write.
+ * Called initially from lwip_netconn_do_write. If the first call can't send all data
+ * (because of low memory or empty send-buffer), this function is called again
+ * from sent_tcp() or poll_tcp() to send more data. If all data is sent, the
+ * blocking application thread (waiting in netconn_write) is released.
+ *
+ * @param conn netconn (that is currently in state NETCONN_WRITE) to process
+ * @return ERR_OK
+ * ERR_MEM if LWIP_TCPIP_CORE_LOCKING=1 and sending hasn't yet finished
+ */
+static err_t
+lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM)
+{
+ err_t err;
+ const void *dataptr;
+ u16_t len, available;
+ u8_t write_finished = 0;
+ size_t diff;
+ u8_t dontblock;
+ u8_t apiflags;
+
+ LWIP_ASSERT("conn != NULL", conn != NULL);
+ LWIP_ASSERT("conn->state == NETCONN_WRITE", (conn->state == NETCONN_WRITE));
+ LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL);
+ LWIP_ASSERT("conn->pcb.tcp != NULL", conn->pcb.tcp != NULL);
+ LWIP_ASSERT("conn->write_offset < conn->current_msg->msg.w.len",
+ conn->write_offset < conn->current_msg->msg.w.len);
+
+ apiflags = conn->current_msg->msg.w.apiflags;
+ dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK);
+
+#if LWIP_SO_SNDTIMEO
+ if ((conn->send_timeout != 0) &&
+ ((s32_t)(sys_now() - conn->current_msg->msg.w.time_started) >= conn->send_timeout)) {
+ write_finished = 1;
+ if (conn->write_offset == 0) {
+ /* nothing has been written */
+ err = ERR_WOULDBLOCK;
+ conn->current_msg->msg.w.len = 0;
+ } else {
+ /* partial write */
+ err = ERR_OK;
+ conn->current_msg->msg.w.len = conn->write_offset;
+ conn->write_offset = 0;
+ }
+ } else
+#endif /* LWIP_SO_SNDTIMEO */
+ {
+ dataptr = (const u8_t*)conn->current_msg->msg.w.dataptr + conn->write_offset;
+ diff = conn->current_msg->msg.w.len - conn->write_offset;
+ if (diff > 0xffffUL) { /* max_u16_t */
+ len = 0xffff;
+ apiflags |= TCP_WRITE_FLAG_MORE;
+ } else {
+ len = (u16_t)diff;
+ }
+ available = tcp_sndbuf(conn->pcb.tcp);
+ if (available < len) {
+ /* don't try to write more than sendbuf */
+ len = available;
+ if (dontblock) {
+ if (!len) {
+ err = ERR_WOULDBLOCK;
+ goto err_mem;
+ }
+ } else {
+ apiflags |= TCP_WRITE_FLAG_MORE;
+ }
+ }
+ LWIP_ASSERT("lwip_netconn_do_writemore: invalid length!", ((conn->write_offset + len) <= conn->current_msg->msg.w.len));
+ err = tcp_write(conn->pcb.tcp, dataptr, len, apiflags);
+ /* if OK or memory error, check available space */
+ if ((err == ERR_OK) || (err == ERR_MEM)) {
+err_mem:
+ if (dontblock && (len < conn->current_msg->msg.w.len)) {
+ /* non-blocking write did not write everything: mark the pcb non-writable
+ and let poll_tcp check writable space to mark the pcb writable again */
+ API_EVENT(conn, NETCONN_EVT_SENDMINUS, len);
+ conn->flags |= NETCONN_FLAG_CHECK_WRITESPACE;
+ } else if ((tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT) ||
+ (tcp_sndqueuelen(conn->pcb.tcp) >= TCP_SNDQUEUELOWAT)) {
+ /* The queued byte- or pbuf-count exceeds the configured low-water limit,
+ let select mark this pcb as non-writable. */
+ API_EVENT(conn, NETCONN_EVT_SENDMINUS, len);
+ }
+ }
+
+ if (err == ERR_OK) {
+ err_t out_err;
+ conn->write_offset += len;
+ if ((conn->write_offset == conn->current_msg->msg.w.len) || dontblock) {
+ /* return sent length */
+ conn->current_msg->msg.w.len = conn->write_offset;
+ /* everything was written */
+ write_finished = 1;
+ }
+ out_err = tcp_output(conn->pcb.tcp);
+ if (ERR_IS_FATAL(out_err) || (out_err == ERR_RTE)) {
+ /* If tcp_output fails with fatal error or no route is found,
+ don't try writing any more but return the error
+ to the application thread. */
+ err = out_err;
+ write_finished = 1;
+ conn->current_msg->msg.w.len = 0;
+ }
+ } else if (err == ERR_MEM) {
+ /* If ERR_MEM, we wait for sent_tcp or poll_tcp to be called.
+ For blocking sockets, we do NOT return to the application
+ thread, since ERR_MEM is only a temporary error! Non-blocking
+ will remain non-writable until sent_tcp/poll_tcp is called */
+
+ /* tcp_write returned ERR_MEM, try tcp_output anyway */
+ err_t out_err = tcp_output(conn->pcb.tcp);
+ if (ERR_IS_FATAL(out_err) || (out_err == ERR_RTE)) {
+ /* If tcp_output fails with fatal error or no route is found,
+ don't try writing any more but return the error
+ to the application thread. */
+ err = out_err;
+ write_finished = 1;
+ conn->current_msg->msg.w.len = 0;
+ } else if (dontblock) {
+ /* non-blocking write is done on ERR_MEM */
+ err = ERR_WOULDBLOCK;
+ write_finished = 1;
+ conn->current_msg->msg.w.len = 0;
+ }
+ } else {
+ /* On errors != ERR_MEM, we don't try writing any more but return
+ the error to the application thread. */
+ write_finished = 1;
+ conn->current_msg->msg.w.len = 0;
+ }
+ }
+ if (write_finished) {
+ /* everything was written: set back connection state
+ and back to application task */
+ sys_sem_t* op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg);
+ conn->current_msg->err = err;
+ conn->current_msg = NULL;
+ conn->write_offset = 0;
+ conn->state = NETCONN_NONE;
+ NETCONN_SET_SAFE_ERR(conn, err);
+#if LWIP_TCPIP_CORE_LOCKING
+ if (delayed)
+#endif
+ {
+ sys_sem_signal(op_completed_sem);
+ }
+ }
+#if LWIP_TCPIP_CORE_LOCKING
+ else {
+ return ERR_MEM;
+ }
+#endif
+ return ERR_OK;
+}
+#endif /* LWIP_TCP */
+
+/**
+ * Send some data on a TCP pcb contained in a netconn
+ * Called from netconn_write
+ *
+ * @param m the api_msg_msg pointing to the connection
+ */
+void
+lwip_netconn_do_write(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ if (ERR_IS_FATAL(msg->conn->last_err)) {
+ msg->err = msg->conn->last_err;
+ } else {
+ if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) {
+#if LWIP_TCP
+ if (msg->conn->state != NETCONN_NONE) {
+ /* netconn is connecting, closing or in blocking write */
+ msg->err = ERR_INPROGRESS;
+ } else if (msg->conn->pcb.tcp != NULL) {
+ msg->conn->state = NETCONN_WRITE;
+ /* set all the variables used by lwip_netconn_do_writemore */
+ LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL &&
+ msg->conn->write_offset == 0);
+ LWIP_ASSERT("msg->msg.w.len != 0", msg->msg.w.len != 0);
+ msg->conn->current_msg = msg;
+ msg->conn->write_offset = 0;
+#if LWIP_TCPIP_CORE_LOCKING
+ if (lwip_netconn_do_writemore(msg->conn, 0) != ERR_OK) {
+ LWIP_ASSERT("state!", msg->conn->state == NETCONN_WRITE);
+ UNLOCK_TCPIP_CORE();
+ sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0);
+ LOCK_TCPIP_CORE();
+ LWIP_ASSERT("state!", msg->conn->state != NETCONN_WRITE);
+ }
+#else /* LWIP_TCPIP_CORE_LOCKING */
+ lwip_netconn_do_writemore(msg->conn);
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+ /* for both cases: if lwip_netconn_do_writemore was called, don't ACK the APIMSG
+ since lwip_netconn_do_writemore ACKs it! */
+ return;
+ } else {
+ msg->err = ERR_CONN;
+ }
+#else /* LWIP_TCP */
+ msg->err = ERR_VAL;
+#endif /* LWIP_TCP */
+#if (LWIP_UDP || LWIP_RAW)
+ } else {
+ msg->err = ERR_VAL;
+#endif /* (LWIP_UDP || LWIP_RAW) */
+ }
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
+
+/**
+ * Return a connection's local or remote address
+ * Called from netconn_getaddr
+ *
+ * @param m the api_msg_msg pointing to the connection
+ */
+void
+lwip_netconn_do_getaddr(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ if (msg->conn->pcb.ip != NULL) {
+ if (msg->msg.ad.local) {
+ ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr),
+ msg->conn->pcb.ip->local_ip);
+ } else {
+ ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr),
+ msg->conn->pcb.ip->remote_ip);
+ }
+
+ msg->err = ERR_OK;
+ switch (NETCONNTYPE_GROUP(msg->conn->type)) {
+#if LWIP_RAW
+ case NETCONN_RAW:
+ if (msg->msg.ad.local) {
+ API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.raw->protocol;
+ } else {
+ /* return an error as connecting is only a helper for upper layers */
+ msg->err = ERR_CONN;
+ }
+ break;
+#endif /* LWIP_RAW */
+#if LWIP_UDP
+ case NETCONN_UDP:
+ if (msg->msg.ad.local) {
+ API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->local_port;
+ } else {
+ if ((msg->conn->pcb.udp->flags & UDP_FLAGS_CONNECTED) == 0) {
+ msg->err = ERR_CONN;
+ } else {
+ API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->remote_port;
+ }
+ }
+ break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+ case NETCONN_TCP:
+ if ((msg->msg.ad.local == 0) &&
+ ((msg->conn->pcb.tcp->state == CLOSED) || (msg->conn->pcb.tcp->state == LISTEN))) {
+ /* pcb is not connected and remote name is requested */
+ msg->err = ERR_CONN;
+ } else {
+ API_EXPR_DEREF(msg->msg.ad.port) = (msg->msg.ad.local ? msg->conn->pcb.tcp->local_port : msg->conn->pcb.tcp->remote_port);
+ }
+ break;
+#endif /* LWIP_TCP */
+ default:
+ LWIP_ASSERT("invalid netconn_type", 0);
+ break;
+ }
+ } else {
+ msg->err = ERR_CONN;
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
+
+/**
+ * Close or half-shutdown a TCP pcb contained in a netconn
+ * Called from netconn_close
+ * In contrast to closing sockets, the netconn is not deallocated.
+ *
+ * @param m the api_msg_msg pointing to the connection
+ */
+void
+lwip_netconn_do_close(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+#if LWIP_TCP
+ enum netconn_state state = msg->conn->state;
+ /* First check if this is a TCP netconn and if it is in a correct state
+ (LISTEN doesn't support half shutdown) */
+ if ((msg->conn->pcb.tcp != NULL) &&
+ (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) &&
+ ((msg->msg.sd.shut == NETCONN_SHUT_RDWR) || (state != NETCONN_LISTEN))) {
+ /* Check if we are in a connected state */
+ if (state == NETCONN_CONNECT) {
+ /* TCP connect in progress: cannot shutdown */
+ msg->err = ERR_CONN;
+ } else if (state == NETCONN_WRITE) {
+#if LWIP_NETCONN_FULLDUPLEX
+ if (msg->msg.sd.shut & NETCONN_SHUT_WR) {
+ /* close requested, abort running write */
+ sys_sem_t* write_completed_sem;
+ LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL);
+ write_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg);
+ msg->conn->current_msg->err = ERR_CLSD;
+ msg->conn->current_msg = NULL;
+ msg->conn->write_offset = 0;
+ msg->conn->state = NETCONN_NONE;
+ state = NETCONN_NONE;
+ NETCONN_SET_SAFE_ERR(msg->conn, ERR_CLSD);
+ sys_sem_signal(write_completed_sem);
+ } else {
+ LWIP_ASSERT("msg->msg.sd.shut == NETCONN_SHUT_RD", msg->msg.sd.shut == NETCONN_SHUT_RD);
+ /* In this case, let the write continue and do not interfere with
+ conn->current_msg or conn->state! */
+ msg->err = tcp_shutdown(msg->conn->pcb.tcp, 1, 0);
+ }
+ }
+ if (state == NETCONN_NONE) {
+#else /* LWIP_NETCONN_FULLDUPLEX */
+ msg->err = ERR_INPROGRESS;
+ } else {
+#endif /* LWIP_NETCONN_FULLDUPLEX */
+ if (msg->msg.sd.shut & NETCONN_SHUT_RD) {
+ /* Drain and delete mboxes */
+ netconn_drain(msg->conn);
+ }
+ LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL &&
+ msg->conn->write_offset == 0);
+ msg->conn->state = NETCONN_CLOSE;
+ msg->conn->current_msg = msg;
+#if LWIP_TCPIP_CORE_LOCKING
+ if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) {
+ LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE);
+ UNLOCK_TCPIP_CORE();
+ sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0);
+ LOCK_TCPIP_CORE();
+ LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE);
+ }
+#else /* LWIP_TCPIP_CORE_LOCKING */
+ lwip_netconn_do_close_internal(msg->conn);
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+ /* for tcp netconns, lwip_netconn_do_close_internal ACKs the message */
+ return;
+ }
+ } else
+#endif /* LWIP_TCP */
+ {
+ msg->err = ERR_CONN;
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
+
+#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD)
+/**
+ * Join multicast groups for UDP netconns.
+ * Called from netconn_join_leave_group
+ *
+ * @param m the api_msg_msg pointing to the connection
+ */
+void
+lwip_netconn_do_join_leave_group(void *m)
+{
+ struct api_msg *msg = (struct api_msg*)m;
+
+ if (ERR_IS_FATAL(msg->conn->last_err)) {
+ msg->err = msg->conn->last_err;
+ } else {
+ if (msg->conn->pcb.tcp != NULL) {
+ if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) {
+#if LWIP_UDP
+#if LWIP_IPV6 && LWIP_IPV6_MLD
+ if (NETCONNTYPE_ISIPV6(msg->conn->type)) {
+ if (msg->msg.jl.join_or_leave == NETCONN_JOIN) {
+ msg->err = mld6_joingroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)),
+ ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr)));
+ } else {
+ msg->err = mld6_leavegroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)),
+ ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr)));
+ }
+ }
+ else
+#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */
+ {
+#if LWIP_IGMP
+ if (msg->msg.jl.join_or_leave == NETCONN_JOIN) {
+ msg->err = igmp_joingroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)),
+ ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr)));
+ } else {
+ msg->err = igmp_leavegroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)),
+ ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr)));
+ }
+#endif /* LWIP_IGMP */
+ }
+#endif /* LWIP_UDP */
+#if (LWIP_TCP || LWIP_RAW)
+ } else {
+ msg->err = ERR_VAL;
+#endif /* (LWIP_TCP || LWIP_RAW) */
+ }
+ } else {
+ msg->err = ERR_CONN;
+ }
+ }
+ TCPIP_APIMSG_ACK(msg);
+}
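+
+/*
+ * Annotation (not part of upstream lwIP): applications reach this handler via
+ * netconn_join_leave_group(). A hedged sketch for an IPv4 UDP netconn; the
+ * group address and the variable udp_conn (a netconn created with
+ * netconn_new(NETCONN_UDP)) are placeholders:
+ *
+ *   ip_addr_t group;
+ *   IP_ADDR4(&group, 239, 0, 0, 1);                    // example group only
+ *   // IP_ADDR_ANY lets the stack pick the netif used for the join
+ *   netconn_join_leave_group(udp_conn, &group, IP_ADDR_ANY, NETCONN_JOIN);
+ */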
+#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */
+
+#if LWIP_DNS
+/**
+ * Callback function that is called when a DNS name is resolved
+ * (or on timeout). A waiting application thread is woken up by
+ * signaling the semaphore.
+ */
+static void
+lwip_netconn_do_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
+{
+ struct dns_api_msg *msg = (struct dns_api_msg*)arg;
+
+ /* we trust the internal implementation to be correct :-) */
+ LWIP_UNUSED_ARG(name);
+
+ if (ipaddr == NULL) {
+ /* timeout or memory error */
+ API_EXPR_DEREF(msg->err) = ERR_VAL;
+ } else {
+ /* address was resolved */
+ API_EXPR_DEREF(msg->err) = ERR_OK;
+ API_EXPR_DEREF(msg->addr) = *ipaddr;
+ }
+ /* wake up the application task waiting in netconn_gethostbyname */
+ sys_sem_signal(API_EXPR_REF_SEM(msg->sem));
+}
+
+/**
+ * Execute a DNS query
+ * Called from netconn_gethostbyname
+ *
+ * @param arg the dns_api_msg pointing to the query
+ */
+void
+lwip_netconn_do_gethostbyname(void *arg)
+{
+ struct dns_api_msg *msg = (struct dns_api_msg*)arg;
+ u8_t addrtype =
+#if LWIP_IPV4 && LWIP_IPV6
+ msg->dns_addrtype;
+#else
+ LWIP_DNS_ADDRTYPE_DEFAULT;
+#endif
+
+ API_EXPR_DEREF(msg->err) = dns_gethostbyname_addrtype(msg->name,
+ API_EXPR_REF(msg->addr), lwip_netconn_do_dns_found, msg, addrtype);
+ if (API_EXPR_DEREF(msg->err) != ERR_INPROGRESS) {
+ /* on error or immediate success, wake up the application
+ * task waiting in netconn_gethostbyname */
+ sys_sem_signal(API_EXPR_REF_SEM(msg->sem));
+ }
+}
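+
+/*
+ * Annotation (not part of upstream lwIP): this handler is normally reached
+ * through netconn_gethostbyname(), which blocks the caller on the semaphore
+ * signalled by lwip_netconn_do_dns_found() above. Sketch with a placeholder
+ * host name:
+ *
+ *   ip_addr_t resolved;
+ *   if (netconn_gethostbyname("example.org", &resolved) == ERR_OK) {
+ *     // resolved now holds the first address returned by DNS
+ *   }
+ */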
+#endif /* LWIP_DNS */
+
+#endif /* LWIP_NETCONN */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/err.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/err.c
new file mode 100644
index 0000000..35e9c02
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/err.c
@@ -0,0 +1,115 @@
+/**
+ * @file
+ * Error Management module
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/err.h"
+#include "lwip/def.h"
+#include "lwip/sys.h"
+
+#include "lwip/errno.h"
+
+#if !NO_SYS
+/** Table to quickly map an lwIP error (err_t) to a socket error
+ * by using -err as an index */
+static const int err_to_errno_table[] = {
+ 0, /* ERR_OK 0 No error, everything OK. */
+ ENOMEM, /* ERR_MEM -1 Out of memory error. */
+ ENOBUFS, /* ERR_BUF -2 Buffer error. */
+ EWOULDBLOCK, /* ERR_TIMEOUT -3 Timeout */
+ EHOSTUNREACH, /* ERR_RTE -4 Routing problem. */
+ EINPROGRESS, /* ERR_INPROGRESS -5 Operation in progress */
+ EINVAL, /* ERR_VAL -6 Illegal value. */
+ EWOULDBLOCK, /* ERR_WOULDBLOCK -7 Operation would block. */
+ EADDRINUSE, /* ERR_USE -8 Address in use. */
+ EALREADY, /* ERR_ALREADY -9 Already connecting. */
+ EISCONN, /* ERR_ISCONN -10 Conn already established.*/
+ ENOTCONN, /* ERR_CONN -11 Not connected. */
+ -1, /* ERR_IF -12 Low-level netif error */
+ ECONNABORTED, /* ERR_ABRT -13 Connection aborted. */
+ ECONNRESET, /* ERR_RST -14 Connection reset. */
+ ENOTCONN, /* ERR_CLSD -15 Connection closed. */
+ EIO /* ERR_ARG -16 Illegal argument. */
+};
+
+int
+err_to_errno(err_t err)
+{
+ if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_to_errno_table))) {
+ return EIO;
+ }
+ return err_to_errno_table[-err];
+}
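+
+/*
+ * Annotation (not part of upstream lwIP): sketch of how the socket layer uses
+ * this mapping to surface an lwIP error as a BSD-style errno value:
+ *
+ *   err_t err = ERR_MEM;
+ *   int sockerr = err_to_errno(err);   // yields ENOMEM
+ */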
+#endif /* !NO_SYS */
+
+#ifdef LWIP_DEBUG
+
+static const char *err_strerr[] = {
+ "Ok.", /* ERR_OK 0 */
+ "Out of memory error.", /* ERR_MEM -1 */
+ "Buffer error.", /* ERR_BUF -2 */
+ "Timeout.", /* ERR_TIMEOUT -3 */
+ "Routing problem.", /* ERR_RTE -4 */
+ "Operation in progress.", /* ERR_INPROGRESS -5 */
+ "Illegal value.", /* ERR_VAL -6 */
+ "Operation would block.", /* ERR_WOULDBLOCK -7 */
+ "Address in use.", /* ERR_USE -8 */
+ "Already connecting.", /* ERR_ALREADY -9 */
+ "Already connected.", /* ERR_ISCONN -10 */
+ "Not connected.", /* ERR_CONN -11 */
+ "Low-level netif error.", /* ERR_IF -12 */
+ "Connection aborted.", /* ERR_ABRT -13 */
+ "Connection reset.", /* ERR_RST -14 */
+ "Connection closed.", /* ERR_CLSD -15 */
+ "Illegal argument." /* ERR_ARG -16 */
+};
+
+/**
+ * Convert an lwip internal error to a string representation.
+ *
+ * @param err an lwip internal err_t
+ * @return a string representation for err
+ */
+const char *
+lwip_strerr(err_t err)
+{
+ if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_strerr))) {
+ return "Unknown error.";
+ }
+ return err_strerr[-err];
+}
+
+#endif /* LWIP_DEBUG */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netbuf.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netbuf.c
new file mode 100644
index 0000000..eb25011
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netbuf.c
@@ -0,0 +1,246 @@
+/**
+ * @file
+ * Network buffer management
+ *
+ * @defgroup netbuf Network buffers
+ * @ingroup netconn
+ * Network buffer descriptor for @ref netconn. Based on @ref pbuf internally
+ * to avoid copying data around.\n
+ * Buffers must not be shared across multiple threads; all functions except
+ * netbuf_new() and netbuf_delete() are not thread-safe.
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/netbuf.h"
+#include "lwip/memp.h"
+
+#include <string.h>
+
+/**
+ * @ingroup netbuf
+ * Create (allocate) and initialize a new netbuf.
+ * The netbuf doesn't yet contain a packet buffer!
+ *
+ * @return a pointer to a new netbuf
+ * NULL on lack of memory
+ */
+struct netbuf *
+netbuf_new(void)
+{
+ struct netbuf *buf;
+
+ buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
+ if (buf != NULL) {
+ memset(buf, 0, sizeof(struct netbuf));
+ }
+ return buf;
+}
+
+/**
+ * @ingroup netbuf
+ * Deallocate a netbuf allocated by netbuf_new().
+ *
+ * @param buf pointer to a netbuf allocated by netbuf_new()
+ */
+void
+netbuf_delete(struct netbuf *buf)
+{
+ if (buf != NULL) {
+ if (buf->p != NULL) {
+ pbuf_free(buf->p);
+ buf->p = buf->ptr = NULL;
+ }
+ memp_free(MEMP_NETBUF, buf);
+ }
+}
+
+/**
+ * @ingroup netbuf
+ * Allocate memory for a packet buffer for a given netbuf.
+ *
+ * @param buf the netbuf for which to allocate a packet buffer
+ * @param size the size of the packet buffer to allocate
+ * @return pointer to the allocated memory
+ * NULL if no memory could be allocated
+ */
+void *
+netbuf_alloc(struct netbuf *buf, u16_t size)
+{
+ LWIP_ERROR("netbuf_alloc: invalid buf", (buf != NULL), return NULL;);
+
+ /* Deallocate any previously allocated memory. */
+ if (buf->p != NULL) {
+ pbuf_free(buf->p);
+ }
+ buf->p = pbuf_alloc(PBUF_TRANSPORT, size, PBUF_RAM);
+ if (buf->p == NULL) {
+ return NULL;
+ }
+ LWIP_ASSERT("check that first pbuf can hold size",
+ (buf->p->len >= size));
+ buf->ptr = buf->p;
+ return buf->p->payload;
+}
+
+/**
+ * @ingroup netbuf
+ * Free the packet buffer included in a netbuf
+ *
+ * @param buf pointer to the netbuf which contains the packet buffer to free
+ */
+void
+netbuf_free(struct netbuf *buf)
+{
+ LWIP_ERROR("netbuf_free: invalid buf", (buf != NULL), return;);
+ if (buf->p != NULL) {
+ pbuf_free(buf->p);
+ }
+ buf->p = buf->ptr = NULL;
+}
+
+/**
+ * @ingroup netbuf
+ * Let a netbuf reference existing (non-volatile) data.
+ *
+ * @param buf netbuf which should reference the data
+ * @param dataptr pointer to the data to reference
+ * @param size size of the data
+ * @return ERR_OK if data is referenced
+ * ERR_MEM if data couldn't be referenced due to lack of memory
+ */
+err_t
+netbuf_ref(struct netbuf *buf, const void *dataptr, u16_t size)
+{
+ LWIP_ERROR("netbuf_ref: invalid buf", (buf != NULL), return ERR_ARG;);
+ if (buf->p != NULL) {
+ pbuf_free(buf->p);
+ }
+ buf->p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
+ if (buf->p == NULL) {
+ buf->ptr = NULL;
+ return ERR_MEM;
+ }
+ ((struct pbuf_rom*)buf->p)->payload = dataptr;
+ buf->p->len = buf->p->tot_len = size;
+ buf->ptr = buf->p;
+ return ERR_OK;
+}
+
+/**
+ * @ingroup netbuf
+ * Chain one netbuf to another (@see pbuf_chain)
+ *
+ * @param head the first netbuf
+ * @param tail netbuf to chain after head, freed by this function, may not be referenced after returning
+ */
+void
+netbuf_chain(struct netbuf *head, struct netbuf *tail)
+{
+ LWIP_ERROR("netbuf_chain: invalid head", (head != NULL), return;);
+ LWIP_ERROR("netbuf_chain: invalid tail", (tail != NULL), return;);
+ pbuf_cat(head->p, tail->p);
+ head->ptr = head->p;
+ memp_free(MEMP_NETBUF, tail);
+}
+
+/**
+ * @ingroup netbuf
+ * Get the data pointer and length of the data inside a netbuf.
+ *
+ * @param buf netbuf to get the data from
+ * @param dataptr pointer to a void pointer where to store the data pointer
+ * @param len pointer to an u16_t where the length of the data is stored
+ * @return ERR_OK if the information was retrieved,
+ * ERR_BUF on error.
+ */
+err_t
+netbuf_data(struct netbuf *buf, void **dataptr, u16_t *len)
+{
+ LWIP_ERROR("netbuf_data: invalid buf", (buf != NULL), return ERR_ARG;);
+ LWIP_ERROR("netbuf_data: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
+ LWIP_ERROR("netbuf_data: invalid len", (len != NULL), return ERR_ARG;);
+
+ if (buf->ptr == NULL) {
+ return ERR_BUF;
+ }
+ *dataptr = buf->ptr->payload;
+ *len = buf->ptr->len;
+ return ERR_OK;
+}
+
+/**
+ * @ingroup netbuf
+ * Move the current data pointer of a packet buffer contained in a netbuf
+ * to the next part.
+ * The packet buffer itself is not modified.
+ *
+ * @param buf the netbuf to modify
+ * @return -1 if there is no next part
+ * 1 if moved to the next part but now there is no next part
+ * 0 if moved to the next part and there are still more parts
+ */
+s8_t
+netbuf_next(struct netbuf *buf)
+{
+ LWIP_ERROR("netbuf_next: invalid buf", (buf != NULL), return -1;);
+ if (buf->ptr->next == NULL) {
+ return -1;
+ }
+ buf->ptr = buf->ptr->next;
+ if (buf->ptr->next == NULL) {
+ return 1;
+ }
+ return 0;
+}
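+
+/*
+ * Annotation (not part of upstream lwIP): typical receive-side use of the
+ * accessors in this file, iterating over all pbuf parts of a netbuf that is
+ * assumed to come from netconn_recv():
+ *
+ *   void *data;
+ *   u16_t len;
+ *   netbuf_first(buf);
+ *   do {
+ *     if (netbuf_data(buf, &data, &len) == ERR_OK) {
+ *       // process len bytes at data
+ *     }
+ *   } while (netbuf_next(buf) >= 0);
+ *   netbuf_delete(buf);
+ */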
+
+/**
+ * @ingroup netbuf
+ * Move the current data pointer of a packet buffer contained in a netbuf
+ * to the beginning of the packet.
+ * The packet buffer itself is not modified.
+ *
+ * @param buf the netbuf to modify
+ */
+void
+netbuf_first(struct netbuf *buf)
+{
+ LWIP_ERROR("netbuf_first: invalid buf", (buf != NULL), return;);
+ buf->ptr = buf->p;
+}
+
+#endif /* LWIP_NETCONN */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netdb.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netdb.c
new file mode 100644
index 0000000..ccd9586
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netdb.c
@@ -0,0 +1,413 @@
+/**
+ * @file
+ * API functions for name resolving
+ *
+ * @defgroup netdbapi NETDB API
+ * @ingroup socket
+ */
+
+/*
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#include "lwip/netdb.h"
+
+#if LWIP_DNS && LWIP_SOCKET
+
+#include "lwip/err.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/ip_addr.h"
+#include "lwip/api.h"
+#include "lwip/dns.h"
+
+#include <string.h> /* memset */
+#include <stdlib.h> /* atoi */
+
+/** helper struct for gethostbyname_r to access the char* buffer */
+struct gethostbyname_r_helper {
+ ip_addr_t *addr_list[2];
+ ip_addr_t addr;
+ char *aliases;
+};
+
+/** h_errno is exported in netdb.h for access by applications. */
+#if LWIP_DNS_API_DECLARE_H_ERRNO
+int h_errno;
+#endif /* LWIP_DNS_API_DECLARE_H_ERRNO */
+
+/** define "hostent" variables storage: 0 if we use a static (but unprotected)
+ *  set of variables for lwip_gethostbyname, 1 if we use local storage */
+#ifndef LWIP_DNS_API_HOSTENT_STORAGE
+#define LWIP_DNS_API_HOSTENT_STORAGE 0
+#endif
+
+/** define "hostent" variables storage */
+#if LWIP_DNS_API_HOSTENT_STORAGE
+#define HOSTENT_STORAGE
+#else
+#define HOSTENT_STORAGE static
+#endif /* LWIP_DNS_API_HOSTENT_STORAGE */
+
+/**
+ * Returns an entry containing addresses of address family AF_INET
+ * for the host with name name.
+ * Due to dns_gethostbyname limitations, only one address is returned.
+ *
+ * @param name the hostname to resolve
+ * @return an entry containing addresses of address family AF_INET
+ * for the host with name name
+ */
+struct hostent*
+lwip_gethostbyname(const char *name)
+{
+ err_t err;
+ ip_addr_t addr;
+
+ /* buffer variables for lwip_gethostbyname() */
+ HOSTENT_STORAGE struct hostent s_hostent;
+ HOSTENT_STORAGE char *s_aliases;
+ HOSTENT_STORAGE ip_addr_t s_hostent_addr;
+ HOSTENT_STORAGE ip_addr_t *s_phostent_addr[2];
+ HOSTENT_STORAGE char s_hostname[DNS_MAX_NAME_LENGTH + 1];
+
+ /* query host IP address */
+ err = netconn_gethostbyname(name, &addr);
+ if (err != ERR_OK) {
+ LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err));
+ h_errno = HOST_NOT_FOUND;
+ return NULL;
+ }
+
+ /* fill hostent */
+ s_hostent_addr = addr;
+ s_phostent_addr[0] = &s_hostent_addr;
+ s_phostent_addr[1] = NULL;
+ strncpy(s_hostname, name, DNS_MAX_NAME_LENGTH);
+ s_hostname[DNS_MAX_NAME_LENGTH] = 0;
+ s_hostent.h_name = s_hostname;
+ s_aliases = NULL;
+ s_hostent.h_aliases = &s_aliases;
+ s_hostent.h_addrtype = AF_INET;
+ s_hostent.h_length = sizeof(ip_addr_t);
+ s_hostent.h_addr_list = (char**)&s_phostent_addr;
+
+#if DNS_DEBUG
+ /* dump hostent */
+ LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_name == %s\n", s_hostent.h_name));
+ LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_aliases == %p\n", (void*)s_hostent.h_aliases));
+ /* h_aliases are always empty */
+ LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addrtype == %d\n", s_hostent.h_addrtype));
+ LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_length == %d\n", s_hostent.h_length));
+ LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list == %p\n", (void*)s_hostent.h_addr_list));
+ if (s_hostent.h_addr_list != NULL) {
+ u8_t idx;
+ for (idx=0; s_hostent.h_addr_list[idx]; idx++) {
+ LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i] == %p\n", idx, s_hostent.h_addr_list[idx]));
+ LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i]-> == %s\n", idx, ipaddr_ntoa((ip_addr_t*)s_hostent.h_addr_list[idx])));
+ }
+ }
+#endif /* DNS_DEBUG */
+
+#if LWIP_DNS_API_HOSTENT_STORAGE
+ /* this function should return the "per-thread" hostent after copy from s_hostent */
+ return sys_thread_hostent(&s_hostent);
+#else
+ return &s_hostent;
+#endif /* LWIP_DNS_API_HOSTENT_STORAGE */
+}
+
+/**
+ * Thread-safe variant of lwip_gethostbyname: instead of using a static
+ * buffer, this function takes buffer and errno pointers as arguments
+ * and uses these for the result.
+ *
+ * @param name the hostname to resolve
+ * @param ret pre-allocated struct where to store the result
+ * @param buf pre-allocated buffer where to store additional data
+ * @param buflen the size of buf
+ * @param result pointer to a hostent pointer that is set to ret on success
+ * and set to zero on error
+ * @param h_errnop pointer to an int where to store errors (instead of modifying
+ * the global h_errno)
+ * @return 0 on success, non-zero on error, additional error information
+ * is stored in *h_errnop instead of h_errno to be thread-safe
+ */
+int
+lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf,
+ size_t buflen, struct hostent **result, int *h_errnop)
+{
+ err_t err;
+ struct gethostbyname_r_helper *h;
+ char *hostname;
+ size_t namelen;
+ int lh_errno;
+
+ if (h_errnop == NULL) {
+ /* ensure h_errnop is never NULL */
+ h_errnop = &lh_errno;
+ }
+
+ if (result == NULL) {
+ /* not all arguments given */
+ *h_errnop = EINVAL;
+ return -1;
+ }
+ /* first thing to do: set *result to nothing */
+ *result = NULL;
+ if ((name == NULL) || (ret == NULL) || (buf == NULL)) {
+ /* not all arguments given */
+ *h_errnop = EINVAL;
+ return -1;
+ }
+
+ namelen = strlen(name);
+ if (buflen < (sizeof(struct gethostbyname_r_helper) + namelen + 1 + (MEM_ALIGNMENT - 1))) {
+ /* buf can't hold the data needed + a copy of name */
+ *h_errnop = ERANGE;
+ return -1;
+ }
+
+ h = (struct gethostbyname_r_helper*)LWIP_MEM_ALIGN(buf);
+ hostname = ((char*)h) + sizeof(struct gethostbyname_r_helper);
+
+ /* query host IP address */
+ err = netconn_gethostbyname(name, &h->addr);
+ if (err != ERR_OK) {
+ LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err));
+ *h_errnop = HOST_NOT_FOUND;
+ return -1;
+ }
+
+ /* copy the hostname into buf */
+ MEMCPY(hostname, name, namelen);
+ hostname[namelen] = 0;
+
+ /* fill hostent */
+ h->addr_list[0] = &h->addr;
+ h->addr_list[1] = NULL;
+ h->aliases = NULL;
+ ret->h_name = hostname;
+ ret->h_aliases = &h->aliases;
+ ret->h_addrtype = AF_INET;
+ ret->h_length = sizeof(ip_addr_t);
+ ret->h_addr_list = (char**)&h->addr_list;
+
+ /* set result != NULL */
+ *result = ret;
+
+ /* return success */
+ return 0;
+}
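+
+/*
+ * Annotation (not part of upstream lwIP): reentrant lookup sketch. The caller's
+ * buffer must hold a struct gethostbyname_r_helper plus a copy of the name;
+ * the 256-byte size below is an illustrative guess, not a required constant:
+ *
+ *   struct hostent he, *res;
+ *   char buf[256];
+ *   int local_h_errno;
+ *   if (lwip_gethostbyname_r("example.org", &he, buf, sizeof(buf),
+ *                            &res, &local_h_errno) == 0) {
+ *     // res->h_addr_list[0] points to the resolved ip_addr_t
+ *   }
+ */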
+
+/**
+ * Frees one or more addrinfo structures returned by getaddrinfo(), along with
+ * any additional storage associated with those structures. If the ai_next field
+ * of the structure is not null, the entire list of structures is freed.
+ *
+ * @param ai struct addrinfo to free
+ */
+void
+lwip_freeaddrinfo(struct addrinfo *ai)
+{
+ struct addrinfo *next;
+
+ while (ai != NULL) {
+ next = ai->ai_next;
+ memp_free(MEMP_NETDB, ai);
+ ai = next;
+ }
+}
+
+/**
+ * Translates the name of a service location (for example, a host name) and/or
+ * a service name and returns a set of socket addresses and associated
+ * information to be used in creating a socket with which to address the
+ * specified service.
+ * Memory for the result is allocated internally and must be freed by calling
+ * lwip_freeaddrinfo()!
+ *
+ * Due to a limitation in dns_gethostbyname, only the first address of a
+ * host is returned.
+ * Also, service names are not supported (only port numbers)!
+ *
+ * @param nodename descriptive name or address string of the host
+ * (may be NULL -> local address)
+ * @param servname port number as string, or NULL
+ * @param hints structure containing input values that set socktype and protocol
+ * @param res pointer to a pointer where to store the result (set to NULL on failure)
+ * @return 0 on success, non-zero on failure
+ *
+ * @todo: implement AI_V4MAPPED, AI_ADDRCONFIG
+ */
+int
+lwip_getaddrinfo(const char *nodename, const char *servname,
+ const struct addrinfo *hints, struct addrinfo **res)
+{
+ err_t err;
+ ip_addr_t addr;
+ struct addrinfo *ai;
+ struct sockaddr_storage *sa = NULL;
+ int port_nr = 0;
+ size_t total_size;
+ size_t namelen = 0;
+ int ai_family;
+
+ if (res == NULL) {
+ return EAI_FAIL;
+ }
+ *res = NULL;
+ if ((nodename == NULL) && (servname == NULL)) {
+ return EAI_NONAME;
+ }
+
+ if (hints != NULL) {
+ ai_family = hints->ai_family;
+ if ((ai_family != AF_UNSPEC)
+#if LWIP_IPV4
+ && (ai_family != AF_INET)
+#endif /* LWIP_IPV4 */
+#if LWIP_IPV6
+ && (ai_family != AF_INET6)
+#endif /* LWIP_IPV6 */
+ ) {
+ return EAI_FAMILY;
+ }
+ } else {
+ ai_family = AF_UNSPEC;
+ }
+
+ if (servname != NULL) {
+ /* service name specified: convert to port number
+ * @todo?: currently, only ASCII integers (port numbers) are supported (AI_NUMERICSERV)! */
+ port_nr = atoi(servname);
+ if ((port_nr <= 0) || (port_nr > 0xffff)) {
+ return EAI_SERVICE;
+ }
+ }
+
+ if (nodename != NULL) {
+ /* service location specified, try to resolve */
+ if ((hints != NULL) && (hints->ai_flags & AI_NUMERICHOST)) {
+ /* no DNS lookup, just parse for an address string */
+ if (!ipaddr_aton(nodename, &addr)) {
+ return EAI_NONAME;
+ }
+#if LWIP_IPV4 && LWIP_IPV6
+ if ((IP_IS_V6_VAL(addr) && ai_family == AF_INET) ||
+ (IP_IS_V4_VAL(addr) && ai_family == AF_INET6)) {
+ return EAI_NONAME;
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ } else {
+#if LWIP_IPV4 && LWIP_IPV6
+ /* AF_UNSPEC: prefer IPv4 */
+ u8_t type = NETCONN_DNS_IPV4_IPV6;
+ if (ai_family == AF_INET) {
+ type = NETCONN_DNS_IPV4;
+ } else if (ai_family == AF_INET6) {
+ type = NETCONN_DNS_IPV6;
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ err = netconn_gethostbyname_addrtype(nodename, &addr, type);
+ if (err != ERR_OK) {
+ return EAI_FAIL;
+ }
+ }
+ } else {
+    /* no node name specified: use the loopback address (or the any-address if AI_PASSIVE is set) */
+ if ((hints != NULL) && (hints->ai_flags & AI_PASSIVE)) {
+ ip_addr_set_any(ai_family == AF_INET6, &addr);
+ } else {
+ ip_addr_set_loopback(ai_family == AF_INET6, &addr);
+ }
+ }
+
+ total_size = sizeof(struct addrinfo) + sizeof(struct sockaddr_storage);
+ if (nodename != NULL) {
+ namelen = strlen(nodename);
+ if (namelen > DNS_MAX_NAME_LENGTH) {
+ /* invalid name length */
+ return EAI_FAIL;
+ }
+ LWIP_ASSERT("namelen is too long", total_size + namelen + 1 > total_size);
+ total_size += namelen + 1;
+ }
+ /* If this fails, please report to lwip-devel! :-) */
+ LWIP_ASSERT("total_size <= NETDB_ELEM_SIZE: please report this!",
+ total_size <= NETDB_ELEM_SIZE);
+ ai = (struct addrinfo *)memp_malloc(MEMP_NETDB);
+ if (ai == NULL) {
+ return EAI_MEMORY;
+ }
+ memset(ai, 0, total_size);
+ /* cast through void* to get rid of alignment warnings */
+ sa = (struct sockaddr_storage *)(void*)((u8_t*)ai + sizeof(struct addrinfo));
+ if (IP_IS_V6_VAL(addr)) {
+#if LWIP_IPV6
+ struct sockaddr_in6 *sa6 = (struct sockaddr_in6*)sa;
+ /* set up sockaddr */
+ inet6_addr_from_ip6addr(&sa6->sin6_addr, ip_2_ip6(&addr));
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_len = sizeof(struct sockaddr_in6);
+ sa6->sin6_port = lwip_htons((u16_t)port_nr);
+ ai->ai_family = AF_INET6;
+#endif /* LWIP_IPV6 */
+ } else {
+#if LWIP_IPV4
+ struct sockaddr_in *sa4 = (struct sockaddr_in*)sa;
+ /* set up sockaddr */
+ inet_addr_from_ip4addr(&sa4->sin_addr, ip_2_ip4(&addr));
+ sa4->sin_family = AF_INET;
+ sa4->sin_len = sizeof(struct sockaddr_in);
+ sa4->sin_port = lwip_htons((u16_t)port_nr);
+ ai->ai_family = AF_INET;
+#endif /* LWIP_IPV4 */
+ }
+
+ /* set up addrinfo */
+ if (hints != NULL) {
+ /* copy socktype & protocol from hints if specified */
+ ai->ai_socktype = hints->ai_socktype;
+ ai->ai_protocol = hints->ai_protocol;
+ }
+ if (nodename != NULL) {
+ /* copy nodename to canonname if specified */
+ ai->ai_canonname = ((char*)ai + sizeof(struct addrinfo) + sizeof(struct sockaddr_storage));
+ MEMCPY(ai->ai_canonname, nodename, namelen);
+ ai->ai_canonname[namelen] = 0;
+ }
+ ai->ai_addrlen = sizeof(struct sockaddr_storage);
+ ai->ai_addr = (struct sockaddr*)sa;
+
+ *res = ai;
+
+ return 0;
+}
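A minimal usage sketch of this API (illustrative only, not part of the vendored file): it assumes LWIP_DNS and LWIP_SOCKET are enabled and a DNS server is configured; the host name and port supplied by the caller are placeholders.

#include <string.h>
#include "lwip/netdb.h"
#include "lwip/sockets.h"

/* Resolve a host name and connect a TCP socket to the first result. */
static int connect_to_host(const char *host, const char *port)
{
  struct addrinfo hints, *res = NULL;
  int sock;

  memset(&hints, 0, sizeof(hints));
  hints.ai_family   = AF_INET;        /* only one address is returned anyway */
  hints.ai_socktype = SOCK_STREAM;
  hints.ai_protocol = IPPROTO_TCP;

  if (lwip_getaddrinfo(host, port, &hints, &res) != 0) {
    return -1;                        /* lookup failed */
  }
  sock = lwip_socket(res->ai_family, res->ai_socktype, res->ai_protocol);
  if ((sock >= 0) && (lwip_connect(sock, res->ai_addr, res->ai_addrlen) != 0)) {
    lwip_close(sock);
    sock = -1;
  }
  lwip_freeaddrinfo(res);             /* result memory comes from MEMP_NETDB */
  return sock;
}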
+
+#endif /* LWIP_DNS && LWIP_SOCKET */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netifapi.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netifapi.c
new file mode 100644
index 0000000..fef05a3
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/netifapi.c
@@ -0,0 +1,221 @@
+/**
+ * @file
+ * Network Interface Sequential API module
+ *
+ * @defgroup netifapi NETIF API
+ * @ingroup sequential_api
+ * Thread-safe functions to be called from non-TCPIP threads
+ *
+ * @defgroup netifapi_netif NETIF related
+ * @ingroup netifapi
+ * To be called from non-TCPIP threads
+ */
+
+/*
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/netifapi.h"
+#include "lwip/memp.h"
+#include "lwip/priv/tcpip_priv.h"
+
+#define NETIFAPI_VAR_REF(name) API_VAR_REF(name)
+#define NETIFAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct netifapi_msg, name)
+#define NETIFAPI_VAR_ALLOC(name) API_VAR_ALLOC(struct netifapi_msg, MEMP_NETIFAPI_MSG, name, ERR_MEM)
+#define NETIFAPI_VAR_FREE(name) API_VAR_FREE(MEMP_NETIFAPI_MSG, name)
+
+/**
+ * Call netif_add() inside the tcpip_thread context.
+ */
+static err_t
+netifapi_do_netif_add(struct tcpip_api_call_data *m)
+{
+ /* cast through void* to silence alignment warnings.
+ * We know it works because the structs have been instantiated as struct netifapi_msg */
+ struct netifapi_msg *msg = (struct netifapi_msg*)(void*)m;
+
+ if (!netif_add( msg->netif,
+#if LWIP_IPV4
+ API_EXPR_REF(msg->msg.add.ipaddr),
+ API_EXPR_REF(msg->msg.add.netmask),
+ API_EXPR_REF(msg->msg.add.gw),
+#endif /* LWIP_IPV4 */
+ msg->msg.add.state,
+ msg->msg.add.init,
+ msg->msg.add.input)) {
+ return ERR_IF;
+ } else {
+ return ERR_OK;
+ }
+}
+
+#if LWIP_IPV4
+/**
+ * Call netif_set_addr() inside the tcpip_thread context.
+ */
+static err_t
+netifapi_do_netif_set_addr(struct tcpip_api_call_data *m)
+{
+ /* cast through void* to silence alignment warnings.
+ * We know it works because the structs have been instantiated as struct netifapi_msg */
+ struct netifapi_msg *msg = (struct netifapi_msg*)(void*)m;
+
+ netif_set_addr( msg->netif,
+ API_EXPR_REF(msg->msg.add.ipaddr),
+ API_EXPR_REF(msg->msg.add.netmask),
+ API_EXPR_REF(msg->msg.add.gw));
+ return ERR_OK;
+}
+#endif /* LWIP_IPV4 */
+
+/**
+ * Call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) inside the
+ * tcpip_thread context.
+ */
+static err_t
+netifapi_do_netif_common(struct tcpip_api_call_data *m)
+{
+ /* cast through void* to silence alignment warnings.
+ * We know it works because the structs have been instantiated as struct netifapi_msg */
+ struct netifapi_msg *msg = (struct netifapi_msg*)(void*)m;
+
+ if (msg->msg.common.errtfunc != NULL) {
+ return msg->msg.common.errtfunc(msg->netif);
+ } else {
+ msg->msg.common.voidfunc(msg->netif);
+ return ERR_OK;
+ }
+}
+
+/**
+ * @ingroup netifapi_netif
+ * Call netif_add() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ *
+ * @note for params @see netif_add()
+ */
+err_t
+netifapi_netif_add(struct netif *netif,
+#if LWIP_IPV4
+ const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw,
+#endif /* LWIP_IPV4 */
+ void *state, netif_init_fn init, netif_input_fn input)
+{
+ err_t err;
+ NETIFAPI_VAR_DECLARE(msg);
+ NETIFAPI_VAR_ALLOC(msg);
+
+#if LWIP_IPV4
+ if (ipaddr == NULL) {
+ ipaddr = IP4_ADDR_ANY4;
+ }
+ if (netmask == NULL) {
+ netmask = IP4_ADDR_ANY4;
+ }
+ if (gw == NULL) {
+ gw = IP4_ADDR_ANY4;
+ }
+#endif /* LWIP_IPV4 */
+
+ NETIFAPI_VAR_REF(msg).netif = netif;
+#if LWIP_IPV4
+ NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr);
+ NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask);
+ NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw);
+#endif /* LWIP_IPV4 */
+ NETIFAPI_VAR_REF(msg).msg.add.state = state;
+ NETIFAPI_VAR_REF(msg).msg.add.init = init;
+ NETIFAPI_VAR_REF(msg).msg.add.input = input;
+ err = tcpip_api_call(netifapi_do_netif_add, &API_VAR_REF(msg).call);
+ NETIFAPI_VAR_FREE(msg);
+ return err;
+}
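Usage sketch for adding an interface from an application thread (illustrative; my_ethif_init is a placeholder for the driver's netif init function, and the addresses are examples):

#include "lwip/netifapi.h"
#include "lwip/tcpip.h"

extern err_t my_ethif_init(struct netif *netif);  /* placeholder driver init */
static struct netif my_netif;

static void bring_up_interface(void)
{
  ip4_addr_t ip, mask, gw;
  IP4_ADDR(&ip,   192, 168, 1, 10);
  IP4_ADDR(&mask, 255, 255, 255, 0);
  IP4_ADDR(&gw,   192, 168, 1, 1);

  /* both calls are executed inside tcpip_thread, so they are thread-safe */
  netifapi_netif_add(&my_netif, &ip, &mask, &gw, NULL, my_ethif_init, tcpip_input);
  netifapi_netif_common(&my_netif, netif_set_up, NULL);
}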
+
+#if LWIP_IPV4
+/**
+ * @ingroup netifapi_netif
+ * Call netif_set_addr() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ *
+ * @note for params @see netif_set_addr()
+ */
+err_t
+netifapi_netif_set_addr(struct netif *netif,
+ const ip4_addr_t *ipaddr,
+ const ip4_addr_t *netmask,
+ const ip4_addr_t *gw)
+{
+ err_t err;
+ NETIFAPI_VAR_DECLARE(msg);
+ NETIFAPI_VAR_ALLOC(msg);
+
+ if (ipaddr == NULL) {
+ ipaddr = IP4_ADDR_ANY4;
+ }
+ if (netmask == NULL) {
+ netmask = IP4_ADDR_ANY4;
+ }
+ if (gw == NULL) {
+ gw = IP4_ADDR_ANY4;
+ }
+
+ NETIFAPI_VAR_REF(msg).netif = netif;
+ NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr);
+ NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask);
+ NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw);
+ err = tcpip_api_call(netifapi_do_netif_set_addr, &API_VAR_REF(msg).call);
+ NETIFAPI_VAR_FREE(msg);
+ return err;
+}
+#endif /* LWIP_IPV4 */
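A short sketch of changing an interface address at runtime (illustrative; my_netif is a placeholder for the interface added earlier):

#include "lwip/netifapi.h"

extern struct netif my_netif;   /* placeholder netif from the add example */

static void change_ipv4_address(void)
{
  ip4_addr_t ip, mask, gw;
  IP4_ADDR(&ip,   10, 0, 0, 2);
  IP4_ADDR(&mask, 255, 255, 255, 0);
  IP4_ADDR(&gw,   10, 0, 0, 1);
  /* runs inside tcpip_thread, so it cannot race against the stack */
  netifapi_netif_set_addr(&my_netif, &ip, &mask, &gw);
}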
+
+/**
+ * Call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) in a thread-safe
+ * way by running that function inside the tcpip_thread context.
+ *
+ * @note use only for functions where there is only "netif" parameter.
+ */
+err_t
+netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc,
+ netifapi_errt_fn errtfunc)
+{
+ err_t err;
+ NETIFAPI_VAR_DECLARE(msg);
+ NETIFAPI_VAR_ALLOC(msg);
+
+ NETIFAPI_VAR_REF(msg).netif = netif;
+ NETIFAPI_VAR_REF(msg).msg.common.voidfunc = voidfunc;
+ NETIFAPI_VAR_REF(msg).msg.common.errtfunc = errtfunc;
+ err = tcpip_api_call(netifapi_do_netif_common, &API_VAR_REF(msg).call);
+ NETIFAPI_VAR_FREE(msg);
+ return err;
+}
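Sketch of using this common-function helper (illustrative): netif_set_link_up() takes only the netif, so it fits the "voidfunc" slot; my_netif is a placeholder.

#include "lwip/netifapi.h"

extern struct netif my_netif;   /* placeholder */

/* Signal a link-up event from a driver task without calling netif_* directly. */
static void on_phy_link_up(void)
{
  netifapi_netif_common(&my_netif, netif_set_link_up, NULL);
}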
+
+#endif /* LWIP_NETIF_API */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/pppapi.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/pppapi.c
new file mode 100644
index 0000000..5ba190e
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/pppapi.c
@@ -0,0 +1,416 @@
+/**
+ * @file
+ * Point To Point Protocol Sequential API module
+ *
+ */
+
+/*
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/pppapi.h"
+#include "lwip/tcpip.h"
+#include "netif/ppp/pppoe.h"
+#include "netif/ppp/pppol2tp.h"
+#include "netif/ppp/pppos.h"
+
+/**
+ * Call ppp_set_default() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_ppp_set_default(struct pppapi_msg_msg *msg)
+{
+ ppp_set_default(msg->ppp);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call ppp_set_default() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+void
+pppapi_set_default(ppp_pcb *pcb)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_ppp_set_default;
+ msg.msg.ppp = pcb;
+ TCPIP_PPPAPI(&msg);
+}
+
+
+/**
+ * Call ppp_set_auth() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_ppp_set_auth(struct pppapi_msg_msg *msg)
+{
+ ppp_set_auth(msg->ppp, msg->msg.setauth.authtype,
+ msg->msg.setauth.user, msg->msg.setauth.passwd);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call ppp_set_auth() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+void
+pppapi_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_ppp_set_auth;
+ msg.msg.ppp = pcb;
+ msg.msg.msg.setauth.authtype = authtype;
+ msg.msg.msg.setauth.user = user;
+ msg.msg.msg.setauth.passwd = passwd;
+ TCPIP_PPPAPI(&msg);
+}
+
+
+#if PPP_NOTIFY_PHASE
+/**
+ * Call ppp_set_notify_phase_callback() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_ppp_set_notify_phase_callback(struct pppapi_msg_msg *msg)
+{
+ ppp_set_notify_phase_callback(msg->ppp, msg->msg.setnotifyphasecb.notify_phase_cb);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call ppp_set_notify_phase_callback() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+void
+pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_ppp_set_notify_phase_callback;
+ msg.msg.ppp = pcb;
+ msg.msg.msg.setnotifyphasecb.notify_phase_cb = notify_phase_cb;
+ TCPIP_PPPAPI(&msg);
+}
+#endif /* PPP_NOTIFY_PHASE */
+
+
+#if PPPOS_SUPPORT
+/**
+ * Call pppos_create() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_pppos_create(struct pppapi_msg_msg *msg)
+{
+ msg->ppp = pppos_create(msg->msg.serialcreate.pppif, msg->msg.serialcreate.fd,
+ msg->msg.serialcreate.link_status_cb, msg->msg.serialcreate.ctx_cb);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call pppos_create() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+ppp_pcb*
+pppapi_pppos_create(struct netif *pppif, sio_fd_t fd, ppp_link_status_cb_fn link_status_cb,
+ void *ctx_cb)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_pppos_create;
+ msg.msg.msg.serialcreate.pppif = pppif;
+ msg.msg.msg.serialcreate.fd = fd;
+ msg.msg.msg.serialcreate.link_status_cb = link_status_cb;
+ msg.msg.msg.serialcreate.ctx_cb = ctx_cb;
+ TCPIP_PPPAPI(&msg);
+ return msg.msg.ppp;
+}
+#endif /* PPPOS_SUPPORT */
+
+
+#if PPPOE_SUPPORT
+/**
+ * Call pppoe_create() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_pppoe_create(struct pppapi_msg_msg *msg)
+{
+
+ msg->ppp = pppoe_create(msg->msg.ethernetcreate.pppif, msg->msg.ethernetcreate.ethif,
+ msg->msg.ethernetcreate.service_name, msg->msg.ethernetcreate.concentrator_name,
+ msg->msg.ethernetcreate.link_status_cb, msg->msg.ethernetcreate.ctx_cb);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call pppoe_create() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+ppp_pcb*
+pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name,
+ const char *concentrator_name, ppp_link_status_cb_fn link_status_cb,
+ void *ctx_cb)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_pppoe_create;
+ msg.msg.msg.ethernetcreate.pppif = pppif;
+ msg.msg.msg.ethernetcreate.ethif = ethif;
+ msg.msg.msg.ethernetcreate.service_name = service_name;
+ msg.msg.msg.ethernetcreate.concentrator_name = concentrator_name;
+ msg.msg.msg.ethernetcreate.link_status_cb = link_status_cb;
+ msg.msg.msg.ethernetcreate.ctx_cb = ctx_cb;
+ TCPIP_PPPAPI(&msg);
+ return msg.msg.ppp;
+}
+#endif /* PPPOE_SUPPORT */
+
+
+#if PPPOL2TP_SUPPORT
+/**
+ * Call pppol2tp_create() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_pppol2tp_create(struct pppapi_msg_msg *msg)
+{
+ msg->ppp = pppol2tp_create(msg->msg.l2tpcreate.pppif,
+ msg->msg.l2tpcreate.netif, msg->msg.l2tpcreate.ipaddr, msg->msg.l2tpcreate.port,
+#if PPPOL2TP_AUTH_SUPPORT
+ msg->msg.l2tpcreate.secret,
+ msg->msg.l2tpcreate.secret_len,
+#else /* PPPOL2TP_AUTH_SUPPORT */
+ NULL,
+#endif /* PPPOL2TP_AUTH_SUPPORT */
+ msg->msg.l2tpcreate.link_status_cb, msg->msg.l2tpcreate.ctx_cb);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call pppol2tp_create() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+ppp_pcb*
+pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port,
+ u8_t *secret, u8_t secret_len,
+ ppp_link_status_cb_fn link_status_cb, void *ctx_cb)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_pppol2tp_create;
+ msg.msg.msg.l2tpcreate.pppif = pppif;
+ msg.msg.msg.l2tpcreate.netif = netif;
+ msg.msg.msg.l2tpcreate.ipaddr = ipaddr;
+ msg.msg.msg.l2tpcreate.port = port;
+#if PPPOL2TP_AUTH_SUPPORT
+ msg.msg.msg.l2tpcreate.secret = secret;
+ msg.msg.msg.l2tpcreate.secret_len = secret_len;
+#endif /* PPPOL2TP_AUTH_SUPPORT */
+ msg.msg.msg.l2tpcreate.link_status_cb = link_status_cb;
+ msg.msg.msg.l2tpcreate.ctx_cb = ctx_cb;
+ TCPIP_PPPAPI(&msg);
+ return msg.msg.ppp;
+}
+
+
+#if LWIP_IPV6
+/**
+ * Call pppol2tp_create_ip6() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_pppol2tp_create_ip6(struct pppapi_msg_msg *msg)
+{
+ msg->ppp = pppol2tp_create_ip6(msg->msg.l2tpcreateip6.pppif,
+ msg->msg.l2tpcreateip6.netif, msg->msg.l2tpcreateip6.ip6addr, msg->msg.l2tpcreateip6.port,
+#if PPPOL2TP_AUTH_SUPPORT
+ msg->msg.l2tpcreateip6.secret,
+ msg->msg.l2tpcreateip6.secret_len,
+#else /* PPPOL2TP_AUTH_SUPPORT */
+ NULL,
+#endif /* PPPOL2TP_AUTH_SUPPORT */
+ msg->msg.l2tpcreateip6.link_status_cb, msg->msg.l2tpcreateip6.ctx_cb);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call pppol2tp_create_ip6() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+ppp_pcb*
+pppapi_pppol2tp_create_ip6(struct netif *pppif, struct netif *netif, ip6_addr_t *ip6addr, u16_t port,
+ u8_t *secret, u8_t secret_len,
+ ppp_link_status_cb_fn link_status_cb, void *ctx_cb)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_pppol2tp_create_ip6;
+ msg.msg.msg.l2tpcreateip6.pppif = pppif;
+ msg.msg.msg.l2tpcreateip6.netif = netif;
+ msg.msg.msg.l2tpcreateip6.ip6addr = ip6addr;
+ msg.msg.msg.l2tpcreateip6.port = port;
+#if PPPOL2TP_AUTH_SUPPORT
+ msg.msg.msg.l2tpcreateip6.secret = secret;
+ msg.msg.msg.l2tpcreateip6.secret_len = secret_len;
+#endif /* PPPOL2TP_AUTH_SUPPORT */
+ msg.msg.msg.l2tpcreateip6.link_status_cb = link_status_cb;
+ msg.msg.msg.l2tpcreateip6.ctx_cb = ctx_cb;
+ TCPIP_PPPAPI(&msg);
+ return msg.msg.ppp;
+}
+#endif /* LWIP_IPV6 */
+#endif /* PPPOL2TP_SUPPORT */
+
+
+/**
+ * Call ppp_connect() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_ppp_connect(struct pppapi_msg_msg *msg)
+{
+ msg->err = ppp_connect(msg->ppp, msg->msg.connect.holdoff);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call ppp_connect() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+err_t
+pppapi_connect(ppp_pcb *pcb, u16_t holdoff)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_ppp_connect;
+ msg.msg.ppp = pcb;
+ msg.msg.msg.connect.holdoff = holdoff;
+ TCPIP_PPPAPI(&msg);
+ return msg.msg.err;
+}
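A PPPoS bring-up sketch using the thread-safe wrappers above (illustrative; requires PPPOS_SUPPORT, and the serial descriptor, credentials and callback body are placeholders):

#include "lwip/pppapi.h"
#include "lwip/sio.h"

static struct netif ppp_netif;
static ppp_pcb *ppp;

/* called from tcpip_thread whenever the PPP link status changes */
static void ppp_status_cb(ppp_pcb *pcb, int err_code, void *ctx)
{
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(ctx);
  if (err_code == PPPERR_NONE) {
    /* connected: ppp_netif now has an address */
  }
}

static void start_ppp(sio_fd_t serial_fd)
{
  ppp = pppapi_pppos_create(&ppp_netif, serial_fd, ppp_status_cb, NULL);
  if (ppp == NULL) {
    return;
  }
  pppapi_set_default(ppp);
  pppapi_set_auth(ppp, PPPAUTHTYPE_PAP, "user", "password");
  pppapi_connect(ppp, 0);   /* holdoff = 0: start connecting immediately */
}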
+
+
+#if PPP_SERVER
+/**
+ * Call ppp_listen() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_ppp_listen(struct pppapi_msg_msg *msg)
+{
+ msg->err = ppp_listen(msg->ppp, msg->msg.listen.addrs);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call ppp_listen() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+err_t
+pppapi_listen(ppp_pcb *pcb, struct ppp_addrs *addrs)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_ppp_listen;
+ msg.msg.ppp = pcb;
+ msg.msg.msg.listen.addrs = addrs;
+ TCPIP_PPPAPI(&msg);
+ return msg.msg.err;
+}
+#endif /* PPP_SERVER */
+
+
+/**
+ * Call ppp_close() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_ppp_close(struct pppapi_msg_msg *msg)
+{
+ msg->err = ppp_close(msg->ppp, msg->msg.close.nocarrier);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call ppp_close() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+err_t
+pppapi_close(ppp_pcb *pcb, u8_t nocarrier)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_ppp_close;
+ msg.msg.ppp = pcb;
+ msg.msg.msg.close.nocarrier = nocarrier;
+ TCPIP_PPPAPI(&msg);
+ return msg.msg.err;
+}
+
+
+/**
+ * Call ppp_free() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_ppp_free(struct pppapi_msg_msg *msg)
+{
+ msg->err = ppp_free(msg->ppp);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call ppp_free() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+err_t
+pppapi_free(ppp_pcb *pcb)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_ppp_free;
+ msg.msg.ppp = pcb;
+ TCPIP_PPPAPI(&msg);
+ return msg.msg.err;
+}
+
+
+/**
+ * Call ppp_ioctl() inside the tcpip_thread context.
+ */
+static void
+pppapi_do_ppp_ioctl(struct pppapi_msg_msg *msg)
+{
+ msg->err = ppp_ioctl(msg->ppp, msg->msg.ioctl.cmd, msg->msg.ioctl.arg);
+ TCPIP_PPPAPI_ACK(msg);
+}
+
+/**
+ * Call ppp_ioctl() in a thread-safe way by running that function inside the
+ * tcpip_thread context.
+ */
+err_t
+pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg)
+{
+ struct pppapi_msg msg;
+ msg.function = pppapi_do_ppp_ioctl;
+ msg.msg.ppp = pcb;
+ msg.msg.msg.ioctl.cmd = cmd;
+ msg.msg.msg.ioctl.arg = arg;
+ TCPIP_PPPAPI(&msg);
+ return msg.msg.err;
+}
+
+
+#endif /* LWIP_PPP_API */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/sockets.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/sockets.c
new file mode 100644
index 0000000..b763248
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/sockets.c
@@ -0,0 +1,2827 @@
+/**
+ * @file
+ * Sockets BSD-Like API module
+ *
+ * @defgroup socket Socket API
+ * @ingroup sequential_api
+ * BSD-style socket API.\n
+ * Thread-safe, to be called from non-TCPIP threads only.\n
+ * Can be activated by defining @ref LWIP_SOCKET to 1.\n
+ * Header is in posix/sys/socket.h\n
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/sockets.h"
+#include "lwip/api.h"
+#include "lwip/sys.h"
+#include "lwip/igmp.h"
+#include "lwip/inet.h"
+#include "lwip/tcp.h"
+#include "lwip/raw.h"
+#include "lwip/udp.h"
+#include "lwip/memp.h"
+#include "lwip/pbuf.h"
+#include "lwip/priv/tcpip_priv.h"
+#if LWIP_CHECKSUM_ON_COPY
+#include "lwip/inet_chksum.h"
+#endif
+
+#include <string.h>
+
+/* If the netconn API is not required publicly, then we include the necessary
+ files here to get the implementation */
+#if !LWIP_NETCONN
+#undef LWIP_NETCONN
+#define LWIP_NETCONN 1
+#include "api_msg.c"
+#include "api_lib.c"
+#include "netbuf.c"
+#undef LWIP_NETCONN
+#define LWIP_NETCONN 0
+#endif
+
+#if LWIP_IPV4
+#define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
+ (sin)->sin_len = sizeof(struct sockaddr_in); \
+ (sin)->sin_family = AF_INET; \
+ (sin)->sin_port = lwip_htons((port)); \
+ inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
+ memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
+#define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
+ inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
+ (port) = lwip_ntohs((sin)->sin_port); }while(0)
+#endif /* LWIP_IPV4 */
+
+#if LWIP_IPV6
+#define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
+ (sin6)->sin6_len = sizeof(struct sockaddr_in6); \
+ (sin6)->sin6_family = AF_INET6; \
+ (sin6)->sin6_port = lwip_htons((port)); \
+ (sin6)->sin6_flowinfo = 0; \
+ inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
+ (sin6)->sin6_scope_id = 0; }while(0)
+#define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
+ inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
+ (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
+#endif /* LWIP_IPV6 */
+
+#if LWIP_IPV4 && LWIP_IPV6
+static void sockaddr_to_ipaddr_port(const struct sockaddr* sockaddr, ip_addr_t* ipaddr, u16_t* port);
+
+#define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
+ ((namelen) == sizeof(struct sockaddr_in6)))
+#define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
+ ((name)->sa_family == AF_INET6))
+#define SOCK_ADDR_TYPE_MATCH(name, sock) \
+ ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
+ (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
+#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
+ if (IP_IS_V6(ipaddr)) { \
+ IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
+ } else { \
+ IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
+ } } while(0)
+#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
+#define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
+ (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
+#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
+#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
+#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
+#define SOCK_ADDR_TYPE_MATCH(name, sock) 1
+#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
+ IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
+#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
+ SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
+#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
+#else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
+#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
+#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
+#define SOCK_ADDR_TYPE_MATCH(name, sock) 1
+#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
+ IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
+#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
+ SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
+#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
+#endif /* LWIP_IPV6 */
+
+#define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
+ IS_SOCK_ADDR_TYPE_VALID(name))
+#define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
+ SOCK_ADDR_TYPE_MATCH(name, sock))
+#define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0)
+
+
+#define LWIP_SOCKOPT_CHECK_OPTLEN(optlen, opttype) do { if ((optlen) < sizeof(opttype)) { return EINVAL; }}while(0)
+#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
+ LWIP_SOCKOPT_CHECK_OPTLEN(optlen, opttype); \
+ if ((sock)->conn == NULL) { return EINVAL; } }while(0)
+#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
+ LWIP_SOCKOPT_CHECK_OPTLEN(optlen, opttype); \
+ if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { return EINVAL; } }while(0)
+#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
+ if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { return ENOPROTOOPT; } }while(0)
+
+
+#define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
+#define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
+#define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
+#if LWIP_MPU_COMPATIBLE
+#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
+ name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
+ if (name == NULL) { \
+ sock_set_errno(sock, ENOMEM); \
+ return -1; \
+ } }while(0)
+#else /* LWIP_MPU_COMPATIBLE */
+#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
+#endif /* LWIP_MPU_COMPATIBLE */
+
+#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
+#define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
+#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
+#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((s32_t)*(const int*)(optval))
+#else
+#define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
+#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
+ s32_t loc = (val); \
+ ((struct timeval *)(optval))->tv_sec = (loc) / 1000U; \
+ ((struct timeval *)(optval))->tv_usec = ((loc) % 1000U) * 1000U; }while(0)
+#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000U) + (((const struct timeval *)(optval))->tv_usec / 1000U))
+#endif
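How the two timeout representations are used from application code (illustrative sketch; assumes LWIP_SO_RCVTIMEO is enabled):

#include "lwip/sockets.h"

/* Set a 5 second receive timeout on socket s. */
static void set_rx_timeout(int s)
{
#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
  int timeout_ms = 5000;            /* non-standard: plain integer, milliseconds */
  lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &timeout_ms, sizeof(timeout_ms));
#else
  struct timeval tv;                /* standard BSD representation */
  tv.tv_sec = 5;
  tv.tv_usec = 0;
  lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
#endif
}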
+
+#define NUM_SOCKETS MEMP_NUM_NETCONN
+
+/** This is overridable for the rare case where more than 255 threads
+ * select on the same socket...
+ */
+#ifndef SELWAIT_T
+#define SELWAIT_T u8_t
+#endif
+
+/** Contains all internal pointers and states used for a socket */
+struct lwip_sock {
+ /** sockets currently are built on netconns, each socket has one netconn */
+ struct netconn *conn;
+ /** data that was left from the previous read */
+ void *lastdata;
+ /** offset in the data that was left from the previous read */
+ u16_t lastoffset;
+ /** number of times data was received, set by event_callback(),
+ tested by the receive and select functions */
+ s16_t rcvevent;
+ /** number of times data was ACKed (free send buffer), set by event_callback(),
+ tested by select */
+ u16_t sendevent;
+ /** error happened for this socket, set by event_callback(), tested by select */
+ u16_t errevent;
+  /** last error that occurred on this socket (in fact, all our errnos fit into a u8_t) */
+ u8_t err;
+ /** counter of how many threads are waiting for this socket using select */
+ SELWAIT_T select_waiting;
+};
+
+#if LWIP_NETCONN_SEM_PER_THREAD
+#define SELECT_SEM_T sys_sem_t*
+#define SELECT_SEM_PTR(sem) (sem)
+#else /* LWIP_NETCONN_SEM_PER_THREAD */
+#define SELECT_SEM_T sys_sem_t
+#define SELECT_SEM_PTR(sem) (&(sem))
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+
+/** Description for a task waiting in select */
+struct lwip_select_cb {
+ /** Pointer to the next waiting task */
+ struct lwip_select_cb *next;
+ /** Pointer to the previous waiting task */
+ struct lwip_select_cb *prev;
+ /** readset passed to select */
+ fd_set *readset;
+ /** writeset passed to select */
+ fd_set *writeset;
+ /** unimplemented: exceptset passed to select */
+ fd_set *exceptset;
+ /** don't signal the same semaphore twice: set to 1 when signalled */
+ int sem_signalled;
+ /** semaphore to wake up a task waiting for select */
+ SELECT_SEM_T sem;
+};
+
+/** A struct sockaddr replacement that has the same alignment as sockaddr_in/
+ * sockaddr_in6 if instantiated.
+ */
+union sockaddr_aligned {
+ struct sockaddr sa;
+#if LWIP_IPV6
+ struct sockaddr_in6 sin6;
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4
+ struct sockaddr_in sin;
+#endif /* LWIP_IPV4 */
+};
+
+#if LWIP_IGMP
+/* Define the number of IPv4 multicast memberships, default is one per socket */
+#ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
+#define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
+#endif
+
+/* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
+ a socket is closed */
+struct lwip_socket_multicast_pair {
+ /** the socket */
+ struct lwip_sock* sock;
+ /** the interface address */
+ ip4_addr_t if_addr;
+ /** the group address */
+ ip4_addr_t multi_addr;
+};
+
+struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
+
+static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
+static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
+static void lwip_socket_drop_registered_memberships(int s);
+#endif /* LWIP_IGMP */
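The membership table above is filled through setsockopt(); a joining sketch (illustrative, requires LWIP_IGMP; the group address is an example):

#include "lwip/sockets.h"
#include "lwip/inet.h"

/* Join an IPv4 multicast group on a UDP socket. */
static int join_group(int s, const char *group_ip)
{
  struct ip_mreq mreq;
  mreq.imr_multiaddr.s_addr = inet_addr(group_ip);      /* e.g. "239.0.0.1" */
  mreq.imr_interface.s_addr = lwip_htonl(INADDR_ANY);   /* default interface */
  /* recorded in socket_ipv4_multicast_memberships[] and dropped on close */
  return lwip_setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
}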
+
+/** The global array of available sockets */
+static struct lwip_sock sockets[NUM_SOCKETS];
+/** The global list of tasks waiting for select */
+static struct lwip_select_cb *select_cb_list;
+/** This counter is increased from lwip_select when the list is changed
+ and checked in event_callback to see if it has changed. */
+static volatile int select_cb_ctr;
+
+#if LWIP_SOCKET_SET_ERRNO
+#ifndef set_errno
+#define set_errno(err) do { if (err) { errno = (err); } } while(0)
+#endif
+#else /* LWIP_SOCKET_SET_ERRNO */
+#define set_errno(err)
+#endif /* LWIP_SOCKET_SET_ERRNO */
+
+#define sock_set_errno(sk, e) do { \
+ const int sockerr = (e); \
+ sk->err = (u8_t)sockerr; \
+ set_errno(sockerr); \
+} while (0)
+
+/* Forward declaration of some functions */
+static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
+#if !LWIP_TCPIP_CORE_LOCKING
+static void lwip_getsockopt_callback(void *arg);
+static void lwip_setsockopt_callback(void *arg);
+#endif
+static u8_t lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
+static u8_t lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
+
+#if LWIP_IPV4 && LWIP_IPV6
+static void
+sockaddr_to_ipaddr_port(const struct sockaddr* sockaddr, ip_addr_t* ipaddr, u16_t* port)
+{
+ if ((sockaddr->sa_family) == AF_INET6) {
+ SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, *port);
+ ipaddr->type = IPADDR_TYPE_V6;
+ } else {
+ SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, *port);
+ ipaddr->type = IPADDR_TYPE_V4;
+ }
+}
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */
+void
+lwip_socket_thread_init(void)
+{
+ netconn_thread_init();
+}
+
+/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */
+void
+lwip_socket_thread_cleanup(void)
+{
+ netconn_thread_cleanup();
+}
+
+/**
+ * Map an externally used socket index to the internal socket representation.
+ *
+ * @param s externally used socket index
+ * @return struct lwip_sock for the socket or NULL if not found
+ */
+static struct lwip_sock *
+get_socket(int s)
+{
+ struct lwip_sock *sock;
+
+ s -= LWIP_SOCKET_OFFSET;
+
+ if ((s < 0) || (s >= NUM_SOCKETS)) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", s + LWIP_SOCKET_OFFSET));
+ set_errno(EBADF);
+ return NULL;
+ }
+
+ sock = &sockets[s];
+
+ if (!sock->conn) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): not active\n", s + LWIP_SOCKET_OFFSET));
+ set_errno(EBADF);
+ return NULL;
+ }
+
+ return sock;
+}
+
+/**
+ * Same as get_socket but doesn't set errno
+ *
+ * @param s externally used socket index
+ * @return struct lwip_sock for the socket or NULL if not found
+ */
+static struct lwip_sock *
+tryget_socket(int s)
+{
+ s -= LWIP_SOCKET_OFFSET;
+ if ((s < 0) || (s >= NUM_SOCKETS)) {
+ return NULL;
+ }
+ if (!sockets[s].conn) {
+ return NULL;
+ }
+ return &sockets[s];
+}
+
+/**
+ * Allocate a new socket for a given netconn.
+ *
+ * @param newconn the netconn for which to allocate a socket
+ * @param accepted 1 if socket has been created by accept(),
+ * 0 if socket has been created by socket()
+ * @return the index of the new socket; -1 on error
+ */
+static int
+alloc_socket(struct netconn *newconn, int accepted)
+{
+ int i;
+ SYS_ARCH_DECL_PROTECT(lev);
+
+ /* allocate a new socket identifier */
+ for (i = 0; i < NUM_SOCKETS; ++i) {
+ /* Protect socket array */
+ SYS_ARCH_PROTECT(lev);
+ if (!sockets[i].conn && (sockets[i].select_waiting == 0)) {
+ sockets[i].conn = newconn;
+ /* The socket is not yet known to anyone, so no need to protect
+ after having marked it as used. */
+ SYS_ARCH_UNPROTECT(lev);
+ sockets[i].lastdata = NULL;
+ sockets[i].lastoffset = 0;
+ sockets[i].rcvevent = 0;
+ /* TCP sendbuf is empty, but the socket is not yet writable until connected
+ * (unless it has been created by accept()). */
+ sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
+ sockets[i].errevent = 0;
+ sockets[i].err = 0;
+ return i + LWIP_SOCKET_OFFSET;
+ }
+ SYS_ARCH_UNPROTECT(lev);
+ }
+ return -1;
+}
+
+/** Free a socket. The socket's netconn must have been
+ * deleted before!
+ *
+ * @param sock the socket to free
+ * @param is_tcp != 0 for TCP sockets, used to free lastdata
+ */
+static void
+free_socket(struct lwip_sock *sock, int is_tcp)
+{
+ void *lastdata;
+
+ lastdata = sock->lastdata;
+ sock->lastdata = NULL;
+ sock->lastoffset = 0;
+ sock->err = 0;
+
+ /* Protect socket array */
+ SYS_ARCH_SET(sock->conn, NULL);
+ /* don't use 'sock' after this line, as another task might have allocated it */
+
+ if (lastdata != NULL) {
+ if (is_tcp) {
+ pbuf_free((struct pbuf *)lastdata);
+ } else {
+ netbuf_delete((struct netbuf *)lastdata);
+ }
+ }
+}
+
+/* Below this, the well-known socket functions are implemented.
+ * Use google.com or opengroup.org to get a good description :-)
+ *
+ * Exceptions are documented!
+ */
+
+int
+lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
+{
+ struct lwip_sock *sock, *nsock;
+ struct netconn *newconn;
+ ip_addr_t naddr;
+ u16_t port = 0;
+ int newsock;
+ err_t err;
+ SYS_ARCH_DECL_PROTECT(lev);
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ if (netconn_is_nonblocking(sock->conn) && (sock->rcvevent <= 0)) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): returning EWOULDBLOCK\n", s));
+ set_errno(EWOULDBLOCK);
+ return -1;
+ }
+
+ /* wait for a new connection */
+ err = netconn_accept(sock->conn, &newconn);
+ if (err != ERR_OK) {
+    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_accept failed, err=%d\n", s, err));
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
+ sock_set_errno(sock, EOPNOTSUPP);
+ } else if (err == ERR_CLSD) {
+ sock_set_errno(sock, EINVAL);
+ } else {
+ sock_set_errno(sock, err_to_errno(err));
+ }
+ return -1;
+ }
+ LWIP_ASSERT("newconn != NULL", newconn != NULL);
+
+ newsock = alloc_socket(newconn, 1);
+ if (newsock == -1) {
+ netconn_delete(newconn);
+ sock_set_errno(sock, ENFILE);
+ return -1;
+ }
+ LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
+ LWIP_ASSERT("newconn->callback == event_callback", newconn->callback == event_callback);
+ nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
+
+  /* See event_callback: data may come in right away after the accept, before
+   * the server task has had a chance to create the new socket.
+ * In that case, newconn->socket is counted down (newconn->socket--),
+ * so nsock->rcvevent is >= 1 here!
+ */
+ SYS_ARCH_PROTECT(lev);
+ nsock->rcvevent += (s16_t)(-1 - newconn->socket);
+ newconn->socket = newsock;
+ SYS_ARCH_UNPROTECT(lev);
+
+ /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
+ * not be NULL if addr is valid.
+ */
+ if (addr != NULL) {
+ union sockaddr_aligned tempaddr;
+ /* get the IP address and port of the remote host */
+ err = netconn_peer(newconn, &naddr, &port);
+ if (err != ERR_OK) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
+ netconn_delete(newconn);
+ free_socket(nsock, 1);
+ sock_set_errno(sock, err_to_errno(err));
+ return -1;
+ }
+ LWIP_ASSERT("addr valid but addrlen NULL", addrlen != NULL);
+
+ IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
+ if (*addrlen > tempaddr.sa.sa_len) {
+ *addrlen = tempaddr.sa.sa_len;
+ }
+ MEMCPY(addr, &tempaddr, *addrlen);
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
+ ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
+ LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
+ } else {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock));
+ }
+
+ sock_set_errno(sock, 0);
+ return newsock;
+}
+
+int
+lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
+{
+ struct lwip_sock *sock;
+ ip_addr_t local_addr;
+ u16_t local_port;
+ err_t err;
+
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
+ /* sockaddr does not match socket type (IPv4/IPv6) */
+ sock_set_errno(sock, err_to_errno(ERR_VAL));
+ return -1;
+ }
+
+ /* check size, family and alignment of 'name' */
+ LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
+ IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
+ sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;);
+ LWIP_UNUSED_ARG(namelen);
+
+ SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
+ ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
+ LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));
+
+#if LWIP_IPV4 && LWIP_IPV6
+ /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
+ if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
+ unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
+ IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+ err = netconn_bind(sock->conn, &local_addr, local_port);
+
+ if (err != ERR_OK) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
+ sock_set_errno(sock, err_to_errno(err));
+ return -1;
+ }
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
+ sock_set_errno(sock, 0);
+ return 0;
+}
+
+int
+lwip_close(int s)
+{
+ struct lwip_sock *sock;
+ int is_tcp = 0;
+ err_t err;
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));
+
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ if (sock->conn != NULL) {
+ is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
+ } else {
+ LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata == NULL);
+ }
+
+#if LWIP_IGMP
+ /* drop all possibly joined IGMP memberships */
+ lwip_socket_drop_registered_memberships(s);
+#endif /* LWIP_IGMP */
+
+ err = netconn_delete(sock->conn);
+ if (err != ERR_OK) {
+ sock_set_errno(sock, err_to_errno(err));
+ return -1;
+ }
+
+ free_socket(sock, is_tcp);
+ set_errno(0);
+ return 0;
+}
+
+int
+lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
+{
+ struct lwip_sock *sock;
+ err_t err;
+
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
+ /* sockaddr does not match socket type (IPv4/IPv6) */
+ sock_set_errno(sock, err_to_errno(ERR_VAL));
+ return -1;
+ }
+
+ LWIP_UNUSED_ARG(namelen);
+ if (name->sa_family == AF_UNSPEC) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
+ err = netconn_disconnect(sock->conn);
+ } else {
+ ip_addr_t remote_addr;
+ u16_t remote_port;
+
+ /* check size, family and alignment of 'name' */
+ LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
+ IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
+ sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;);
+
+ SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
+ ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
+ LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));
+
+#if LWIP_IPV4 && LWIP_IPV6
+ /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
+ if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
+ unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
+ IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+ err = netconn_connect(sock->conn, &remote_addr, remote_port);
+ }
+
+ if (err != ERR_OK) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
+ sock_set_errno(sock, err_to_errno(err));
+ return -1;
+ }
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
+ sock_set_errno(sock, 0);
+ return 0;
+}
+
+/**
+ * Set a socket into listen mode.
+ * The socket may not have been used for another connection previously.
+ *
+ * @param s the socket to set to listening mode
+ * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
+ * @return 0 on success, non-zero on failure
+ */
+int
+lwip_listen(int s, int backlog)
+{
+ struct lwip_sock *sock;
+ err_t err;
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
+
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+  /* limit the "backlog" parameter to fit in a u8_t */
+ backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
+
+ err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
+
+ if (err != ERR_OK) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
+ sock_set_errno(sock, EOPNOTSUPP);
+ return -1;
+ }
+ sock_set_errno(sock, err_to_errno(err));
+ return -1;
+ }
+
+ sock_set_errno(sock, 0);
+ return 0;
+}
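Putting bind/listen/accept together, a minimal blocking echo-server sketch (illustrative; error handling trimmed, port number arbitrary):

#include <string.h>
#include "lwip/sockets.h"

static void tcp_echo_server(void)
{
  int srv, cli, n;
  char buf[128];
  struct sockaddr_in addr;

  srv = lwip_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_port = lwip_htons(7);                 /* echo port */
  addr.sin_addr.s_addr = lwip_htonl(INADDR_ANY);
  lwip_bind(srv, (struct sockaddr *)&addr, sizeof(addr));
  lwip_listen(srv, 1);                           /* backlog of 1 */

  for (;;) {
    cli = lwip_accept(srv, NULL, NULL);          /* addr/addrlen may be NULL */
    if (cli < 0) {
      continue;
    }
    while ((n = lwip_recv(cli, buf, sizeof(buf), 0)) > 0) {
      lwip_send(cli, buf, n, 0);                 /* echo back */
    }
    lwip_close(cli);
  }
}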
+
+int
+lwip_recvfrom(int s, void *mem, size_t len, int flags,
+ struct sockaddr *from, socklen_t *fromlen)
+{
+ struct lwip_sock *sock;
+ void *buf = NULL;
+ struct pbuf *p;
+ u16_t buflen, copylen;
+ int off = 0;
+ u8_t done = 0;
+ err_t err;
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ do {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: top while sock->lastdata=%p\n", sock->lastdata));
+ /* Check if there is data left from the last recv operation. */
+ if (sock->lastdata) {
+ buf = sock->lastdata;
+ } else {
+ /* If this is non-blocking call, then check first */
+ if (((flags & MSG_DONTWAIT) || netconn_is_nonblocking(sock->conn)) &&
+ (sock->rcvevent <= 0)) {
+ if (off > 0) {
+ /* already received data, return that */
+ sock_set_errno(sock, 0);
+ return off;
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d): returning EWOULDBLOCK\n", s));
+ set_errno(EWOULDBLOCK);
+ return -1;
+ }
+
+ /* No data was left from the previous operation, so we try to get
+ some from the network. */
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
+ err = netconn_recv_tcp_pbuf(sock->conn, (struct pbuf **)&buf);
+ } else {
+ err = netconn_recv(sock->conn, (struct netbuf **)&buf);
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: netconn_recv err=%d, netbuf=%p\n",
+ err, buf));
+
+ if (err != ERR_OK) {
+ if (off > 0) {
+ if (err == ERR_CLSD) {
+ /* closed but already received data, ensure select gets the FIN, too */
+ event_callback(sock->conn, NETCONN_EVT_RCVPLUS, 0);
+ }
+ /* already received data, return that */
+ sock_set_errno(sock, 0);
+ return off;
+ }
+ /* We should really do some error checking here. */
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d): buf == NULL, error is \"%s\"!\n",
+ s, lwip_strerr(err)));
+ sock_set_errno(sock, err_to_errno(err));
+ if (err == ERR_CLSD) {
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ LWIP_ASSERT("buf != NULL", buf != NULL);
+ sock->lastdata = buf;
+ }
+
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
+ p = (struct pbuf *)buf;
+ } else {
+ p = ((struct netbuf *)buf)->p;
+ }
+ buflen = p->tot_len;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: buflen=%"U16_F" len=%"SZT_F" off=%d sock->lastoffset=%"U16_F"\n",
+ buflen, len, off, sock->lastoffset));
+
+ buflen -= sock->lastoffset;
+
+ if (len > buflen) {
+ copylen = buflen;
+ } else {
+ copylen = (u16_t)len;
+ }
+
+ /* copy the contents of the received buffer into
+ the supplied memory pointer mem */
+ pbuf_copy_partial(p, (u8_t*)mem + off, copylen, sock->lastoffset);
+
+ off += copylen;
+
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
+ LWIP_ASSERT("invalid copylen, len would underflow", len >= copylen);
+ len -= copylen;
+ if ((len <= 0) ||
+ (p->flags & PBUF_FLAG_PUSH) ||
+ (sock->rcvevent <= 0) ||
+ ((flags & MSG_PEEK) != 0)) {
+ done = 1;
+ }
+ } else {
+ done = 1;
+ }
+
+    /* Check where the data came from. */
+ if (done) {
+#if !SOCKETS_DEBUG
+ if (from && fromlen)
+#endif /* !SOCKETS_DEBUG */
+ {
+ u16_t port;
+ ip_addr_t tmpaddr;
+ ip_addr_t *fromaddr;
+ union sockaddr_aligned saddr;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d): addr=", s));
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
+ fromaddr = &tmpaddr;
+ netconn_getaddr(sock->conn, fromaddr, &port, 0);
+ } else {
+ port = netbuf_fromport((struct netbuf *)buf);
+ fromaddr = netbuf_fromaddr((struct netbuf *)buf);
+ }
+
+#if LWIP_IPV4 && LWIP_IPV6
+ /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
+ if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) && IP_IS_V4(fromaddr)) {
+ ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
+ IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+ IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
+ ip_addr_debug_print(SOCKETS_DEBUG, fromaddr);
+ LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, off));
+#if SOCKETS_DEBUG
+ if (from && fromlen)
+#endif /* SOCKETS_DEBUG */
+ {
+ if (*fromlen > saddr.sa.sa_len) {
+ *fromlen = saddr.sa.sa_len;
+ }
+ MEMCPY(from, &saddr, *fromlen);
+ }
+ }
+ }
+
+ /* If we don't peek the incoming message... */
+ if ((flags & MSG_PEEK) == 0) {
+ /* If this is a TCP socket, check if there is data left in the
+ buffer. If so, it should be saved in the sock structure for next
+ time around. */
+ if ((NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) && (buflen - copylen > 0)) {
+ sock->lastdata = buf;
+ sock->lastoffset += copylen;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: lastdata now netbuf=%p\n", buf));
+ } else {
+ sock->lastdata = NULL;
+ sock->lastoffset = 0;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: deleting netbuf=%p\n", buf));
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
+ pbuf_free((struct pbuf *)buf);
+ } else {
+ netbuf_delete((struct netbuf *)buf);
+ }
+ buf = NULL;
+ }
+ }
+ } while (!done);
+
+ sock_set_errno(sock, 0);
+ return off;
+}
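A receive-side sketch for UDP showing how the peer address comes back (illustrative; IPv4 only, socket s assumed to be created and bound already):

#include <string.h>
#include "lwip/sockets.h"

static void udp_receive_once(int s)
{
  char buf[256];
  struct sockaddr_in from;
  socklen_t fromlen = sizeof(from);
  int n;

  n = lwip_recvfrom(s, buf, sizeof(buf), 0,
                    (struct sockaddr *)&from, &fromlen);
  if (n >= 0) {
    /* from.sin_addr / from.sin_port now identify the sender; a single call
       returns at most one datagram for UDP sockets */
  }
}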
+
+int
+lwip_read(int s, void *mem, size_t len)
+{
+ return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
+}
+
+int
+lwip_recv(int s, void *mem, size_t len, int flags)
+{
+ return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
+}
+
+int
+lwip_send(int s, const void *data, size_t size, int flags)
+{
+ struct lwip_sock *sock;
+ err_t err;
+ u8_t write_flags;
+ size_t written;
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
+ s, data, size, flags));
+
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
+#if (LWIP_UDP || LWIP_RAW)
+ return lwip_sendto(s, data, size, flags, NULL, 0);
+#else /* (LWIP_UDP || LWIP_RAW) */
+ sock_set_errno(sock, err_to_errno(ERR_ARG));
+ return -1;
+#endif /* (LWIP_UDP || LWIP_RAW) */
+ }
+
+ write_flags = NETCONN_COPY |
+ ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
+ ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0);
+ written = 0;
+ err = netconn_write_partly(sock->conn, data, size, write_flags, &written);
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
+ sock_set_errno(sock, err_to_errno(err));
+ return (err == ERR_OK ? (int)written : -1);
+}
+
+int
+lwip_sendmsg(int s, const struct msghdr *msg, int flags)
+{
+ struct lwip_sock *sock;
+ int i;
+#if LWIP_TCP
+ u8_t write_flags;
+ size_t written;
+#endif
+ int size = 0;
+ err_t err = ERR_OK;
+
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
+ sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;);
+
+ LWIP_UNUSED_ARG(msg->msg_control);
+ LWIP_UNUSED_ARG(msg->msg_controllen);
+ LWIP_UNUSED_ARG(msg->msg_flags);
+ LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", (msg->msg_iov != NULL && msg->msg_iovlen != 0),
+ sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;);
+
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
+#if LWIP_TCP
+ write_flags = NETCONN_COPY |
+ ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
+ ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0);
+
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ u8_t apiflags = write_flags;
+ if (i + 1 < msg->msg_iovlen) {
+ apiflags |= NETCONN_MORE;
+ }
+ written = 0;
+      err = netconn_write_partly(sock->conn, msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len, apiflags, &written);
+ if (err == ERR_OK) {
+ size += written;
+        /* check that the entire IO vector was accepted, if not return a partial write */
+ if (written != msg->msg_iov[i].iov_len)
+ break;
+ }
+ /* none of this IO vector was accepted, but previous was, return partial write and conceal ERR_WOULDBLOCK */
+ else if (err == ERR_WOULDBLOCK && size > 0) {
+ err = ERR_OK;
+ /* let ERR_WOULDBLOCK persist on the netconn since we are returning ERR_OK */
+ break;
+ } else {
+ size = -1;
+ break;
+ }
+ }
+ sock_set_errno(sock, err_to_errno(err));
+ return size;
+#else /* LWIP_TCP */
+ sock_set_errno(sock, err_to_errno(ERR_ARG));
+ return -1;
+#endif /* LWIP_TCP */
+ }
+ /* else, UDP and RAW NETCONNs */
+#if LWIP_UDP || LWIP_RAW
+ {
+ struct netbuf *chain_buf;
+
+ LWIP_UNUSED_ARG(flags);
+ LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
+ IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)) ,
+ sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;);
+
+ /* initialize chain buffer with destination */
+ chain_buf = netbuf_new();
+ if (!chain_buf) {
+ sock_set_errno(sock, err_to_errno(ERR_MEM));
+ return -1;
+ }
+ if (msg->msg_name) {
+ u16_t remote_port;
+ SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf->addr, remote_port);
+ netbuf_fromport(chain_buf) = remote_port;
+ }
+#if LWIP_NETIF_TX_SINGLE_PBUF
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ size += msg->msg_iov[i].iov_len;
+ }
+ /* Allocate a new netbuf and copy the data into it. */
+ if (netbuf_alloc(chain_buf, (u16_t)size) == NULL) {
+ err = ERR_MEM;
+ } else {
+ /* flatten the IO vectors */
+ size_t offset = 0;
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ MEMCPY(&((u8_t*)chain_buf->p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
+ offset += msg->msg_iov[i].iov_len;
+ }
+#if LWIP_CHECKSUM_ON_COPY
+ {
+ /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
+ u16_t chksum = ~inet_chksum_pbuf(chain_buf->p);
+ netbuf_set_chksum(chain_buf, chksum);
+ }
+#endif /* LWIP_CHECKSUM_ON_COPY */
+ err = ERR_OK;
+ }
+#else /* LWIP_NETIF_TX_SINGLE_PBUF */
+ /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
+ manually to avoid having to allocate, chain, and delete a netbuf for each iov */
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
+ if (p == NULL) {
+ err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
+ break;
+ }
+ p->payload = msg->msg_iov[i].iov_base;
+ LWIP_ASSERT("iov_len < u16_t", msg->msg_iov[i].iov_len <= 0xFFFF);
+ p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
+ /* netbuf empty, add new pbuf */
+ if (chain_buf->p == NULL) {
+ chain_buf->p = chain_buf->ptr = p;
+ /* add pbuf to existing pbuf chain */
+ } else {
+ pbuf_cat(chain_buf->p, p);
+ }
+ }
+ /* save size of total chain */
+ if (err == ERR_OK) {
+ size = netbuf_len(chain_buf);
+ }
+#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
+
+ if (err == ERR_OK) {
+#if LWIP_IPV4 && LWIP_IPV6
+ /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
+ if (IP_IS_V6_VAL(chain_buf->addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf->addr))) {
+ unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf->addr), ip_2_ip6(&chain_buf->addr));
+ IP_SET_TYPE_VAL(chain_buf->addr, IPADDR_TYPE_V4);
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+ /* send the data */
+ err = netconn_send(sock->conn, chain_buf);
+ }
+
+ /* deallocate the buffer */
+ netbuf_delete(chain_buf);
+
+ sock_set_errno(sock, err_to_errno(err));
+ return (err == ERR_OK ? size : -1);
+ }
+#else /* LWIP_UDP || LWIP_RAW */
+ sock_set_errno(sock, err_to_errno(ERR_ARG));
+ return -1;
+#endif /* LWIP_UDP || LWIP_RAW */
+}
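+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It shows how a UDP datagram can be
+ * gathered from two buffers with lwip_sendmsg(). 'sd', the buffers and the
+ * destination address/port are placeholders assumed to exist. */
+#if 0
+static int example_sendmsg_udp(int sd, void *hdr, size_t hdr_len,
+                               void *body, size_t body_len)
+{
+  struct sockaddr_in dst;
+  struct iovec iov[2];
+  struct msghdr mh;
+
+  memset(&dst, 0, sizeof(dst));
+  dst.sin_family = AF_INET;
+  dst.sin_port = htons(5000);                   /* placeholder port */
+  dst.sin_addr.s_addr = inet_addr("192.0.2.1"); /* placeholder address */
+
+  iov[0].iov_base = hdr;
+  iov[0].iov_len  = hdr_len;
+  iov[1].iov_base = body;
+  iov[1].iov_len  = body_len;
+
+  memset(&mh, 0, sizeof(mh));
+  mh.msg_name    = &dst;
+  mh.msg_namelen = sizeof(dst);
+  mh.msg_iov     = iov;
+  mh.msg_iovlen  = 2;
+
+  /* both IO vectors are sent as a single datagram */
+  return lwip_sendmsg(sd, &mh, 0);
+}
+#endif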
+
+int
+lwip_sendto(int s, const void *data, size_t size, int flags,
+ const struct sockaddr *to, socklen_t tolen)
+{
+ struct lwip_sock *sock;
+ err_t err;
+ u16_t short_size;
+ u16_t remote_port;
+ struct netbuf buf;
+
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
+#if LWIP_TCP
+ return lwip_send(s, data, size, flags);
+#else /* LWIP_TCP */
+ LWIP_UNUSED_ARG(flags);
+ sock_set_errno(sock, err_to_errno(ERR_ARG));
+ return -1;
+#endif /* LWIP_TCP */
+ }
+
+ /* @todo: split into multiple sendto's? */
+ LWIP_ASSERT("lwip_sendto: size must fit in u16_t", size <= 0xffff);
+ short_size = (u16_t)size;
+ LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
+ (IS_SOCK_ADDR_LEN_VALID(tolen) &&
+ IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))),
+ sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;);
+ LWIP_UNUSED_ARG(tolen);
+
+ /* initialize a buffer */
+ buf.p = buf.ptr = NULL;
+#if LWIP_CHECKSUM_ON_COPY
+ buf.flags = 0;
+#endif /* LWIP_CHECKSUM_ON_COPY */
+ if (to) {
+ SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
+ } else {
+ remote_port = 0;
+ ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
+ }
+ netbuf_fromport(&buf) = remote_port;
+
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
+ s, data, short_size, flags));
+ ip_addr_debug_print(SOCKETS_DEBUG, &buf.addr);
+ LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));
+
+ /* make the buffer point to the data that should be sent */
+#if LWIP_NETIF_TX_SINGLE_PBUF
+ /* Allocate a new netbuf and copy the data into it. */
+ if (netbuf_alloc(&buf, short_size) == NULL) {
+ err = ERR_MEM;
+ } else {
+#if LWIP_CHECKSUM_ON_COPY
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
+ u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
+ netbuf_set_chksum(&buf, chksum);
+ } else
+#endif /* LWIP_CHECKSUM_ON_COPY */
+ {
+ MEMCPY(buf.p->payload, data, short_size);
+ }
+ err = ERR_OK;
+ }
+#else /* LWIP_NETIF_TX_SINGLE_PBUF */
+ err = netbuf_ref(&buf, data, short_size);
+#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
+ if (err == ERR_OK) {
+#if LWIP_IPV4 && LWIP_IPV6
+ /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
+ if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
+ unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
+ IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+ /* send the data */
+ err = netconn_send(sock->conn, &buf);
+ }
+
+ /* deallocate the buffer */
+ netbuf_free(&buf);
+
+ sock_set_errno(sock, err_to_errno(err));
+ return (err == ERR_OK ? short_size : -1);
+}
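+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. 'sd' is assumed to be an already created
+ * SOCK_DGRAM descriptor; the destination is a placeholder. */
+#if 0
+static int example_sendto(int sd)
+{
+  const char payload[] = "hello";
+  struct sockaddr_in dst;
+
+  memset(&dst, 0, sizeof(dst));
+  dst.sin_family = AF_INET;
+  dst.sin_port = htons(7);                      /* placeholder: echo port */
+  dst.sin_addr.s_addr = inet_addr("192.0.2.1"); /* placeholder address */
+
+  /* the size must fit in u16_t, see the assertion in lwip_sendto() above */
+  return lwip_sendto(sd, payload, sizeof(payload) - 1, 0,
+                     (struct sockaddr *)&dst, sizeof(dst));
+}
+#endif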
+
+int
+lwip_socket(int domain, int type, int protocol)
+{
+ struct netconn *conn;
+ int i;
+
+ LWIP_UNUSED_ARG(domain); /* @todo: check this */
+
+ /* create a netconn */
+ switch (type) {
+ case SOCK_RAW:
+ conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
+ (u8_t)protocol, event_callback);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
+ domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
+ break;
+ case SOCK_DGRAM:
+ conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
+ ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)) ,
+ event_callback);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
+ domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
+ break;
+ case SOCK_STREAM:
+ conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), event_callback);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
+ domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
+ break;
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
+ domain, type, protocol));
+ set_errno(EINVAL);
+ return -1;
+ }
+
+ if (!conn) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
+ set_errno(ENOBUFS);
+ return -1;
+ }
+
+ i = alloc_socket(conn, 0);
+
+ if (i == -1) {
+ netconn_delete(conn);
+ set_errno(ENFILE);
+ return -1;
+ }
+ conn->socket = i;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
+ set_errno(0);
+ return i;
+}
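+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It creates a UDP socket and checks the
+ * result as set up by lwip_socket() above. */
+#if 0
+static int example_socket_udp(void)
+{
+  int sd = lwip_socket(AF_INET, SOCK_DGRAM, 0);
+  if (sd < 0) {
+    /* errno has been set by lwip_socket(), e.g. ENOBUFS or ENFILE */
+    return -1;
+  }
+  return sd;
+}
+#endif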
+
+int
+lwip_write(int s, const void *data, size_t size)
+{
+ return lwip_send(s, data, size, 0);
+}
+
+int
+lwip_writev(int s, const struct iovec *iov, int iovcnt)
+{
+ struct msghdr msg;
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
+ Blame the opengroup standard for this inconsistency. */
+ msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
+ msg.msg_iovlen = iovcnt;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+ return lwip_sendmsg(s, &msg, 0);
+}
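+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It streams two buffers over a connected
+ * TCP socket with lwip_writev(), which simply wraps lwip_sendmsg() as shown
+ * above. 'sd' and the buffers are placeholders. */
+#if 0
+static int example_writev_tcp(int sd, void *a, size_t alen, void *b, size_t blen)
+{
+  struct iovec iov[2];
+  iov[0].iov_base = a;
+  iov[0].iov_len  = alen;
+  iov[1].iov_base = b;
+  iov[1].iov_len  = blen;
+  /* may return a partial count; the caller has to resubmit the remainder */
+  return lwip_writev(sd, iov, 2);
+}
+#endif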
+
+/**
+ * Go through the readset and writeset lists and see which socket of the sockets
+ * set in the sets has events. On return, readset, writeset and exceptset have
+ * the sockets enabled that had events.
+ *
+ * @param maxfdp1 the highest socket index in the sets
+ * @param readset_in set of sockets to check for read events
+ * @param writeset_in set of sockets to check for write events
+ * @param exceptset_in set of sockets to check for error events
+ * @param readset_out set of sockets that had read events
+ * @param writeset_out set of sockets that had write events
+ * @param exceptset_out set of sockets that had error events
+ * @return number of sockets that had events (read/write/exception) (>= 0)
+ */
+static int
+lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
+ fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
+{
+ int i, nready = 0;
+ fd_set lreadset, lwriteset, lexceptset;
+ struct lwip_sock *sock;
+ SYS_ARCH_DECL_PROTECT(lev);
+
+ FD_ZERO(&lreadset);
+ FD_ZERO(&lwriteset);
+ FD_ZERO(&lexceptset);
+
+ /* Go through each socket in each list to count number of sockets which
+ currently match */
+ for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
+ /* if this FD is not in the set, continue */
+ if (!(readset_in && FD_ISSET(i, readset_in)) &&
+ !(writeset_in && FD_ISSET(i, writeset_in)) &&
+ !(exceptset_in && FD_ISSET(i, exceptset_in))) {
+ continue;
+ }
+ /* First get the socket's status (protected)... */
+ SYS_ARCH_PROTECT(lev);
+ sock = tryget_socket(i);
+ if (sock != NULL) {
+ void* lastdata = sock->lastdata;
+ s16_t rcvevent = sock->rcvevent;
+ u16_t sendevent = sock->sendevent;
+ u16_t errevent = sock->errevent;
+ SYS_ARCH_UNPROTECT(lev);
+
+ /* ... then examine it: */
+ /* See if netconn of this socket is ready for read */
+ if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
+ FD_SET(i, &lreadset);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
+ nready++;
+ }
+ /* See if netconn of this socket is ready for write */
+ if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
+ FD_SET(i, &lwriteset);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
+ nready++;
+ }
+ /* See if netconn of this socket had an error */
+ if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
+ FD_SET(i, &lexceptset);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
+ nready++;
+ }
+ } else {
+ SYS_ARCH_UNPROTECT(lev);
+ /* continue on to next FD in list */
+ }
+ }
+ /* copy local sets to the ones provided as arguments */
+ *readset_out = lreadset;
+ *writeset_out = lwriteset;
+ *exceptset_out = lexceptset;
+
+ LWIP_ASSERT("nready >= 0", nready >= 0);
+ return nready;
+}
+
+int
+lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
+ struct timeval *timeout)
+{
+ u32_t waitres = 0;
+ int nready;
+ fd_set lreadset, lwriteset, lexceptset;
+ u32_t msectimeout;
+ struct lwip_select_cb select_cb;
+ int i;
+ int maxfdp2;
+#if LWIP_NETCONN_SEM_PER_THREAD
+ int waited = 0;
+#endif
+ SYS_ARCH_DECL_PROTECT(lev);
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
+ maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
+ timeout ? (s32_t)timeout->tv_sec : (s32_t)-1,
+ timeout ? (s32_t)timeout->tv_usec : (s32_t)-1));
+
+ /* Go through each socket in each list to count number of sockets which
+ currently match */
+ nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
+
+ /* If we don't have any current events, then suspend if we are supposed to */
+ if (!nready) {
+ if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
+ /* This is OK as the local fdsets are empty and nready is zero,
+ or we would have returned earlier. */
+ goto return_copy_fdsets;
+ }
+
+ /* None ready: add our semaphore to list:
+ We don't actually need any dynamic memory. Our entry on the
+ list is only valid while we are in this function, so it's ok
+ to use local variables. */
+
+ select_cb.next = NULL;
+ select_cb.prev = NULL;
+ select_cb.readset = readset;
+ select_cb.writeset = writeset;
+ select_cb.exceptset = exceptset;
+ select_cb.sem_signalled = 0;
+#if LWIP_NETCONN_SEM_PER_THREAD
+ select_cb.sem = LWIP_NETCONN_THREAD_SEM_GET();
+#else /* LWIP_NETCONN_SEM_PER_THREAD */
+ if (sys_sem_new(&select_cb.sem, 0) != ERR_OK) {
+ /* failed to create semaphore */
+ set_errno(ENOMEM);
+ return -1;
+ }
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+
+ /* Protect the select_cb_list */
+ SYS_ARCH_PROTECT(lev);
+
+ /* Put this select_cb on top of list */
+ select_cb.next = select_cb_list;
+ if (select_cb_list != NULL) {
+ select_cb_list->prev = &select_cb;
+ }
+ select_cb_list = &select_cb;
+ /* Increasing this counter tells event_callback that the list has changed. */
+ select_cb_ctr++;
+
+ /* Now we can safely unprotect */
+ SYS_ARCH_UNPROTECT(lev);
+
+ /* Increase select_waiting for each socket we are interested in */
+ maxfdp2 = maxfdp1;
+ for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
+ if ((readset && FD_ISSET(i, readset)) ||
+ (writeset && FD_ISSET(i, writeset)) ||
+ (exceptset && FD_ISSET(i, exceptset))) {
+ struct lwip_sock *sock;
+ SYS_ARCH_PROTECT(lev);
+ sock = tryget_socket(i);
+ if (sock != NULL) {
+ sock->select_waiting++;
+ LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
+ } else {
+ /* Not a valid socket */
+ nready = -1;
+ maxfdp2 = i;
+ SYS_ARCH_UNPROTECT(lev);
+ break;
+ }
+ SYS_ARCH_UNPROTECT(lev);
+ }
+ }
+
+ if (nready >= 0) {
+ /* Call lwip_selscan again: there could have been events between
+ the last scan (without us on the list) and putting us on the list! */
+ nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
+ if (!nready) {
+ /* Still none ready, just wait to be woken */
+ if (timeout == 0) {
+ /* Wait forever */
+ msectimeout = 0;
+ } else {
+ msectimeout = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500)/1000));
+ if (msectimeout == 0) {
+ /* Wait 1ms at least (0 means wait forever) */
+ msectimeout = 1;
+ }
+ }
+
+ waitres = sys_arch_sem_wait(SELECT_SEM_PTR(select_cb.sem), msectimeout);
+#if LWIP_NETCONN_SEM_PER_THREAD
+ waited = 1;
+#endif
+ }
+ }
+
+ /* Decrease select_waiting for each socket we are interested in */
+ for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
+ if ((readset && FD_ISSET(i, readset)) ||
+ (writeset && FD_ISSET(i, writeset)) ||
+ (exceptset && FD_ISSET(i, exceptset))) {
+ struct lwip_sock *sock;
+ SYS_ARCH_PROTECT(lev);
+ sock = tryget_socket(i);
+ if (sock != NULL) {
+ /* for now, handle select_waiting==0... */
+ LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
+ if (sock->select_waiting > 0) {
+ sock->select_waiting--;
+ }
+ } else {
+ /* Not a valid socket */
+ nready = -1;
+ }
+ SYS_ARCH_UNPROTECT(lev);
+ }
+ }
+ /* Take us off the list */
+ SYS_ARCH_PROTECT(lev);
+ if (select_cb.next != NULL) {
+ select_cb.next->prev = select_cb.prev;
+ }
+ if (select_cb_list == &select_cb) {
+ LWIP_ASSERT("select_cb.prev == NULL", select_cb.prev == NULL);
+ select_cb_list = select_cb.next;
+ } else {
+ LWIP_ASSERT("select_cb.prev != NULL", select_cb.prev != NULL);
+ select_cb.prev->next = select_cb.next;
+ }
+ /* Increasing this counter tells event_callback that the list has changed. */
+ select_cb_ctr++;
+ SYS_ARCH_UNPROTECT(lev);
+
+#if LWIP_NETCONN_SEM_PER_THREAD
+ if (select_cb.sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
+ /* don't leave the thread-local semaphore signalled */
+ sys_arch_sem_wait(select_cb.sem, 1);
+ }
+#else /* LWIP_NETCONN_SEM_PER_THREAD */
+ sys_sem_free(&select_cb.sem);
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+
+ if (nready < 0) {
+ /* This happens when a socket got closed while waiting */
+ set_errno(EBADF);
+ return -1;
+ }
+
+ if (waitres == SYS_ARCH_TIMEOUT) {
+ /* Timeout */
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
+ /* This is OK as the local fdsets are empty and nready is zero,
+ or we would have returned earlier. */
+ goto return_copy_fdsets;
+ }
+
+ /* See what's set */
+ nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
+ }
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
+return_copy_fdsets:
+ set_errno(0);
+ if (readset) {
+ *readset = lreadset;
+ }
+ if (writeset) {
+ *writeset = lwriteset;
+ }
+ if (exceptset) {
+ *exceptset = lexceptset;
+ }
+ return nready;
+}
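+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It waits up to five seconds for 'sd' to
+ * become readable using lwip_select(). */
+#if 0
+static int example_select_readable(int sd)
+{
+  fd_set rfds;
+  struct timeval tv;
+
+  FD_ZERO(&rfds);
+  FD_SET(sd, &rfds);
+  tv.tv_sec  = 5;
+  tv.tv_usec = 0;
+
+  /* returns 0 on timeout, -1 on error, otherwise the number of ready fds */
+  return lwip_select(sd + 1, &rfds, NULL, NULL, &tv);
+}
+#endif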
+
+/**
+ * Callback registered in the netconn layer for each socket-netconn.
+ * Processes recvevent (data available) and wakes up tasks waiting for select.
+ */
+static void
+event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
+{
+ int s;
+ struct lwip_sock *sock;
+ struct lwip_select_cb *scb;
+ int last_select_cb_ctr;
+ SYS_ARCH_DECL_PROTECT(lev);
+
+ LWIP_UNUSED_ARG(len);
+
+ /* Get socket */
+ if (conn) {
+ s = conn->socket;
+ if (s < 0) {
+ /* Data comes in right away after an accept, even though
+ * the server task might not have created a new socket yet.
+ * Just count down (or up) if that's the case and we
+ * will use the data later. Note that only receive events
+ * can happen before the new socket is set up. */
+ SYS_ARCH_PROTECT(lev);
+ if (conn->socket < 0) {
+ if (evt == NETCONN_EVT_RCVPLUS) {
+ conn->socket--;
+ }
+ SYS_ARCH_UNPROTECT(lev);
+ return;
+ }
+ s = conn->socket;
+ SYS_ARCH_UNPROTECT(lev);
+ }
+
+ sock = get_socket(s);
+ if (!sock) {
+ return;
+ }
+ } else {
+ return;
+ }
+
+ SYS_ARCH_PROTECT(lev);
+ /* Set event as required */
+ switch (evt) {
+ case NETCONN_EVT_RCVPLUS:
+ sock->rcvevent++;
+ break;
+ case NETCONN_EVT_RCVMINUS:
+ sock->rcvevent--;
+ break;
+ case NETCONN_EVT_SENDPLUS:
+ sock->sendevent = 1;
+ break;
+ case NETCONN_EVT_SENDMINUS:
+ sock->sendevent = 0;
+ break;
+ case NETCONN_EVT_ERROR:
+ sock->errevent = 1;
+ break;
+ default:
+ LWIP_ASSERT("unknown event", 0);
+ break;
+ }
+
+ if (sock->select_waiting == 0) {
+ /* no one is waiting for this socket, no need to check select_cb_list */
+ SYS_ARCH_UNPROTECT(lev);
+ return;
+ }
+
+ /* Now decide if anyone is waiting for this socket */
+ /* NOTE: This code goes through the select_cb_list multiple times
+ ONLY IF a select was actually waiting. We go through the list the number
+ of waiting select calls + 1. This list is expected to be small. */
+
+ /* At this point, SYS_ARCH is still protected! */
+again:
+ for (scb = select_cb_list; scb != NULL; scb = scb->next) {
+ /* remember the state of select_cb_list to detect changes */
+ last_select_cb_ctr = select_cb_ctr;
+ if (scb->sem_signalled == 0) {
+ /* semaphore not signalled yet */
+ int do_signal = 0;
+ /* Test this select call for our socket */
+ if (sock->rcvevent > 0) {
+ if (scb->readset && FD_ISSET(s, scb->readset)) {
+ do_signal = 1;
+ }
+ }
+ if (sock->sendevent != 0) {
+ if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
+ do_signal = 1;
+ }
+ }
+ if (sock->errevent != 0) {
+ if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
+ do_signal = 1;
+ }
+ }
+ if (do_signal) {
+ scb->sem_signalled = 1;
+ /* Don't call SYS_ARCH_UNPROTECT() before signaling the semaphore, as this might
+ lead to the select thread taking itself off the list, invalidating the semaphore. */
+ sys_sem_signal(SELECT_SEM_PTR(scb->sem));
+ }
+ }
+ /* unlock interrupts with each step */
+ SYS_ARCH_UNPROTECT(lev);
+ /* this makes sure interrupt protection time is short */
+ SYS_ARCH_PROTECT(lev);
+ if (last_select_cb_ctr != select_cb_ctr) {
+ /* someone has changed select_cb_list, restart at the beginning */
+ goto again;
+ }
+ }
+ SYS_ARCH_UNPROTECT(lev);
+}
+
+/**
+ * Close one end of a full-duplex connection.
+ */
+int
+lwip_shutdown(int s, int how)
+{
+ struct lwip_sock *sock;
+ err_t err;
+ u8_t shut_rx = 0, shut_tx = 0;
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
+
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ if (sock->conn != NULL) {
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
+ sock_set_errno(sock, EOPNOTSUPP);
+ return -1;
+ }
+ } else {
+ sock_set_errno(sock, ENOTCONN);
+ return -1;
+ }
+
+ if (how == SHUT_RD) {
+ shut_rx = 1;
+ } else if (how == SHUT_WR) {
+ shut_tx = 1;
+ } else if (how == SHUT_RDWR) {
+ shut_rx = 1;
+ shut_tx = 1;
+ } else {
+ sock_set_errno(sock, EINVAL);
+ return -1;
+ }
+ err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
+
+ sock_set_errno(sock, err_to_errno(err));
+ return (err == ERR_OK ? 0 : -1);
+}
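+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It half-closes a TCP connection so the
+ * peer sees EOF while this side can still read the reply. */
+#if 0
+static int example_half_close(int sd)
+{
+  /* stop sending; receiving on 'sd' is still possible afterwards */
+  return lwip_shutdown(sd, SHUT_WR);
+}
+#endif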
+
+static int
+lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
+{
+ struct lwip_sock *sock;
+ union sockaddr_aligned saddr;
+ ip_addr_t naddr;
+ u16_t port;
+ err_t err;
+
+ sock = get_socket(s);
+ if (!sock) {
+ return -1;
+ }
+
+ /* get the IP address and port */
+ err = netconn_getaddr(sock->conn, &naddr, &port, local);
+ if (err != ERR_OK) {
+ sock_set_errno(sock, err_to_errno(err));
+ return -1;
+ }
+
+#if LWIP_IPV4 && LWIP_IPV6
+ /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
+ if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
+ IP_IS_V4_VAL(naddr)) {
+ ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
+ IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+ IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
+ ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
+ LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
+
+ if (*namelen > saddr.sa.sa_len) {
+ *namelen = saddr.sa.sa_len;
+ }
+ MEMCPY(name, &saddr, *namelen);
+
+ sock_set_errno(sock, 0);
+ return 0;
+}
+
+int
+lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
+{
+ return lwip_getaddrname(s, name, namelen, 0);
+}
+
+int
+lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
+{
+ return lwip_getaddrname(s, name, namelen, 1);
+}
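+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It queries the local address and port a
+ * socket is bound to, e.g. after binding to port 0, assuming an IPv4 socket. */
+#if 0
+static int example_local_port(int sd, u16_t *port)
+{
+  struct sockaddr_storage ss;
+  socklen_t len = sizeof(ss);
+  if (lwip_getsockname(sd, (struct sockaddr *)&ss, &len) < 0) {
+    return -1;
+  }
+  if (ss.ss_family == AF_INET) {
+    *port = ntohs(((struct sockaddr_in *)&ss)->sin_port);
+  }
+  return 0;
+}
+#endif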
+
+int
+lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
+{
+ u8_t err;
+ struct lwip_sock *sock = get_socket(s);
+#if !LWIP_TCPIP_CORE_LOCKING
+ LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
+#endif /* !LWIP_TCPIP_CORE_LOCKING */
+
+ if (!sock) {
+ return -1;
+ }
+
+ if ((NULL == optval) || (NULL == optlen)) {
+ sock_set_errno(sock, EFAULT);
+ return -1;
+ }
+
+#if LWIP_TCPIP_CORE_LOCKING
+ /* core-locking can just call the -impl function */
+ LOCK_TCPIP_CORE();
+ err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
+ UNLOCK_TCPIP_CORE();
+
+#else /* LWIP_TCPIP_CORE_LOCKING */
+
+#if LWIP_MPU_COMPATIBLE
+ /* MPU_COMPATIBLE copies the optval data, so check for max size here */
+ if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
+ sock_set_errno(sock, ENOBUFS);
+ return -1;
+ }
+#endif /* LWIP_MPU_COMPATIBLE */
+
+ LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
+#if !LWIP_MPU_COMPATIBLE
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
+#endif /* !LWIP_MPU_COMPATIBLE */
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
+#if LWIP_NETCONN_SEM_PER_THREAD
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
+#else
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
+#endif
+ err = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
+ if (err != ERR_OK) {
+ LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
+ sock_set_errno(sock, err_to_errno(err));
+ return -1;
+ }
+ sys_arch_sem_wait((sys_sem_t*)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
+
+ /* write back optlen and optval */
+ *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
+#if LWIP_MPU_COMPATIBLE
+ MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
+#endif /* LWIP_MPU_COMPATIBLE */
+
+ /* maybe lwip_getsockopt_impl has changed err */
+ err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
+ LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+
+ sock_set_errno(sock, err);
+ return err ? -1 : 0;
+}
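+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It reads the pending error of a socket via
+ * SO_ERROR, e.g. after a non-blocking connect reported EINPROGRESS and
+ * select() signalled writability. */
+#if 0
+static int example_so_error(int sd)
+{
+  int so_err = 0;
+  socklen_t len = sizeof(so_err);
+  if (lwip_getsockopt(sd, SOL_SOCKET, SO_ERROR, &so_err, &len) < 0) {
+    return -1;
+  }
+  return so_err; /* 0 means no pending error */
+}
+#endif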
+
+#if !LWIP_TCPIP_CORE_LOCKING
+/** lwip_getsockopt_callback: only used without CORE_LOCKING
+ * to get into the tcpip_thread
+ */
+static void
+lwip_getsockopt_callback(void *arg)
+{
+ struct lwip_setgetsockopt_data *data;
+ LWIP_ASSERT("arg != NULL", arg != NULL);
+ data = (struct lwip_setgetsockopt_data*)arg;
+
+ data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
+#if LWIP_MPU_COMPATIBLE
+ data->optval,
+#else /* LWIP_MPU_COMPATIBLE */
+ data->optval.p,
+#endif /* LWIP_MPU_COMPATIBLE */
+ &data->optlen);
+
+ sys_sem_signal((sys_sem_t*)(data->completed_sem));
+}
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+
+/** lwip_getsockopt_impl: the actual implementation of getsockopt:
+ * same arguments as lwip_getsockopt, either called directly or through a callback
+ */
+static u8_t
+lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
+{
+ u8_t err = 0;
+ struct lwip_sock *sock = tryget_socket(s);
+ if (!sock) {
+ return EBADF;
+ }
+
+ switch (level) {
+
+/* Level: SOL_SOCKET */
+ case SOL_SOCKET:
+ switch (optname) {
+
+#if LWIP_TCP
+ case SO_ACCEPTCONN:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
+ if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
+ return ENOPROTOOPT;
+ }
+ if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
+ *(int*)optval = 1;
+ } else {
+ *(int*)optval = 0;
+ }
+ break;
+#endif /* LWIP_TCP */
+
+ /* The option flags */
+ case SO_BROADCAST:
+ case SO_KEEPALIVE:
+#if SO_REUSE
+ case SO_REUSEADDR:
+#endif /* SO_REUSE */
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
+ *(int*)optval = ip_get_option(sock->conn->pcb.ip, optname);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
+ s, optname, (*(int*)optval?"on":"off")));
+ break;
+
+ case SO_TYPE:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
+ switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
+ case NETCONN_RAW:
+ *(int*)optval = SOCK_RAW;
+ break;
+ case NETCONN_TCP:
+ *(int*)optval = SOCK_STREAM;
+ break;
+ case NETCONN_UDP:
+ *(int*)optval = SOCK_DGRAM;
+ break;
+ default: /* unrecognized socket type */
+ *(int*)optval = netconn_type(sock->conn);
+ LWIP_DEBUGF(SOCKETS_DEBUG,
+ ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
+ s, *(int *)optval));
+ } /* switch (netconn_type(sock->conn)) */
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
+ s, *(int *)optval));
+ break;
+
+ case SO_ERROR:
+ LWIP_SOCKOPT_CHECK_OPTLEN(*optlen, int);
+ /* only overwrite ERR_OK or temporary errors */
+ if (((sock->err == 0) || (sock->err == EINPROGRESS)) && (sock->conn != NULL)) {
+ sock_set_errno(sock, err_to_errno(sock->conn->last_err));
+ }
+ *(int *)optval = (sock->err == 0xFF ? (int)-1 : (int)sock->err);
+ sock->err = 0;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
+ s, *(int *)optval));
+ break;
+
+#if LWIP_SO_SNDTIMEO
+ case SO_SNDTIMEO:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
+ LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
+ break;
+#endif /* LWIP_SO_SNDTIMEO */
+#if LWIP_SO_RCVTIMEO
+ case SO_RCVTIMEO:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
+ LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
+ break;
+#endif /* LWIP_SO_RCVTIMEO */
+#if LWIP_SO_RCVBUF
+ case SO_RCVBUF:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
+ *(int *)optval = netconn_get_recvbufsize(sock->conn);
+ break;
+#endif /* LWIP_SO_RCVBUF */
+#if LWIP_SO_LINGER
+ case SO_LINGER:
+ {
+ s16_t conn_linger;
+ struct linger* linger = (struct linger*)optval;
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
+ conn_linger = sock->conn->linger;
+ if (conn_linger >= 0) {
+ linger->l_onoff = 1;
+ linger->l_linger = (int)conn_linger;
+ } else {
+ linger->l_onoff = 0;
+ linger->l_linger = 0;
+ }
+ }
+ break;
+#endif /* LWIP_SO_LINGER */
+#if LWIP_UDP
+ case SO_NO_CHECK:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
+#if LWIP_UDPLITE
+ if ((udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_UDPLITE) != 0) {
+ /* this flag is only available for UDP, not for UDP lite */
+ return EAFNOSUPPORT;
+ }
+#endif /* LWIP_UDPLITE */
+ *(int*)optval = (udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_NOCHKSUM) ? 1 : 0;
+ break;
+#endif /* LWIP_UDP*/
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+
+/* Level: IPPROTO_IP */
+ case IPPROTO_IP:
+ switch (optname) {
+ case IP_TTL:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
+ *(int*)optval = sock->conn->pcb.ip->ttl;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
+ s, *(int *)optval));
+ break;
+ case IP_TOS:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
+ *(int*)optval = sock->conn->pcb.ip->tos;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
+ s, *(int *)optval));
+ break;
+#if LWIP_MULTICAST_TX_OPTIONS
+ case IP_MULTICAST_TTL:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
+ return ENOPROTOOPT;
+ }
+ *(u8_t*)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
+ s, *(int *)optval));
+ break;
+ case IP_MULTICAST_IF:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
+ return ENOPROTOOPT;
+ }
+ inet_addr_from_ip4addr((struct in_addr*)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
+ s, *(u32_t *)optval));
+ break;
+ case IP_MULTICAST_LOOP:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
+ if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
+ *(u8_t*)optval = 1;
+ } else {
+ *(u8_t*)optval = 0;
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
+ s, *(int *)optval));
+ break;
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+
+#if LWIP_TCP
+/* Level: IPPROTO_TCP */
+ case IPPROTO_TCP:
+ /* Special case: all IPPROTO_TCP options take an int */
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
+ if (sock->conn->pcb.tcp->state == LISTEN) {
+ return EINVAL;
+ }
+ switch (optname) {
+ case TCP_NODELAY:
+ *(int*)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
+ s, (*(int*)optval)?"on":"off") );
+ break;
+ case TCP_KEEPALIVE:
+ *(int*)optval = (int)sock->conn->pcb.tcp->keep_idle;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
+ s, *(int *)optval));
+ break;
+
+#if LWIP_TCP_KEEPALIVE
+ case TCP_KEEPIDLE:
+ *(int*)optval = (int)(sock->conn->pcb.tcp->keep_idle/1000);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
+ s, *(int *)optval));
+ break;
+ case TCP_KEEPINTVL:
+ *(int*)optval = (int)(sock->conn->pcb.tcp->keep_intvl/1000);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
+ s, *(int *)optval));
+ break;
+ case TCP_KEEPCNT:
+ *(int*)optval = (int)sock->conn->pcb.tcp->keep_cnt;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
+ s, *(int *)optval));
+ break;
+#endif /* LWIP_TCP_KEEPALIVE */
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+#endif /* LWIP_TCP */
+
+#if LWIP_IPV6
+/* Level: IPPROTO_IPV6 */
+ case IPPROTO_IPV6:
+ switch (optname) {
+ case IPV6_V6ONLY:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
+ *(int*)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
+ s, *(int *)optval));
+ break;
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+#endif /* LWIP_IPV6 */
+
+#if LWIP_UDP && LWIP_UDPLITE
+ /* Level: IPPROTO_UDPLITE */
+ case IPPROTO_UDPLITE:
+ /* Special case: all IPPROTO_UDPLITE options take an int */
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
+ /* If this is not a UDP lite socket, ignore any options. */
+ if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
+ return ENOPROTOOPT;
+ }
+ switch (optname) {
+ case UDPLITE_SEND_CSCOV:
+ *(int*)optval = sock->conn->pcb.udp->chksum_len_tx;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
+ s, (*(int*)optval)) );
+ break;
+ case UDPLITE_RECV_CSCOV:
+ *(int*)optval = sock->conn->pcb.udp->chksum_len_rx;
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
+ s, (*(int*)optval)) );
+ break;
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+#endif /* LWIP_UDP */
+ /* Level: IPPROTO_RAW */
+ case IPPROTO_RAW:
+ switch (optname) {
+#if LWIP_IPV6 && LWIP_RAW
+ case IPV6_CHECKSUM:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
+ if (sock->conn->pcb.raw->chksum_reqd == 0) {
+ *(int *)optval = -1;
+ } else {
+ *(int *)optval = sock->conn->pcb.raw->chksum_offset;
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
+ s, (*(int*)optval)) );
+ break;
+#endif /* LWIP_IPV6 && LWIP_RAW */
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
+ s, level, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (level) */
+
+ return err;
+}
+
+int
+lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
+{
+ u8_t err = 0;
+ struct lwip_sock *sock = get_socket(s);
+#if !LWIP_TCPIP_CORE_LOCKING
+ LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
+#endif /* !LWIP_TCPIP_CORE_LOCKING */
+
+ if (!sock) {
+ return -1;
+ }
+
+ if (NULL == optval) {
+ sock_set_errno(sock, EFAULT);
+ return -1;
+ }
+
+#if LWIP_TCPIP_CORE_LOCKING
+ /* core-locking can just call the -impl function */
+ LOCK_TCPIP_CORE();
+ err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
+ UNLOCK_TCPIP_CORE();
+
+#else /* LWIP_TCPIP_CORE_LOCKING */
+
+#if LWIP_MPU_COMPATIBLE
+ /* MPU_COMPATIBLE copies the optval data, so check for max size here */
+ if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
+ sock_set_errno(sock, ENOBUFS);
+ return -1;
+ }
+#endif /* LWIP_MPU_COMPATIBLE */
+
+ LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
+#if LWIP_MPU_COMPATIBLE
+ MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
+#else /* LWIP_MPU_COMPATIBLE */
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void*)optval;
+#endif /* LWIP_MPU_COMPATIBLE */
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
+#if LWIP_NETCONN_SEM_PER_THREAD
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
+#else
+ LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
+#endif
+ err = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
+ if (err != ERR_OK) {
+ LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
+ sock_set_errno(sock, err_to_errno(err));
+ return -1;
+ }
+ sys_arch_sem_wait((sys_sem_t*)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
+
+ /* maybe lwip_setsockopt_impl has changed err */
+ err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
+ LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+
+ sock_set_errno(sock, err);
+ return err ? -1 : 0;
+}
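+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It sets a two second receive timeout,
+ * assuming LWIP_SO_RCVTIMEO is enabled and the standard 'struct timeval'
+ * option type is configured for it. */
+#if 0
+static int example_rcvtimeo(int sd)
+{
+  struct timeval tv;
+  tv.tv_sec  = 2;
+  tv.tv_usec = 0;
+  return lwip_setsockopt(sd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
+}
+#endif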
+
+#if !LWIP_TCPIP_CORE_LOCKING
+/** lwip_setsockopt_callback: only used without CORE_LOCKING
+ * to get into the tcpip_thread
+ */
+static void
+lwip_setsockopt_callback(void *arg)
+{
+ struct lwip_setgetsockopt_data *data;
+ LWIP_ASSERT("arg != NULL", arg != NULL);
+ data = (struct lwip_setgetsockopt_data*)arg;
+
+ data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
+#if LWIP_MPU_COMPATIBLE
+ data->optval,
+#else /* LWIP_MPU_COMPATIBLE */
+ data->optval.pc,
+#endif /* LWIP_MPU_COMPATIBLE */
+ data->optlen);
+
+ sys_sem_signal((sys_sem_t*)(data->completed_sem));
+}
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+
+/** lwip_setsockopt_impl: the actual implementation of setsockopt:
+ * same arguments as lwip_setsockopt, either called directly or through a callback
+ */
+static u8_t
+lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
+{
+ u8_t err = 0;
+ struct lwip_sock *sock = tryget_socket(s);
+ if (!sock) {
+ return EBADF;
+ }
+
+ switch (level) {
+
+/* Level: SOL_SOCKET */
+ case SOL_SOCKET:
+ switch (optname) {
+
+ /* SO_ACCEPTCONN is get-only */
+
+ /* The option flags */
+ case SO_BROADCAST:
+ case SO_KEEPALIVE:
+#if SO_REUSE
+ case SO_REUSEADDR:
+#endif /* SO_REUSE */
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
+ if (*(const int*)optval) {
+ ip_set_option(sock->conn->pcb.ip, optname);
+ } else {
+ ip_reset_option(sock->conn->pcb.ip, optname);
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
+ s, optname, (*(const int*)optval?"on":"off")));
+ break;
+
+ /* SO_TYPE is get-only */
+ /* SO_ERROR is get-only */
+
+#if LWIP_SO_SNDTIMEO
+ case SO_SNDTIMEO:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
+ netconn_set_sendtimeout(sock->conn, LWIP_SO_SNDRCVTIMEO_GET_MS(optval));
+ break;
+#endif /* LWIP_SO_SNDTIMEO */
+#if LWIP_SO_RCVTIMEO
+ case SO_RCVTIMEO:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
+ netconn_set_recvtimeout(sock->conn, (int)LWIP_SO_SNDRCVTIMEO_GET_MS(optval));
+ break;
+#endif /* LWIP_SO_RCVTIMEO */
+#if LWIP_SO_RCVBUF
+ case SO_RCVBUF:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
+ netconn_set_recvbufsize(sock->conn, *(const int*)optval);
+ break;
+#endif /* LWIP_SO_RCVBUF */
+#if LWIP_SO_LINGER
+ case SO_LINGER:
+ {
+ const struct linger* linger = (const struct linger*)optval;
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
+ if (linger->l_onoff) {
+ int lingersec = linger->l_linger;
+ if (lingersec < 0) {
+ return EINVAL;
+ }
+ if (lingersec > 0xFFFF) {
+ lingersec = 0xFFFF;
+ }
+ sock->conn->linger = (s16_t)lingersec;
+ } else {
+ sock->conn->linger = -1;
+ }
+ }
+ break;
+#endif /* LWIP_SO_LINGER */
+#if LWIP_UDP
+ case SO_NO_CHECK:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
+#if LWIP_UDPLITE
+ if ((udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_UDPLITE) != 0) {
+ /* this flag is only available for UDP, not for UDP lite */
+ return EAFNOSUPPORT;
+ }
+#endif /* LWIP_UDPLITE */
+ if (*(const int*)optval) {
+ udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) | UDP_FLAGS_NOCHKSUM);
+ } else {
+ udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) & ~UDP_FLAGS_NOCHKSUM);
+ }
+ break;
+#endif /* LWIP_UDP */
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+
+/* Level: IPPROTO_IP */
+ case IPPROTO_IP:
+ switch (optname) {
+ case IP_TTL:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
+ sock->conn->pcb.ip->ttl = (u8_t)(*(const int*)optval);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
+ s, sock->conn->pcb.ip->ttl));
+ break;
+ case IP_TOS:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
+ sock->conn->pcb.ip->tos = (u8_t)(*(const int*)optval);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n",
+ s, sock->conn->pcb.ip->tos));
+ break;
+#if LWIP_MULTICAST_TX_OPTIONS
+ case IP_MULTICAST_TTL:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
+ udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t*)optval));
+ break;
+ case IP_MULTICAST_IF:
+ {
+ ip4_addr_t if_addr;
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
+ inet_addr_to_ip4addr(&if_addr, (const struct in_addr*)optval);
+ udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
+ }
+ break;
+ case IP_MULTICAST_LOOP:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
+ if (*(const u8_t*)optval) {
+ udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) | UDP_FLAGS_MULTICAST_LOOP);
+ } else {
+ udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) & ~UDP_FLAGS_MULTICAST_LOOP);
+ }
+ break;
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+#if LWIP_IGMP
+ case IP_ADD_MEMBERSHIP:
+ case IP_DROP_MEMBERSHIP:
+ {
+ /* If this is a TCP or a RAW socket, ignore these options. */
+ /* @todo: assign membership to this socket so that it is dropped when closing the socket */
+ err_t igmp_err;
+ const struct ip_mreq *imr = (const struct ip_mreq *)optval;
+ ip4_addr_t if_addr;
+ ip4_addr_t multi_addr;
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
+ inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
+ inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
+ if (optname == IP_ADD_MEMBERSHIP) {
+ if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
+ /* cannot track membership (out of memory) */
+ err = ENOMEM;
+ igmp_err = ERR_OK;
+ } else {
+ igmp_err = igmp_joingroup(&if_addr, &multi_addr);
+ }
+ } else {
+ igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
+ lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
+ }
+ if (igmp_err != ERR_OK) {
+ err = EADDRNOTAVAIL;
+ }
+ }
+ break;
+#endif /* LWIP_IGMP */
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+
+#if LWIP_TCP
+/* Level: IPPROTO_TCP */
+ case IPPROTO_TCP:
+ /* Special case: all IPPROTO_TCP options take an int */
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
+ if (sock->conn->pcb.tcp->state == LISTEN) {
+ return EINVAL;
+ }
+ switch (optname) {
+ case TCP_NODELAY:
+ if (*(const int*)optval) {
+ tcp_nagle_disable(sock->conn->pcb.tcp);
+ } else {
+ tcp_nagle_enable(sock->conn->pcb.tcp);
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
+ s, (*(const int *)optval)?"on":"off") );
+ break;
+ case TCP_KEEPALIVE:
+ sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int*)optval);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
+ s, sock->conn->pcb.tcp->keep_idle));
+ break;
+
+#if LWIP_TCP_KEEPALIVE
+ case TCP_KEEPIDLE:
+ sock->conn->pcb.tcp->keep_idle = 1000*(u32_t)(*(const int*)optval);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
+ s, sock->conn->pcb.tcp->keep_idle));
+ break;
+ case TCP_KEEPINTVL:
+ sock->conn->pcb.tcp->keep_intvl = 1000*(u32_t)(*(const int*)optval);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
+ s, sock->conn->pcb.tcp->keep_intvl));
+ break;
+ case TCP_KEEPCNT:
+ sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int*)optval);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
+ s, sock->conn->pcb.tcp->keep_cnt));
+ break;
+#endif /* LWIP_TCP_KEEPALIVE */
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+#endif /* LWIP_TCP*/
+
+#if LWIP_IPV6
+/* Level: IPPROTO_IPV6 */
+ case IPPROTO_IPV6:
+ switch (optname) {
+ case IPV6_V6ONLY:
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
+ if (*(const int*)optval) {
+ netconn_set_ipv6only(sock->conn, 1);
+ } else {
+ netconn_set_ipv6only(sock->conn, 0);
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
+ s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
+ break;
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+#endif /* LWIP_IPV6 */
+
+#if LWIP_UDP && LWIP_UDPLITE
+ /* Level: IPPROTO_UDPLITE */
+ case IPPROTO_UDPLITE:
+ /* Special case: all IPPROTO_UDPLITE options take an int */
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
+ /* If this is not a UDP lite socket, ignore any options. */
+ if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
+ return ENOPROTOOPT;
+ }
+ switch (optname) {
+ case UDPLITE_SEND_CSCOV:
+ if ((*(const int*)optval != 0) && ((*(const int*)optval < 8) || (*(const int*)optval > 0xffff))) {
+ /* don't allow illegal values! */
+ sock->conn->pcb.udp->chksum_len_tx = 8;
+ } else {
+ sock->conn->pcb.udp->chksum_len_tx = (u16_t)*(const int*)optval;
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
+ s, (*(const int*)optval)) );
+ break;
+ case UDPLITE_RECV_CSCOV:
+ if ((*(const int*)optval != 0) && ((*(const int*)optval < 8) || (*(const int*)optval > 0xffff))) {
+ /* don't allow illegal values! */
+ sock->conn->pcb.udp->chksum_len_rx = 8;
+ } else {
+ sock->conn->pcb.udp->chksum_len_rx = (u16_t)*(const int*)optval;
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
+ s, (*(const int*)optval)) );
+ break;
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+#endif /* LWIP_UDP */
+ /* Level: IPPROTO_RAW */
+ case IPPROTO_RAW:
+ switch (optname) {
+#if LWIP_IPV6 && LWIP_RAW
+ case IPV6_CHECKSUM:
+ /* It should not be possible to disable the checksum generation with ICMPv6
+ * as per RFC 3542 chapter 3.1 */
+ if(sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
+ return EINVAL;
+ }
+
+ LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
+ if (*(const int *)optval < 0) {
+ sock->conn->pcb.raw->chksum_reqd = 0;
+ } else if (*(const int *)optval & 1) {
+ /* Per RFC3542, odd offsets are not allowed */
+ return EINVAL;
+ } else {
+ sock->conn->pcb.raw->chksum_reqd = 1;
+ sock->conn->pcb.raw->chksum_offset = (u16_t)*(const int *)optval;
+ }
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
+ s, sock->conn->pcb.raw->chksum_reqd));
+ break;
+#endif /* LWIP_IPV6 && LWIP_RAW */
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
+ s, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (optname) */
+ break;
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
+ s, level, optname));
+ err = ENOPROTOOPT;
+ break;
+ } /* switch (level) */
+
+ return err;
+}
+
+int
+lwip_ioctl(int s, long cmd, void *argp)
+{
+ struct lwip_sock *sock = get_socket(s);
+ u8_t val;
+#if LWIP_SO_RCVBUF
+ u16_t buflen = 0;
+ int recv_avail;
+#endif /* LWIP_SO_RCVBUF */
+
+ if (!sock) {
+ return -1;
+ }
+
+ switch (cmd) {
+#if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
+ case FIONREAD:
+ if (!argp) {
+ sock_set_errno(sock, EINVAL);
+ return -1;
+ }
+#if LWIP_FIONREAD_LINUXMODE
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
+ struct pbuf *p;
+ if (sock->lastdata) {
+ p = ((struct netbuf *)sock->lastdata)->p;
+ *((int*)argp) = p->tot_len - sock->lastoffset;
+ } else {
+ struct netbuf *rxbuf;
+ err_t err;
+ if (sock->rcvevent <= 0) {
+ *((int*)argp) = 0;
+ } else {
+ err = netconn_recv(sock->conn, &rxbuf);
+ if (err != ERR_OK) {
+ *((int*)argp) = 0;
+ } else {
+ sock->lastdata = rxbuf;
+ sock->lastoffset = 0;
+ *((int*)argp) = rxbuf->p->tot_len;
+ }
+ }
+ }
+ return 0;
+ }
+#endif /* LWIP_FIONREAD_LINUXMODE */
+
+#if LWIP_SO_RCVBUF
+ /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
+ SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
+ if (recv_avail < 0) {
+ recv_avail = 0;
+ }
+ *((int*)argp) = recv_avail;
+
+ /* Check if there is data left from the last recv operation. /maq 041215 */
+ if (sock->lastdata) {
+ struct pbuf *p = (struct pbuf *)sock->lastdata;
+ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
+ p = ((struct netbuf *)p)->p;
+ }
+ buflen = p->tot_len;
+ buflen -= sock->lastoffset;
+
+ *((int*)argp) += buflen;
+ }
+
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t*)argp)));
+ sock_set_errno(sock, 0);
+ return 0;
+#else /* LWIP_SO_RCVBUF */
+ break;
+#endif /* LWIP_SO_RCVBUF */
+#endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */
+
+ case (long)FIONBIO:
+ val = 0;
+ if (argp && *(u32_t*)argp) {
+ val = 1;
+ }
+ netconn_set_nonblocking(sock->conn, val);
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
+ sock_set_errno(sock, 0);
+ return 0;
+
+ default:
+ break;
+ } /* switch (cmd) */
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
+ sock_set_errno(sock, ENOSYS); /* not yet implemented */
+ return -1;
+}
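+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It switches a socket to non-blocking mode
+ * with FIONBIO and asks how much data is queued with FIONREAD (the latter
+ * needs LWIP_SO_RCVBUF or LWIP_FIONREAD_LINUXMODE). */
+#if 0
+static int example_ioctl(int sd)
+{
+  u32_t on = 1;
+  int queued = 0;
+  if (lwip_ioctl(sd, FIONBIO, &on) < 0) {
+    return -1;
+  }
+  if (lwip_ioctl(sd, FIONREAD, &queued) < 0) {
+    return -1;
+  }
+  return queued;
+}
+#endif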
+
+/** A minimal implementation of fcntl.
+ * Currently only the commands F_GETFL and F_SETFL are implemented.
+ * Only the flag O_NONBLOCK is implemented.
+ */
+int
+lwip_fcntl(int s, int cmd, int val)
+{
+ struct lwip_sock *sock = get_socket(s);
+ int ret = -1;
+
+ if (!sock) {
+ return -1;
+ }
+
+ switch (cmd) {
+ case F_GETFL:
+ ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
+ sock_set_errno(sock, 0);
+ break;
+ case F_SETFL:
+ if ((val & ~O_NONBLOCK) == 0) {
+ /* only O_NONBLOCK, all other bits are zero */
+ netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
+ ret = 0;
+ sock_set_errno(sock, 0);
+ } else {
+ sock_set_errno(sock, ENOSYS); /* not yet implemented */
+ }
+ break;
+ default:
+ LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
+ sock_set_errno(sock, ENOSYS); /* not yet implemented */
+ break;
+ }
+ return ret;
+}
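+
+/* Editor's note: illustrative usage sketch, not part of the original lwIP
+ * sources; compiled out via #if 0. It is the fcntl equivalent of the FIONBIO
+ * ioctl above, toggling only O_NONBLOCK as supported by lwip_fcntl(). */
+#if 0
+static int example_set_nonblocking(int sd)
+{
+  int fl = lwip_fcntl(sd, F_GETFL, 0);
+  if (fl < 0) {
+    return -1;
+  }
+  return lwip_fcntl(sd, F_SETFL, fl | O_NONBLOCK);
+}
+#endif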
+
+#if LWIP_IGMP
+/** Register a new IGMP membership. On socket close, the membership is dropped automatically.
+ *
+ * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
+ *
+ * @return 1 on success, 0 on failure
+ */
+static int
+lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
+{
+ struct lwip_sock *sock = get_socket(s);
+ int i;
+
+ if (!sock) {
+ return 0;
+ }
+
+ for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
+ if (socket_ipv4_multicast_memberships[i].sock == NULL) {
+ socket_ipv4_multicast_memberships[i].sock = sock;
+ ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
+ ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/** Unregister a previously registered membership. This prevents dropping the membership
+ * on socket close.
+ *
+ * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
+ */
+static void
+lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
+{
+ struct lwip_sock *sock = get_socket(s);
+ int i;
+
+ if (!sock) {
+ return;
+ }
+
+ for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
+ if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
+ ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
+ ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
+ socket_ipv4_multicast_memberships[i].sock = NULL;
+ ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
+ ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
+ return;
+ }
+ }
+}
+
+/** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
+ *
+ * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
+ */
+static void
+lwip_socket_drop_registered_memberships(int s)
+{
+ struct lwip_sock *sock = get_socket(s);
+ int i;
+
+ if (!sock) {
+ return;
+ }
+
+ for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
+ if (socket_ipv4_multicast_memberships[i].sock == sock) {
+ ip_addr_t multi_addr, if_addr;
+ ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
+ ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
+ socket_ipv4_multicast_memberships[i].sock = NULL;
+ ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
+ ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
+
+ netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
+ }
+ }
+}
+#endif /* LWIP_IGMP */
+#endif /* LWIP_SOCKET */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/tcpip.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/tcpip.c
new file mode 100644
index 0000000..07b2f98
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/external/lwip/src/api/tcpip.c
@@ -0,0 +1,518 @@
+/**
+ * @file
+ * Sequential API Main thread module
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if !NO_SYS /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/priv/tcpip_priv.h"
+#include "lwip/sys.h"
+#include "lwip/memp.h"
+#include "lwip/mem.h"
+#include "lwip/init.h"
+#include "lwip/ip.h"
+#include "lwip/pbuf.h"
+#include "lwip/etharp.h"
+#include "netif/ethernet.h"
+
+#define TCPIP_MSG_VAR_REF(name) API_VAR_REF(name)
+#define TCPIP_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct tcpip_msg, name)
+#define TCPIP_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, name, ERR_MEM)
+#define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name)
+
+/* global variables */
+static tcpip_init_done_fn tcpip_init_done;
+static void *tcpip_init_done_arg;
+static sys_mbox_t mbox;
+
+#if LWIP_TCPIP_CORE_LOCKING
+/** The global semaphore to lock the stack. */
+sys_mutex_t lock_tcpip_core;
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+
+#if LWIP_TIMERS
+/* wait for a message, timeouts are processed while waiting */
+#define TCPIP_MBOX_FETCH(mbox, msg) sys_timeouts_mbox_fetch(mbox, msg)
+#else /* LWIP_TIMERS */
+/* wait for a message with timers disabled (e.g. pass a timer-check trigger into tcpip_thread) */
+#define TCPIP_MBOX_FETCH(mbox, msg) sys_mbox_fetch(mbox, msg)
+#endif /* LWIP_TIMERS */
+
+/**
+ * The main lwIP thread. This thread has exclusive access to lwIP core functions
+ * (unless core locking allows other threads to access them while holding the lock). Other threads communicate with this
+ * thread using message boxes.
+ *
+ * It also starts all the timers to make sure they are running in the right
+ * thread context.
+ *
+ * @param arg unused argument
+ */
+static void
+tcpip_thread(void *arg)
+{
+ struct tcpip_msg *msg;
+ LWIP_UNUSED_ARG(arg);
+
+ if (tcpip_init_done != NULL) {
+ tcpip_init_done(tcpip_init_done_arg);
+ }
+
+ LOCK_TCPIP_CORE();
+ while (1) { /* MAIN Loop */
+ UNLOCK_TCPIP_CORE();
+ LWIP_TCPIP_THREAD_ALIVE();
+ /* wait for a message, timeouts are processed while waiting */
+ TCPIP_MBOX_FETCH(&mbox, (void **)&msg);
+ LOCK_TCPIP_CORE();
+ if (msg == NULL) {
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n"));
+ LWIP_ASSERT("tcpip_thread: invalid message", 0);
+ continue;
+ }
+ switch (msg->type) {
+#if !LWIP_TCPIP_CORE_LOCKING
+ case TCPIP_MSG_API:
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg));
+ msg->msg.api_msg.function(msg->msg.api_msg.msg);
+ break;
+ case TCPIP_MSG_API_CALL:
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg));
+ msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg);
+ sys_sem_signal(msg->msg.api_call.sem);
+ break;
+#endif /* !LWIP_TCPIP_CORE_LOCKING */
+
+#if !LWIP_TCPIP_CORE_LOCKING_INPUT
+ case TCPIP_MSG_INPKT:
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg));
+ msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif);
+ memp_free(MEMP_TCPIP_MSG_INPKT, msg);
+ break;
+#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */
+
+#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS
+ case TCPIP_MSG_TIMEOUT:
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg));
+ sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg);
+ memp_free(MEMP_TCPIP_MSG_API, msg);
+ break;
+ case TCPIP_MSG_UNTIMEOUT:
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg));
+ sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg);
+ memp_free(MEMP_TCPIP_MSG_API, msg);
+ break;
+#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */
+
+ case TCPIP_MSG_CALLBACK:
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg));
+ msg->msg.cb.function(msg->msg.cb.ctx);
+ memp_free(MEMP_TCPIP_MSG_API, msg);
+ break;
+
+ case TCPIP_MSG_CALLBACK_STATIC:
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg));
+ msg->msg.cb.function(msg->msg.cb.ctx);
+ break;
+
+ default:
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type));
+ LWIP_ASSERT("tcpip_thread: invalid message", 0);
+ break;
+ }
+ }
+}
+
+/**
+ * Pass a received packet to tcpip_thread for input processing
+ *
+ * @param p the received packet
+ * @param inp the network interface on which the packet was received
+ * @param input_fn input function to call
+ */
+err_t
+tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn)
+{
+#if LWIP_TCPIP_CORE_LOCKING_INPUT
+ err_t ret;
+ LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_inpkt: PACKET %p/%p\n", (void *)p, (void *)inp));
+ LOCK_TCPIP_CORE();
+ ret = input_fn(p, inp);
+ UNLOCK_TCPIP_CORE();
+ return ret;
+#else /* LWIP_TCPIP_CORE_LOCKING_INPUT */
+ struct tcpip_msg *msg;
+
+ LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox));
+
+ msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_INPKT);
+ if (msg == NULL) {
+ return ERR_MEM;
+ }
+
+ msg->type = TCPIP_MSG_INPKT;
+ msg->msg.inp.p = p;
+ msg->msg.inp.netif = inp;
+ msg->msg.inp.input_fn = input_fn;
+ if (sys_mbox_trypost(&mbox, msg) != ERR_OK) {
+ memp_free(MEMP_TCPIP_MSG_INPKT, msg);
+ return ERR_MEM;
+ }
+ return ERR_OK;
+#endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */
+}
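+
+/*
+ * Usage sketch (my_driver_rx is an illustrative assumption): a driver RX
+ * handler hands a received IP packet to tcpip_thread with an explicit input
+ * function. Most drivers instead call netif->input(), i.e. tcpip_input()
+ * below. If posting fails, the pbuf is not consumed and must be freed by the
+ * caller.
+ *
+ * @code
+ * static void my_driver_rx(struct netif *netif, struct pbuf *p)
+ * {
+ *   // Hand the raw IP packet to tcpip_thread; ip_input() then runs in the
+ *   // correct (TCPIP) thread context.
+ *   if (tcpip_inpkt(p, netif, ip_input) != ERR_OK) {
+ *     pbuf_free(p);  // mbox full or out of memory: drop the packet
+ *   }
+ * }
+ * @endcode
+ */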
+
+/**
+ * @ingroup lwip_os
+ * Pass a received packet to tcpip_thread for input processing with
+ * ethernet_input or ip_input. Don't call directly, pass to netif_add()
+ * and call netif->input().
+ *
+ * @param p the received packet, p->payload pointing to the Ethernet header or
+ * to an IP header (if inp doesn't have NETIF_FLAG_ETHARP or
+ * NETIF_FLAG_ETHERNET flags)
+ * @param inp the network interface on which the packet was received
+ */
+err_t
+tcpip_input(struct pbuf *p, struct netif *inp)
+{
+#if LWIP_ETHERNET
+ if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) {
+ return tcpip_inpkt(p, inp, ethernet_input);
+ } else
+#endif /* LWIP_ETHERNET */
+ return tcpip_inpkt(p, inp, ip_input);
+}
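+
+/*
+ * Usage sketch (my_netif, my_netif_init and my_netif_setup are illustrative
+ * assumptions; an LWIP_IPV4 build is assumed): tcpip_input() is passed to
+ * netif_add() as the input callback, so the driver's later calls to
+ * netif->input(p, netif) end up here and the packet is processed in
+ * tcpip_thread.
+ *
+ * @code
+ * err_t my_netif_init(struct netif *netif);  // the driver's netif_init_fn
+ *
+ * static struct netif my_netif;
+ *
+ * void my_netif_setup(const ip4_addr_t *ipaddr, const ip4_addr_t *netmask,
+ *                     const ip4_addr_t *gw)
+ * {
+ *   netif_add(&my_netif, ipaddr, netmask, gw, NULL, my_netif_init, tcpip_input);
+ * }
+ * @endcode
+ */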
+
+/**
+ * Call a specific function in the thread context of
+ * tcpip_thread for easy access synchronization.
+ * A function called in that way may access lwIP core code
+ * without fearing concurrent access.
+ *
+ * @param function the function to call
+ * @param ctx parameter passed to function
+ * @param block 1 to block until the message is posted, 0 to return ERR_MEM
+ *        immediately if it cannot be posted (non-blocking mode)
+ * @return ERR_OK if the function was called, another err_t if not
+ */
+err_t
+tcpip_callback_with_block(tcpip_callback_fn function, void *ctx, u8_t block)
+{
+ struct tcpip_msg *msg;
+
+ LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox));
+
+ msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API);
+ if (msg == NULL) {
+ return ERR_MEM;
+ }
+
+ msg->type = TCPIP_MSG_CALLBACK;
+ msg->msg.cb.function = function;
+ msg->msg.cb.ctx = ctx;
+ if (block) {
+ sys_mbox_post(&mbox, msg);
+ } else {
+ if (sys_mbox_trypost(&mbox, msg) != ERR_OK) {
+ memp_free(MEMP_TCPIP_MSG_API, msg);
+ return ERR_MEM;
+ }
+ }
+ return ERR_OK;
+}
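+
+/*
+ * Usage sketch (my_deferred_work and my_defer_example are illustrative
+ * assumptions): deferring work into tcpip_thread from another thread.
+ * block=1 may wait on the mbox and is therefore not suitable for interrupt
+ * context; from an ISR use block=0 or a pre-allocated static callback message
+ * (see tcpip_callbackmsg_new()/tcpip_trycallback() below).
+ *
+ * @code
+ * static void my_deferred_work(void *ctx)
+ * {
+ *   // Runs in tcpip_thread context: safe to access lwIP core state here.
+ *   LWIP_UNUSED_ARG(ctx);
+ * }
+ *
+ * void my_defer_example(void)
+ * {
+ *   // block=1: wait for mbox space; use 0 when blocking is not allowed.
+ *   if (tcpip_callback_with_block(my_deferred_work, NULL, 1) != ERR_OK) {
+ *     // message could not be allocated
+ *   }
+ * }
+ * @endcode
+ */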
+
+#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS
+/**
+ * call sys_timeout in tcpip_thread
+ *
+ * @param msecs time in milliseconds for timeout
+ * @param h function to be called on timeout
+ * @param arg argument to pass to timeout function h
+ * @return ERR_MEM on memory error, ERR_OK otherwise
+ */
+err_t
+tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg)
+{
+ struct tcpip_msg *msg;
+
+ LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox));
+
+ msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API);
+ if (msg == NULL) {
+ return ERR_MEM;
+ }
+
+ msg->type = TCPIP_MSG_TIMEOUT;
+ msg->msg.tmo.msecs = msecs;
+ msg->msg.tmo.h = h;
+ msg->msg.tmo.arg = arg;
+ sys_mbox_post(&mbox, msg);
+ return ERR_OK;
+}
+
+/**
+ * call sys_untimeout in tcpip_thread
+ *
+ * @param h function to be called on timeout
+ * @param arg argument to pass to timeout function h
+ * @return ERR_MEM on memory error, ERR_OK otherwise
+ */
+err_t
+tcpip_untimeout(sys_timeout_handler h, void *arg)
+{
+ struct tcpip_msg *msg;
+
+ LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox));
+
+ msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API);
+ if (msg == NULL) {
+ return ERR_MEM;
+ }
+
+ msg->type = TCPIP_MSG_UNTIMEOUT;
+ msg->msg.tmo.h = h;
+ msg->msg.tmo.arg = arg;
+ sys_mbox_post(&mbox, msg);
+ return ERR_OK;
+}
+#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */
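+
+/*
+ * Usage sketch (my_timeout_handler and my_timeout_example are illustrative
+ * assumptions; requires LWIP_TCPIP_TIMEOUT && LWIP_TIMERS): arming a one-shot
+ * 1000 ms timeout from a non-TCPIP thread and cancelling it again. The
+ * handler itself always runs in tcpip_thread.
+ *
+ * @code
+ * static void my_timeout_handler(void *arg)
+ * {
+ *   // Executed in tcpip_thread about 1000 ms after tcpip_timeout().
+ *   LWIP_UNUSED_ARG(arg);
+ * }
+ *
+ * void my_timeout_example(void)
+ * {
+ *   tcpip_timeout(1000, my_timeout_handler, NULL);
+ *   // ... later, if the timeout should not fire after all:
+ *   tcpip_untimeout(my_timeout_handler, NULL);
+ * }
+ * @endcode
+ */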
+
+
+/**
+ * Sends a message to TCPIP thread to call a function. The caller thread blocks
+ * on a provided semaphore, which is NOT automatically signalled by the TCPIP
+ * thread; this has to be done by the user.
+ * It is recommended to use LWIP_TCPIP_CORE_LOCKING since it is the variant with
+ * the least runtime overhead.
+ *
+ * @param fn function to be called from TCPIP thread
+ * @param apimsg argument to API function
+ * @param sem semaphore to wait on
+ * @return ERR_OK if the function was called, another err_t if not
+ */
+err_t
+tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t* sem)
+{
+#if LWIP_TCPIP_CORE_LOCKING
+ LWIP_UNUSED_ARG(sem);
+ LOCK_TCPIP_CORE();
+ fn(apimsg);
+ UNLOCK_TCPIP_CORE();
+ return ERR_OK;
+#else /* LWIP_TCPIP_CORE_LOCKING */
+ TCPIP_MSG_VAR_DECLARE(msg);
+
+ LWIP_ASSERT("semaphore not initialized", sys_sem_valid(sem));
+ LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox));
+
+ TCPIP_MSG_VAR_ALLOC(msg);
+ TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API;
+ TCPIP_MSG_VAR_REF(msg).msg.api_msg.function = fn;
+ TCPIP_MSG_VAR_REF(msg).msg.api_msg.msg = apimsg;
+ sys_mbox_post(&mbox, &TCPIP_MSG_VAR_REF(msg));
+ sys_arch_sem_wait(sem, 0);
+ TCPIP_MSG_VAR_FREE(msg);
+ return ERR_OK;
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+}
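+
+/*
+ * Usage sketch (struct my_msg, my_do_in_tcpip and my_send_msg_example are
+ * illustrative assumptions): the function posted via tcpip_send_msg_wait_sem()
+ * must signal the provided semaphore itself once it is done, since
+ * tcpip_thread does not do this for TCPIP_MSG_API messages.
+ *
+ * @code
+ * struct my_msg {
+ *   sys_sem_t done;
+ *   int result;
+ * };
+ *
+ * static void my_do_in_tcpip(void *arg)
+ * {
+ *   struct my_msg *m = (struct my_msg *)arg;
+ *   m->result = 1;             // runs in tcpip_thread context
+ *   sys_sem_signal(&m->done);  // the called function signals the semaphore
+ * }
+ *
+ * void my_send_msg_example(void)
+ * {
+ *   struct my_msg m;
+ *   if (sys_sem_new(&m.done, 0) == ERR_OK) {
+ *     tcpip_send_msg_wait_sem(my_do_in_tcpip, &m, &m.done);
+ *     sys_sem_free(&m.done);
+ *   }
+ * }
+ * @endcode
+ */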
+
+/**
+ * Synchronously calls a function in the TCPIP thread and waits for its completion.
+ * It is recommended to use LWIP_TCPIP_CORE_LOCKING (preferred) or
+ * LWIP_NETCONN_SEM_PER_THREAD.
+ * If neither is enabled, a semaphore is created and destroyed on every call,
+ * which is usually an expensive/slow operation.
+ * @param fn Function to call
+ * @param call Call parameters
+ * @return Return value from tcpip_api_call_fn
+ */
+err_t
+tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call)
+{
+#if LWIP_TCPIP_CORE_LOCKING
+ err_t err;
+ LOCK_TCPIP_CORE();
+ err = fn(call);
+ UNLOCK_TCPIP_CORE();
+ return err;
+#else /* LWIP_TCPIP_CORE_LOCKING */
+ TCPIP_MSG_VAR_DECLARE(msg);
+
+#if !LWIP_NETCONN_SEM_PER_THREAD
+ err_t err = sys_sem_new(&call->sem, 0);
+ if (err != ERR_OK) {
+ return err;
+ }
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+
+ LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox));
+
+ TCPIP_MSG_VAR_ALLOC(msg);
+ TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API_CALL;
+ TCPIP_MSG_VAR_REF(msg).msg.api_call.arg = call;
+ TCPIP_MSG_VAR_REF(msg).msg.api_call.function = fn;
+#if LWIP_NETCONN_SEM_PER_THREAD
+ TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = LWIP_NETCONN_THREAD_SEM_GET();
+#else /* LWIP_NETCONN_SEM_PER_THREAD */
+ TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = &call->sem;
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+ sys_mbox_post(&mbox, &TCPIP_MSG_VAR_REF(msg));
+ sys_arch_sem_wait(TCPIP_MSG_VAR_REF(msg).msg.api_call.sem, 0);
+ TCPIP_MSG_VAR_FREE(msg);
+
+#if !LWIP_NETCONN_SEM_PER_THREAD
+ sys_sem_free(&call->sem);
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+
+ return call->err;
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+}
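+
+/*
+ * Usage sketch (struct my_api_call, my_api_call_fn and my_api_call_example are
+ * illustrative assumptions): the custom call is wrapped in a struct whose
+ * FIRST member is a struct tcpip_api_call_data, so the pointer passed to the
+ * function executed in tcpip_thread can be cast back to the wrapper.
+ *
+ * @code
+ * struct my_api_call {
+ *   struct tcpip_api_call_data call;  // must be the first member
+ *   int arg_in;
+ *   int result_out;
+ * };
+ *
+ * static err_t my_api_call_fn(struct tcpip_api_call_data *call)
+ * {
+ *   struct my_api_call *m = (struct my_api_call *)call;
+ *   m->result_out = m->arg_in + 1;    // runs in tcpip_thread context
+ *   return ERR_OK;
+ * }
+ *
+ * void my_api_call_example(void)
+ * {
+ *   struct my_api_call m;
+ *   m.arg_in = 1;
+ *   if (tcpip_api_call(my_api_call_fn, &m.call) == ERR_OK) {
+ *     // m.result_out is now valid
+ *   }
+ * }
+ * @endcode
+ */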
+
+/**
+ * Allocate a structure for a static callback message and initialize it.
+ * This is intended to be used to send "static" messages from interrupt context.
+ *
+ * @param function the function to call
+ * @param ctx parameter passed to function
+ * @return a struct pointer to pass to tcpip_trycallback().
+ */
+struct tcpip_callback_msg*
+tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx)
+{
+ struct tcpip_msg *msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API);
+ if (msg == NULL) {
+ return NULL;
+ }
+ msg->type = TCPIP_MSG_CALLBACK_STATIC;
+ msg->msg.cb.function = function;
+ msg->msg.cb.ctx = ctx;
+ return (struct tcpip_callback_msg*)msg;
+}
+
+/**
+ * Free a callback message allocated by tcpip_callbackmsg_new().
+ *
+ * @param msg the message to free
+ */
+void
+tcpip_callbackmsg_delete(struct tcpip_callback_msg* msg)
+{
+ memp_free(MEMP_TCPIP_MSG_API, msg);
+}
+
+/**
+ * Try to post a callback message to the tcpip_thread mbox.
+ * This is intended to be used to send "static" messages from interrupt context.
+ *
+ * @param msg pointer to the message to post
+ * @return sys_mbox_trypost() return code
+ */
+err_t
+tcpip_trycallback(struct tcpip_callback_msg* msg)
+{
+ LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox));
+ return sys_mbox_trypost(&mbox, msg);
+}
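+
+/*
+ * Usage sketch (rx_msg, handle_rx_in_tcpip, my_driver_init and my_driver_isr
+ * are illustrative assumptions): for interrupt context, a callback message is
+ * allocated once up-front with tcpip_callbackmsg_new() and then only posted
+ * (never allocated) from the ISR with tcpip_trycallback().
+ *
+ * @code
+ * static struct tcpip_callback_msg *rx_msg;
+ *
+ * static void handle_rx_in_tcpip(void *ctx)
+ * {
+ *   // Drain the driver's RX queue in tcpip_thread context.
+ *   LWIP_UNUSED_ARG(ctx);
+ * }
+ *
+ * void my_driver_init(void)
+ * {
+ *   rx_msg = tcpip_callbackmsg_new(handle_rx_in_tcpip, NULL);
+ * }
+ *
+ * void my_driver_isr(void)
+ * {
+ *   if (tcpip_trycallback(rx_msg) != ERR_OK) {
+ *     // mbox full: the message stays allocated and can be posted again later
+ *   }
+ * }
+ * @endcode
+ */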
+
+/**
+ * @ingroup lwip_os
+ * Initialize this module:
+ * - initialize all sub modules
+ * - start the tcpip_thread
+ *
+ * @param initfunc a function to call when tcpip_thread is running and finished initializing
+ * @param arg argument to pass to initfunc
+ */
+void
+tcpip_init(tcpip_init_done_fn initfunc, void *arg)
+{
+ lwip_init();
+
+ tcpip_init_done = initfunc;
+ tcpip_init_done_arg = arg;
+ if (sys_mbox_new(&mbox, TCPIP_MBOX_SIZE) != ERR_OK) {
+ LWIP_ASSERT("failed to create tcpip_thread mbox", 0);
+ }
+#if LWIP_TCPIP_CORE_LOCKING
+ if (sys_mutex_new(&lock_tcpip_core) != ERR_OK) {
+ LWIP_ASSERT("failed to create lock_tcpip_core", 0);
+ }
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+
+ sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO);
+}
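+
+/*
+ * Usage sketch (init_done_sem, on_tcpip_init_done and my_stack_startup are
+ * illustrative assumptions): start the stack and block until tcpip_thread has
+ * finished initializing before adding netifs.
+ *
+ * @code
+ * static sys_sem_t init_done_sem;
+ *
+ * static void on_tcpip_init_done(void *arg)
+ * {
+ *   sys_sem_signal((sys_sem_t *)arg);        // called from tcpip_thread
+ * }
+ *
+ * void my_stack_startup(void)
+ * {
+ *   if (sys_sem_new(&init_done_sem, 0) == ERR_OK) {
+ *     tcpip_init(on_tcpip_init_done, &init_done_sem);
+ *     sys_arch_sem_wait(&init_done_sem, 0);  // stack is up; netifs can be added now
+ *     sys_sem_free(&init_done_sem);
+ *   }
+ * }
+ * @endcode
+ */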
+
+/**
+ * Simple callback function used with tcpip_callback to free a pbuf
+ * (pbuf_free() has the wrong signature to be used as a tcpip_callback_fn directly)
+ *
+ * @param p The pbuf (chain) to be dereferenced.
+ */
+static void
+pbuf_free_int(void *p)
+{
+ struct pbuf *q = (struct pbuf *)p;
+ pbuf_free(q);
+}
+
+/**
+ * A simple wrapper function that allows you to free a pbuf from interrupt context.
+ *
+ * @param p The pbuf (chain) to be dereferenced.
+ * @return ERR_OK if callback could be enqueued, an err_t if not
+ */
+err_t
+pbuf_free_callback(struct pbuf *p)
+{
+ return tcpip_callback_with_block(pbuf_free_int, p, 0);
+}
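+
+/*
+ * Usage sketch (my_tx_done_isr is an illustrative assumption): freeing a
+ * transmitted pbuf from a DMA TX-complete interrupt, where freeing it directly
+ * may not be safe; the actual pbuf_free() then runs in tcpip_thread.
+ *
+ * @code
+ * void my_tx_done_isr(struct pbuf *sent)
+ * {
+ *   if (pbuf_free_callback(sent) != ERR_OK) {
+ *     // mbox full or out of memory: the pbuf could not be queued for freeing
+ *   }
+ * }
+ * @endcode
+ */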
+
+/**
+ * A simple wrapper function that allows you to free heap memory from
+ * interrupt context.
+ *
+ * @param m the heap memory to free
+ * @return ERR_OK if callback could be enqueued, an err_t if not
+ */
+err_t
+mem_free_callback(void *m)
+{
+ return tcpip_callback_with_block(mem_free, m, 0);
+}
+
+#endif /* !NO_SYS */