aboutsummaryrefslogtreecommitdiff
path: root/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy
diff options
context:
space:
mode:
Diffstat (limited to 'thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy')
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_config_5W_app.h52
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_config_app.h106
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_config_conn.h82
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_debug_app.h198
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_debug_conn.h166
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy.c88
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy.h308
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci.c1704
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci.h183
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci_slip.c689
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci_slip_cdc.c720
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_nohci.c382
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_5W_master.c823
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_5W_slave.c644
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_master.c804
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_slave.c613
-rw-r--r--thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_uart.c357
17 files changed, 7919 insertions, 0 deletions
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_config_5W_app.h b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_config_5W_app.h
new file mode 100644
index 0000000..c9081e1
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_config_5W_app.h
@@ -0,0 +1,52 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef SER_CONFIG_5W_APP_H__
+#define SER_CONFIG_5W_APP_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // SER_CONFIG_5W_APP_H__
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_config_app.h b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_config_app.h
new file mode 100644
index 0000000..04f9f74
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_config_app.h
@@ -0,0 +1,106 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef SER_PHY_CONFIG_APP_H__
+#define SER_PHY_CONFIG_APP_H__
+
+#include "boards.h"
+#include "ser_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(SPI_MASTER_0_ENABLE)
+#define SER_PHY_SPI_MASTER SPI_MASTER_0
+#endif
+#if defined(SPI_MASTER_1_ENABLE)
+#define SER_PHY_SPI_MASTER SPI_MASTER_1
+#endif
+#if defined(SPI_MASTER_2_ENABLE)
+#define SER_PHY_SPI_MASTER SPI_MASTER_2
+#endif
+
+#if (defined(SPI0_ENABLED) && (SPI0_ENABLED == 1)) || defined(SPI_MASTER_0_ENABLE)
+
+#define SER_PHY_SPI_MASTER_INSTANCE NRF_DRV_SPI_INSTANCE(0)
+#define SER_PHY_SPI_MASTER_PIN_SCK SER_APP_SPIM0_SCK_PIN
+#define SER_PHY_SPI_MASTER_PIN_MISO SER_APP_SPIM0_MISO_PIN
+#define SER_PHY_SPI_MASTER_PIN_MOSI SER_APP_SPIM0_MOSI_PIN
+#define SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT SER_APP_SPIM0_SS_PIN
+#define SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST SER_APP_SPIM0_REQ_PIN
+#define SER_PHY_SPI_MASTER_PIN_SLAVE_READY SER_APP_SPIM0_RDY_PIN
+
+#elif (defined(SPI1_ENABLED) && (SPI1_ENABLED == 1)) || defined(SPI_MASTER_1_ENABLE)
+
+#define SER_PHY_SPI_MASTER_INSTANCE NRF_DRV_SPI_INSTANCE(1)
+#define SER_PHY_SPI_MASTER_PIN_SCK SER_APP_SPIM1_SCK_PIN
+#define SER_PHY_SPI_MASTER_PIN_MISO SER_APP_SPIM1_MISO_PIN
+#define SER_PHY_SPI_MASTER_PIN_MOSI SER_APP_SPIM1_MOSI_PIN
+#define SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT SER_APP_SPIM1_SS_PIN
+#define SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST SER_APP_SPIM1_REQ_PIN
+#define SER_PHY_SPI_MASTER_PIN_SLAVE_READY SER_APP_SPIM1_RDY_PIN
+
+#elif (defined(SPI2_ENABLED) && (SPI2_ENABLED == 1)) || defined(SPI_MASTER_2_ENABLE)
+
+#define SER_PHY_SPI_MASTER_INSTANCE NRF_DRV_SPI_INSTANCE(2)
+#define SER_PHY_SPI_MASTER_PIN_SCK SER_APP_SPIM2_SCK_PIN
+#define SER_PHY_SPI_MASTER_PIN_MISO SER_APP_SPIM2_MISO_PIN
+#define SER_PHY_SPI_MASTER_PIN_MOSI SER_APP_SPIM2_MOSI_PIN
+#define SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT SER_APP_SPIM2_SS_PIN
+#define SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST SER_APP_SPIM2_REQ_PIN
+#define SER_PHY_SPI_MASTER_PIN_SLAVE_READY SER_APP_SPIM2_RDY_PIN
+
+#endif
+
+#define CONN_CHIP_RESET_PIN_NO SER_CONN_CHIP_RESET_PIN /**< Pin used for reseting the connectivity. */
+
+/* UART configuration */
+#define UART_IRQ_PRIORITY APP_IRQ_PRIORITY_MID
+#define SER_PHY_UART_RX SER_APP_RX_PIN
+#define SER_PHY_UART_TX SER_APP_TX_PIN
+#define SER_PHY_UART_CTS SER_APP_CTS_PIN
+#define SER_PHY_UART_RTS SER_APP_RTS_PIN
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // SER_PHY_CONFIG_APP_H__
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_config_conn.h b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_config_conn.h
new file mode 100644
index 0000000..baff210
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_config_conn.h
@@ -0,0 +1,82 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef SER_PHY_CONFIG_CONN_H__
+#define SER_PHY_CONFIG_CONN_H__
+
+#include "boards.h"
+#include "ser_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***********************************************************************************************//**
+ * SER_PHY layer configuration.
+ **************************************************************************************************/
+#define SER_PHY_SPI_PPI_RDY_CH 0
+#define SER_PHY_SPI_GPIOTE_RDY_CH 0
+
+#ifdef NRF_SPIS0
+#define SER_PHY_SPI_SLAVE_INSTANCE 0
+#else
+#define SER_PHY_SPI_SLAVE_INSTANCE 1
+#endif
+
+#define SER_PHY_SPI_SLAVE_REQ_PIN SER_CON_SPIS_REQ_PIN
+#define SER_PHY_SPI_SLAVE_RDY_PIN SER_CON_SPIS_RDY_PIN
+#define SER_PHY_SPI_SLAVE_SCK_PIN SER_CON_SPIS_SCK_PIN
+#define SER_PHY_SPI_SLAVE_MISO_PIN SER_CON_SPIS_MISO_PIN
+#define SER_PHY_SPI_SLAVE_MOSI_PIN SER_CON_SPIS_MOSI_PIN
+#define SER_PHY_SPI_SLAVE_SS_PIN SER_CON_SPIS_CSN_PIN
+
+/* UART configuration */
+#define UART_IRQ_PRIORITY APP_IRQ_PRIORITY_LOWEST
+
+#define SER_PHY_UART_RX SER_CON_RX_PIN
+#define SER_PHY_UART_TX SER_CON_TX_PIN
+#define SER_PHY_UART_CTS SER_CON_CTS_PIN
+#define SER_PHY_UART_RTS SER_CON_RTS_PIN
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // SER_PHY_CONFIG_CONN_H__
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_debug_app.h b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_debug_app.h
new file mode 100644
index 0000000..d60f15c
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_debug_app.h
@@ -0,0 +1,198 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef SER_PHY_DEBUG_APP_H__
+#define SER_PHY_DEBUG_APP_H__
+
+#ifndef SER_PHY_DEBUG_APP_ENABLE
+
+#define DEBUG_EVT_SPI_MASTER_RAW_REQUEST(data)
+#define DEBUG_EVT_SPI_MASTER_RAW_READY(data)
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_DONE(data)
+#define DEBUG_EVT_SPI_MASTER_RAW_API_CALL(data)
+#define DEBUG_EVT_SPI_MASTER_RAW_READY_EDGE(data)
+#define DEBUG_EVT_SPI_MASTER_RAW_REQUEST_EDGE(data)
+#define DEBUG_EVT_SPI_MASTER_PHY_TX_PKT_SENT(data)
+#define DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_DROPPED(data)
+#define DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_RECEIVED(data)
+#define DEBUG_EVT_SPI_MASTER_PHY_BUF_REQUEST(data)
+
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_GUARDED(data)
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_PASSED(data)
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_ABORTED(data)
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_RESTARTED(data)
+
+#else
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//Low level hardware events
+typedef enum
+{
+ SPI_MASTER_RAW_READY,
+ SPI_MASTER_RAW_REQUEST,
+ SPI_MASTER_RAW_XFER_DONE,
+ SPI_MASTER_RAW_API_CALL,
+ SPI_MASTER_RAW_READY_EDGE,
+ SPI_MASTER_RAW_REQUEST_EDGE,
+ SPI_MASTER_RAW_XFER_STARTED,
+ SPI_MASTER_RAW_XFER_GUARDED,
+ SPI_MASTER_RAW_XFER_PASSED,
+ SPI_MASTER_RAW_XFER_ABORTED,
+ SPI_MASTER_RAW_XFER_RESTARTED,
+ SPI_MASTER_PHY_TX_PKT_SENT,
+ SPI_MASTER_PHY_BUF_REQUEST,
+ SPI_MASTER_PHY_RX_PKT_RECEIVED,
+ SPI_MASTER_PHY_RX_PKT_DROPPED,
+ SPI_MASTER_EVT_MAX
+} spi_master_raw_evt_type_t;
+
+
+//Low level hardware event definition
+typedef struct
+{
+ spi_master_raw_evt_type_t evt;
+ uint32_t data;
+} spi_master_raw_evt_t;
+
+typedef void (*spi_master_raw_callback_t)(spi_master_raw_evt_t event);
+
+void debug_init(spi_master_raw_callback_t spi_master_raw_evt_callback);
+
+void debug_evt(spi_master_raw_evt_type_t evt, uint32_t data);
+
+
+#define DEBUG_EVT(evt, data) \
+do { \
+ debug_evt(evt, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_RAW_REQUEST(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_REQUEST, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_RAW_READY(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_READY, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_DONE(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_XFER_DONE, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_RAW_API_CALL(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_API_CALL, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_RAW_READY_EDGE(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_READY_EDGE, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_RAW_REQUEST_EDGE(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_REQUEST_EDGE, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_PHY_TX_PKT_SENT(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_PHY_TX_PKT_SENT, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_DROPPED(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_PHY_RX_PKT_DROPPED, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_RECEIVED(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_PHY_RX_PKT_RECEIVED, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_MASTER_PHY_BUF_REQUEST(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_PHY_BUF_REQUEST, data); \
+} while (0);
+
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_GUARDED(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_XFER_GUARDED, data); \
+} while (0);
+
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_PASSED(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_XFER_PASSED, data); \
+} while (0);
+
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_ABORTED(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_XFER_ABORTED, data); \
+} while (0);
+
+#define DEBUG_EVT_SPI_MASTER_RAW_XFER_RESTARTED(data) \
+do { \
+ DEBUG_EVT(SPI_MASTER_RAW_XFER_RESTARTED, data); \
+} while (0);
+
+
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //SER_PHY_DEBUG_APP_H__
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_debug_conn.h b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_debug_conn.h
new file mode 100644
index 0000000..f79b2d0
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/config/ser_phy_debug_conn.h
@@ -0,0 +1,166 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef SER_PHY_DEBUG_CONN_H__
+#define SER_PHY_DEBUG_CONN_H__
+
+#ifndef SER_PHY_DEBUG_CONN_ENABLE
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(data);
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(data);
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(data);
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET(data);
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_REQ_CLEARED(data);
+
+#define DEBUG_EVT_SPI_SLAVE_PHY_BUF_REQUEST(data);
+
+#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_RECEIVED(data);
+
+#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_DROPPED(data);
+
+#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_SENT(data);
+
+#else
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// low level hardware event types
+typedef enum
+{
+ SPI_SLAVE_RAW_BUFFERS_SET,
+ SPI_SLAVE_RAW_RX_XFER_DONE,
+ SPI_SLAVE_RAW_TX_XFER_DONE,
+ SPI_SLAVE_RAW_REQ_SET,
+ SPI_SLAVE_RAW_REQ_CLEARED,
+ SPI_SLAVE_PHY_BUF_REQUEST,
+ SPI_SLAVE_PHY_PKT_SENT,
+ SPI_SLAVE_PHY_PKT_RECEIVED,
+ SPI_SLAVE_PHY_PKT_DROPPED,
+ SPI_SLAVE_RAW_EVT_TYPE_MAX
+} spi_slave_raw_evt_type_t;
+
+// low level hardware event definition
+typedef struct
+{
+ spi_slave_raw_evt_type_t evt_type;
+ uint32_t data;
+} spi_slave_raw_evt_t;
+
+typedef void (*spi_slave_raw_callback_t)(spi_slave_raw_evt_t event);
+
+void debug_init(spi_slave_raw_callback_t spi_slave_raw_evt_callback);
+
+void debug_evt(spi_slave_raw_evt_type_t evt_type, uint32_t data);
+
+#define DEBUG_EVT(evt, data) \
+do { \
+ debug_evt(evt, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(data) \
+do { \
+ DEBUG_EVT(SPI_SLAVE_RAW_RX_XFER_DONE, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(data) \
+do { \
+ DEBUG_EVT(SPI_SLAVE_RAW_TX_XFER_DONE, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(data) \
+do { \
+ DEBUG_EVT(SPI_SLAVE_RAW_BUFFERS_SET, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET(data) \
+do { \
+ DEBUG_EVT(SPI_SLAVE_RAW_REQ_SET, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_SLAVE_RAW_REQ_CLEARED(data) \
+do { \
+ DEBUG_EVT(SPI_SLAVE_RAW_REQ_CLEARED, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_SLAVE_PHY_BUF_REQUEST(data) \
+do { \
+ DEBUG_EVT(SPI_SLAVE_PHY_BUF_REQUEST, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_RECEIVED(data) \
+do { \
+ DEBUG_EVT(SPI_SLAVE_PHY_PKT_RECEIVED, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_DROPPED(data) \
+do { \
+ DEBUG_EVT(SPI_SLAVE_PHY_PKT_DROPPED, data); \
+} while (0);
+
+
+#define DEBUG_EVT_SPI_SLAVE_PHY_PKT_SENT(data) \
+do { \
+ DEBUG_EVT(SPI_SLAVE_PHY_PKT_SENT, data); \
+} while (0);
+
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //SER_PHY_DEBUG_CONN_H__
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy.c
new file mode 100644
index 0000000..57220fe
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy.c
@@ -0,0 +1,88 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include "ser_phy.h"
+#include "app_error.h"
+
+
+__weak uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
+{
+ /* A function stub. Function should be implemented according to ser_phy.h API. */
+ APP_ERROR_CHECK_BOOL(false);
+
+ return NRF_SUCCESS;
+}
+
+__weak uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
+{
+ /* A function stub. Function should be implemented according to ser_phy.h API. */
+ APP_ERROR_CHECK_BOOL(false);
+
+ return NRF_SUCCESS;
+}
+
+__weak uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
+{
+ /* A function stub. Function should be implemented according to ser_phy.h API. */
+ APP_ERROR_CHECK_BOOL(false);
+
+ return NRF_SUCCESS;
+}
+
+__weak void ser_phy_close(void)
+{
+ /* A function stub. Function should be implemented according to ser_phy.h API. */
+ APP_ERROR_CHECK_BOOL(false);
+}
+
+
+__weak void ser_phy_interrupts_enable(void)
+{
+ /* A function stub. Function should be implemented according to ser_phy.h API. */
+ APP_ERROR_CHECK_BOOL(false);
+}
+
+
+__weak void ser_phy_interrupts_disable(void)
+{
+ /* A function stub. Function should be implemented according to ser_phy.h API. */
+ APP_ERROR_CHECK_BOOL(false);
+}
+
+
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy.h b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy.h
new file mode 100644
index 0000000..d40c06e
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy.h
@@ -0,0 +1,308 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/** @file
+ *
+ * @defgroup ser_phy Serialization PHY
+ * @{
+ * @ingroup ble_sdk_lib_serialization
+ *
+ * @brief PHY layer for serialization.
+ *
+ * @details The @ref ser_phy library declares functions and definitions of data structures and
+ * identifiers (typedef enum) that are used as API of the serialization PHY layer.
+ *
+ * \par Rationale
+ * Each specific PHY layer (SPI, I2C, UART, low power UART etc.) should provide the same API. This
+ * allows the layer above (the HAL Transport layer), which is responsible for controlling the PHY
+ * layer, memory management, CRC, retransmission etc., to be hardware independent.
+ *
+ *
+ * \par Interlayer communication and control
+ * The PHY layer is controlled by the HAL transport layer by calling functions declared in
+ * the @ref ser_phy library.
+ *
+ * @par
+ * The PHY layer communicates events to the HAL transport layer by calling a callback function.
+ * A handler to this function is passed in the @ref ser_phy_open function. This callback function
 * should be called with a parameter of type @ref ser_phy_evt_t, filled according to the event to be
+ * passed. Types of supported events are defined in @ref ser_phy_evt_type_t.
+ *
+ * @par
+ * For example, to pass an event indicating that an RX packet has been successfully received, first a
+ * struct of type @ref ser_phy_evt_t must be filled:
+ * @code
+ * ser_phy_evt_t phy_evt;
+ * phy_evt.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
+ * phy_evt.evt_params.rx_pkt_received.p_buffer = (pointer to the RX buffer);
+ * phy_evt.evt_params.rx_pkt_received.num_of_bytes = (number of received bytes);
+ * @endcode
+ * Then, the callback function must be called:
+ * @code
+ * events_handler(phy_evt);
+ * @endcode
+ * All functions declared in the @ref ser_phy file (ser_phy.h) must be implemented. Some events specified in
+ * @ref ser_phy_evt_type_t are optional to implement.
+ *
+ * \par Transmitting a packet
+ * Each PHY layer is responsible for adding the PHY header to a packet to be sent. This header
+ * consists of a 16-bit field that carries the packet length (the uint16_encode function defined in
+ * app_util.h should be used to ensure endianness independence). A pointer to a packet to be sent
+ * and length of the packet are parameters of the @ref ser_phy_tx_pkt_send function. When a packet
+ * has been transmitted, an event of type @ref SER_PHY_EVT_TX_PKT_SENT should be emitted.
+ *
+ * \image html ser_phy_transport_tx.svg "TX - interlayer communication"
+ *
+ * \par Receiving a packet
+ * The PHY layer should be able to store only the PHY header (16-bit field carrying the packet
+ * length). After the PHY header has been received, the transmission is stopped and the PHY
+ * layer must send a request to the HAL transport layer for memory to store the packet - an event
+ * of type @ref SER_PHY_EVT_RX_BUF_REQUEST with event parameters defined in
+ * @ref ser_phy_evt_rx_buf_request_params_t (the uint16_decode function defined in app_util.h should
+ * be used for header decoding to ensure endianness independence). The transmission should be
+ * resumed when the @ref ser_phy_rx_buf_set function has been called.
+ *
+ * @par
+ * When the @ref ser_phy_rx_buf_set function parameter equals NULL, there is not
+ * enough memory to store the packet. However, the packet will be received to a dummy location to
+ * ensure continuous communication. After receiving has finished, an event of type
+ * @ref SER_PHY_EVT_RX_PKT_DROPPED is generated.
+ *
+ * \image html ser_phy_transport_rx_dropped.svg "RX dropping - interlayer communication"
+ *
+ * @par
+ * When the @ref ser_phy_rx_buf_set function parameter is different than NULL, the packet is
+ * received to a buffer pointed to by it. After receiving has finished, an event of type
+ * @ref SER_PHY_EVT_RX_PKT_RECEIVED is generated with event parameters defined in
+ * @ref ser_phy_evt_rx_pkt_received_params_t.
+ *
+ * \image html ser_phy_transport_rx_received.svg "RX - interlayer communication"
+ *
+ * \par PHY layer errors
+ * PHY layer errors can be signaled by an event of type @ref SER_PHY_EVT_RX_OVERFLOW_ERROR or
+ * @ref SER_PHY_EVT_TX_OVERREAD_ERROR or @ref SER_PHY_EVT_HW_ERROR with event parameters defined in
+ * @ref ser_phy_evt_hw_error_params_t.
+ *
+ * @par Available PHY layers
+ * The following PHY layers are available:
+ * - @ref ser_phy_spi_page
+ * - @ref ser_phy_spi_5W_page
+ * - @ref ser_phy_uart_page
+ * - @ref ser_phy_uart_hci_page
+ * <!-- - @ref ser_phy_usb_hci_page -->
+ *
+ */
+
+#ifndef SER_PHY_H__
+#define SER_PHY_H__
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/**@brief Serialization PHY module event types. */
typedef enum
{
    SER_PHY_EVT_TX_PKT_SENT = 0, /**< Obligatory to implement. An event indicating that a TX packet
                                  *   has been transmitted. */
    SER_PHY_EVT_RX_BUF_REQUEST,  /**< Obligatory to implement. An event indicating that the PHY layer
                                  *   needs a buffer for an RX packet. The PHY flow should be blocked
                                  *   until the @ref ser_phy_rx_buf_set function is called. */
    SER_PHY_EVT_RX_PKT_RECEIVED, /**< Obligatory to implement. An event indicating that an RX packet
                                  *   has been successfully received. */
    SER_PHY_EVT_RX_PKT_DROPPED,  /**< Obligatory to implement. An event indicating that the RX packet
                                  *   receiving has been finished but the packet was discarded because
                                  *   it was longer than the available buffer. */

    SER_PHY_EVT_RX_OVERFLOW_ERROR, /**< Optional to implement. An event indicating that more
                                    *   information has been transmitted than the PHY module could
                                    *   handle. */
    SER_PHY_EVT_TX_OVERREAD_ERROR, /**< Optional to implement. An event indicating that the PHY module
                                    *   was forced to transmit more information than possessed. */
    SER_PHY_EVT_HW_ERROR,          /**< Optional to implement. An event indicating a hardware error
                                    *   in the PHY module. */
    SER_PHY_EVT_TYPE_MAX           /**< Enumeration upper bound. */
} ser_phy_evt_type_t;
+
+
/**@brief A struct containing parameters of event of type @ref SER_PHY_EVT_RX_BUF_REQUEST. */
typedef struct
{
    uint16_t num_of_bytes; /**< Length of a buffer in octets that the layer above the PHY module should
                            *   deliver, so that the PHY module can receive a packet. */
} ser_phy_evt_rx_buf_request_params_t;


/**@brief A struct containing parameters of event of type @ref SER_PHY_EVT_RX_PKT_RECEIVED. */
typedef struct
{
    uint8_t * p_buffer;     /**< Pointer to a buffer containing the received packet. */
    uint16_t  num_of_bytes; /**< Length of the received packet in octets. */
} ser_phy_evt_rx_pkt_received_params_t;


/**@brief A struct containing parameters of event of type @ref SER_PHY_EVT_HW_ERROR. */
typedef struct
{
    uint32_t  error_code; /**< Hardware error code - specific for a microcontroller. */
    uint8_t * p_buffer;   /**< Pointer to the buffer that was processed when the error occurred. */
} ser_phy_evt_hw_error_params_t;
+
+
/**@brief A struct containing events from a Serialization PHY module.
 *
 * @note Some events do not have parameters; then the whole information is contained in the evt_type.
 */
typedef struct
{
    ser_phy_evt_type_t evt_type; /**< Type of event. */

    union  /**< Union alternative identified by evt_type in enclosing struct. */
    {
        /** Parameters of event of type @ref SER_PHY_EVT_RX_BUF_REQUEST. */
        ser_phy_evt_rx_buf_request_params_t rx_buf_request;
        /** Parameters of event of type @ref SER_PHY_EVT_RX_PKT_RECEIVED. */
        ser_phy_evt_rx_pkt_received_params_t rx_pkt_received;
        /** Parameters of the event of type @ref SER_PHY_EVT_HW_ERROR. */
        ser_phy_evt_hw_error_params_t hw_error;
    } evt_params;
} ser_phy_evt_t;


/**@brief A type of generic callback function handler to be used by all PHY module events.
 *
 * @param[in] event Serialization PHY module event.
 */
typedef void (*ser_phy_events_handler_t)(ser_phy_evt_t event);
+
+
+/**@brief Function for opening and initializing the PHY module.
+ *
+ * @note The function initializes hardware and internal module states, and registers callback
+ * function to be used by all PHY module events.
+ *
+ * @warning If the function has been already called, the function @ref ser_phy_close has to be
+ * called before ser_phy_open can be called again.
+ *
+ * @param[in] events_handler Generic callback function handler to be used by all PHY module
+ * events.
+ *
+ * @retval NRF_SUCCESS Operation success.
+ * @retval NRF_ERROR_INVALID_STATE Operation failure. The function has been already called.
+ * To call it again, the function @ref ser_phy_close has to be
+ * called first.
+ * @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied.
+ * @retval NRF_ERROR_INVALID_PARAM Operation failure. Hardware initialization parameters are not
+ * supported.
+ */
+uint32_t ser_phy_open(ser_phy_events_handler_t events_handler);
+
+
+/**@brief Function for transmitting a packet.
+ *
+ * @note The function adds a packet pointed by p_buffer parameter to a transmission queue and
+ * schedules generation of an event of type @ref SER_PHY_EVT_TX_PKT_SENT upon transmission
+ * completion.
+ *
+ * @param[in] p_buffer Pointer to a buffer to transmit.
+ * @param[in] num_of_bytes Number of octets to transmit. Must be more than 0.
+ *
+ * @retval NRF_SUCCESS Operation success. Packet was added to the transmission queue
 *                                 and an event will be sent upon transmission completion.
+ * @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied.
 * @retval NRF_ERROR_INVALID_PARAM Operation failure. The num_of_bytes parameter is equal to 0.
+ * @retval NRF_ERROR_BUSY Operation failure. Transmitting of a packet in progress.
+ */
+uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes);
+
+
+/**@brief Function for setting an RX buffer and enabling reception of data (the PHY flow).
+ *
+ * @note The function has to be called as a response to an event of type
+ * @ref SER_PHY_EVT_RX_BUF_REQUEST. The function sets an RX buffer and enables reception of
+ * data (enables the PHY flow).
+ * Size of a buffer pointed by the p_buffer parameter should be at least equal to the
+ * num_of_bytes parameter passed within the event (@ref ser_phy_evt_rx_buf_request_params_t),
+ * or p_buffer should be equal to NULL if there is not enough memory.
+ * When p_buffer is different from NULL and num_of_bytes octets have been received, an event of
+ * type @ref SER_PHY_EVT_RX_PKT_RECEIVED is generated
+ * (@ref ser_phy_evt_rx_pkt_received_params_t).
+ * When p_buffer is equal to NULL, data is received to dummy location to ensure continuous
+ * communication. Then, if num_of_bytes octets have been received, an event of type
+ * @ref SER_PHY_EVT_RX_PKT_DROPPED is generated.
+ *
+ * @param[in] p_buffer Pointer to an RX buffer in which to receive.
+ *
+ * @retval NRF_SUCCESS Operation success.
+ * @retval NRF_ERROR_INVALID_STATE Operation failure. A buffer was set without request.
+ */
+uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer);
+
+
+/**@brief Function for closing the PHY module.
+ *
+ * @note The function disables hardware, resets internal module states, and unregisters the events
+ * callback function.
+ */
+void ser_phy_close(void);
+
+
+/**@brief Function for enabling the PHY module interrupts.
+ *
+ * @note The function enables all interrupts that are used by the PHY module (and only those).
+ */
+void ser_phy_interrupts_enable(void);
+
+
+/**@brief Function for disabling the PHY module interrupts.
+ *
+ * @note The function disables all interrupts that are used by the PHY module (and only those).
+ */
+void ser_phy_interrupts_disable(void);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SER_PHY_H__ */
+/** @} */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci.c
new file mode 100644
index 0000000..4e1f85a
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci.c
@@ -0,0 +1,1704 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <stddef.h>
+#include <string.h>
+
+#include "app_error.h"
+#include "app_util.h"
+#include "app_util_platform.h"
+#include "app_timer.h"
+#include "nrf_queue.h"
+#include "ser_phy.h"
+#include "ser_phy_hci.h"
+#include "crc16.h"
+#include "nrf_soc.h"
+#include "ser_config.h"
+#include "ser_phy_debug_comm.h"
+#define NRF_LOG_MODULE_NAME sphy_hci
+#include "nrf_log.h"
+NRF_LOG_MODULE_REGISTER();
+// hide globals for release version, expose for debug version
+#if defined(SER_PHY_HCI_DEBUG_ENABLE)
+#define _static
+#else
+#define _static static
+#endif
+
+#define PKT_HDR_SIZE 4 /**< Packet header size in number of bytes. */
+#define PKT_CRC_SIZE 2 /**< Packet CRC size in number of bytes. */
+#define MAX_PACKET_SIZE_IN_BITS (11uL * \
+ (SER_HAL_TRANSPORT_MAX_PKT_SIZE + PKT_HDR_SIZE + PKT_CRC_SIZE))
+#define BAUD_TIME_us (1000000uL / SER_PHY_UART_BAUDRATE_VAL)
+
+#define TX_EVT_QUEUE_SIZE 16
+#define RX_EVT_QUEUE_SIZE 16
+#define PKT_TYPE_VENDOR_SPECIFIC 14 /**< Packet type vendor specific. */
+#define PKT_TYPE_ACK 0 /**< Packet type acknowledgement. */
+#define PKT_TYPE_LINK_CONTROL 15 /**< Packet type link control. */
+#define PKT_TYPE_RESET 5 /**< Packet type reset. */
+#define DATA_INTEGRITY_MASK (1 << 6) /**< Mask for data integrity bit in the packet header. */
+#define RELIABLE_PKT_MASK (1 << 7) /**< Mask for reliable packet bit in the packet header. */
+#define INITIAL_ACK_NUMBER_EXPECTED 0 /**< Initial acknowledge number expected. */
+#define INITIAL_SEQ_NUMBER INITIAL_ACK_NUMBER_EXPECTED /**< Initial acknowledge number transmitted. */
+#define INVALID_PKT_TYPE 0xFFFFFFFFu /**< Internal invalid packet type value. */
+#define MAX_TRANSMISSION_TIME_ms (MAX_PACKET_SIZE_IN_BITS * BAUD_TIME_us / 1000uL) /**< Max transmission time of a single application packet over UART in units of mseconds. */
+#define RETRANSMISSION_TIMEOUT_IN_ms (50uL * MAX_TRANSMISSION_TIME_ms) /**< Retransmission timeout for application packet in units of mseconds. */
+
+#ifdef HCI_LINK_CONTROL
+#define HCI_PKT_SYNC 0x7E01u /**< Link Control Packet: type SYNC */
+#define HCI_PKT_SYNC_RSP 0x7D02u /**< Link Control Packet: type SYNC RESPONSE */
+#define HCI_PKT_CONFIG 0xFC03u /**< Link Control Packet: type CONFIG */
+#define HCI_PKT_CONFIG_RSP 0x7B04u /**< Link Control Packet: type CONFIG RESPONSE */
+#define HCI_CONFIG_FIELD 0x11u /**< Configuration field of CONFIG and CONFIG_RSP packet */
+#define HCI_PKT_SYNC_SIZE 6u /**< Size of SYNC and SYNC_RSP packet */
+#define HCI_PKT_CONFIG_SIZE 7u /**< Size of CONFIG and CONFIG_RSP packet */
#define HCI_LINK_CONTROL_PKT_INVALID 0xFFFFu /**< Marker for an invalid or unrecognized link control packet */
+#define HCI_LINK_CONTROL_TIMEOUT 1u /**< Default link control timeout. */
+#endif /* HCI_LINK_CONTROL */
+
+
+#define RETRANSMISSION_TIMEOUT_IN_TICKS (APP_TIMER_TICKS(RETRANSMISSION_TIMEOUT_IN_ms)) /**< Retransmission timeout for application packet in units of timer ticks. */
+#define MAX_RETRY_COUNT 5 /**< Max retransmission retry count for application packets. */
+
+#if (defined(HCI_TIMER0))
+#define HCI_TIMER NRF_TIMER0
+#define HCI_TIMER_IRQn TIMER0_IRQn
+#define HCI_TIMER_IRQHandler TIMER0_IRQHandler
+#elif (defined(HCI_TIMER1))
+#define HCI_TIMER NRF_TIMER1
+#define HCI_TIMER_IRQn TIMER1_IRQn
+#define HCI_TIMER_IRQHandler TIMER1_IRQHandler
+#elif (defined(HCI_TIMER2))
+#define HCI_TIMER NRF_TIMER2
+#define HCI_TIMER_IRQn TIMER2_IRQn
+#define HCI_TIMER_IRQHandler TIMER2_IRQHandler
+#else
+#define HCI_APP_TIMER
+#endif
+
+
/**@brief States of the HCI TX event-driven state machine. */
typedef enum
{
    HCI_TX_STATE_DISABLE,                /**< TX machine disabled. */
    HCI_TX_STATE_SEND,                   /**< Ready to start a packet transmission. */
    HCI_TX_STATE_WAIT_FOR_FIRST_TX_END,  /**< Waiting for the first transmission attempt to end. */
    HCI_TX_STATE_WAIT_FOR_ACK_OR_TX_END, /**< Waiting for either an ACK or the end of transmission. */
    HCI_TX_STATE_WAIT_FOR_ACK,           /**< Waiting for an acknowledgement from the peer. */
    HCI_TX_STATE_WAIT_FOR_TX_END         /**< Waiting for the ongoing transmission to end. */
} hci_tx_fsm_state_t;

/**@brief States of the HCI RX event-driven state machine. */
typedef enum
{
    HCI_RX_STATE_DISABLE,                /**< RX machine disabled. */
    HCI_RX_STATE_RECEIVE,                /**< Ready to receive a packet. */
    HCI_RX_STATE_WAIT_FOR_MEM,           /**< Waiting for the upper layer to grant an RX buffer. */
    HCI_RX_STATE_WAIT_FOR_SLIP_ACK_END,  /**< Waiting for the SLIP layer to finish sending an ACK. */
    HCI_RX_STATE_WAIT_FOR_SLIP_NACK_END, /**< Waiting for the SLIP layer to finish sending a NACK. */
} hci_rx_fsm_state_t;

/**@brief Timer event types delivered to the HCI state machines. */
typedef enum
{
    HCI_EVT_TIMEOUT, /**< Retransmission timeout expired. */
} hci_timer_evt_type_t;

/**@brief Internal ser_phy request types delivered to the HCI state machines. */
typedef enum
{
    HCI_SER_PHY_TX_REQUEST,     /**< Upper layer requested a packet transmission. */
    HCI_SER_PHY_RX_BUF_GRANTED, /**< Upper layer granted an RX buffer. */
    HCI_SER_PHY_EVT_GEN_ENABLE, /**< Enable generation of ser_phy events. */
    HCI_SER_PHY_EVT_GEN_DISABLE /**< Disable generation of ser_phy events. */
} ser_phy_int_evt_type_t;

/**@brief Sources of events processed by the HCI state machines. */
typedef enum
{
    HCI_SER_PHY_EVT, /**< Event originating from the upper (ser_phy) layer. */
    HCI_SLIP_EVT,    /**< Event originating from the lower (SLIP) layer. */
    HCI_TIMER_EVT,   /**< Event originating from the timeout timer. */
} hci_evt_source_t;

#ifdef HCI_LINK_CONTROL
/**@brief Link establishment modes of the HCI link control protocol
 * (progressed by the SYNC/CONFIG packet exchange). */
typedef enum
{
    HCI_MODE_DISABLE,       /**< Link control disabled. */
    HCI_MODE_UNINITIALIZED, /**< SYNC exchange not yet completed. */
    HCI_MODE_INITIALIZED,   /**< SYNC done; CONFIG exchange not yet completed. */
    HCI_MODE_ACTIVE,        /**< Link fully established. */
} hci_mode_t;
#endif /*HCI_LINK_CONTROL */

/**@brief Event payload carried for HCI_TIMER_EVT. */
typedef struct
{
    hci_timer_evt_type_t evt_type; /**< Type of an event. */
} hci_timer_evt_t;

/**@brief Event payload carried for HCI_SER_PHY_EVT. */
typedef struct
{
    ser_phy_int_evt_type_t evt_type; /**< Type of an event. */
} ser_phy_int_evt_t;

/**@brief Generic event record processed by the HCI state machines. */
typedef struct
{
    hci_evt_source_t evt_source; /**< Source of an event; selects the union member below. */
    union
    {
        ser_phy_int_evt_t      ser_phy_evt;      /**< ser_phy event. */
        ser_phy_hci_slip_evt_t ser_phy_slip_evt; /**< ser_phy_hci event. */
        hci_timer_evt_t        timer_evt;        /**< timer event. */
    } evt;
} hci_evt_t;
+
+_static uint8_t m_tx_packet_header[PKT_HDR_SIZE];
+_static uint8_t m_tx_packet_crc[PKT_CRC_SIZE];
+_static uint8_t m_tx_ack_packet[PKT_HDR_SIZE];
+#ifdef HCI_LINK_CONTROL
+_static uint8_t m_tx_link_control_header[PKT_HDR_SIZE];
+_static uint8_t m_tx_link_control_payload[HCI_PKT_CONFIG_SIZE - PKT_HDR_SIZE];
+#endif /* HCI_LINK_CONTROL */
+
+_static uint32_t m_packet_ack_number; // Sequence number counter of the packet expected to be received
+_static uint32_t m_packet_seq_number; // Sequence number counter of the transmitted packet for which acknowledgement packet is waited for
+
+
+_static uint32_t m_tx_retry_count;
+
+
+// _static uint32_t m_tx_retx_counter = 0;
+// _static uint32_t m_rx_drop_counter = 0;
+
+NRF_QUEUE_DEF(hci_evt_t,
+ m_tx_evt_queue,
+ TX_EVT_QUEUE_SIZE,
+ NRF_QUEUE_MODE_NO_OVERFLOW);
+
+NRF_QUEUE_DEF(hci_evt_t,
+ m_rx_evt_queue,
+ RX_EVT_QUEUE_SIZE,
+ NRF_QUEUE_MODE_NO_OVERFLOW);
+
+_static hci_tx_fsm_state_t m_hci_tx_fsm_state = HCI_TX_STATE_DISABLE;
+_static hci_rx_fsm_state_t m_hci_rx_fsm_state = HCI_RX_STATE_DISABLE;
+
+#ifdef HCI_LINK_CONTROL
+_static hci_mode_t m_hci_mode = HCI_MODE_DISABLE;
+_static uint16_t m_hci_link_control_next_pkt = HCI_PKT_SYNC;
+_static bool m_hci_other_side_active = false;
+#endif /* HCI_LINK_CONTROL */
+
+#ifdef HCI_APP_TIMER
+APP_TIMER_DEF(m_app_timer_id);
+#endif
+
+_static bool m_tx_fsm_idle_flag = true;
+_static bool m_rx_fsm_idle_flag = true;
+
+_static bool m_buffer_reqested_flag = false;
+
+_static uint8_t * m_p_rx_buffer = NULL;
+_static uint16_t m_rx_packet_length;
+_static uint8_t * m_p_rx_packet;
+_static uint8_t * m_p_tx_payload = NULL;
+_static uint16_t m_tx_payload_length;
+
+_static ser_phy_events_handler_t m_ser_phy_callback = NULL;
+
+static void hci_tx_event_handler(hci_evt_t * p_event);
+static void hci_rx_event_handler(hci_evt_t * p_event);
+#ifdef HCI_LINK_CONTROL
+static void hci_link_control_event_handler(hci_evt_t * p_event);
+#endif /* HCI_LINK_CONTROL */
+
+_static bool m_hci_timer_enabled_flag = true;
+_static bool m_hci_timout_pending_flag = false;
+_static bool m_hci_global_enable_flag = true;
+
+#define ser_phy_hci_assert(cond) APP_ERROR_CHECK_BOOL(cond)
+
+static void hci_signal_timeout_event(void)
+{
+ hci_evt_t event;
+
+ event.evt_source = HCI_TIMER_EVT;
+ event.evt.timer_evt.evt_type = HCI_EVT_TIMEOUT;
+ DEBUG_EVT_TIMEOUT(0);
+
+#ifndef HCI_LINK_CONTROL
+ hci_tx_event_handler(&event);
+#else
+ hci_link_control_event_handler(&event);
+ if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
+ {
+ hci_tx_event_handler(&event);
+ }
+#endif /* HCI_LINK_CONTROL */
+}
+
+
+#ifndef HCI_APP_TIMER
+
/**@brief Hardware timer interrupt handler servicing the retransmission timeout.
 *
 * Handles the CC[1] compare event. When timeout processing is masked
 * (m_hci_timer_enabled_flag == false), the timeout is latched in
 * m_hci_timout_pending_flag instead of being signalled immediately.
 */
void HCI_TIMER_IRQHandler(void)
{

    /* Only act on a pending CC[1] compare event whose interrupt is enabled. */
    if ((HCI_TIMER->EVENTS_COMPARE[1] == 1) && (HCI_TIMER->INTENSET & TIMER_INTENSET_COMPARE1_Msk))
    {
        /* Acknowledge the event and restart the timer from zero. */
        HCI_TIMER->EVENTS_COMPARE[1] = 0;
        HCI_TIMER->TASKS_CLEAR = 1;

        if (m_hci_timer_enabled_flag)
        {
            hci_signal_timeout_event();
        }
        else
        {
            /* Timeout processing masked; remember it for when it is re-enabled. */
            m_hci_timout_pending_flag = true;
        }
    }
}
+
+
/**@brief Function for arming (count > 0) or disarming (count == 0) the retransmission timeout.
 *
 * @param[in] count Timeout expressed as a multiple of RETRANSMISSION_TIMEOUT_IN_ms;
 *                  0 stops the timer.
 */
static void hci_timeout_setup(uint32_t count)
{

    uint32_t time_msec;

    if (count)
    {
        HCI_TIMER->INTENCLR = TIMER_INTENCLR_COMPARE1_Msk;
        time_msec = count * RETRANSMISSION_TIMEOUT_IN_ms;
        /* Convert milliseconds to timer ticks at 31.25 ticks per ms (31 + 1/4). */
        HCI_TIMER->CC[1] = time_msec * 31;
        HCI_TIMER->CC[1] += time_msec / 4;
        HCI_TIMER->TASKS_CLEAR = 1; // < Clear TIMER
        HCI_TIMER->EVENTS_COMPARE[1] = 0;
        HCI_TIMER->TASKS_START = 1; // < Start TIMER
        HCI_TIMER->INTENSET = TIMER_INTENSET_COMPARE1_Msk;
    }
    else
    {
        HCI_TIMER->INTENCLR = TIMER_INTENCLR_COMPARE1_Msk;
        HCI_TIMER->TASKS_STOP = 1; // < Stop TIMER
    }
}
+
+
+#else
+
+_static bool m_hci_timer_setup_flag = false;
+_static uint32_t m_hci_timer_counter = 0;
+_static uint32_t m_hci_timer_setup;
+
/**@brief Function for requesting a timeout via the periodic app_timer tick handler.
 *
 * Only records the request; hci_timeout_handler() loads it on the next tick.
 * The value is written before the flag so the handler never sees a stale setup.
 *
 * @param[in] count Timeout in RETRANSMISSION_TIMEOUT_IN_ms units; 0 cancels
 *                  any countdown in progress.
 */
static void hci_timeout_setup(uint32_t count)
{
    m_hci_timer_setup = count;
    m_hci_timer_setup_flag = true;
}
+
+
/**@brief Periodic app_timer callback implementing the HCI timeout countdown.
 *
 * On each tick it either loads a newly requested countdown (see
 * hci_timeout_setup()) or decrements the running one. When the countdown
 * reaches zero, the timeout event is signalled - or latched in
 * m_hci_timout_pending_flag when timeout processing is masked.
 *
 * @param[in] p_context Unused app_timer context pointer.
 */
static void hci_timeout_handler(void * p_context)
{

    if (m_hci_timer_setup_flag)
    {
        m_hci_timer_setup_flag = false;
        m_hci_timer_counter = m_hci_timer_setup; /* for 1 it will be always more than 1 tick - jitter is up to 1 tick */
    }
    else if ( m_hci_timer_counter )
    {
        m_hci_timer_counter--;

        if (m_hci_timer_counter == 0)
        {
            if (m_hci_timer_enabled_flag)
            {
                hci_signal_timeout_event();
            }
            else
            {
                /* Timeout processing masked; remember it for when it is re-enabled. */
                m_hci_timout_pending_flag = true;
            }
        }
    }
    return;
}
+
+
+#endif
+
+
+/**@brief Function for validating a received packet.
+ *
+ * @param[in] p_buffer Pointer to the packet data.
+ * @param[in] length Length of packet data in bytes.
+ *
+ * @return true if received packet is valid, false in other case.
+ */
+static bool is_rx_pkt_valid(const uint8_t * p_buffer, uint32_t length)
+{
+ // Executed packet filtering algorithm order:
+ // - verify packet overall length
+ // - verify data integrity bit set
+ // - verify reliable packet bit set
+ // - verify supported packet type
+ // - verify header checksum
+ // - verify payload length field
+ // - verify CRC
+ if (length <= PKT_HDR_SIZE)
+ {
+ return false;
+ }
+
+ if (!(p_buffer[0] & DATA_INTEGRITY_MASK))
+ {
+ return false;
+ }
+
+ if (!(p_buffer[0] & RELIABLE_PKT_MASK))
+ {
+ return false;
+ }
+
+ if ((p_buffer[1] & 0x0Fu) != PKT_TYPE_VENDOR_SPECIFIC)
+ {
+ return false;
+ }
+
+ const uint32_t expected_checksum =
+ ((p_buffer[0] + p_buffer[1] + p_buffer[2] + p_buffer[3])) & 0xFFu;
+
+ if (expected_checksum != 0)
+ {
+ return false;
+ }
+
+ const uint16_t crc_calculated = crc16_compute(p_buffer, (length - PKT_CRC_SIZE), NULL);
+ const uint16_t crc_received = uint16_decode(&p_buffer[length - PKT_CRC_SIZE]);
+
+ if (crc_calculated != crc_received)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+
/**@brief Function for getting the sequence number of the next reliable packet expected.
 *
 * @return sequence number of the next reliable packet expected.
 */
static __INLINE uint8_t packet_ack_get(void)
{
    /* The counter is kept in a uint32_t (m_packet_ack_number); narrow explicitly. */
    return (uint8_t) m_packet_ack_number;
}
+
+
+/**@brief Function for getting the sequence number of a reliable TX packet for which peer protocol
+ * entity acknowledgment is pending.
+ *
+ * @return sequence number of a reliable TX packet for which peer protocol entity acknowledgement
+ * is pending.
+ */
+static __INLINE uint8_t packet_seq_get(void)
+{
+ return m_packet_seq_number;
+}
+
+
/**@brief Extracts the 3-bit sequence number from the first header byte.
 *
 * @param[in] p_buffer Pointer to the packet header.
 *
 * @return Sequence number (0..7).
 */
static __INLINE uint8_t packet_seq_nmbr_extract(const uint8_t * p_buffer)
{
    /* The sequence number occupies the three least significant bits. */
    return (uint8_t)(p_buffer[0] & 0x07u);
}
+
+
+/**@brief Function for constructing 1st byte of the packet header of the packet to be transmitted.
+ *
+ * @return 1st byte of the packet header of the packet to be transmitted
+ */
+static __INLINE uint8_t tx_packet_byte_zero_construct(void)
+{
+ const uint32_t value = DATA_INTEGRITY_MASK | RELIABLE_PKT_MASK |
+ (packet_ack_get() << 3u) | packet_seq_get();
+
+ return (uint8_t) value;
+}
+
+
/**@brief Function for calculating a packet header checksum.
 *
 * Returns the two's complement of the modulo-256 sum of the first three header
 * bytes, so that all four header bytes sum to zero modulo 256.
 *
 * @param[in] p_hdr Pointer to the packet header.
 *
 * @return Calculated checksum.
 */
static __INLINE uint8_t header_checksum_calculate(const uint8_t * p_hdr)
{
    // @note: no pointer validation check needed as already checked by calling function.
    uint32_t sum = (uint32_t)p_hdr[0] + p_hdr[1] + p_hdr[2];

    return (uint8_t)(~(sum & 0xFFu) + 1u);
}
+
+
/**@brief Function for getting the expected ACK number.
 *
 * The expected ACK is the current TX sequence number incremented by one,
 * wrapped modulo 8 (3-bit sequence number space).
 *
 * @return expected ACK number.
 */
static __INLINE uint8_t expected_ack_number_get(void)
{
    return (uint8_t)((packet_seq_get() + 1u) & 0x07u);
}
+
+
/**@brief Function for getting the ACK number following the expected one.
 *
 * One step beyond expected_ack_number_get(), wrapped modulo 8.
 *
 * @return next expected ACK number.
 */
static __INLINE uint8_t next_expected_ack_number_get(void)
{
    return (uint8_t)((expected_ack_number_get() + 1u) & 0x07u);
}
+
+
/**@brief Function for processing a received acknowledgement packet.
 *
 * Verifies that the received acknowledgement packet has a correct header
 * checksum and carries one of the acknowledgement numbers currently accepted.
 *
 * @param[in] p_buffer Pointer to the packet data.
 *
 * @return true if a valid acknowledgement packet was received.
 */
static bool rx_ack_pkt_valid(const uint8_t * p_buffer)
{
    // @note: no pointer validation check needed as already checked by calling function.

    /* Header checksum: the four header bytes must sum to zero modulo 256. */
    if (((p_buffer[0] + p_buffer[1] + p_buffer[2] + p_buffer[3]) & 0xFFu) != 0)
    {
        return false;
    }

    /* ACK number lives in bits 5..3 of the first header byte. */
    const uint8_t ack_number = (uint8_t)((p_buffer[0] >> 3u) & 0x07u);

    /* Accept the expected acknowledgement number or the one right after it. */
    return ((ack_number == expected_ack_number_get()) ||
            (ack_number == next_expected_ack_number_get()));
}
+
+
+/**@brief Function for decoding a packet type field.
+ *
+ * @param[in] p_buffer Pointer to the packet data.
+ * @param[in] length Length of packet data in bytes.
+ *
+ * @return Packet type field or INVALID_PKT_TYPE in case of decode error.
+ */
+
+static uint32_t packet_type_decode(const uint8_t * p_buffer, uint32_t length)
+{
+ // @note: no pointer validation check needed as allready checked by calling function.
+ uint32_t return_value;
+
+ if (length >= PKT_HDR_SIZE)
+ {
+ return_value = (p_buffer[1] & 0x0Fu);
+ }
+ else
+ {
+ return_value = INVALID_PKT_TYPE;
+ }
+
+ return return_value;
+}
+
+#ifdef HCI_LINK_CONTROL
+/**@brief Function for decoding a link control packet.
+ *
+ * @param[in] p_buffer Pointer to the packet data.
+ * @param[in] length   Length of packet data in bytes.
+ *
+ * @return Link Control Packet Type if decoding successful, HCI_LINK_CONTROL_PKT_INVALID otherwise.
+ */
+static uint16_t link_control_packet_decode(const uint8_t * p_buffer, uint32_t length)
+{
+    // @note: no pointer validation check needed as already checked by calling function.
+    uint16_t packet_type = HCI_LINK_CONTROL_PKT_INVALID;
+
+    // Executed link control packet filtering algorithm order:
+    // - verify packet overall length
+    // - verify data integrity bit cleared
+    // - verify reliable packet bit cleared
+    // - verify header checksum
+    // - verify payload: length and value
+
+    // BUGFIX: bail out immediately on a short packet. The previous code only
+    // flagged the packet as invalid and fell through, which both overwrote the
+    // invalid verdict and read p_buffer[PKT_HDR_SIZE..] out of bounds.
+    if (length < HCI_PKT_SYNC_SIZE)
+    {
+        return HCI_LINK_CONTROL_PKT_INVALID;
+    }
+
+    // Link control packet type: 16-bit little-endian value right after the header.
+    packet_type = p_buffer[PKT_HDR_SIZE] | (p_buffer[PKT_HDR_SIZE + 1] << 8u);
+
+    // Link control packets must have both the DI and RP header bits cleared.
+    if ((p_buffer[0] & DATA_INTEGRITY_MASK) || (p_buffer[0] & RELIABLE_PKT_MASK))
+    {
+        packet_type = HCI_LINK_CONTROL_PKT_INVALID;
+    }
+
+    // All four header bytes (checksum byte included) must sum to 0 modulo 256.
+    const uint32_t expected_checksum =
+        ((p_buffer[0] + p_buffer[1] + p_buffer[2] + p_buffer[3])) & 0xFFu;
+
+    if (expected_checksum != 0)
+    {
+        packet_type = HCI_LINK_CONTROL_PKT_INVALID;
+    }
+
+    // This is a CONFIG or CONFIG_RSP packet
+    if ((packet_type == HCI_PKT_CONFIG) || (packet_type == HCI_PKT_CONFIG_RSP))
+    {
+        if (length != HCI_PKT_CONFIG_SIZE)
+        {
+            packet_type = HCI_LINK_CONTROL_PKT_INVALID;
+        }
+        // Verify configuration field (0x11):
+        // - Sliding Window Size == 1,
+        // - OOF Flow Control == 0,
+        // - Data Integrity Check Type == 1,
+        // - Version Number == 0
+        // BUGFIX: only inspect the config byte when the length is correct,
+        // otherwise p_buffer[HCI_PKT_CONFIG_SIZE - 1] may be out of bounds.
+        else if (p_buffer[HCI_PKT_CONFIG_SIZE - 1] != HCI_CONFIG_FIELD)
+        {
+            packet_type = HCI_LINK_CONTROL_PKT_INVALID;
+        }
+    }
+    // This is a SYNC or SYNC_RSP packet
+    else if ((packet_type == HCI_PKT_SYNC) || (packet_type == HCI_PKT_SYNC_RSP))
+    {
+        if (length != HCI_PKT_SYNC_SIZE)
+        {
+            packet_type = HCI_LINK_CONTROL_PKT_INVALID;
+        }
+    }
+    else
+    {
+        packet_type = HCI_LINK_CONTROL_PKT_INVALID;
+    }
+
+    return packet_type;
+}
+#endif /* HCI_LINK_CONTROL */
+
+/**@brief Function for writing an acknowledgment packet for transmission.
+ *
+ * Builds a 4-byte unreliable header carrying the current ACK number and hands
+ * it to the SLIP layer with no payload and no CRC trailer.
+ */
+
+static void ack_transmit(void)
+{
+    uint32_t err_code;
+    // TX ACK packet format:
+    // - Unreliable Packet type
+    // - Payload Length set to 0
+    // - Sequence Number set to 0
+    // - Header checksum calculated
+    // - Acknowledge Number set correctly
+    m_tx_ack_packet[0] = (packet_ack_get() << 3u);  // ACK number occupies bits 3..5
+    m_tx_ack_packet[1] = 0;
+    m_tx_ack_packet[2] = 0;
+    m_tx_ack_packet[3] = header_checksum_calculate(m_tx_ack_packet);
+
+    ser_phy_hci_pkt_params_t pkt_header;
+
+    pkt_header.p_buffer     = m_tx_ack_packet;
+    pkt_header.num_of_bytes = PKT_HDR_SIZE;
+    DEBUG_EVT_SLIP_ACK_TX(0);
+    err_code = ser_phy_hci_slip_tx_pkt_send(&pkt_header, NULL, NULL);
+    ser_phy_hci_assert(err_code == NRF_SUCCESS);
+
+    return;
+}
+
+
+/**@brief Forward a PHY event to the registered upper-layer handler, if any. */
+static void ser_phy_event_callback(ser_phy_evt_t event)
+{
+    if (m_ser_phy_callback != NULL)
+    {
+        m_ser_phy_callback(event);
+    }
+}
+
+
+/**@brief Ask the upper layer to provide an RX buffer of @p size bytes. */
+static void memory_request_callback(uint16_t size)
+{
+    ser_phy_evt_t phy_evt;
+
+    DEBUG_EVT_HCI_PHY_EVT_BUF_REQUEST(0);
+
+    phy_evt.evt_type                             = SER_PHY_EVT_RX_BUF_REQUEST;
+    phy_evt.evt_params.rx_buf_request.num_of_bytes = size;
+    ser_phy_event_callback(phy_evt);
+}
+
+
+/**@brief Deliver a received payload to the upper layer.
+ *
+ * Consistency fix: parameter renamed from camelCase 'pBuffer' to 'p_buffer'
+ * to match the file's prevailing naming convention (static function, no ABI
+ * or caller impact).
+ *
+ * @param[in] p_buffer Payload bytes (header and CRC already stripped by caller).
+ * @param[in] size     Payload length in octets.
+ */
+static void packet_received_callback(uint8_t * p_buffer, uint16_t size)
+{
+    ser_phy_evt_t event;
+
+    DEBUG_EVT_HCI_PHY_EVT_RX_PKT_RECEIVED(0);
+
+    event.evt_type                                = SER_PHY_EVT_RX_PKT_RECEIVED;
+    event.evt_params.rx_pkt_received.num_of_bytes = size;
+    event.evt_params.rx_pkt_received.p_buffer     = p_buffer;
+    ser_phy_event_callback(event);
+}
+
+
+/**@brief Notify the upper layer that an incoming packet had to be dropped. */
+static void packet_dropped_callback(void)
+{
+    ser_phy_evt_t phy_evt;
+
+    DEBUG_EVT_HCI_PHY_EVT_RX_PKT_DROPPED(0);
+
+    phy_evt.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
+    ser_phy_event_callback(phy_evt);
+}
+
+
+/**@brief Notify the upper layer that the pending TX packet was delivered. */
+static void packet_transmitted_callback(void)
+{
+    ser_phy_evt_t phy_evt;
+
+    DEBUG_EVT_HCI_PHY_EVT_TX_PKT_SENT(0);
+
+    phy_evt.evt_type = SER_PHY_EVT_TX_PKT_SENT;
+    ser_phy_event_callback(phy_evt);
+}
+
+
+/**@brief Report a hardware/transport error to the upper layer.
+ *
+ * The pointer to the still-pending TX payload is included in the event so the
+ * caller can identify (and reclaim) the buffer whose transmission failed.
+ */
+static void error_callback(void)
+{
+    ser_phy_evt_t event;
+
+    DEBUG_EVT_HCI_PHY_EVT_TX_ERROR(0);
+
+    event.evt_type                     = SER_PHY_EVT_HW_ERROR;
+    event.evt_params.hw_error.p_buffer = m_p_tx_payload;
+    ser_phy_event_callback(event);
+}
+
+
+/**@brief Dispatcher for events coming from the HCI SLIP layer.
+ *
+ * PKT_SENT / ACK_SENT events are forwarded to the TX / RX state machine
+ * respectively. PKT_RECEIVED events are first classified by the packet-type
+ * nibble: RESET reboots the chip, ACK feeds the TX FSM, vendor-specific data
+ * (after validation) feeds the RX FSM, link-control packets (when enabled) go
+ * to the link-control handler, and anything else is dropped and its SLIP
+ * buffer freed. With HCI_LINK_CONTROL, data/ACK traffic is only processed
+ * once the link is ACTIVE and the peer has completed configuration.
+ *
+ * @param[in] p_event Event emitted by the SLIP layer.
+ */
+static void hci_slip_event_handler(ser_phy_hci_slip_evt_t * p_event)
+{
+    hci_evt_t event;
+    uint32_t  packet_type;
+    uint32_t  err_code;
+
+    if ( p_event->evt_type == SER_PHY_HCI_SLIP_EVT_PKT_SENT )
+    {
+        NRF_LOG_DEBUG("EVT_PKT_SENT");
+
+        DEBUG_EVT_SLIP_PACKET_TXED(0);
+        event.evt_source                    = HCI_SLIP_EVT;
+        event.evt.ser_phy_slip_evt.evt_type = p_event->evt_type;
+#ifndef HCI_LINK_CONTROL
+        hci_tx_event_handler(&event);
+#else
+        if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
+        {
+            hci_tx_event_handler(&event);
+        }
+#endif /*HCI_LINK_CONTROL*/
+    }
+    else if ( p_event->evt_type == SER_PHY_HCI_SLIP_EVT_ACK_SENT )
+    {
+        NRF_LOG_DEBUG("EVT_ACK_SENT");
+
+        DEBUG_EVT_SLIP_ACK_TXED(0);
+        event.evt_source                    = HCI_SLIP_EVT;
+        event.evt.ser_phy_slip_evt.evt_type = p_event->evt_type;
+#ifndef HCI_LINK_CONTROL
+        hci_rx_event_handler(&event);
+#else
+        if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
+        {
+            hci_rx_event_handler(&event);
+        }
+#endif /* HCI_LINK_CONTROL */
+    }
+
+    else if ( p_event->evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED )
+    {
+        event.evt_source                    = HCI_SLIP_EVT;
+        event.evt.ser_phy_slip_evt.evt_type = p_event->evt_type;
+        event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer =
+            p_event->evt_params.received_pkt.p_buffer;
+        event.evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes =
+            p_event->evt_params.received_pkt.num_of_bytes;
+        ser_phy_hci_assert(event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer != NULL);
+        ser_phy_hci_assert(event.evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes != 0);
+        packet_type = packet_type_decode(
+            event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer,
+            event.evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes);
+
+        NRF_LOG_DEBUG("EVT_PKT_RECEIVED 0x%X/%u", packet_type,
+                      p_event->evt_params.received_pkt.num_of_bytes);
+
+        if (packet_type == PKT_TYPE_RESET)
+        {
+            // A reset request from the peer reboots the whole chip.
+            NVIC_SystemReset();
+        }
+        else if (packet_type == PKT_TYPE_ACK )
+        {
+            DEBUG_EVT_SLIP_ACK_RXED(0);
+#ifndef HCI_LINK_CONTROL
+            hci_tx_event_handler(&event);
+#else
+            if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
+            {
+                hci_tx_event_handler(&event);
+            }
+            else
+            {
+                // Link not active yet: drop the ACK and recycle its buffer.
+                // NOTE(review): err_code is not asserted here, unlike the other
+                // rx_buf_free paths - confirm this is intentional.
+                err_code = ser_phy_hci_slip_rx_buf_free(
+                    event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
+            }
+#endif /* HCI_LINK_CONTROL */
+        }
+        else if ( packet_type == PKT_TYPE_VENDOR_SPECIFIC )
+        {
+            if (is_rx_pkt_valid(event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer,
+                                event.evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes))
+            {
+                DEBUG_EVT_SLIP_PACKET_RXED(0);
+#ifndef HCI_LINK_CONTROL
+                hci_rx_event_handler(&event);
+#else
+                if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
+                {
+                    hci_rx_event_handler(&event);
+                }
+                else
+                {
+                    // NOTE(review): result of the free is ignored here as well.
+                    err_code = ser_phy_hci_slip_rx_buf_free(
+                        event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
+                }
+#endif /* HCI_LINK_CONTROL */
+            }
+            else
+            {
+                err_code = ser_phy_hci_slip_rx_buf_free(
+                    event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
+                ser_phy_hci_assert(err_code == NRF_SUCCESS);
+                /* throw assert when in debug mode*/
+                DEBUG_EVT_SLIP_ERR_RXED(0);
+            }
+        }
+#ifdef HCI_LINK_CONTROL
+        else if (packet_type == PKT_TYPE_LINK_CONTROL)
+        {
+            hci_link_control_event_handler(&event);
+        }
+#endif /* HCI_LINK_CONTROL */
+        else
+        {
+            // Unknown packet type: free the buffer and flag the error.
+            err_code = ser_phy_hci_slip_rx_buf_free(
+                event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
+            ser_phy_hci_assert(err_code == NRF_SUCCESS);
+            /* throw assert when in debug mode*/
+            DEBUG_EVT_SLIP_ERR_RXED(0);
+        }
+    }
+    else
+    {
+        NRF_LOG_DEBUG("EVT_HW_ERROR");
+    }
+}
+
+
+/**@brief Build and transmit the pending reliable (vendor-specific) packet.
+ *
+ * Constructs the 4-byte header (byte 0 via tx_packet_byte_zero_construct(),
+ * then the length/type word and the header checksum), computes CRC16 over the
+ * header followed by the payload, and queues header, payload, and CRC with
+ * the SLIP layer as one frame.
+ */
+static void hci_pkt_send(void)
+{
+    uint32_t err_code;
+
+    m_tx_packet_header[0] = tx_packet_byte_zero_construct();
+    // Payload length in the upper 12 bits, packet type in the low nibble.
+    uint16_t type_and_length_fields = ((m_tx_payload_length << 4u) | PKT_TYPE_VENDOR_SPECIFIC);
+    (void)uint16_encode(type_and_length_fields, &(m_tx_packet_header[1]));
+    m_tx_packet_header[3] = header_checksum_calculate(m_tx_packet_header);
+    // CRC16 is chained: first over the header, then continued over the payload.
+    uint16_t crc = crc16_compute(m_tx_packet_header, PKT_HDR_SIZE, NULL);
+    crc = crc16_compute(m_p_tx_payload, m_tx_payload_length, &crc);
+    (void)uint16_encode(crc, m_tx_packet_crc);
+
+    ser_phy_hci_pkt_params_t pkt_header;
+    ser_phy_hci_pkt_params_t pkt_payload;
+    ser_phy_hci_pkt_params_t pkt_crc;
+
+    pkt_header.p_buffer      = m_tx_packet_header;
+    pkt_header.num_of_bytes  = PKT_HDR_SIZE;
+    pkt_payload.p_buffer     = m_p_tx_payload;
+    pkt_payload.num_of_bytes = m_tx_payload_length;
+    pkt_crc.p_buffer         = m_tx_packet_crc;
+    pkt_crc.num_of_bytes     = PKT_CRC_SIZE;
+    DEBUG_EVT_SLIP_PACKET_TX(0);
+    err_code = ser_phy_hci_slip_tx_pkt_send(&pkt_header, &pkt_payload, &pkt_crc);
+    ser_phy_hci_assert(err_code == NRF_SUCCESS);
+
+    return;
+}
+
+#ifdef HCI_LINK_CONTROL
+/**@brief Build and transmit the next link-control packet (SYNC, SYNC_RSP,
+ *        CONFIG, or CONFIG_RSP), selected by m_hci_link_control_next_pkt.
+ *
+ * Link-control frames are unreliable (header byte 0 all zero) and carry no
+ * CRC trailer. The payload is the 16-bit packet type, plus the configuration
+ * byte for CONFIG/CONFIG_RSP.
+ *
+ * NOTE(review): if m_hci_link_control_next_pkt holds none of the four known
+ * values, a header-only frame with payload length 0 is sent - confirm that
+ * this state is unreachable.
+ */
+static void hci_link_control_pkt_send(void)
+{
+    uint32_t err_code;
+    uint16_t link_control_payload_len = 0;
+
+    m_tx_link_control_header[0] = 0x00u; // SEQ, ACK, DI and RP are set to 0 for link control
+    if (m_hci_link_control_next_pkt == HCI_PKT_SYNC)
+    {
+        link_control_payload_len = HCI_PKT_SYNC_SIZE - PKT_HDR_SIZE;
+        (void)uint16_encode(HCI_PKT_SYNC, m_tx_link_control_payload);
+    }
+    else if (m_hci_link_control_next_pkt == HCI_PKT_SYNC_RSP)
+    {
+        link_control_payload_len = HCI_PKT_SYNC_SIZE - PKT_HDR_SIZE;
+        (void)uint16_encode(HCI_PKT_SYNC_RSP, m_tx_link_control_payload);
+    }
+    else if (m_hci_link_control_next_pkt == HCI_PKT_CONFIG)
+    {
+        link_control_payload_len = HCI_PKT_CONFIG_SIZE - PKT_HDR_SIZE;
+        (void)uint16_encode(HCI_PKT_CONFIG, m_tx_link_control_payload);
+        m_tx_link_control_payload[2] = HCI_CONFIG_FIELD;
+    }
+    else if (m_hci_link_control_next_pkt == HCI_PKT_CONFIG_RSP)
+    {
+        link_control_payload_len = HCI_PKT_CONFIG_SIZE - PKT_HDR_SIZE;
+        (void)uint16_encode(HCI_PKT_CONFIG_RSP, m_tx_link_control_payload);
+        m_tx_link_control_payload[2] = HCI_CONFIG_FIELD;
+    }
+    uint16_t type_and_length_fields = ((link_control_payload_len << 4u) | PKT_TYPE_LINK_CONTROL);
+    (void)uint16_encode(type_and_length_fields, &(m_tx_link_control_header[1]));
+    m_tx_link_control_header[3] = header_checksum_calculate(m_tx_link_control_header);
+
+    ser_phy_hci_pkt_params_t pkt_header;
+    ser_phy_hci_pkt_params_t pkt_payload;
+    ser_phy_hci_pkt_params_t pkt_crc;
+
+    pkt_header.p_buffer      = m_tx_link_control_header;
+    pkt_header.num_of_bytes  = PKT_HDR_SIZE;
+    pkt_payload.p_buffer     = m_tx_link_control_payload;
+    pkt_payload.num_of_bytes = link_control_payload_len;
+    pkt_crc.p_buffer         = NULL;  // link control frames carry no CRC
+    pkt_crc.num_of_bytes     = 0;
+    DEBUG_EVT_SLIP_PACKET_TX(0);
+    err_code = ser_phy_hci_slip_tx_pkt_send(&pkt_header, &pkt_payload, &pkt_crc);
+    ser_phy_hci_assert(err_code == NRF_SUCCESS);
+
+    return;
+}
+#endif /* HCI_LINK_CONTROL */
+
+/**@brief Finish a successfully acknowledged transmission: advance the 3-bit
+ *        sequence number, release ownership of the payload, and notify the
+ *        upper layer. */
+static void hci_pkt_sent_upcall(void)
+{
+    // Incoming ACK was valid - advance SEQ modulo 8.
+    m_packet_seq_number = (m_packet_seq_number + 1) & 0x07u;
+    m_p_tx_payload      = NULL;
+    packet_transmitted_callback();
+}
+
+
+/**@brief Return the SLIP RX buffer that carried an ACK packet to the pool. */
+static void hci_release_ack_buffer(hci_evt_t * p_event)
+{
+    uint32_t status = ser_phy_hci_slip_rx_buf_free(
+        p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
+
+    ser_phy_hci_assert(status == NRF_SUCCESS);
+}
+
+
+/**@brief Discard an ACK that arrived while no acknowledgment was expected. */
+static void hci_process_orphaned_ack(hci_evt_t * p_event)
+{
+    hci_release_ack_buffer(p_event);
+}
+
+/* main tx fsm */
+/**@brief TX state machine of the reliable HCI transport.
+ *
+ * States:
+ * - SEND:                   idle; a TX request from the API starts a transmission.
+ * - WAIT_FOR_FIRST_TX_END:  first frame is on the wire; wait for SLIP "sent".
+ * - WAIT_FOR_ACK:           frame sent; wait for the peer's ACK or a timeout.
+ * - WAIT_FOR_ACK_OR_TX_END: a retransmission is on the wire.
+ * - WAIT_FOR_TX_END:        ACK already arrived; wait for the wire to go idle.
+ *
+ * ACKs received in states that do not expect one are treated as orphaned and
+ * only their buffer is freed.
+ *
+ * @param[in] p_event Queued event (SER_PHY request, SLIP event, or timer).
+ */
+static void hci_tx_fsm_event_process(hci_evt_t * p_event)
+{
+
+    switch (m_hci_tx_fsm_state)
+    {
+        case HCI_TX_STATE_SEND:
+
+            if ((p_event->evt_source == HCI_SER_PHY_EVT) &&
+                (p_event->evt.ser_phy_evt.evt_type == HCI_SER_PHY_TX_REQUEST))
+            {
+                hci_pkt_send();
+                // hci_timeout_setup(0): presumably disarms the timer - see helper.
+                hci_timeout_setup(0);
+                m_tx_retry_count   = MAX_RETRY_COUNT;
+                m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_FIRST_TX_END;
+            }
+            else if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                     (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
+            {
+                hci_process_orphaned_ack(p_event);
+            }
+
+            break;
+
+        case HCI_TX_STATE_WAIT_FOR_FIRST_TX_END:
+
+            if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_SENT))
+            {
+                // Frame is out: arm the retransmission timeout and await ACK.
+                hci_timeout_setup(1);
+                m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_ACK;
+            }
+            else if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                     (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
+            {
+                hci_process_orphaned_ack(p_event);
+            }
+            break;
+
+        case HCI_TX_STATE_WAIT_FOR_ACK_OR_TX_END:
+
+            if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_SENT))
+            {
+                hci_timeout_setup(1);
+                m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_ACK;
+            }
+            else if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                     (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
+            {
+                // ACK may arrive while the retransmission is still being sent.
+                if (rx_ack_pkt_valid(p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer))
+                {
+                    hci_timeout_setup(0);
+                    m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_TX_END;
+                }
+                hci_release_ack_buffer(p_event);
+            }
+            break;
+
+        case HCI_TX_STATE_WAIT_FOR_ACK:
+
+            if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
+            {
+                if (rx_ack_pkt_valid(p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer))
+                {
+                    hci_timeout_setup(0);
+                    hci_pkt_sent_upcall();
+                    m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
+                }
+                hci_release_ack_buffer(p_event);
+            }
+            else if (p_event->evt_source == HCI_TIMER_EVT)
+            {
+                // No ACK within the timeout: retransmit until retries run out.
+                m_tx_retry_count--;
+                // m_tx_retx_counter++; // global retransmissions counter
+                if (m_tx_retry_count)
+                {
+                    hci_pkt_send();
+                    DEBUG_HCI_RETX(0);
+                    m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_ACK_OR_TX_END;
+                }
+                else
+                {
+                    // Retries exhausted: report the error and go back to idle.
+                    error_callback();
+                    m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
+                }
+            }
+            break;
+
+        case HCI_TX_STATE_WAIT_FOR_TX_END:
+
+            if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_SENT))
+            {
+                hci_pkt_sent_upcall();
+                m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
+            }
+            else if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                     (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
+            {
+                hci_process_orphaned_ack(p_event);
+            }
+
+            break;
+
+#ifdef HCI_LINK_CONTROL
+        case HCI_TX_STATE_DISABLE:
+            /* This case should not happen if HCI is in ACTIVE mode */
+            if (m_hci_mode == HCI_MODE_ACTIVE)
+            {
+                ser_phy_hci_assert(false);
+            }
+            break;
+#endif /* HCI_LINK_CONTROL */
+
+        default:
+            ser_phy_hci_assert(false);
+            break;
+    }
+}
+
+
+/**@brief Latch the received packet and ask the upper layer for an RX buffer.
+ *
+ * The requested size excludes the HCI header and the CRC trailer, i.e. it is
+ * the bare payload length.
+ */
+static void hci_mem_request(hci_evt_t * p_event)
+{
+    m_buffer_reqested_flag = true;
+    m_p_rx_packet          = p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer;
+    m_rx_packet_length     = p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes;
+    ser_phy_hci_assert(m_rx_packet_length > PKT_HDR_SIZE + PKT_CRC_SIZE);
+    memory_request_callback(m_rx_packet_length - PKT_HDR_SIZE - PKT_CRC_SIZE);
+    return;
+}
+
+
+/**@brief Advance the expected RX sequence (ACK) number, modulo 8.
+ *
+ * Fixed: declared with an explicit (void) parameter list - in C, empty
+ * parentheses declare a function taking unspecified arguments, not none.
+ */
+static void hci_inc_ack(void)
+{
+    m_packet_ack_number = (m_packet_ack_number + 1) & 0x07u;
+}
+
+
+/**@brief RX state machine of the reliable HCI transport.
+ *
+ * States:
+ * - RECEIVE:                idle; a validated packet either triggers a buffer
+ *                           request (sequence number matches) or is NACKed.
+ * - WAIT_FOR_MEM:           waiting for the upper layer to grant an RX buffer.
+ * - WAIT_FOR_SLIP_ACK_END:  ACK on the wire; deliver or drop when it is out.
+ * - WAIT_FOR_SLIP_NACK_END: NACK on the wire for an out-of-sequence packet.
+ *
+ * @param[in] p_event Queued event (SLIP, SER_PHY, or timer).
+ */
+static void hci_rx_fsm_event_process(hci_evt_t * p_event)
+{
+    switch (m_hci_rx_fsm_state)
+    {
+        case HCI_RX_STATE_RECEIVE:
+
+            if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
+            {
+                /* type and crc and check sum are validated by slip handler */
+                uint8_t rx_seq_number = packet_seq_nmbr_extract(
+                    p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
+
+                if (packet_ack_get() == rx_seq_number)
+                {
+                    hci_mem_request(p_event);
+                    m_hci_rx_fsm_state = HCI_RX_STATE_WAIT_FOR_MEM;
+                }
+                else
+                {
+                    // m_rx_drop_counter++;
+                    m_hci_rx_fsm_state = HCI_RX_STATE_WAIT_FOR_SLIP_NACK_END;
+                    // NOTE(review): this frees m_p_rx_packet (the last packet
+                    // stored by hci_mem_request) rather than the buffer carried
+                    // in this event - verify both always refer to the same buffer.
+                    (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_packet); // and drop a packet
+                    ack_transmit(); // send NACK with valid ACK
+                }
+            }
+            break;
+
+        case HCI_RX_STATE_WAIT_FOR_MEM:
+
+            if ((p_event->evt_source == HCI_SER_PHY_EVT) &&
+                (p_event->evt.ser_phy_evt.evt_type == HCI_SER_PHY_RX_BUF_GRANTED))
+            {
+                // NULL buffer grant means the upper layer refused memory;
+                // the packet will then be reported as dropped after the ACK.
+                if (m_p_rx_buffer)
+                {
+                    memcpy(m_p_rx_buffer,
+                           m_p_rx_packet + PKT_HDR_SIZE,
+                           m_rx_packet_length - PKT_HDR_SIZE - PKT_CRC_SIZE);
+                    (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_packet);
+                }
+                m_hci_rx_fsm_state = HCI_RX_STATE_WAIT_FOR_SLIP_ACK_END;
+                hci_inc_ack(); // SEQ was valid for good packet, we will send incremented SEQ as ACK
+                ack_transmit();
+            }
+
+            break;
+
+        case HCI_RX_STATE_WAIT_FOR_SLIP_ACK_END:
+
+            if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_ACK_SENT))
+            {
+
+                if (m_p_rx_buffer)
+                {
+                    packet_received_callback(m_p_rx_buffer,
+                                             m_rx_packet_length - PKT_HDR_SIZE - PKT_CRC_SIZE);
+                }
+                else
+                {
+                    packet_dropped_callback();
+                }
+                m_hci_rx_fsm_state = HCI_RX_STATE_RECEIVE;
+            }
+            else if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                     (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
+            {
+                // Packet arrived while our ACK is still in flight: drop it.
+                (void) ser_phy_hci_slip_rx_buf_free(p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
+            }
+            break;
+
+        case HCI_RX_STATE_WAIT_FOR_SLIP_NACK_END:
+            if ((p_event->evt_source == HCI_SLIP_EVT) &&
+                (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_ACK_SENT))
+            {
+                m_hci_rx_fsm_state = HCI_RX_STATE_RECEIVE;
+            }
+            else
+            {
+                // NOTE(review): this branch frees received_pkt.p_buffer for ANY
+                // other event source/type - confirm only PKT_RECEIVED can occur here.
+                (void) ser_phy_hci_slip_rx_buf_free(p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
+            }
+            break;
+
+
+#ifdef HCI_LINK_CONTROL
+        case HCI_RX_STATE_DISABLE:
+            if (m_hci_mode == HCI_MODE_ACTIVE)
+            {
+                ser_phy_hci_assert(false);
+            }
+            break;
+#endif /* HCI_LINK_CONTROL */
+
+        default:
+            ser_phy_hci_assert(false);
+            break;
+    }
+}
+
+
+/* this function might be entered only via hci_tx_event_handler */
+/**@brief Drain the TX event queue, feeding each event to the TX FSM.
+ *
+ * The queue pop and the idle-flag update are performed inside a critical
+ * region so that exactly one context runs the FSM until the queue is empty;
+ * the idle flag is raised atomically with observing the empty queue.
+ */
+static void hci_tx_fsm(void)
+{
+    hci_evt_t event;
+    uint32_t  err_code = NRF_SUCCESS;
+
+    while (err_code == NRF_SUCCESS)
+    {
+
+        CRITICAL_REGION_ENTER();
+        err_code = nrf_queue_pop(&m_tx_evt_queue, &event);
+
+        if (err_code != NRF_SUCCESS)
+        {
+            m_tx_fsm_idle_flag = true;
+        }
+        CRITICAL_REGION_EXIT();
+
+        if (err_code == NRF_SUCCESS)
+        {
+            hci_tx_fsm_event_process(&event); /* this is the only entry to the TX_FSM */
+        }
+    }
+    return;
+}
+
+
+/* this function might be entered only via hci_rx_event_handler */
+/**@brief Drain the RX event queue, feeding each event to the RX FSM.
+ *
+ * Mirrors hci_tx_fsm(): pop and idle-flag update happen atomically so only
+ * one context owns the FSM until the queue is exhausted.
+ */
+static void hci_rx_fsm(void)
+{
+    hci_evt_t event;
+    uint32_t  err_code = NRF_SUCCESS;
+
+    while (err_code == NRF_SUCCESS)
+    {
+        CRITICAL_REGION_ENTER();
+        err_code = nrf_queue_pop(&m_rx_evt_queue, &event);
+
+        if (err_code != NRF_SUCCESS)
+        {
+            m_rx_fsm_idle_flag = true;
+        }
+        CRITICAL_REGION_EXIT();
+
+        if (err_code == NRF_SUCCESS)
+        {
+            hci_rx_fsm_event_process(&event); /* this is the only entry to the RX_FSM */
+        }
+    }
+    return;
+}
+
+
+/* something might have been queued by API with disabled 'PHY-interrupts' */
+/**@brief Restart the TX FSM if events were queued while it was disabled.
+ *
+ * Acquires the run flag atomically so only one context wakes the FSM.
+ * Fixed: explicit (void) parameter list (empty parentheses declare
+ * unspecified arguments in C, not "no arguments").
+ */
+static void hci_tx_reschedule(void)
+{
+    bool     tx_exec_flag = false;
+    uint32_t tx_queue_length;
+
+    CRITICAL_REGION_ENTER();
+    tx_queue_length = nrf_queue_utilization_get(&m_tx_evt_queue);
+
+#ifndef HCI_LINK_CONTROL
+    if (m_tx_fsm_idle_flag && m_hci_global_enable_flag && tx_queue_length)
+#else
+    if (m_tx_fsm_idle_flag && m_hci_global_enable_flag && tx_queue_length && (m_hci_mode == HCI_MODE_ACTIVE))
+#endif /* HCI_LINK_CONTROL */
+    {
+        tx_exec_flag       = true;  // FSM should be activated
+        m_tx_fsm_idle_flag = false; // FSM will be busy from now on till the queue is exhausted
+    }
+    CRITICAL_REGION_EXIT();
+
+    if (tx_exec_flag)
+    {
+        hci_tx_fsm();
+    }
+    return;
+}
+
+
+/* entry to TX state machine, might be called asynchronously from different contexts */
+/* Puts event into the TX event queue and execute if FSM was idle */
+/**@brief Queue a TX event and run the TX FSM if no other context owns it.
+ *
+ * The push and the idle-flag handover happen inside one critical region so
+ * that at most one caller leaves with tx_exec_flag set.
+ *
+ * @param[in] p_event Event to enqueue for the TX FSM.
+ */
+static void hci_tx_event_handler(hci_evt_t * p_event)
+{
+    bool     tx_exec_flag = false;
+    uint32_t err_code;
+
+    CRITICAL_REGION_ENTER();
+    err_code = nrf_queue_push(&m_tx_evt_queue, p_event);
+    ser_phy_hci_assert(err_code == NRF_SUCCESS);
+
+    /* only one process can acquire tx_exec_flag */
+    if (m_tx_fsm_idle_flag && m_hci_global_enable_flag)
+    {
+        tx_exec_flag       = true;  // FSM should be activated
+        m_tx_fsm_idle_flag = false; // FSM will be busy from now on till the queue is exhausted
+    }
+    CRITICAL_REGION_EXIT();
+
+    if (tx_exec_flag)
+    {
+        hci_tx_fsm();
+    }
+    return;
+}
+
+
+/* Something might have been queued by API with disabled 'PHY-interrupts' */
+/**@brief Restart the RX FSM if events were queued while it was disabled.
+ *
+ * Mirrors hci_tx_reschedule(). Fixed: explicit (void) parameter list
+ * (empty parentheses declare unspecified arguments in C).
+ */
+static void hci_rx_reschedule(void)
+{
+    bool     rx_exec_flag = false;
+    uint32_t rx_queue_length;
+
+    CRITICAL_REGION_ENTER();
+    rx_queue_length = nrf_queue_utilization_get(&m_rx_evt_queue);
+
+#ifndef HCI_LINK_CONTROL
+    if (m_rx_fsm_idle_flag && m_hci_global_enable_flag && rx_queue_length)
+#else
+    if (m_rx_fsm_idle_flag && m_hci_global_enable_flag && rx_queue_length && (m_hci_mode == HCI_MODE_ACTIVE))
+#endif /* HCI_LINK_CONTROL */
+    {
+        rx_exec_flag       = true;  // FSM should be activated
+        m_rx_fsm_idle_flag = false; // FSM will be busy from now on till the queue is exhausted
+    }
+    CRITICAL_REGION_EXIT();
+
+    if (rx_exec_flag)
+    {
+        hci_rx_fsm();
+    }
+
+}
+
+
+/* Entry to RX state machine, might be called asynchronously from different contexts */
+/* Puts event into the RX event queue and execute if FSM was idle */
+/**@brief Queue an RX event and run the RX FSM if no other context owns it.
+ *
+ * Mirrors hci_tx_event_handler(): push and idle-flag handover are atomic.
+ *
+ * @param[in] p_event Event to enqueue for the RX FSM.
+ */
+static void hci_rx_event_handler(hci_evt_t * p_event)
+{
+    bool     rx_exec_flag = false;
+    uint32_t err_code;
+
+    CRITICAL_REGION_ENTER();
+    err_code = nrf_queue_push(&m_rx_evt_queue, p_event);
+    ser_phy_hci_assert(err_code == NRF_SUCCESS);
+
+    /* only one process can acquire rx_exec_flag */
+    if (m_rx_fsm_idle_flag && m_hci_global_enable_flag)
+    {
+        rx_exec_flag       = true;  // FSM should be activated
+        m_rx_fsm_idle_flag = false; // FSM will be busy from now on till the queue is exhausted
+    }
+    CRITICAL_REGION_EXIT();
+
+    if (rx_exec_flag)
+    {
+        hci_rx_fsm();
+    }
+
+    return;
+}
+
+#ifdef HCI_LINK_CONTROL
+/* Link control event handler - used only for Link Control packets */
+/* This handler will be called only in 2 cases:
+   - when SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED event is received
+   - when HCI_TIMER_EVT event is reveived */
+/**@brief Drive the three-wire link-establishment handshake
+ *        (UNINITIALIZED -> SYNC/SYNC_RSP -> INITIALIZED -> CONFIG/CONFIG_RSP
+ *        -> ACTIVE). Mode transitions and next-packet selection happen inside
+ *        a critical region; the received buffer is always freed afterwards.
+ *
+ * @param[in] p_event Wrapped SLIP PKT_RECEIVED event or timer event.
+ */
+static void hci_link_control_event_handler(hci_evt_t * p_event)
+{
+    uint16_t pkt_type = HCI_LINK_CONTROL_PKT_INVALID;
+
+    switch (p_event->evt_source)
+    {
+        case HCI_SLIP_EVT:
+            pkt_type = link_control_packet_decode(
+                p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer,
+                p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes);
+            /* Perform HCI mode transition if needed */
+            CRITICAL_REGION_ENTER();
+            switch (pkt_type)
+            {
+                case HCI_PKT_SYNC:
+                    m_hci_link_control_next_pkt = HCI_PKT_SYNC_RSP;
+                    /* Restart HCI communication if it was in ACTIVE mode */
+                    // A SYNC from an ACTIVE peer means it rebooted: reset our
+                    // sequence state and fall back to the handshake.
+                    if (m_hci_mode == HCI_MODE_ACTIVE)
+                    {
+                        m_hci_mode              = HCI_MODE_UNINITIALIZED;
+                        m_packet_ack_number     = INITIAL_ACK_NUMBER_EXPECTED;
+                        m_packet_seq_number     = INITIAL_SEQ_NUMBER;
+                        m_hci_tx_fsm_state      = HCI_TX_STATE_DISABLE;
+                        m_hci_rx_fsm_state      = HCI_RX_STATE_DISABLE;
+                        m_hci_other_side_active = false;
+                    }
+                    hci_link_control_pkt_send();
+                    hci_timeout_setup(HCI_LINK_CONTROL_TIMEOUT); // Need to trigger transmitting SYNC messages
+                    break;
+                case HCI_PKT_SYNC_RSP:
+                    if (m_hci_mode == HCI_MODE_UNINITIALIZED)
+                    {
+                        m_hci_mode                  = HCI_MODE_INITIALIZED;
+                        m_hci_link_control_next_pkt = HCI_PKT_CONFIG;
+                    }
+                    break;
+                case HCI_PKT_CONFIG:
+                    if (m_hci_mode != HCI_MODE_UNINITIALIZED)
+                    {
+                        m_hci_link_control_next_pkt = HCI_PKT_CONFIG_RSP;
+                        hci_link_control_pkt_send();
+                        // Peer sending CONFIG proves it completed SYNC exchange.
+                        m_hci_other_side_active = true;
+                    }
+                    break;
+                case HCI_PKT_CONFIG_RSP:
+                    if (m_hci_mode == HCI_MODE_INITIALIZED)
+                    {
+                        m_hci_mode         = HCI_MODE_ACTIVE;
+                        m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
+                        m_hci_rx_fsm_state = HCI_RX_STATE_RECEIVE;
+                    }
+                    break;
+            }
+            CRITICAL_REGION_EXIT();
+            (void) ser_phy_hci_slip_rx_buf_free(
+                p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
+            /* Kick the state machine so it can start process BLE packets */
+            if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
+            {
+                hci_tx_reschedule();
+                hci_rx_reschedule();
+            }
+            break;
+
+        case HCI_TIMER_EVT:
+            /* Send one of the Link Control packets if in Unintialized or Initialized state */
+            CRITICAL_REGION_ENTER();
+            switch (m_hci_mode)
+            {
+                case HCI_MODE_UNINITIALIZED:
+                    //send packet
+                    m_hci_link_control_next_pkt = HCI_PKT_SYNC;
+                    hci_link_control_pkt_send();
+                    hci_timeout_setup(HCI_LINK_CONTROL_TIMEOUT);
+                    break;
+                case HCI_MODE_INITIALIZED:
+                    m_hci_link_control_next_pkt = HCI_PKT_CONFIG;
+                    hci_link_control_pkt_send();
+                    hci_timeout_setup(HCI_LINK_CONTROL_TIMEOUT);
+                    break;
+                case HCI_MODE_ACTIVE:
+                case HCI_MODE_DISABLE:
+                default:
+                    // No implementation needed
+                    break;
+            }
+            CRITICAL_REGION_EXIT();
+            break;
+        case HCI_SER_PHY_EVT:
+        default:
+            // No implementation needed
+            break;
+    }
+}
+#endif /* HCI_LINK_CONTROL */
+
+/* ser_phy API function */
+/**@brief Re-enable PHY event processing.
+ *
+ * Re-arms the timer flag, replays a timeout that fired while interrupts were
+ * disabled, and reschedules both FSMs in case events were queued meanwhile.
+ */
+void ser_phy_interrupts_enable(void)
+{
+    bool pending_timer_callback_flag = false;
+
+    CRITICAL_REGION_ENTER();
+    m_hci_timer_enabled_flag = true;
+
+    if (m_hci_timout_pending_flag)
+    {
+        m_hci_timout_pending_flag   = false;
+        pending_timer_callback_flag = true;
+    }
+    CRITICAL_REGION_EXIT();
+    // this is a workaround - scheduled SER_PHY EVENTS
+    m_hci_global_enable_flag = true;
+    hci_tx_reschedule();
+    hci_rx_reschedule();
+
+    // Deliver the timeout that was deferred while processing was disabled.
+    if (pending_timer_callback_flag)
+    {
+        hci_signal_timeout_event();
+    }
+
+    return;
+}
+
+
+/* ser_phy API function */
+/**@brief Suspend PHY event processing: timer callbacks are deferred and the
+ *        FSMs will not be kicked until ser_phy_interrupts_enable() is called. */
+void ser_phy_interrupts_disable(void)
+{
+    CRITICAL_REGION_ENTER();
+    m_hci_timer_enabled_flag = false;
+    // transport calls PHY API with ser_phy_interrupts_disabled
+    m_hci_global_enable_flag = false;
+    CRITICAL_REGION_EXIT();
+}
+
+
+/* ser_phy API function */
+/**@brief Provide the RX buffer previously requested via SER_PHY_EVT_RX_BUF_REQUEST.
+ *
+ * @param[in] p_buffer Buffer for the pending payload, or NULL to drop the packet.
+ *
+ * @retval NRF_SUCCESS    Buffer accepted; a BUF_GRANTED event is queued to the RX FSM.
+ * @retval NRF_ERROR_BUSY No buffer request is currently outstanding.
+ */
+uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
+{
+    uint32_t  status = NRF_SUCCESS;
+    hci_evt_t event;
+
+    if (m_buffer_reqested_flag)
+    {
+        m_buffer_reqested_flag         = false;
+        m_p_rx_buffer                  = p_buffer;
+        event.evt_source               = HCI_SER_PHY_EVT;
+        event.evt.ser_phy_evt.evt_type = HCI_SER_PHY_RX_BUF_GRANTED;
+        hci_rx_event_handler(&event);
+    }
+    else
+    {
+        status = NRF_ERROR_BUSY;
+    }
+    return status;
+}
+
+
+/* ser_phy API function */
+/**@brief Request transmission of a payload over the reliable HCI transport.
+ *
+ * Only one transmission may be pending at a time; completion is signalled by
+ * SER_PHY_EVT_TX_PKT_SENT (or SER_PHY_EVT_HW_ERROR after retry exhaustion).
+ *
+ * @param[in] p_buffer     Payload to send; must stay valid until completion.
+ * @param[in] num_of_bytes Payload length in octets.
+ *
+ * @retval NRF_SUCCESS      Request queued.
+ * @retval NRF_ERROR_NULL   p_buffer is NULL or num_of_bytes is 0.
+ *                          NOTE(review): NRF_ERROR_INVALID_PARAM would describe
+ *                          the zero-length case better; kept for compatibility.
+ * @retval NRF_ERROR_BUSY   A previous transmission is still in progress.
+ */
+uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
+{
+    uint32_t  status = NRF_SUCCESS;
+    hci_evt_t event;
+
+    if ( p_buffer == NULL || num_of_bytes == 0)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    if ( m_p_tx_payload == NULL)
+    {
+        m_tx_payload_length = num_of_bytes;
+        m_p_tx_payload      = (uint8_t *)p_buffer;
+        DEBUG_EVT_TX_REQ(0);
+        event.evt_source               = HCI_SER_PHY_EVT;
+        event.evt.ser_phy_evt.evt_type = HCI_SER_PHY_TX_REQUEST;
+        hci_tx_event_handler(&event);
+    }
+    else
+    {
+        status = NRF_ERROR_BUSY;
+    }
+
+    return status;
+}
+
+
+/**@brief Initialize and start the retransmission timer.
+ *
+ * With HCI_APP_TIMER a repeated app_timer instance is created and started;
+ * otherwise a hardware TIMER peripheral is configured for a 16-bit compare[1]
+ * interrupt (prescaler 9, i.e. timer clock divided by 2^9).
+ *
+ * @retval NRF_SUCCESS        Timer running.
+ * @retval NRF_ERROR_INTERNAL app_timer creation or start failed.
+ */
+static uint32_t hci_timer_init(void)
+{
+    uint32_t err_code = NRF_SUCCESS;
+
+#ifdef HCI_APP_TIMER
+
+    err_code = app_timer_create(&m_app_timer_id, APP_TIMER_MODE_REPEATED, hci_timeout_handler);
+
+    if (err_code != NRF_SUCCESS)
+    {
+        return NRF_ERROR_INTERNAL;
+    }
+
+    err_code = app_timer_start(m_app_timer_id, RETRANSMISSION_TIMEOUT_IN_TICKS, NULL);
+
+    if (err_code != NRF_SUCCESS)
+    {
+        return NRF_ERROR_INTERNAL;
+    }
+
+#else
+
+    // Configure TIMER for compare[1] event
+    HCI_TIMER->PRESCALER = 9;
+    HCI_TIMER->MODE      = TIMER_MODE_MODE_Timer;
+    HCI_TIMER->BITMODE   = TIMER_BITMODE_BITMODE_16Bit;
+
+    // Clear TIMER
+    HCI_TIMER->TASKS_CLEAR = 1;
+
+    // Enable interrupt
+    HCI_TIMER->INTENCLR = 0xFFFFFFFF;
+    HCI_TIMER->INTENSET = TIMER_INTENSET_COMPARE1_Enabled << TIMER_INTENSET_COMPARE1_Pos;
+
+    NVIC_ClearPendingIRQ(HCI_TIMER_IRQn);
+    NVIC_SetPriority(HCI_TIMER_IRQn, APP_IRQ_PRIORITY_HIGH);
+    NVIC_EnableIRQ(HCI_TIMER_IRQn);
+
+#endif
+
+    return err_code;
+
+}
+
+
+/* ser_phy API function */
+/**@brief Open the HCI PHY: start the retransmission timer, reset the event
+ *        queues, open the SLIP layer, and initialize transport state.
+ *
+ * Cleanup: removed a redundant second test of err_code after the SLIP open -
+ * the early return already guarantees err_code == NRF_SUCCESS at that point,
+ * so the old `if (err_code == NRF_SUCCESS)` guard was dead code.
+ *
+ * @param[in] events_handler Upper-layer callback for all PHY events.
+ *
+ * @retval NRF_SUCCESS             PHY opened.
+ * @retval NRF_ERROR_INVALID_STATE Already open.
+ * @retval NRF_ERROR_NULL          events_handler is NULL.
+ * @retval NRF_ERROR_INTERNAL      Timer initialization failed.
+ * @return Any error propagated from ser_phy_hci_slip_open().
+ */
+uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
+{
+    uint32_t err_code;
+
+    if ((m_hci_tx_fsm_state != HCI_TX_STATE_DISABLE) || (m_hci_rx_fsm_state != HCI_RX_STATE_DISABLE))
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+
+    if (events_handler == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    err_code = hci_timer_init();
+
+    if (err_code != NRF_SUCCESS)
+    {
+        return NRF_ERROR_INTERNAL;
+    }
+
+    nrf_queue_reset(&m_tx_evt_queue);
+    nrf_queue_reset(&m_rx_evt_queue);
+
+    err_code = ser_phy_hci_slip_open(hci_slip_event_handler);
+
+    if (err_code != NRF_SUCCESS)
+    {
+        return err_code;
+    }
+
+    m_packet_ack_number = INITIAL_ACK_NUMBER_EXPECTED;
+    m_packet_seq_number = INITIAL_SEQ_NUMBER;
+    m_ser_phy_callback  = events_handler;
+
+#ifndef HCI_LINK_CONTROL
+    m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
+    m_hci_rx_fsm_state = HCI_RX_STATE_RECEIVE;
+#else
+    hci_timeout_setup(HCI_LINK_CONTROL_TIMEOUT); // Trigger sending SYNC messages
+    m_hci_mode              = HCI_MODE_UNINITIALIZED;
+    m_hci_other_side_active = false;
+#endif /*HCI_LINK_CONTROL*/
+
+    return NRF_SUCCESS;
+}
+
+/**@brief Stop the retransmission timer (only meaningful for app_timer builds).
+ *
+ * @retval NRF_SUCCESS        Timer stopped (or nothing to stop).
+ * @retval NRF_ERROR_INTERNAL app_timer_stop() failed.
+ */
+static uint32_t hci_timer_close(void)
+{
+#ifdef HCI_APP_TIMER
+    if (app_timer_stop(m_app_timer_id) != NRF_SUCCESS)
+    {
+        return NRF_ERROR_INTERNAL;
+    }
+#endif
+
+    return NRF_SUCCESS;
+}
+
+/* ser_phy API function */
+/**@brief Close the HCI PHY: unregister the callback, close the SLIP layer,
+ *        disable both FSMs, and stop the retransmission timer. */
+void ser_phy_close(void)
+{
+    m_ser_phy_callback = NULL;
+    ser_phy_hci_slip_close();
+    m_hci_tx_fsm_state = HCI_TX_STATE_DISABLE;
+    m_hci_rx_fsm_state = HCI_RX_STATE_DISABLE;
+
+#ifdef HCI_LINK_CONTROL
+    m_hci_mode = HCI_MODE_DISABLE;
+#endif /* HCI_LINK_CONTROL */
+
+    uint32_t err_code = hci_timer_close();
+    ser_phy_hci_assert(err_code == NRF_SUCCESS);
+}
+
+
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci.h b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci.h
new file mode 100644
index 0000000..c2993fd
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci.h
@@ -0,0 +1,183 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/** @file
+ *
+ * @defgroup ser_phy_hci HCI Serialization PHY
+ * @{
+ * @ingroup ble_sdk_lib_serialization
+ *
+ * @brief HCI PHY layer for serialization.
+ *
+ * @details This file contains declarations of functions and definitions of data structures and
+ * identifiers (typedef enum) used as API of the serialization HCI PHY layer.
+ *
+ *
+ */
+
+#ifndef SER_PHY_HCI_H__
+#define SER_PHY_HCI_H__
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**@brief Serialization PHY HCI module event types. */
+typedef enum
+{
+    SER_PHY_HCI_SLIP_EVT_PKT_SENT = 0, /**< An event indicating that a packet has been transmitted. */
+    SER_PHY_HCI_SLIP_EVT_ACK_SENT,     /**< An event indicating that an ACK packet has been transmitted. */
+    SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED, /**< An event indicating that a packet has been received. */
+    SER_PHY_HCI_SLIP_EVT_HW_ERROR,     /**< An event indicating a hardware error in the PHY HCI module. */
+    SER_PHY_HCI_SLIP_EVT_TYPE_MAX      /**< Enumeration upper bound. */
+} ser_phy_hci_slip_evt_type_t;
+
+/**@brief Struct representing a PHY HCI packet (or packet fragment). */
+typedef struct
+{
+    uint8_t * p_buffer;     /**< Pointer to a buffer containing a packet. */
+    uint16_t  num_of_bytes; /**< Length of a packet in octets. */
+} ser_phy_hci_pkt_params_t;
+
+
+/**@brief Struct containing parameters of event of type @ref SER_PHY_HCI_SLIP_EVT_HW_ERROR. */
+typedef struct
+{
+ uint32_t error_code; /**< Hardware error code - specific for a microcontroller. */
+} ser_phy_hci_evt_hw_error_params_t;
+
+
+/**@brief Struct containing events from the Serialization PHY module.
+ *
+ * @note Some events do not have parameters, then the whole information is contained in the evt_type.
+ */
+typedef struct
+{
+ ser_phy_hci_slip_evt_type_t evt_type; /**< Type of an event. */
+ union /**< Union alternative identified by evt_type in the enclosing struct. */
+ {
+ /** Parameters of event of type @ref SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED. */
+ ser_phy_hci_pkt_params_t received_pkt;
+ /** Parameters of event of type @ref SER_PHY_HCI_SLIP_EVT_HW_ERROR. */
+ ser_phy_hci_evt_hw_error_params_t hw_error;
+ } evt_params;
+} ser_phy_hci_slip_evt_t;
+
+
+/**@brief Type of generic callback function handler to be used by all PHY HCI events.
+ *
+ * @param[in] event Serialization PHY HCI module event.
+ */
+typedef void (*ser_phy_hci_slip_event_handler_t)(ser_phy_hci_slip_evt_t *p_event);
+
+/**@brief Function for opening and initializing a HCI SLIP PHY module.
+ *
+ * @note The function initializes hardware and internal module states and registers callback
+ * function to be used by all PHY HCI module events.
+ *
+ * @warning If the function has already been called, the function @ref ser_phy_hci_slip_close has to be
+ * called before ser_phy_hci_slip_open can be called again.
+ *
+ * @param[in] events_handler Generic callback function handler to be used by all PHY HCI module
+ * events.
+ *
+ * @retval NRF_SUCCESS Operation success.
+ * @retval NRF_ERROR_INVALID_STATE Operation failure. The function has been already called.
+ * To call it again, the function @ref ser_phy_hci_slip_close has to
+ * be called first.
+ * @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied.
+ * @retval NRF_ERROR_INVALID_PARAM Operation failure. Hardware initialization parameters are not
+ * supported.
+ */
+uint32_t ser_phy_hci_slip_open(ser_phy_hci_slip_event_handler_t events_handler);
+
+
+/**@brief A function for transmitting a HCI SLIP packet.
+ *
+ * @note The function adds a packet pointed by p_buffer parameter to a transmission queue and
+ * schedules generation of an event of type @ref SER_PHY_HCI_SLIP_EVT_PKT_SENT upon transmission
+ * completion.
+ *
+ * @param[in] p_header Pointer to ser_phy_hci_pkt_params_t structure representing packet header.
+ * @param[in] p_payload Pointer to ser_phy_hci_pkt_params_t structure representing packet payload.
+ * @param[in] p_crc Pointer to ser_phy_hci_pkt_params_t structure representing packet crc.
+ *
+ * @retval NRF_SUCCESS Operation success. Packet was added to the transmission queue
+ * and event will be sent upon transmission completion.
+ * @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied in p_header parameter.
+ * NULL pointer is allowed for p_payload and p_crc parameters.
+ * @retval NRF_ERROR_INVALID_PARAM Operation failure. Number of bytes to be sent equals 0.
+ * @retval NRF_ERROR_BUSY Operation failure. Transmitting of a packet in progress.
+ */
+uint32_t ser_phy_hci_slip_tx_pkt_send(const ser_phy_hci_pkt_params_t * p_header,
+ const ser_phy_hci_pkt_params_t * p_payload,
+ const ser_phy_hci_pkt_params_t * p_crc);
+
+
+/**@brief A function for freeing an RX buffer.
+ *
+ * @note The function has to be called as a response to event @ref SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED
+ * when an RX packet has been processed. The function frees the RX buffer and therefore enables
+ * reception of next incoming data.
+ *
+ * @param[in] p_buffer Pointer to an RX buffer which must be freed.
+ *
+ * @retval NRF_SUCCESS Operation success.
+ * @retval NRF_ERROR_NULL Operation failure. NULL pointer supplied.
+ * @retval NRF_ERROR_INVALID_STATE Operation failure. A buffer was already free.
+ */
+uint32_t ser_phy_hci_slip_rx_buf_free(uint8_t * p_buffer);
+
+
+/**@brief A function for closing a PHY HCI module.
+ *
+ * @note The function disables hardware, resets internal module states, and unregisters the events
+ * callback function.
+ */
+void ser_phy_hci_slip_close(void);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SER_PHY_HCI_H__ */
+/** @} */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci_slip.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci_slip.c
new file mode 100644
index 0000000..4139bae
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci_slip.c
@@ -0,0 +1,689 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+#include "ser_phy_hci.h"
+#include "ser_config.h"
+#ifdef SER_CONNECTIVITY
+#include "ser_phy_config_conn.h"
+#else
+#include "ser_phy_config_app.h"
+#endif
+#include "nrf_drv_uart.h"
+#include "app_error.h"
+#include "app_util_platform.h"
+
+#define APP_SLIP_END 0xC0 /**< SLIP code for identifying the beginning and end of a packet frame.. */
+#define APP_SLIP_ESC 0xDB /**< SLIP escape code. This code is used to specify that the following character is specially encoded. */
+#define APP_SLIP_ESC_END 0xDC /**< SLIP special code. When this code follows 0xDB, this character is interpreted as payload data 0xC0.. */
+#define APP_SLIP_ESC_ESC 0xDD /**< SLIP special code. When this code follows 0xDB, this character is interpreted as payload data 0xDB. */
+
+#define HDR_SIZE 4
+#define CRC_SIZE 2
+#define PKT_SIZE (SER_HAL_TRANSPORT_MAX_PKT_SIZE + HDR_SIZE + CRC_SIZE)
+
+static const nrf_drv_uart_t m_uart = NRF_DRV_UART_INSTANCE(0);
+static const nrf_drv_uart_config_t m_uart_config = {
+ .pseltxd = SER_PHY_UART_TX,
+ .pselrxd = SER_PHY_UART_RX,
+ .pselrts = SER_PHY_UART_RTS,
+ .pselcts = SER_PHY_UART_CTS,
+ .p_context = NULL,
+ .interrupt_priority = UART_IRQ_PRIORITY,
+#if defined(UARTE_PRESENT) && defined(UART_PRESENT)
+ .use_easy_dma = true,
+#endif
+ // These values are common for application and connectivity, they are
+ // defined in "ser_config.h".
+ .hwfc = SER_PHY_UART_FLOW_CTRL,
+ .parity = SER_PHY_UART_PARITY,
+ .baudrate = (nrf_uart_baudrate_t)SER_PHY_UART_BAUDRATE
+};
+
+// Descriptors of one HCI packet: header, payload and CRC fragments.
+typedef struct {
+    ser_phy_hci_pkt_params_t header;
+    ser_phy_hci_pkt_params_t payload;
+    ser_phy_hci_pkt_params_t crc;
+} ser_phy_hci_slip_pkt_t;
+static ser_phy_hci_slip_pkt_t m_tx_curr_packet; // Packet currently being encoded and sent.
+static ser_phy_hci_slip_pkt_t m_tx_next_packet; // Packet queued to be sent next (header.p_buffer == NULL when none).
+
+static ser_phy_hci_slip_evt_t           m_ser_phy_hci_slip_event;
+static ser_phy_hci_slip_event_handler_t m_ser_phy_hci_slip_event_handler; /**< Event handler for upper layer */
+
+// Double buffering: one buffer is filled with SLIP-encoded data while the
+// other may be handed to the UART driver for transmission.
+static uint8_t   m_tx_buf0[SER_PHY_HCI_SLIP_TX_BUF_SIZE];
+static uint8_t   m_tx_buf1[SER_PHY_HCI_SLIP_TX_BUF_SIZE];
+static uint8_t * mp_tx_buf;  // Buffer currently being filled.
+static uint8_t   m_tx_bytes; // Number of bytes already put into 'mp_tx_buf'.
+static enum {
+    PHASE_BEGIN,
+    PHASE_HEADER,
+    PHASE_PAYLOAD,
+    PHASE_CRC,
+    PHASE_ACK_END,
+    // The following three elements have to have consecutive values,
+    // 'tx_buf_fill()' relies on this.
+    PHASE_PACKET_END,
+    PHASE_PRE_IDLE = PHASE_PACKET_END + 1,
+    PHASE_IDLE     = PHASE_PRE_IDLE + 1
+} volatile m_tx_phase;
+static bool volatile m_tx_in_progress; // A UART TX transfer is ongoing.
+static bool volatile m_tx_pending;     // A filled buffer is waiting for the ongoing transfer to finish.
+
+#define NO_EVENT SER_PHY_HCI_SLIP_EVT_TYPE_MAX // Marker: completion of this flush needs no upper-layer event.
+static ser_phy_hci_slip_evt_type_t m_tx_evt_type;         // Event to signal when the current transfer completes.
+static ser_phy_hci_slip_evt_type_t m_tx_pending_evt_type; // Event to signal when the pending transfer completes.
+
+// RX buffers: the small one holds an ACK/header-sized packet, the big one a
+// maximum-size packet.
+static uint8_t m_small_buffer[HDR_SIZE];
+static uint8_t m_big_buffer[PKT_SIZE];
+
+// Non-NULL when the corresponding buffer is free; NULL while it is locked,
+// i.e. handed to the upper layer (see ser_phy_hci_slip_rx_buf_free()).
+static uint8_t * mp_small_buffer = NULL;
+static uint8_t * mp_big_buffer   = NULL;
+static uint8_t * mp_buffer       = NULL;
+
+static uint8_t m_rx_buf[1]; // Single-byte UART RX buffer.
+static bool    m_rx_escape; // Last received byte was the SLIP escape code.
+
+
+// Puts one SLIP-encoded byte into the TX buffer currently being filled and,
+// when the packet is complete or the buffer is full, starts the UART transfer
+// (or schedules it if a transfer is already ongoing).
+// The function returns false to signal that no more bytes can be passed to be
+// sent (put into the TX buffer) until UART transmission is done.
+static bool tx_buf_put(uint8_t data_byte)
+{
+    ASSERT(m_tx_bytes < SER_PHY_HCI_SLIP_TX_BUF_SIZE);
+
+    mp_tx_buf[m_tx_bytes] = data_byte;
+    ++m_tx_bytes;
+
+    bool flush = false;
+    ser_phy_hci_slip_evt_type_t slip_evt_type = NO_EVENT;
+    if (m_tx_phase == PHASE_ACK_END)
+    {
+        // Send buffer, then signal that an acknowledge packet has been sent.
+        flush = true;
+        slip_evt_type = SER_PHY_HCI_SLIP_EVT_ACK_SENT;
+    }
+    else if (m_tx_phase == PHASE_PACKET_END)
+    {
+        // Send buffer, then signal that a packet with payload has been sent.
+        flush = true;
+        slip_evt_type = SER_PHY_HCI_SLIP_EVT_PKT_SENT;
+    }
+    else if (m_tx_bytes >= SER_PHY_HCI_SLIP_TX_BUF_SIZE)
+    {
+        // Send buffer (because it is filled up), but don't signal anything,
+        // since the packet sending is not complete yet.
+        flush = true;
+    }
+
+    if (flush)
+    {
+        // If some TX transfer is being done at the moment, a new one cannot be
+        // started, it must be scheduled to be performed later.
+        if (m_tx_in_progress)
+        {
+            m_tx_pending_evt_type = slip_evt_type;
+            m_tx_pending = true;
+            // No more buffers available, can't continue filling.
+            return false;
+        }
+
+        m_tx_in_progress = true;
+        m_tx_evt_type = slip_evt_type;
+        APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, mp_tx_buf, m_tx_bytes));
+
+        // Switch to the second buffer.
+        mp_tx_buf = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
+        m_tx_bytes = 0;
+    }
+
+    return true;
+}
+
+// Encodes the current packet (header, payload, CRC) into the TX buffer, byte
+// by byte, applying SLIP framing and escaping. Progress is kept in static
+// variables, so the function can be re-entered from the TX-done event handler
+// to resume filling after a buffer flush made it stop.
+static void tx_buf_fill(void)
+{
+    // Fragment (header/payload/CRC) currently being encoded and the read
+    // position within it; both persist between calls.
+    static ser_phy_hci_pkt_params_t * mp_tx_data = NULL;
+    static uint32_t m_tx_index;
+    bool can_continue = true;
+
+    do {
+        // Second byte of a two-byte escape sequence; 0 when none is pending.
+        static uint8_t tx_escaped_data = 0;
+
+        if (tx_escaped_data != 0)
+        {
+            can_continue = tx_buf_put(tx_escaped_data);
+            tx_escaped_data = 0;
+        }
+        else switch (m_tx_phase)
+        {
+            case PHASE_BEGIN:
+                // Start the frame with the SLIP END marker.
+                can_continue = tx_buf_put(APP_SLIP_END);
+                mp_tx_data = &m_tx_curr_packet.header;
+                m_tx_index = 0;
+                m_tx_phase = PHASE_HEADER;
+                tx_escaped_data = 0;
+                break;
+
+            case PHASE_ACK_END:
+            case PHASE_PACKET_END:
+                can_continue = tx_buf_put(APP_SLIP_END);
+
+                // [this is needed for the '++m_tx_phase;' below]
+                m_tx_phase = PHASE_PACKET_END;
+                // no break, intentional fall-through
+
+            case PHASE_PRE_IDLE:
+                // In PHASE_PRE_IDLE the sending process is almost finished, only
+                // the NRF_DRV_UART_EVT_TX_DONE event is needed before it can switch
+                // to PHASE_IDLE. But during this waiting a new packet may appear
+                // (i.e. 'ser_phy_hci_slip_tx_pkt_send()' may be called), hence
+                // the following pointer must be checked before switching the phase,
+                // just like right after writing whole packet to buffer (i.e. in
+                // PHASE_PACKET_END). Therefore, the following code is common for
+                // these two cases.
+                if (m_tx_next_packet.header.p_buffer != NULL)
+                {
+                    m_tx_curr_packet = m_tx_next_packet;
+                    m_tx_next_packet.header.p_buffer = NULL;
+
+                    m_tx_phase = PHASE_BEGIN;
+                    break;
+                }
+                // Go to the next phase:
+                //   PHASE_PACKET_END -> PHASE_PRE_IDLE
+                //   PHASE_PRE_IDLE   -> PHASE_IDLE
+                ++m_tx_phase;
+                return;
+
+            default:
+                ASSERT(mp_tx_data->p_buffer != NULL);
+                uint8_t data = mp_tx_data->p_buffer[m_tx_index];
+                ++m_tx_index;
+
+                if (data == APP_SLIP_END)
+                {
+                    // 0xC0 in the data is sent as the ESC, ESC_END sequence.
+                    data = APP_SLIP_ESC;
+                    tx_escaped_data = APP_SLIP_ESC_END;
+                }
+                else if (data == APP_SLIP_ESC)
+                {
+                    // 0xDB in the data is sent as the ESC, ESC_ESC sequence.
+                    tx_escaped_data = APP_SLIP_ESC_ESC;
+                }
+                can_continue = tx_buf_put(data);
+
+                // Advance to the next fragment when the current one is done.
+                if (m_tx_index >= mp_tx_data->num_of_bytes)
+                {
+                    mp_tx_data->p_buffer = NULL;
+
+                    if (m_tx_phase == PHASE_HEADER)
+                    {
+                        if (m_tx_curr_packet.payload.p_buffer == NULL)
+                        {
+                            // No payload -> ACK packet.
+                            m_tx_phase = PHASE_ACK_END;
+                        }
+                        else
+                        {
+                            mp_tx_data = &m_tx_curr_packet.payload;
+                            m_tx_index = 0;
+                            m_tx_phase = PHASE_PAYLOAD;
+                        }
+                    }
+                    else if (m_tx_phase == PHASE_PAYLOAD)
+                    {
+                        if (m_tx_curr_packet.crc.p_buffer == NULL)
+                        {
+                            // Packet without CRC.
+                            m_tx_phase = PHASE_PACKET_END;
+                        }
+                        else
+                        {
+                            mp_tx_data = &m_tx_curr_packet.crc;
+                            m_tx_index = 0;
+                            m_tx_phase = PHASE_CRC;
+                        }
+                    }
+                    else
+                    {
+                        ASSERT(m_tx_phase == PHASE_CRC);
+                        m_tx_phase = PHASE_PACKET_END;
+                    }
+                }
+                break;
+        }
+    } while (can_continue);
+}
+
+// Copies the header/payload/CRC descriptors into 'p_pkt'. A NULL descriptor
+// is recorded as a NULL buffer pointer, which the TX state machine interprets
+// as "this part is absent".
+static void tx_pkt_params_store(ser_phy_hci_slip_pkt_t * p_pkt,
+                                const ser_phy_hci_pkt_params_t * p_header,
+                                const ser_phy_hci_pkt_params_t * p_payload,
+                                const ser_phy_hci_pkt_params_t * p_crc)
+{
+    p_pkt->header = *p_header;
+
+    if (p_payload == NULL)
+    {
+        p_pkt->payload.p_buffer = NULL;
+    }
+    else
+    {
+        p_pkt->payload = *p_payload;
+    }
+
+    if (p_crc == NULL)
+    {
+        p_pkt->crc.p_buffer = NULL;
+    }
+    else
+    {
+        p_pkt->crc = *p_crc;
+    }
+}
+
+/**@brief Queues an HCI SLIP packet for transmission.
+ *
+ * @param[in] p_header  Packet header descriptor (mandatory).
+ * @param[in] p_payload Packet payload descriptor, or NULL for an ACK packet.
+ * @param[in] p_crc     Packet CRC descriptor, or NULL when no CRC is sent.
+ *
+ * @retval NRF_SUCCESS    Packet accepted (started or queued as next).
+ * @retval NRF_ERROR_NULL p_header is NULL.
+ *
+ * NOTE(review): if a "next" packet is already queued, it is silently
+ * overwritten by this call; NRF_ERROR_BUSY is never returned here even though
+ * the header documents it - confirm callers serialize their send requests.
+ */
+uint32_t ser_phy_hci_slip_tx_pkt_send(const ser_phy_hci_pkt_params_t * p_header,
+                                      const ser_phy_hci_pkt_params_t * p_payload,
+                                      const ser_phy_hci_pkt_params_t * p_crc)
+{
+    if (p_header == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    CRITICAL_REGION_ENTER();
+
+    // If some packet is already transmitted, schedule this new one to be sent
+    // as next. A critical region is needed here to ensure that the transmission
+    // won't finish before the following assignments are done.
+    if (m_tx_phase != PHASE_IDLE)
+    {
+        tx_pkt_params_store(&m_tx_next_packet, p_header, p_payload, p_crc);
+    }
+    else
+    {
+        tx_pkt_params_store(&m_tx_curr_packet, p_header, p_payload, p_crc);
+
+        m_tx_phase = PHASE_BEGIN;
+        tx_buf_fill();
+    }
+
+    CRITICAL_REGION_EXIT();
+
+    return NRF_SUCCESS;
+}
+
+/* Decodes one SLIP-encoded byte in place.
+ * Returns false when the byte is the end-of-packet marker (0xC0). */
+static bool slip_decode(uint8_t * p_received_byte)
+{
+    uint8_t byte = *p_received_byte;
+
+    if (byte == APP_SLIP_END)
+    {
+        /* End of packet - this byte is not part of the payload. */
+        return false;
+    }
+
+    if (byte == APP_SLIP_ESC)
+    {
+        /* Escape code - remember it; the next byte is specially encoded. */
+        m_rx_escape = true;
+    }
+    else if (m_rx_escape && (byte == APP_SLIP_ESC_END))
+    {
+        m_rx_escape = false;
+        *p_received_byte = APP_SLIP_END;
+    }
+    else if (m_rx_escape && (byte == APP_SLIP_ESC_ESC))
+    {
+        m_rx_escape = false;
+        *p_received_byte = APP_SLIP_ESC;
+    }
+    /* Any other byte is passed through unchanged. */
+
+    return true;
+}
+
+
+// Processes one byte received from the UART: synchronizes to SLIP frame
+// boundaries, SLIP-decodes the byte, stores it in the small (ACK-sized) or
+// big (full-packet) RX buffer, and reports a complete packet to the upper
+// layer via SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED.
+// NOTE(review): 'phi' in the name looks like a typo for 'phy'; it is kept
+// because the name is referenced by the UART event handler in this file.
+static void ser_phi_hci_rx_byte(uint8_t rx_byte)
+{
+    static bool rx_sync = false;         // True while inside a SLIP frame.
+    uint8_t received_byte = rx_byte;
+    static bool big_buff_in_use = false; // Current packet goes directly to the big buffer.
+    static uint32_t m_rx_index;          // Write position within the active RX buffer.
+    /* Test received byte for SLIP packet start: 0xC0*/
+    if (!rx_sync)
+    {
+        if (received_byte == APP_SLIP_END)
+        {
+            m_rx_index = 0;
+            rx_sync = true;
+        }
+        return;
+    }
+
+    /* Additional check needed in case rx_sync flag was set by end of previous packet*/
+    if ((m_rx_index) == 0 && (received_byte == APP_SLIP_END))
+    {
+        return;
+    }
+
+    /* Check if small (ACK) buffer is available*/
+    if ((mp_small_buffer != NULL) && (big_buff_in_use == false))
+    {
+        if (m_rx_index == 0)
+        {
+            mp_buffer = mp_small_buffer;
+        }
+
+        /* Check if switch between small and big buffer is needed*/
+        if (m_rx_index == sizeof (m_small_buffer) && received_byte != APP_SLIP_END)
+        {
+            /* Check if big (PKT) buffer is available*/
+            if (mp_big_buffer != NULL)
+            {
+                /* Switch to big buffer: copy what was collected so far. */
+                memcpy(m_big_buffer, m_small_buffer, sizeof (m_small_buffer));
+                mp_buffer = m_big_buffer;
+            }
+            else
+            {
+                /* Small buffer is too small and big buffer not available - cannot continue reception*/
+                rx_sync = false;
+                return;
+            }
+        }
+
+        /* Check if big buffer is full */
+        if ((m_rx_index >= PKT_SIZE) && (received_byte != APP_SLIP_END))
+        {
+            /* Do not notify upper layer - the packet is too big and cannot be handled by slip */
+            rx_sync = false;
+            return;
+        }
+
+        /* Decode byte. Will return false when it is 0xC0 - end of packet*/
+        if (slip_decode(&received_byte))
+        {
+            /* Write Rx byte only if it is not escape char */
+            if (!m_rx_escape)
+            {
+                mp_buffer[m_rx_index++] = received_byte;
+            }
+        }
+        else
+        {
+            /* Reset pointers to signalise buffers are locked waiting for upper layer */
+            if (mp_buffer == mp_small_buffer)
+            {
+                mp_small_buffer = NULL;
+            }
+            else
+            {
+                mp_big_buffer = NULL;
+            }
+            /* Report packet reception end*/
+            m_ser_phy_hci_slip_event.evt_type =
+                SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED;
+            m_ser_phy_hci_slip_event.evt_params.received_pkt.p_buffer     = mp_buffer;
+            m_ser_phy_hci_slip_event.evt_params.received_pkt.num_of_bytes = m_rx_index;
+            m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
+
+            rx_sync = false;
+        }
+    }
+    else if (mp_big_buffer != NULL)
+    {
+        /* Small buffer is locked - assemble the whole packet in the big buffer. */
+        big_buff_in_use = true;
+        mp_buffer = mp_big_buffer;
+
+        /* Check if big buffer is full */
+        if ((m_rx_index >= PKT_SIZE) && (received_byte != APP_SLIP_END))
+        {
+            /* Do not notify upper layer - the packet is too big and cannot be handled by slip */
+            rx_sync = false;
+            return;
+        }
+
+        /* Decode byte*/
+        if (slip_decode(&received_byte))
+        {
+            /* Write Rx byte only if it is not escape char */
+            if (!m_rx_escape)
+            {
+                mp_buffer[m_rx_index++] = received_byte;
+            }
+        }
+        else
+        {
+            // Mark the big buffer as locked (it should be freed by the upper
+            // layer).
+            mp_big_buffer = NULL;
+            big_buff_in_use = false;
+
+            /* Report packet reception end*/
+            m_ser_phy_hci_slip_event.evt_type =
+                SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED;
+            m_ser_phy_hci_slip_event.evt_params.received_pkt.p_buffer     = mp_buffer;
+            m_ser_phy_hci_slip_event.evt_params.received_pkt.num_of_bytes = m_rx_index;
+            m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
+
+            rx_sync = false;
+        }
+    }
+    else
+    {
+        /* Both buffers are not available - cannot continue reception*/
+        rx_sync = false;
+        return;
+    }
+}
+
+
+/* Returns an RX buffer (previously delivered in a PKT_RECEIVED event) back to
+ * the SLIP layer, making it available for reception again. */
+uint32_t ser_phy_hci_slip_rx_buf_free(uint8_t * p_buffer)
+{
+    if (p_buffer == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    if (p_buffer == m_small_buffer)
+    {
+        /* The small buffer can be released only while it is marked as locked. */
+        if (mp_small_buffer != NULL)
+        {
+            return NRF_ERROR_INVALID_STATE;
+        }
+        mp_small_buffer = m_small_buffer;
+        return NRF_SUCCESS;
+    }
+
+    if (p_buffer == m_big_buffer)
+    {
+        /* The big buffer can be released only while it is marked as locked. */
+        if (mp_big_buffer != NULL)
+        {
+            return NRF_ERROR_INVALID_STATE;
+        }
+        mp_big_buffer = m_big_buffer;
+        return NRF_SUCCESS;
+    }
+
+    /* Pointers to other memory are ignored (success, as in the original). */
+    return NRF_SUCCESS;
+}
+
+
+// Handler for events from the UART driver. Drives the double-buffered TX
+// pipeline and the single-byte RX loop.
+static void uart_event_handler(nrf_drv_uart_event_t * p_event,
+                               void * p_context)
+{
+    (void)p_context;
+
+    switch (p_event->type)
+    {
+        case NRF_DRV_UART_EVT_ERROR:
+            // Process the error only if this is a parity or overrun error.
+            // Break and framing errors will always occur before the other
+            // side becomes active.
+            if (p_event->data.error.error_mask &
+                (NRF_UART_ERROR_PARITY_MASK | NRF_UART_ERROR_OVERRUN_MASK))
+            {
+                // Pass error source to upper layer
+                m_ser_phy_hci_slip_event.evt_type =
+                    SER_PHY_HCI_SLIP_EVT_HW_ERROR;
+                m_ser_phy_hci_slip_event.evt_params.hw_error.error_code =
+                    p_event->data.error.error_mask;
+                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
+            }
+            // Re-arm single-byte reception after the error.
+            APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_buf, 1));
+            break;
+
+        case NRF_DRV_UART_EVT_TX_DONE:
+            // If there is a pending transfer (the second buffer is ready to
+            // be sent), start it immediately.
+            if (m_tx_pending)
+            {
+                APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, mp_tx_buf, m_tx_bytes));
+
+                // Switch to the buffer that has just been sent completely
+                // and now can be filled again.
+                mp_tx_buf = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
+                m_tx_bytes = 0;
+
+                // Signal completion of the transfer that just finished, and
+                // remember the event type belonging to the transfer started
+                // above.
+                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
+                m_tx_evt_type = m_tx_pending_evt_type;
+
+                m_tx_pending = false;
+            }
+            else
+            {
+                m_tx_in_progress = false;
+                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
+            }
+            // If needed, notify the upper layer that the packet transfer is
+            // complete (note that this notification may result in another
+            // packet send request, so everything must be cleaned up above).
+            if (m_ser_phy_hci_slip_event.evt_type != NO_EVENT)
+            {
+                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
+            }
+            // And if the sending process is not yet finished, look what is
+            // to be done next.
+            if (m_tx_phase != PHASE_IDLE)
+            {
+                tx_buf_fill();
+            }
+            break;
+
+        case NRF_DRV_UART_EVT_RX_DONE:
+        {
+            // Re-arm reception before processing the byte, so the UART can
+            // keep receiving while this byte is handled.
+            uint8_t rx_byte = m_rx_buf[0];
+            APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_buf, 1));
+            ser_phi_hci_rx_byte(rx_byte);
+        }
+            break;
+
+        default:
+            APP_ERROR_CHECK(NRF_ERROR_INTERNAL);
+    }
+}
+
+
+/**@brief Opens and initializes the HCI SLIP PHY module.
+ *
+ * Initializes the UART driver, registers the upper-layer event handler,
+ * resets the TX/RX state, and starts single-byte reception.
+ *
+ * @retval NRF_SUCCESS             Module opened successfully.
+ * @retval NRF_ERROR_NULL          events_handler is NULL.
+ * @retval NRF_ERROR_INVALID_STATE Module is already open.
+ * @retval NRF_ERROR_INVALID_PARAM UART initialization failed.
+ */
+uint32_t ser_phy_hci_slip_open(ser_phy_hci_slip_event_handler_t events_handler)
+{
+    uint32_t err_code;
+
+    if (events_handler == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    // Check if function was not called before.
+    if (m_ser_phy_hci_slip_event_handler != NULL)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+
+    err_code = nrf_drv_uart_init(&m_uart, &m_uart_config, uart_event_handler);
+    if (err_code != NRF_SUCCESS)
+    {
+        // Bug fix: the handler must not be stored before initialization
+        // succeeds - otherwise a failed init would leave the module looking
+        // "open" and every retry would fail with NRF_ERROR_INVALID_STATE.
+        return NRF_ERROR_INVALID_PARAM;
+    }
+
+    // Register the handler only after successful initialization.
+    m_ser_phy_hci_slip_event_handler = events_handler;
+
+    mp_tx_buf        = m_tx_buf0;
+    m_tx_bytes       = 0;
+    m_tx_phase       = PHASE_IDLE;
+    m_tx_in_progress = false;
+    m_tx_pending     = false;
+
+    m_rx_escape      = false;
+    mp_small_buffer  = m_small_buffer;
+    mp_big_buffer    = m_big_buffer;
+
+    // Start reception of the first byte; subsequent bytes are re-armed from
+    // the UART event handler.
+    APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_buf, 1));
+
+    return NRF_SUCCESS;
+}
+
+
+/* Closes the PHY HCI module: disables the UART and unregisters the event
+ * handler, so that ser_phy_hci_slip_open() can be called again.
+ * Note: TX/RX state and RX buffer ownership are reinitialized by the next
+ * ser_phy_hci_slip_open() call, not here. */
+void ser_phy_hci_slip_close(void)
+{
+    nrf_drv_uart_uninit(&m_uart);
+    m_ser_phy_hci_slip_event_handler = NULL;
+}
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci_slip_cdc.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci_slip_cdc.c
new file mode 100644
index 0000000..99efac0
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_hci_slip_cdc.c
@@ -0,0 +1,720 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+#include "ser_phy_hci.h"
+#include "ser_config.h"
+#ifdef SER_CONNECTIVITY
+#include "ser_phy_config_conn.h"
+#else
+#include "ser_phy_config_app.h"
+#endif
+#include "app_usbd_cdc_acm.h"
+#include "nrf_drv_clock.h"
+#include "app_error.h"
+#include "app_util_platform.h"
+
+#define NRF_LOG_MODULE_NAME sphy_cdc
+#include "nrf_log.h"
+NRF_LOG_MODULE_REGISTER();
+
+#define APP_SLIP_END 0xC0 /**< SLIP code for identifying the beginning and end of a packet frame.. */
+#define APP_SLIP_ESC 0xDB /**< SLIP escape code. This code is used to specify that the following character is specially encoded. */
+#define APP_SLIP_ESC_END 0xDC /**< SLIP special code. When this code follows 0xDB, this character is interpreted as payload data 0xC0.. */
+#define APP_SLIP_ESC_ESC 0xDD /**< SLIP special code. When this code follows 0xDB, this character is interpreted as payload data 0xDB. */
+
+#define HDR_SIZE 4
+#define CRC_SIZE 2
+#define PKT_SIZE (SER_HAL_TRANSPORT_MAX_PKT_SIZE + HDR_SIZE + CRC_SIZE)
+
+static void cdc_acm_user_ev_handler(app_usbd_class_inst_t const * p_inst,
+ app_usbd_cdc_acm_user_event_t event);
+
+#define CDC_ACM_COMM_INTERFACE 0
+#define CDC_ACM_COMM_EPIN NRF_DRV_USBD_EPIN2
+
+#define CDC_ACM_DATA_INTERFACE 1
+#define CDC_ACM_DATA_EPIN NRF_DRV_USBD_EPIN1
+#define CDC_ACM_DATA_EPOUT NRF_DRV_USBD_EPOUT1
+
+APP_USBD_CDC_ACM_GLOBAL_DEF(m_app_cdc_acm,
+ cdc_acm_user_ev_handler,
+ CDC_ACM_COMM_INTERFACE,
+ CDC_ACM_DATA_INTERFACE,
+ CDC_ACM_COMM_EPIN,
+ CDC_ACM_DATA_EPIN,
+ CDC_ACM_DATA_EPOUT,
+ APP_USBD_CDC_COMM_PROTOCOL_NONE
+);
+
+static bool volatile m_port_open;
+
+typedef struct {
+ ser_phy_hci_pkt_params_t header;
+ ser_phy_hci_pkt_params_t payload;
+ ser_phy_hci_pkt_params_t crc;
+} ser_phy_hci_slip_pkt_t;
+static ser_phy_hci_slip_pkt_t m_tx_curr_packet;
+static ser_phy_hci_slip_pkt_t m_tx_next_packet;
+
+static ser_phy_hci_slip_evt_t m_ser_phy_hci_slip_event;
+static ser_phy_hci_slip_event_handler_t m_ser_phy_hci_slip_event_handler; /**< Event handler for upper layer */
+
+static uint8_t m_tx_buf0[NRF_DRV_USBD_EPSIZE];
+static uint8_t m_tx_buf1[NRF_DRV_USBD_EPSIZE];
+static uint8_t * mp_tx_buf;
+static uint8_t m_tx_bytes;
+
+static enum {
+ PHASE_BEGIN,
+ PHASE_HEADER,
+ PHASE_PAYLOAD,
+ PHASE_CRC,
+ PHASE_ACK_END,
+ // The following three elements have to have consecutive values,
+ // 'tx_buf_fill()' relies on this.
+ PHASE_PACKET_END,
+ PHASE_PRE_IDLE = PHASE_PACKET_END + 1,
+ PHASE_IDLE = PHASE_PRE_IDLE + 1
+} volatile m_tx_phase;
+
+static bool volatile m_tx_in_progress;
+static bool volatile m_tx_pending;
+
+#define NO_EVENT SER_PHY_HCI_SLIP_EVT_TYPE_MAX
+static ser_phy_hci_slip_evt_type_t m_tx_evt_type;
+static ser_phy_hci_slip_evt_type_t m_tx_pending_evt_type;
+
+static ser_phy_hci_pkt_params_t * mp_tx_data = NULL;
+static uint32_t m_tx_index;
+
+static uint8_t m_small_buffer[HDR_SIZE];
+static uint8_t m_big_buffer[PKT_SIZE];
+
+static uint8_t * mp_small_buffer = NULL;
+static uint8_t * mp_big_buffer = NULL;
+static uint8_t * mp_buffer = NULL;
+static uint32_t m_rx_index;
+
+static uint8_t m_rx_byte;
+static bool m_rx_escape;
+
+
+// Puts one SLIP-encoded byte into the TX buffer currently being filled and,
+// when the packet is complete or the buffer is full, starts a CDC ACM write
+// (or schedules it if a transfer is already ongoing).
+// The function returns false to signal that no more bytes can be passed to be
+// sent (put into the TX buffer) until the ongoing transmission is done.
+static bool tx_buf_put(uint8_t data_byte)
+{
+    // Bug fix: the TX buffers in this (USB CDC) variant are sized
+    // NRF_DRV_USBD_EPSIZE, so the bound and flush checks must use the actual
+    // buffer size. Checking against SER_PHY_HCI_SLIP_TX_BUF_SIZE (as the
+    // UART variant does) could overflow the buffer whenever that constant
+    // exceeds the endpoint size.
+    ASSERT(m_tx_bytes < sizeof(m_tx_buf0));
+    mp_tx_buf[m_tx_bytes] = data_byte;
+    ++m_tx_bytes;
+
+    bool flush = false;
+    ser_phy_hci_slip_evt_type_t slip_evt_type = NO_EVENT;
+    if (m_tx_phase == PHASE_ACK_END)
+    {
+        // Send buffer, then signal that an acknowledge packet has been sent.
+        flush = true;
+        slip_evt_type = SER_PHY_HCI_SLIP_EVT_ACK_SENT;
+    }
+    else if (m_tx_phase == PHASE_PACKET_END)
+    {
+        // Send buffer, then signal that a packet with payload has been sent.
+        flush = true;
+        slip_evt_type = SER_PHY_HCI_SLIP_EVT_PKT_SENT;
+    }
+    else if (m_tx_bytes >= sizeof(m_tx_buf0))
+    {
+        // Send buffer (because it is filled up), but don't signal anything,
+        // since the packet sending is not complete yet.
+        flush = true;
+    }
+
+    if (flush)
+    {
+        // If some TX transfer is being done at the moment, a new one cannot be
+        // started, it must be scheduled to be performed later.
+        if (m_tx_in_progress)
+        {
+            m_tx_pending_evt_type = slip_evt_type;
+            m_tx_pending = true;
+            // No more buffers available, can't continue filling.
+            return false;
+        }
+
+        if (m_port_open)
+        {
+            m_tx_in_progress = true;
+            m_tx_evt_type = slip_evt_type;
+            APP_ERROR_CHECK(app_usbd_cdc_acm_write(&m_app_cdc_acm,
+                                                   mp_tx_buf, m_tx_bytes));
+        }
+        // When the port is closed, no transfer is started and no event is
+        // generated - the buffer content is silently discarded below.
+
+        // Switch to the second buffer.
+        mp_tx_buf = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
+        m_tx_bytes = 0;
+    }
+
+    return true;
+}
+
+// Encodes the current packet (header, payload, CRC) into the TX buffer, byte
+// by byte, applying SLIP framing and escaping. Progress is kept in the
+// file-scope 'mp_tx_data'/'m_tx_index' variables and the static
+// 'tx_escaped_data', so the function can be resumed after a buffer flush
+// made it stop.
+static void tx_buf_fill(void)
+{
+    bool can_continue = true;
+    do {
+        // Second byte of a two-byte escape sequence; 0 when none is pending.
+        static uint8_t tx_escaped_data = 0;
+
+        if (tx_escaped_data != 0)
+        {
+            can_continue = tx_buf_put(tx_escaped_data);
+            tx_escaped_data = 0;
+        }
+        else switch (m_tx_phase)
+        {
+            case PHASE_BEGIN:
+                // Start the frame with the SLIP END marker.
+                can_continue = tx_buf_put(APP_SLIP_END);
+                mp_tx_data = &m_tx_curr_packet.header;
+                m_tx_index = 0;
+                m_tx_phase = PHASE_HEADER;
+                tx_escaped_data = 0;
+                break;
+
+            case PHASE_ACK_END:
+            case PHASE_PACKET_END:
+                can_continue = tx_buf_put(APP_SLIP_END);
+
+                // [this is needed for the '++m_tx_phase;' below]
+                m_tx_phase = PHASE_PACKET_END;
+                // no break, intentional fall-through
+
+            case PHASE_PRE_IDLE:
+                // In PHASE_PRE_IDLE the sending process is almost finished,
+                // only the TX-done notification from the USB CDC ACM driver
+                // is needed before it can switch to PHASE_IDLE.
+                // But during this waiting a new packet may appear
+                // (i.e. 'ser_phy_hci_slip_tx_pkt_send()' may be called), hence
+                // the following pointer must be checked before switching the phase,
+                // just like right after writing whole packet to buffer (i.e. in
+                // PHASE_PACKET_END). Therefore, the following code is common for
+                // these two cases.
+                if (m_tx_next_packet.header.p_buffer != NULL)
+                {
+                    m_tx_curr_packet = m_tx_next_packet;
+                    m_tx_next_packet.header.p_buffer = NULL;
+
+                    m_tx_phase = PHASE_BEGIN;
+                    break;
+                }
+                // Go to the next phase:
+                //   PHASE_PACKET_END -> PHASE_PRE_IDLE
+                //   PHASE_PRE_IDLE   -> PHASE_IDLE
+                ++m_tx_phase;
+                return;
+
+            default:
+                ASSERT(mp_tx_data->p_buffer != NULL);
+                uint8_t data = mp_tx_data->p_buffer[m_tx_index];
+                ++m_tx_index;
+
+                if (data == APP_SLIP_END)
+                {
+                    // 0xC0 in the data is sent as the ESC, ESC_END sequence.
+                    data = APP_SLIP_ESC;
+                    tx_escaped_data = APP_SLIP_ESC_END;
+                }
+                else if (data == APP_SLIP_ESC)
+                {
+                    // 0xDB in the data is sent as the ESC, ESC_ESC sequence.
+                    tx_escaped_data = APP_SLIP_ESC_ESC;
+                }
+                can_continue = tx_buf_put(data);
+
+                // Advance to the next fragment when the current one is done.
+                if (m_tx_index >= mp_tx_data->num_of_bytes)
+                {
+                    mp_tx_data->p_buffer = NULL;
+
+                    if (m_tx_phase == PHASE_HEADER)
+                    {
+                        if (m_tx_curr_packet.payload.p_buffer == NULL)
+                        {
+                            // No payload -> ACK packet.
+                            m_tx_phase = PHASE_ACK_END;
+                        }
+                        else
+                        {
+                            mp_tx_data = &m_tx_curr_packet.payload;
+                            m_tx_index = 0;
+                            m_tx_phase = PHASE_PAYLOAD;
+                        }
+                    }
+                    else if (m_tx_phase == PHASE_PAYLOAD)
+                    {
+                        if (m_tx_curr_packet.crc.p_buffer == NULL)
+                        {
+                            // Packet without CRC.
+                            m_tx_phase = PHASE_PACKET_END;
+                        }
+                        else
+                        {
+                            mp_tx_data = &m_tx_curr_packet.crc;
+                            m_tx_index = 0;
+                            m_tx_phase = PHASE_CRC;
+                        }
+                    }
+                    else
+                    {
+                        ASSERT(m_tx_phase == PHASE_CRC);
+                        m_tx_phase = PHASE_PACKET_END;
+                    }
+                }
+                break;
+        }
+    } while (can_continue);
+}
+
+// Copies the header/payload/CRC descriptors into 'p_pkt'. A NULL descriptor
+// is recorded as a NULL buffer pointer, which the TX state machine interprets
+// as "this part is absent".
+static void tx_pkt_params_store(ser_phy_hci_slip_pkt_t * p_pkt,
+                                const ser_phy_hci_pkt_params_t * p_header,
+                                const ser_phy_hci_pkt_params_t * p_payload,
+                                const ser_phy_hci_pkt_params_t * p_crc)
+{
+    p_pkt->header = *p_header;
+
+    if (p_payload == NULL)
+    {
+        p_pkt->payload.p_buffer = NULL;
+    }
+    else
+    {
+        p_pkt->payload = *p_payload;
+    }
+
+    if (p_crc == NULL)
+    {
+        p_pkt->crc.p_buffer = NULL;
+    }
+    else
+    {
+        p_pkt->crc = *p_crc;
+    }
+}
+
+/**@brief Queues an HCI SLIP packet for transmission over USB CDC ACM.
+ *
+ * @param[in] p_header  Packet header descriptor (mandatory).
+ * @param[in] p_payload Packet payload descriptor, or NULL for an ACK packet.
+ * @param[in] p_crc     Packet CRC descriptor, or NULL when no CRC is sent.
+ *
+ * @retval NRF_SUCCESS    Packet accepted (started, queued as next, or
+ *                        silently dropped because the port is closed).
+ * @retval NRF_ERROR_NULL p_header is NULL.
+ *
+ * NOTE(review): if a "next" packet is already queued, it is silently
+ * overwritten by this call - confirm callers serialize their send requests.
+ */
+uint32_t ser_phy_hci_slip_tx_pkt_send(const ser_phy_hci_pkt_params_t * p_header,
+                                      const ser_phy_hci_pkt_params_t * p_payload,
+                                      const ser_phy_hci_pkt_params_t * p_crc)
+{
+    if (p_header == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    // When the CDC ACM port is not opened by the host, the packet is dropped;
+    // NRF_SUCCESS is returned so that the upper layer is not stalled.
+    if (!m_port_open)
+    {
+        return NRF_SUCCESS;
+    }
+
+    CRITICAL_REGION_ENTER();
+
+    // If some packet is already transmitted, schedule this new one to be sent
+    // as next. A critical region is needed here to ensure that the transmission
+    // won't finish before the following assignments are done.
+    if (m_tx_phase != PHASE_IDLE)
+    {
+        tx_pkt_params_store(&m_tx_next_packet, p_header, p_payload, p_crc);
+    }
+    else
+    {
+        tx_pkt_params_store(&m_tx_curr_packet, p_header, p_payload, p_crc);
+
+        m_tx_phase = PHASE_BEGIN;
+        tx_buf_fill();
+    }
+
+    CRITICAL_REGION_EXIT();
+
+    return NRF_SUCCESS;
+}
+
+/* Function returns false when last byte in packet is detected.*/
+static bool slip_decode(uint8_t * p_received_byte)
+{
+ switch (*p_received_byte)
+ {
+ case APP_SLIP_END:
+ return false;
+
+ case APP_SLIP_ESC:
+ m_rx_escape = true;
+ break;
+
+ case APP_SLIP_ESC_END:
+
+ if (m_rx_escape == true)
+ {
+ m_rx_escape = false;
+ *p_received_byte = APP_SLIP_END;
+ }
+ break;
+
+ case APP_SLIP_ESC_ESC:
+
+ if (m_rx_escape == true)
+ {
+ m_rx_escape = false;
+ *p_received_byte = APP_SLIP_ESC;
+ }
+ break;
+
+ /* Normal character - decoding not needed*/
+ default:
+ break;
+ }
+
+ return true;
+}
+
+
/**@brief Feeds one raw (still SLIP-encoded) byte into the RX de-framing
 *        state machine.
 *
 * Bytes are first accumulated into the small (ACK-sized) buffer; if a packet
 * outgrows it and the big buffer is free, reception transparently switches to
 * the big buffer. When the terminating APP_SLIP_END byte arrives, the packet
 * is reported via SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED and the used buffer is
 * locked until ser_phy_hci_slip_rx_buf_free() releases it. Oversized packets
 * and packets arriving while both buffers are locked are dropped silently.
 */
static void ser_phi_hci_rx_byte(uint8_t rx_byte)
{
    // rx_sync: a start delimiter has been seen and a packet is being
    // assembled. big_buff_in_use: the current packet started directly in the
    // big buffer because the small one was locked at that moment.
    static bool rx_sync = false;
    uint8_t received_byte = rx_byte;
    static bool big_buff_in_use = false;

    /* Test received byte for SLIP packet start: 0xC0*/
    if (!rx_sync)
    {
        if (received_byte == APP_SLIP_END)
        {
            m_rx_index = 0;
            rx_sync = true;
        }
        return;
    }

    /* Additional check needed in case rx_sync flag was set by end of previous packet*/
    if ((m_rx_index) == 0 && (received_byte == APP_SLIP_END))
    {
        return;
    }

    /* Check if small (ACK) buffer is available*/
    if ((mp_small_buffer != NULL) && (big_buff_in_use == false))
    {
        if (m_rx_index == 0)
        {
            mp_buffer = mp_small_buffer;
        }

        /* Check if switch between small and big buffer is needed*/
        if (m_rx_index == sizeof (m_small_buffer) /*NEW!!!*/ && received_byte != APP_SLIP_END)
        {
            /* Check if big (PKT) buffer is available*/
            if (mp_big_buffer != NULL)
            {
                /* Switch to big buffer: carry over what was already received. */
                memcpy(m_big_buffer, m_small_buffer, sizeof (m_small_buffer));
                mp_buffer = m_big_buffer;
            }
            else
            {
                /* Small buffer is too small and big buffer not available - cannot continue reception*/
                rx_sync = false;
                return;
            }
        }

        /* Check if big buffer is full */
        if ((m_rx_index >= PKT_SIZE) && (received_byte != APP_SLIP_END))
        {
            /* Do not notify upper layer - the packet is too big and cannot be handled by slip */
            rx_sync = false;
            return;
        }

        /* Decode byte. Will return false when it is 0xC0 - end of packet*/
        if (slip_decode(&received_byte))
        {
            /* Write Rx byte only if it is not escape char */
            if (!m_rx_escape)
            {
                mp_buffer[m_rx_index++] = received_byte;
            }
        }
        else
        {
            /* Reset pointers to signal that the buffer is locked, waiting for
             * the upper layer to free it. */
            if (mp_buffer == mp_small_buffer)
            {
                mp_small_buffer = NULL;
            }
            else
            {
                mp_big_buffer = NULL;
            }
            /* Report packet reception end*/
            m_ser_phy_hci_slip_event.evt_type =
                SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.p_buffer     = mp_buffer;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.num_of_bytes = m_rx_index;
            m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);

            rx_sync = false;
        }
    }
    else if (mp_big_buffer != NULL)
    {
        // Small buffer locked: assemble this packet in the big buffer from
        // the start.
        big_buff_in_use = true;
        mp_buffer       = mp_big_buffer;

        /* Check if big buffer is full */
        if ((m_rx_index >= PKT_SIZE) && (received_byte != APP_SLIP_END))
        {
            /* Do not notify upper layer - the packet is too big and cannot be handled by slip */
            rx_sync = false;
            return;
        }

        /* Decode byte*/
        if (slip_decode(&received_byte))
        {
            /* Write Rx byte only if it is not escape char */
            if (!m_rx_escape)
            {
                mp_buffer[m_rx_index++] = received_byte;
            }
        }
        else
        {
            // Mark the big buffer as locked (it should be freed by the upper
            // layer).
            mp_big_buffer   = NULL;
            big_buff_in_use = false;

            /* Report packet reception end*/
            m_ser_phy_hci_slip_event.evt_type =
                SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.p_buffer     = mp_buffer;
            m_ser_phy_hci_slip_event.evt_params.received_pkt.num_of_bytes = m_rx_index;
            m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);

            rx_sync = false;
        }
    }
    else
    {
        /* Both buffers are not available - cannot continue reception*/
        rx_sync = false;
        return;
    }
}
+
+
+uint32_t ser_phy_hci_slip_rx_buf_free(uint8_t * p_buffer)
+{
+ uint32_t err_code = NRF_SUCCESS;
+
+ if (p_buffer == NULL)
+ {
+ return NRF_ERROR_NULL;
+ }
+ else if (p_buffer == m_small_buffer)
+ {
+ /* Free small buffer*/
+ if (mp_small_buffer == NULL)
+ {
+ mp_small_buffer = m_small_buffer;
+ }
+ else
+ {
+ err_code = NRF_ERROR_INVALID_STATE;
+ }
+ }
+ else if (p_buffer == m_big_buffer)
+ {
+ /* Free big buffer*/
+ if (mp_big_buffer == NULL)
+ {
+ mp_big_buffer = m_big_buffer;
+ }
+ else
+ {
+ err_code = NRF_ERROR_INVALID_STATE;
+ }
+ }
+
+ return err_code;
+}
+
+
/**@brief USB CDC ACM user event handler.
 *
 * Drives both directions of the transport:
 * - PORT_OPEN / PORT_CLOSE toggle m_port_open and prime byte-by-byte reads;
 * - TX_DONE advances the double-buffered transmit pipeline and notifies the
 *   upper layer;
 * - RX_DONE pumps each received byte into the SLIP de-framer and re-arms the
 *   single-byte read.
 */
static void cdc_acm_user_ev_handler(app_usbd_class_inst_t const * p_inst,
                                    app_usbd_cdc_acm_user_event_t event)
{
    app_usbd_cdc_acm_t const * p_cdc_acm = app_usbd_cdc_acm_class_get(p_inst);

    switch (event)
    {
        case APP_USBD_CDC_ACM_USER_EVT_PORT_OPEN:
            NRF_LOG_DEBUG("EVT_PORT_OPEN");
            if (!m_port_open)
            {
                ret_code_t ret_code;

                m_port_open = true;

                // Drain any bytes already buffered by the stack; the loop
                // ends once a read goes asynchronous (IO_PENDING).
                do {
                    ret_code = app_usbd_cdc_acm_read(p_cdc_acm, &m_rx_byte, 1);
                    if (ret_code == NRF_SUCCESS)
                    {
                        ser_phi_hci_rx_byte(m_rx_byte);
                    }
                    else if (ret_code != NRF_ERROR_IO_PENDING)
                    {
                        APP_ERROR_CHECK(ret_code);
                    }
                } while (ret_code == NRF_SUCCESS);
            }
            break;

        case APP_USBD_CDC_ACM_USER_EVT_PORT_CLOSE:
            NRF_LOG_DEBUG("EVT_PORT_CLOSE");
            m_port_open = false;
            break;

        case APP_USBD_CDC_ACM_USER_EVT_TX_DONE:
            // If there is a pending transfer (the second buffer is ready to
            // be sent), start it immediately.
            if (m_tx_pending)
            {
                APP_ERROR_CHECK(app_usbd_cdc_acm_write(p_cdc_acm,
                                                       mp_tx_buf, m_tx_bytes));

                // Switch to the buffer that has just been sent completely
                // and now can be filled again.
                mp_tx_buf  = (mp_tx_buf == m_tx_buf0) ? m_tx_buf1 : m_tx_buf0;
                m_tx_bytes = 0;

                // Shift the event associated with the buffer just sent into
                // the "current" slot; the pending slot moves up behind it.
                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
                m_tx_evt_type = m_tx_pending_evt_type;

                m_tx_pending = false;
            }
            else
            {
                m_tx_in_progress = false;
                m_ser_phy_hci_slip_event.evt_type = m_tx_evt_type;
            }
            // If needed, notify the upper layer that the packet transfer is
            // complete (note that this notification may result in another
            // packet send request, so everything must be cleaned up above).
            if (m_ser_phy_hci_slip_event.evt_type != NO_EVENT)
            {
                m_ser_phy_hci_slip_event_handler(&m_ser_phy_hci_slip_event);
            }
            // And if the sending process is not yet finished, look what is
            // to be done next.
            if (m_tx_phase != PHASE_IDLE)
            {
                tx_buf_fill();
            }
            break;

        case APP_USBD_CDC_ACM_USER_EVT_RX_DONE:
        {
            ret_code_t ret_code;
            // Process the byte that completed, then keep reading until the
            // next read goes asynchronous.
            do
            {
                ser_phi_hci_rx_byte(m_rx_byte);

                ret_code = app_usbd_cdc_acm_read(p_cdc_acm, &m_rx_byte, 1);
            } while (ret_code == NRF_SUCCESS);
        }
            break;

        default:
            break;
    }
}
+
+uint32_t ser_phy_hci_slip_open(ser_phy_hci_slip_event_handler_t events_handler)
+{
+ if (events_handler == NULL)
+ {
+ return NRF_ERROR_NULL;
+ }
+
+ // Check if function was not called before.
+ if (m_ser_phy_hci_slip_event_handler != NULL)
+ {
+ return NRF_ERROR_INVALID_STATE;
+ }
+
+ ret_code_t ret = app_usbd_class_append(
+ app_usbd_cdc_acm_class_inst_get(&m_app_cdc_acm));
+ if (ret != NRF_SUCCESS)
+ {
+ return ret;
+ }
+
+
+ m_ser_phy_hci_slip_event_handler = events_handler;
+
+ mp_tx_buf = m_tx_buf0;
+ m_tx_bytes = 0;
+ m_tx_phase = PHASE_IDLE;
+ m_tx_in_progress = false;
+ m_tx_pending = false;
+
+ m_rx_escape = false;
+ mp_small_buffer = m_small_buffer;
+ mp_big_buffer = m_big_buffer;
+
+ return NRF_SUCCESS;
+}
+
+
/**@brief Closes the HCI SLIP PHY by unregistering the event handler, which
 *        re-enables ser_phy_hci_slip_open().
 *
 * NOTE(review): the CDC ACM class appended in ser_phy_hci_slip_open() is not
 * removed here, so a subsequent re-open appends it again - confirm this is
 * acceptable for the app_usbd stack.
 */
void ser_phy_hci_slip_close(void)
{
    m_ser_phy_hci_slip_event_handler = NULL;
}
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_nohci.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_nohci.c
new file mode 100644
index 0000000..4580086
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_nohci.c
@@ -0,0 +1,382 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/**@file
+ *
+ * @defgroup ser_phy_nohci ser_phy_nohci.c
+ * @{
+ * @ingroup ser_phy_nohci
+ *
+ * @brief PHY layer without HCI flow control, implemented on top of the HCI SLIP transport.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include "app_error.h"
+#include "app_util.h"
+#include "app_util_platform.h"
+#include "app_timer.h"
+#include "ser_phy.h"
+#include "ser_phy_hci.h"
+#include "crc16.h"
+#include "nrf_soc.h"
+
+#include "ser_phy_debug_comm.h"
+
static bool m_flag_nohci_init = false;        // Set once ser_phy_open() succeeds.
static bool m_flag_expect_ack;                // Last TX used the SLIP ACK path.
static bool m_flag_buffer_reqested = false;   // RX buffer request is outstanding.

// Packet currently held in a SLIP RX buffer (first of up to two).
static uint16_t m_rx_packet_length;
static uint8_t * m_p_rx_packet;

// Second received packet, queued until the first one is consumed.
static uint16_t m_rx_pending_packet_length;
static uint8_t * m_p_rx_pending_packet;

// Packet for which a buffer request was issued to the upper layer.
static uint16_t m_rx_allocated_packet_length;
static uint8_t * m_p_rx_allocated_packet;

// TX packet passed to ser_phy_tx_pkt_send(); NULL while TX is idle.
static uint8_t * m_p_tx_packet = NULL;
static uint16_t m_tx_packet_length;

// Upper-layer event handler registered via ser_phy_open().
static ser_phy_events_handler_t m_ser_phy_callback = NULL;

#define PKT_HDR_SIZE 4 /**< Packet header size in number of bytes. */
#define PKT_CRC_SIZE 2 /**< Packet CRC size in number of bytes. */
+
/* Triggers the application error handler when the condition is false. */
static void ser_phy_nohci_assert(bool cond)
{
    APP_ERROR_CHECK_BOOL(cond);
}
+
+
+static void ser_phy_event_callback(ser_phy_evt_t event)
+{
+ if (m_ser_phy_callback)
+ {
+ m_ser_phy_callback(event);
+ }
+}
+
+
+static void memory_request_callback(uint16_t size)
+{
+ ser_phy_evt_t event;
+
+ DEBUG_EVT_HCI_PHY_EVT_BUF_REQUEST(0);
+
+ event.evt_type = SER_PHY_EVT_RX_BUF_REQUEST;
+ event.evt_params.rx_buf_request.num_of_bytes = size;
+ ser_phy_event_callback(event);
+}
+
+
+static void packet_received_callback(uint8_t * pBuffer, uint16_t size)
+{
+ ser_phy_evt_t event;
+
+ DEBUG_EVT_HCI_PHY_EVT_RX_PKT_RECEIVED(0);
+
+ event.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
+ event.evt_params.rx_pkt_received.num_of_bytes = size;
+ event.evt_params.rx_pkt_received.p_buffer = pBuffer;
+ ser_phy_event_callback(event);
+}
+
+
+static void packet_dropped_callback(void)
+{
+ ser_phy_evt_t event;
+
+ DEBUG_EVT_HCI_PHY_EVT_RX_PKT_DROPPED(0);
+
+ event.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
+ ser_phy_event_callback(event);
+}
+
+
+static void packet_transmitted_callback(void)
+{
+ ser_phy_evt_t event;
+
+ DEBUG_EVT_HCI_PHY_EVT_TX_PKT_SENT(0);
+
+ event.evt_type = SER_PHY_EVT_TX_PKT_SENT;
+ ser_phy_event_callback(event);
+}
+
+
+static void hci_slip_event_handler(ser_phy_hci_slip_evt_t * p_event)
+{
+ if ( p_event->evt_type == SER_PHY_HCI_SLIP_EVT_PKT_SENT )
+ {
+ DEBUG_EVT_SLIP_PACKET_TXED(0);
+
+ if (!m_flag_expect_ack)
+ {
+ m_p_tx_packet = NULL;
+ packet_transmitted_callback();
+ }
+ else
+ {
+ ser_phy_nohci_assert(false); // packet was send as a ACK packet, callback should be with ACK_SENT
+ }
+
+ }
+ else if ( p_event->evt_type == SER_PHY_HCI_SLIP_EVT_ACK_SENT )
+ {
+ DEBUG_EVT_SLIP_ACK_TXED(0);
+
+ if (m_flag_expect_ack)
+ {
+ m_p_tx_packet = NULL;
+ packet_transmitted_callback();
+ }
+ else
+ {
+ ser_phy_nohci_assert(false); // packet was send as a normal packet, callback should be with PKT_SENT
+ }
+
+ }
+ else if ( p_event->evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED )
+ {
+ CRITICAL_REGION_ENTER();
+
+ if (m_p_rx_packet == NULL)
+ {
+ m_p_rx_packet = p_event->evt_params.received_pkt.p_buffer;
+ m_rx_packet_length = p_event->evt_params.received_pkt.num_of_bytes;
+ m_p_rx_allocated_packet = m_p_rx_packet;
+ m_rx_allocated_packet_length = m_rx_packet_length;
+ m_flag_buffer_reqested = true;
+ memory_request_callback(m_rx_allocated_packet_length);
+ }
+ else if (m_p_rx_pending_packet == NULL)
+ {
+ m_p_rx_pending_packet = p_event->evt_params.received_pkt.p_buffer;
+ m_rx_pending_packet_length = p_event->evt_params.received_pkt.num_of_bytes;
+ }
+ else
+ {
+ // both buffers are not released; this is fault
+ ser_phy_nohci_assert(false);
+ }
+ CRITICAL_REGION_EXIT();
+ }
+ else
+ {
+ // no other callbacks are expected
+ ser_phy_nohci_assert(false);
+ }
+}
+
+
+/* ser_phy API function */
+void ser_phy_interrupts_enable(void)
+{
+
+ NVIC_EnableIRQ(UART0_IRQn);
+ return;
+}
+
+
+/* ser_phy API function */
+void ser_phy_interrupts_disable(void)
+{
+ NVIC_DisableIRQ(UART0_IRQn);
+ return;
+}
+
+
/* ser_phy API function */
/**@brief Delivers the buffer requested via SER_PHY_EVT_RX_BUF_REQUEST.
 *
 * Copies the allocated packet into @p p_buffer (or reports a drop when
 * @p p_buffer is NULL), releases the SLIP buffers that are no longer needed,
 * and - if a second packet is pending - immediately issues the next buffer
 * request to the upper layer.
 *
 * @retval NRF_SUCCESS     Buffer accepted and processed.
 * @retval NRF_ERROR_BUSY  No buffer request is currently outstanding.
 */
uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
{
    uint32_t status = NRF_SUCCESS;

    if (m_flag_buffer_reqested)
    {
        m_flag_buffer_reqested = false;

        if (p_buffer)
        {
            memcpy(p_buffer, m_p_rx_allocated_packet, m_rx_allocated_packet_length);
            packet_received_callback(p_buffer, m_rx_allocated_packet_length);
        }
        else
        {
            // Upper layer could not provide memory - the packet is dropped.
            packet_dropped_callback();
        }

        // The critical region guards against a new PKT_RECEIVED event
        // modifying m_p_rx_packet / m_p_rx_pending_packet mid-update.
        CRITICAL_REGION_ENTER();

        if (m_p_rx_allocated_packet == m_p_rx_packet && (m_p_rx_pending_packet == NULL))
        {
            // packet is copied and there is no pending packet
            (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_packet);
            m_p_rx_packet           = NULL;
            m_p_rx_allocated_packet = NULL;
        }
        else if (m_p_rx_allocated_packet == m_p_rx_packet && (m_p_rx_pending_packet != NULL))
        {
            // there is a pending packet - request memory for it
            m_p_rx_allocated_packet      = m_p_rx_pending_packet;
            m_rx_allocated_packet_length = m_rx_pending_packet_length;
            m_flag_buffer_reqested       = true;
        }
        else if (m_p_rx_allocated_packet == m_p_rx_pending_packet )
        {
            // the pending packet was serviced - release both
            m_p_rx_allocated_packet = NULL;
            (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_packet);
            m_p_rx_packet = NULL;
            (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_pending_packet);
            m_p_rx_pending_packet = NULL;
        }
        else
        {
            // no other calls are expected
            ser_phy_nohci_assert(false);
        }
        CRITICAL_REGION_EXIT();

        // Request memory for a pending packet (done outside the critical
        // region since the callback may run upper-layer code).
        if (m_p_rx_allocated_packet)
        {
            memory_request_callback(m_rx_allocated_packet_length);
        }
    }
    else
    {
        status = NRF_ERROR_BUSY;
    }
    return status;
}
+
+
+/* ser_phy API function */
+uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
+{
+ uint32_t status = NRF_SUCCESS;
+ uint32_t err_code;
+
+ if ( p_buffer == NULL || num_of_bytes == 0)
+ {
+ return NRF_ERROR_NULL;
+ }
+
+ if ( m_p_tx_packet == NULL)
+ {
+ m_tx_packet_length = num_of_bytes;
+ m_p_tx_packet = (uint8_t *)p_buffer;
+
+ if (m_tx_packet_length <= PKT_HDR_SIZE + PKT_CRC_SIZE)
+ {
+ ser_phy_hci_pkt_params_t pkt; // all packets smaller than 6 goes as ACK
+
+ m_flag_expect_ack = true;
+ pkt.p_buffer = (uint8_t *)m_p_tx_packet;
+ pkt.num_of_bytes = m_tx_packet_length;
+ DEBUG_EVT_SLIP_ACK_TX(0);
+ err_code = ser_phy_hci_slip_tx_pkt_send(&pkt, NULL, NULL); // this will look like ACK for slip
+ ser_phy_nohci_assert(err_code == NRF_SUCCESS);
+ }
+ else
+ {
+ ser_phy_hci_pkt_params_t header; // this is fake header - just first 4 bytes
+ ser_phy_hci_pkt_params_t crc; // this is fake header - just last 2 bytes
+ ser_phy_hci_pkt_params_t payload; // this is fake payload - all except for header and crc
+
+ m_flag_expect_ack = false;
+ header.p_buffer = (uint8_t *)m_p_tx_packet;
+ header.num_of_bytes = PKT_HDR_SIZE;
+ crc.p_buffer = (uint8_t *)m_p_tx_packet + m_tx_packet_length - PKT_CRC_SIZE;
+ crc.num_of_bytes = PKT_CRC_SIZE;
+ payload.p_buffer = (uint8_t *)m_p_tx_packet + PKT_HDR_SIZE;
+ payload.num_of_bytes = m_tx_packet_length - PKT_HDR_SIZE - PKT_CRC_SIZE;
+ DEBUG_EVT_SLIP_PACKET_TX(0);
+ err_code = ser_phy_hci_slip_tx_pkt_send(&header, &payload, &crc); // this will look like normal packet for slip
+ ser_phy_nohci_assert(err_code == NRF_SUCCESS);
+ }
+ }
+ else
+ {
+ status = NRF_ERROR_BUSY;
+ }
+
+ return status;
+}
+
+
+/* ser_phy API function */
+uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
+{
+ uint32_t err_code;
+
+ if (m_flag_nohci_init)
+ {
+ return NRF_ERROR_INVALID_STATE;
+ }
+
+ if (events_handler == NULL)
+ {
+ return NRF_ERROR_NULL;
+ }
+ err_code = ser_phy_hci_slip_open(hci_slip_event_handler);
+
+ if (err_code != NRF_SUCCESS)
+ {
+ return err_code;
+ }
+
+ m_ser_phy_callback = events_handler;
+ m_flag_nohci_init = true;
+ return NRF_SUCCESS;
+}
+
+
/* ser_phy API function */
/**@brief Closes the noHCI PHY: unregisters the upper-layer callback, closes
 *        the underlying SLIP transport, and allows ser_phy_open() again.
 */
void ser_phy_close(void)
{
    // Clear the callback before closing the SLIP layer so late events are
    // ignored by ser_phy_event_callback().
    m_ser_phy_callback = NULL;
    ser_phy_hci_slip_close();
    m_flag_nohci_init = false;
}
+
+
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_5W_master.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_5W_master.c
new file mode 100644
index 0000000..ade86e5
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_5W_master.c
@@ -0,0 +1,823 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/**@file
+ *
+ * @defgroup ser_phy_spi_5W_phy_driver_master ser_phy_nrf51_spi_5W_master.c
+ * @{
+ * @ingroup ser_phy_spi_5W_phy_driver_master
+ *
+ * @brief SPI_5W_RAW PHY master driver.
+ */
+
+#include <stdio.h>
+#include "app_util.h"
+#include "app_util_platform.h"
+#include "boards.h"
+#include "nrf_error.h"
+#include "nrf_gpio.h"
+#include "nrf_drv_gpiote.h"
+#include "ser_config.h"
+#include "ser_config_5W_app.h"
+#include "ser_phy.h"
+#include "ser_phy_config_app.h"
+#include "spi_5W_master.h"
+#include "ser_phy_debug_app.h"
+#include "app_error.h"
// Selects the software IRQ used to serialize state-machine processing.
// The define below intentionally spells "notUSE_PendSV", so the #else
// branch (SWI3) is the one actually compiled.
#define notUSE_PendSV

#ifdef USE_PendSV
#define SW_IRQn PendSV_IRQn
#define SW_IRQ_Handler() PendSV_Handler()
#define SET_Pend_SW_IRQ() SCB->ICSR = SCB->ICSR | SCB_ICSR_PENDSVSET_Msk //NVIC_SetPendingIRQ(PendSV_IRQn) - PendSV_IRQn is a negative - does not work with CMSIS
#else
#define SW_IRQn SWI3_IRQn
#define SW_IRQ_Handler() SWI3_IRQHandler()
#define SET_Pend_SW_IRQ() NVIC_SetPendingIRQ(SWI3_IRQn)
#endif

#define SER_PHY_SPI_5W_MTU_SIZE SER_PHY_SPI_MTU_SIZE

// States of the SPI 5-wire master PHY state machine
// (driven by ser_phy_switch_state()).
typedef enum
{
    SER_PHY_STATE_IDLE = 0,
    SER_PHY_STATE_TX_HEADER,
    SER_PHY_STATE_TX_WAIT_FOR_RDY,
    SER_PHY_STATE_TX_PAYLOAD,
    SER_PHY_STATE_RX_WAIT_FOR_RDY,
    SER_PHY_STATE_TX_ZERO_HEADER,
    SER_PHY_STATE_RX_HEADER,
    SER_PHY_STATE_MEMORY_REQUEST,
    SER_PHY_STATE_RX_PAYLOAD,
    SER_PHY_STATE_DISABLED
} ser_phy_spi_master_state_t;

// Event sources that can drive a state transition.
typedef enum
{
    SER_PHY_EVT_GPIO_RDY = 0,
    SER_PHY_EVT_GPIO_REQ,
    SER_PHY_EVT_SPI_TRANSFER_DONE,
    SER_PHY_EVT_TX_API_CALL,
    SER_PHY_EVT_RX_API_CALL
} ser_phy_event_source_t;

// Allows the module's "static" storage to be made externally visible
// (presumably for debugging) by redefining _static - TODO confirm intent.
#define _static static

// TX buffer handed in by the upper layer; NULL while idle.
_static uint8_t * mp_tx_buffer = NULL;
_static uint16_t m_tx_buf_len = 0;

// RX buffer provided by the upper layer, plus the raw SPI receive area.
_static uint8_t * mp_rx_buffer = NULL;
_static uint16_t m_rx_buf_len = 0;
_static uint8_t m_recv_buffer[SER_PHY_SPI_5W_MTU_SIZE];
_static uint8_t m_len_buffer[SER_PHY_HEADER_SIZE + 1] = { 0 }; //len is asymmetric for 5W, there is a 1 byte guard when receiving

// TX progress: total length, bytes already sent, size of current chunk.
_static uint16_t m_tx_packet_length = 0;
_static uint16_t m_accumulated_tx_packet_length = 0;
_static uint16_t m_current_tx_packet_length = 0;

// RX progress: total length, bytes already received, size of current chunk.
_static uint16_t m_rx_packet_length = 0;
_static uint16_t m_accumulated_rx_packet_length = 0;
_static uint16_t m_current_rx_packet_length = 0;

// Deferred-event flags, set in ISR/API context and consumed by
// SW_IRQ_Handler().
_static volatile bool m_pend_req_flag = 0;
_static volatile bool m_pend_rdy_flag = 0;
_static volatile bool m_pend_xfer_flag = 0;
_static volatile bool m_pend_rx_api_flag = 0;
_static volatile bool m_pend_tx_api_flag = 0;

// Level of the slave's RDY/REQ lines (true = line asserted, active low).
_static volatile bool m_slave_ready_flag = false;
_static volatile bool m_slave_request_flag = false;


_static ser_phy_events_handler_t m_callback_events_handler = NULL;
_static ser_phy_spi_master_state_t m_spi_master_state = SER_PHY_STATE_DISABLED;

static void ser_phy_switch_state(ser_phy_event_source_t evt_src);
+
/* Triggers the application error handler when the condition is false. */
static void spi_master_raw_assert(bool cond)
{
    APP_ERROR_CHECK_BOOL(cond);
}
+
/**@brief Software IRQ handler that dispatches deferred PHY events.
 *
 * GPIOTE/SPI callbacks and API calls only set the m_pend_* flags and pend
 * this IRQ; the state machine (ser_phy_switch_state()) always executes here,
 * at one interrupt priority, which serializes event processing. Each flag is
 * cleared before dispatching so an event arriving during processing is not
 * lost.
 */
void SW_IRQ_Handler()
{
    if (m_pend_req_flag)
    {
        m_pend_req_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_REQUEST(0);
        ser_phy_switch_state(SER_PHY_EVT_GPIO_REQ);
    }

    if (m_pend_rdy_flag)
    {
        m_pend_rdy_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_READY(0);
        ser_phy_switch_state(SER_PHY_EVT_GPIO_RDY);
    }

    if (m_pend_xfer_flag)
    {
        m_pend_xfer_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_XFER_DONE(0);
        ser_phy_switch_state(SER_PHY_EVT_SPI_TRANSFER_DONE);
    }

    if (m_pend_rx_api_flag)
    {
        m_pend_rx_api_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_API_CALL(0);
        ser_phy_switch_state(SER_PHY_EVT_RX_API_CALL);
    }

    if (m_pend_tx_api_flag)
    {
        m_pend_tx_api_flag = false;
        DEBUG_EVT_SPI_MASTER_RAW_API_CALL(0);
        ser_phy_switch_state(SER_PHY_EVT_TX_API_CALL);
    }

}
+
+#ifndef _SPI_5W_
+static void ser_phy_spi_master_ready(nrf_drv_gpiote_pin_t pin,
+ nrf_gpiote_polarity_t action)
+{
+ if (nrf_gpio_pin_read(pin) == 0)
+ {
+ m_slave_ready_flag = true;
+ m_pend_rdy_flag = true;
+ }
+ else
+ {
+ m_slave_ready_flag = false;
+ }
+
+ DEBUG_EVT_SPI_MASTER_RAW_READY_EDGE((uint32_t) !m_slave_ready_flag);
+ SET_Pend_SW_IRQ();
+}
+#endif
+
+static void ser_phy_spi_master_request(nrf_drv_gpiote_pin_t pin,
+ nrf_gpiote_polarity_t action)
+{
+ if (nrf_gpio_pin_read(pin) == 0)
+ {
+ m_slave_request_flag = true;
+ m_pend_req_flag = true;
+ }
+ else
+ {
+ m_slave_request_flag = false;
+ }
+
+ DEBUG_EVT_SPI_MASTER_RAW_REQUEST_EDGE((uint32_t) !m_slave_request_flag);
+ SET_Pend_SW_IRQ();
+}
+
+/* Send event SER_PHY_EVT_TX_PKT_SENT */
+static __INLINE void callback_packet_sent()
+{
+ ser_phy_evt_t event;
+
+ event.evt_type = SER_PHY_EVT_TX_PKT_SENT;
+ m_callback_events_handler(event);
+}
+
+/* Send event SER_PHY_EVT_RX_PKT_DROPPED */
+static __INLINE void callback_packet_dropped()
+{
+ ser_phy_evt_t event;
+
+ event.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
+ m_callback_events_handler(event);
+}
+
+/* Send event SER_PHY_EVT_RX_PKT_RECEIVED */
+static __INLINE void callback_packet_received()
+{
+ ser_phy_evt_t event;
+
+ event.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
+ event.evt_params.rx_pkt_received.p_buffer = mp_rx_buffer;
+ event.evt_params.rx_pkt_received.num_of_bytes = m_rx_buf_len;
+ m_callback_events_handler(event);
+}
+
+/* Send event SER_PHY_EVT_RX_BUF_REQUEST */
+static __INLINE void callback_mem_request()
+{
+ ser_phy_evt_t event;
+
+ event.evt_type = SER_PHY_EVT_RX_BUF_REQUEST;
+ event.evt_params.rx_buf_request.num_of_bytes = m_rx_buf_len;
+ m_callback_events_handler(event);
+}
+
+static __INLINE void copy_buff(uint8_t * const p_dest, uint8_t const * const p_src, uint16_t len)
+{
+ uint16_t index;
+
+ for (index = 0; index < len; index++)
+ {
+ p_dest[index] = p_src[index];
+ }
+ return;
+}
+
+static __INLINE void buffer_release(uint8_t * * const pp_buffer, uint16_t * const p_buf_len)
+{
+ *pp_buffer = NULL;
+ *p_buf_len = 0;
+}
+
+static uint16_t compute_current_packet_length(const uint16_t packet_length,
+ const uint16_t accumulated_packet_length)
+{
+ uint16_t current_packet_length = packet_length - accumulated_packet_length;
+
+ if (current_packet_length > SER_PHY_SPI_5W_MTU_SIZE)
+ {
+ current_packet_length = SER_PHY_SPI_5W_MTU_SIZE;
+ }
+
+ return current_packet_length;
+}
+
/* Encodes 'length' into m_len_buffer via uint16_encode() and transmits it as
 * the frame header (TX only, no RX part). Returns the SPI driver status. */
static __INLINE uint32_t header_send(const uint16_t length)
{
    uint16_t buf_len_size = uint16_encode(length, m_len_buffer);

    return spi_master_send_recv(SER_PHY_SPI_MASTER, m_len_buffer, buf_len_size, NULL, 0);
}
+
/* Transmits the next chunk of mp_tx_buffer (at most the SPI MTU) and
 * advances m_accumulated_tx_packet_length by the chunk size.
 * Returns the SPI driver status. */
static __INLINE uint32_t frame_send()
{
    uint32_t err_code;

    m_current_tx_packet_length = compute_current_packet_length(m_tx_packet_length,
                                                               m_accumulated_tx_packet_length);
    err_code =
        spi_master_send_recv(SER_PHY_SPI_MASTER,
                             &mp_tx_buffer[m_accumulated_tx_packet_length],
                             m_current_tx_packet_length,
                             NULL,
                             0);
    m_accumulated_tx_packet_length += m_current_tx_packet_length;
    return err_code;
}
+
/* Starts reception of the frame header into m_len_buffer (RX only).
 * Returns the SPI driver status. */
static __INLINE uint32_t header_get()
{
    return spi_master_send_recv(SER_PHY_SPI_MASTER, NULL, 0, m_len_buffer, SER_PHY_HEADER_SIZE + 1); //add 0 byte guard when receiving
}
+
/* Starts reception of the next chunk into m_recv_buffer. For chunks smaller
 * than the MTU, one extra byte is read to account for the 5-wire guard byte
 * that precedes the data. Returns the SPI driver status. */
static __INLINE uint32_t frame_get()
{
    uint32_t err_code;

    m_current_rx_packet_length = compute_current_packet_length(m_rx_packet_length,
                                                               m_accumulated_rx_packet_length);

    if (m_current_rx_packet_length < SER_PHY_SPI_5W_MTU_SIZE)
    {
        m_current_rx_packet_length++; //take into account guard byte when receiving
    }
    err_code = spi_master_send_recv(SER_PHY_SPI_MASTER,
                                    NULL,
                                    0,
                                    m_recv_buffer,
                                    m_current_rx_packet_length);
    return err_code;
}
+
+/**
+ * \brief Master driver main state machine
+ * Executed only in the context of PendSV_Handler()
+ * For UML graph, please refer to SDK documentation
+*/
+
+static void ser_phy_switch_state(ser_phy_event_source_t evt_src)
+{
+ uint32_t err_code = NRF_SUCCESS;
+ static bool m_waitForReadyFlag = false; //local scheduling flag to defer RDY events
+
+ switch (m_spi_master_state)
+ {
+
+ case SER_PHY_STATE_IDLE:
+
+ if (evt_src == SER_PHY_EVT_GPIO_REQ)
+ {
+ m_waitForReadyFlag = false;
+
+ if (m_slave_ready_flag)
+ {
+ m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
+ err_code = header_send(0);
+ }
+ else
+ {
+ m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
+ }
+ }
+ else if (evt_src == SER_PHY_EVT_TX_API_CALL)
+ {
+ spi_master_raw_assert(mp_tx_buffer != NULL); //api event with tx_buffer == NULL has no sense
+ m_waitForReadyFlag = false;
+
+ if (m_slave_ready_flag)
+ {
+ m_spi_master_state = SER_PHY_STATE_TX_HEADER;
+ err_code = header_send(m_tx_buf_len);
+ }
+ else
+ {
+ m_spi_master_state = SER_PHY_STATE_TX_WAIT_FOR_RDY;
+ }
+ }
+ break;
+
+ case SER_PHY_STATE_TX_WAIT_FOR_RDY:
+
+ if (evt_src == SER_PHY_EVT_GPIO_RDY)
+ {
+ m_spi_master_state = SER_PHY_STATE_TX_HEADER;
+ err_code = header_send(m_tx_buf_len);
+ }
+ break;
+
+ case SER_PHY_STATE_RX_WAIT_FOR_RDY:
+
+ if (evt_src == SER_PHY_EVT_GPIO_RDY)
+ {
+ m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
+ err_code = header_send(0);
+
+ }
+ break;
+
+ case SER_PHY_STATE_TX_HEADER:
+
+ if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+ {
+ m_tx_packet_length = m_tx_buf_len;
+ m_accumulated_tx_packet_length = 0;
+
+ if (m_slave_ready_flag)
+ {
+ m_spi_master_state = SER_PHY_STATE_TX_PAYLOAD;
+ err_code = frame_send();
+
+ }
+ else
+ {
+ m_waitForReadyFlag = true;
+ }
+ }
+ else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_waitForReadyFlag)
+ {
+ m_waitForReadyFlag = false;
+ m_spi_master_state = SER_PHY_STATE_TX_PAYLOAD;
+ err_code = frame_send();
+ }
+
+ break;
+
+ case SER_PHY_STATE_TX_PAYLOAD:
+
+ if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+ {
+ if (m_accumulated_tx_packet_length < m_tx_packet_length)
+ {
+ if (m_slave_ready_flag)
+ {
+ err_code = frame_send();
+ }
+ else
+ {
+ m_waitForReadyFlag = true;
+ }
+ }
+ else
+ {
+ spi_master_raw_assert(m_accumulated_tx_packet_length == m_tx_packet_length);
+ //Release TX buffer
+ buffer_release(&mp_tx_buffer, &m_tx_buf_len);
+ callback_packet_sent();
+
+ if ( m_slave_request_flag)
+ {
+ if (m_slave_ready_flag)
+ {
+ m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
+ err_code = header_send(0);
+ }
+ else
+ {
+ m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
+ }
+ }
+ else
+ {
+ m_spi_master_state = SER_PHY_STATE_IDLE; //m_Tx_buffer is NULL - have to wait for API event
+ }
+ }
+ }
+ else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_waitForReadyFlag )
+ {
+ m_waitForReadyFlag = false;
+ err_code = frame_send();
+ }
+
+ break;
+
+ case SER_PHY_STATE_TX_ZERO_HEADER:
+
+ if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+ {
+ if (m_slave_ready_flag)
+ {
+ m_spi_master_state = SER_PHY_STATE_RX_HEADER;
+ err_code = header_get();
+ }
+ else
+ {
+ m_waitForReadyFlag = true;
+ }
+ }
+ else if ( (evt_src == SER_PHY_EVT_GPIO_RDY) && m_waitForReadyFlag)
+ {
+ m_waitForReadyFlag = false;
+ m_spi_master_state = SER_PHY_STATE_RX_HEADER;
+ err_code = header_get();
+ }
+ break;
+
+ case SER_PHY_STATE_RX_HEADER:
+
+ if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+ {
+ m_spi_master_state = SER_PHY_STATE_MEMORY_REQUEST;
+ m_rx_buf_len = uint16_decode(&(m_len_buffer[1])); //skip guard when receiving
+ m_rx_packet_length = m_rx_buf_len;
+ callback_mem_request();
+ }
+ break;
+
+ case SER_PHY_STATE_MEMORY_REQUEST:
+
+ if (evt_src == SER_PHY_EVT_RX_API_CALL)
+ {
+ m_accumulated_rx_packet_length = 0;
+
+ if (m_slave_ready_flag)
+ {
+ m_spi_master_state = SER_PHY_STATE_RX_PAYLOAD;
+ err_code = frame_get();
+ }
+ else
+ {
+ m_waitForReadyFlag = true;
+ }
+ }
+ else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_waitForReadyFlag)
+ {
+ m_waitForReadyFlag = false;
+ m_spi_master_state = SER_PHY_STATE_RX_PAYLOAD;
+ err_code = frame_get();
+ }
+ break;
+
+ case SER_PHY_STATE_RX_PAYLOAD:
+
+ if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+ {
+ if (mp_rx_buffer)
+ {
+ copy_buff(&(mp_rx_buffer[m_accumulated_rx_packet_length]),
+ &(m_recv_buffer[1]),
+ m_current_rx_packet_length - 1); //skip guard byte when receiving
+ }
+ m_accumulated_rx_packet_length += (m_current_rx_packet_length - 1);
+
+ if (m_accumulated_rx_packet_length < m_rx_packet_length)
+ {
+ if (m_slave_ready_flag)
+ {
+ err_code = frame_get();
+ }
+ else
+ {
+ m_waitForReadyFlag = true;
+ }
+ }
+ else
+ {
+ spi_master_raw_assert(m_accumulated_rx_packet_length == m_rx_packet_length);
+
+ if (mp_rx_buffer == NULL)
+ {
+ callback_packet_dropped();
+ }
+ else
+ {
+ callback_packet_received();
+ }
+ //Release RX buffer
+ buffer_release(&mp_rx_buffer, &m_rx_buf_len);
+
+ if ((mp_tx_buffer != NULL)) //mp_tx_buffer !=NULL, this means that API_EVT was scheduled
+ {
+ if (m_slave_ready_flag )
+ {
+ err_code = header_send(m_tx_buf_len);
+ m_spi_master_state = SER_PHY_STATE_TX_HEADER;
+ }
+ else
+ {
+ m_spi_master_state = SER_PHY_STATE_TX_WAIT_FOR_RDY;
+ }
+ }
+ else if (m_slave_request_flag)
+ {
+ if (m_slave_ready_flag)
+ {
+ m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
+ err_code = header_send(0);
+ }
+ else
+ {
+ m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
+ }
+ }
+ else
+ {
+ m_spi_master_state = SER_PHY_STATE_IDLE;
+ }
+ }
+ }
+ else if ( evt_src == SER_PHY_EVT_GPIO_RDY && m_waitForReadyFlag)
+ {
+ m_waitForReadyFlag = false;
+ err_code = frame_get();
+ }
+
+
+ break;
+
+ default:
+ break;
+ }
+
+
+ if (err_code != NRF_SUCCESS)
+ {
+ (void)err_code;
+ }
+}
+
+/* SPI master event handler */
+/* Runs in the SPI driver's interrupt context. It does not execute the state
+ * machine directly: on TRANSFER_COMPLETED it only sets a pending flag and
+ * triggers the SW IRQ, so all state transitions happen at one priority level. */
+static void ser_phy_spi_master_event_handler(spi_master_evt_t spi_master_evt)
+{
+ switch (spi_master_evt.type)
+ {
+ case SPI_MASTER_EVT_TRANSFER_COMPLETED:
+
+ /* Switch state */
+ m_pend_xfer_flag = true;
+ SET_Pend_SW_IRQ();
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Enables the software interrupt (SW_IRQn) at mid priority; it is used to
+ * serialize execution of the PHY state machine (see SET_Pend_SW_IRQ callers). */
+static void ser_phy_init_pendSV(void)
+{
+ NVIC_SetPriority(SW_IRQn, APP_IRQ_PRIORITY_MID);
+ NVIC_EnableIRQ(SW_IRQn);
+}
+
+/* Configures GPIOTE sensing of the slave's handshake line(s).
+ * Both lines use active-low logic: the initial flag value is the negated pin
+ * state. In the 5-wire build the separate READY line does not exist, so the
+ * ready flag is forced true and only the REQUEST line is monitored. */
+static void ser_phy_init_gpiote(void)
+{
+ if (!nrf_drv_gpiote_is_init())
+ {
+ (void)nrf_drv_gpiote_init();
+ }
+ NVIC_SetPriority(GPIOTE_IRQn, APP_IRQ_PRIORITY_HIGH);
+
+ nrf_drv_gpiote_in_config_t config = GPIOTE_CONFIG_IN_SENSE_TOGGLE(true);
+ /* Enable pullup to ensure high state while connectivity device is reset */
+ config.pull = NRF_GPIO_PIN_PULLUP;
+ (void)nrf_drv_gpiote_in_init(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST, &config,
+ ser_phy_spi_master_request);
+ nrf_drv_gpiote_in_event_enable(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST,true);
+ m_slave_request_flag = !(nrf_gpio_pin_read(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST));
+
+#ifdef _SPI_5W_
+ m_slave_ready_flag = true;
+#else
+ (void)nrf_drv_gpiote_in_init(SER_PHY_SPI_MASTER_PIN_SLAVE_READY, &config,
+ ser_phy_spi_master_ready);
+ nrf_drv_gpiote_in_event_enable(SER_PHY_SPI_MASTER_PIN_SLAVE_READY,true);
+ m_slave_ready_flag = !(nrf_gpio_pin_read(SER_PHY_SPI_MASTER_PIN_SLAVE_READY));
+#endif
+
+ NVIC_ClearPendingIRQ(SW_IRQn);
+}
+
+/* Releases the GPIOTE channel(s) claimed in ser_phy_init_gpiote(). */
+static void ser_phy_deinit_gpiote(void)
+{
+ nrf_drv_gpiote_in_uninit(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST);
+#ifndef _SPI_5W_
+ nrf_drv_gpiote_in_uninit(SER_PHY_SPI_MASTER_PIN_SLAVE_READY);
+#endif
+}
+
+/* ser_phy API function */
+/* Queues one packet for transmission.
+ * Validates arguments, claims the single TX slot (mp_tx_buffer acts as the
+ * busy indicator), then pends the SW IRQ so the state machine picks the
+ * packet up outside the caller's context.
+ * Returns NRF_ERROR_NULL / NRF_ERROR_INVALID_PARAM / NRF_ERROR_BUSY on
+ * bad pointer, zero length, or a TX already in progress. */
+uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
+{
+ if (p_buffer == NULL)
+ {
+ return NRF_ERROR_NULL;
+ }
+
+ if (num_of_bytes == 0)
+ {
+ return NRF_ERROR_INVALID_PARAM;
+ }
+
+ if (mp_tx_buffer != NULL)
+ {
+ return NRF_ERROR_BUSY;
+ }
+
+ //ser_phy_interrupts_disable();
+ CRITICAL_REGION_ENTER();
+ mp_tx_buffer = (uint8_t *)p_buffer;
+ m_tx_buf_len = num_of_bytes;
+ m_pend_tx_api_flag = true;
+ SET_Pend_SW_IRQ();
+ //ser_phy_interrupts_enable();
+ CRITICAL_REGION_EXIT();
+
+ return NRF_SUCCESS;
+}
+
+/* ser_phy API function */
+/* Supplies (or refuses, with p_buffer == NULL) the RX buffer requested by a
+ * previous SER_PHY_EVT_RX_BUF_REQUEST. Only legal while the state machine is
+ * parked in SER_PHY_STATE_MEMORY_REQUEST; the actual transfer restart is
+ * deferred to the SW IRQ via m_pend_rx_api_flag. */
+uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
+{
+ if (m_spi_master_state != SER_PHY_STATE_MEMORY_REQUEST)
+ {
+ return NRF_ERROR_INVALID_STATE;
+ }
+
+ //ser_phy_interrupts_disable();
+ CRITICAL_REGION_ENTER();
+ mp_rx_buffer = p_buffer;
+ m_pend_rx_api_flag = true;
+ SET_Pend_SW_IRQ();
+ //ser_phy_interrupts_enable();
+ CRITICAL_REGION_EXIT();
+
+ return NRF_SUCCESS;
+}
+
+/* ser_phy API function */
+/* Brings the PHY up: registers the event handler, initializes handshake GPIOTE,
+ * opens the SPI master (1 MHz, LSB first, mode 0) and finally enables the SW
+ * IRQ. May only be called from the DISABLED state.
+ * NOTE(review): if spi_master_open() fails, the state stays IDLE and GPIOTE
+ * stays initialized - caller cannot retry cleanly without ser_phy_close(). */
+uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
+{
+
+ if (m_spi_master_state != SER_PHY_STATE_DISABLED)
+ {
+ return NRF_ERROR_INVALID_STATE;
+ }
+
+ if (events_handler == NULL)
+ {
+ return NRF_ERROR_NULL;
+ }
+
+ uint32_t err_code = NRF_SUCCESS;
+ m_spi_master_state = SER_PHY_STATE_IDLE;
+ m_callback_events_handler = events_handler;
+ ser_phy_init_gpiote();
+
+ /* Configure SPI Master driver */
+ spi_master_config_t spi_master_config;
+ spi_master_config.SPI_Freq = SPI_FREQUENCY_FREQUENCY_M1;
+ spi_master_config.SPI_Pin_SCK = SER_PHY_SPI_MASTER_PIN_SCK;
+ spi_master_config.SPI_Pin_MISO = SER_PHY_SPI_MASTER_PIN_MISO;
+ spi_master_config.SPI_Pin_MOSI = SER_PHY_SPI_MASTER_PIN_MOSI;
+ spi_master_config.SPI_Pin_SS = SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT;
+ spi_master_config.SPI_ORDER = SPI_CONFIG_ORDER_LsbFirst;
+ spi_master_config.SPI_CPOL = SPI_CONFIG_CPOL_ActiveHigh;
+ spi_master_config.SPI_CPHA = SPI_CONFIG_CPHA_Leading;
+
+ err_code = spi_master_open(SER_PHY_SPI_MASTER, &spi_master_config);
+
+ if (err_code != NRF_SUCCESS)
+ {
+ return err_code;
+ }
+#ifdef _SPI_5W_
+ spi_5W_master_evt_handler_reg(SER_PHY_SPI_MASTER, ser_phy_spi_master_event_handler);
+#else
+ spi_master_evt_handler_reg(SER_PHY_SPI_MASTER, ser_phy_spi_master_event_handler);
+#endif
+ ser_phy_init_pendSV();
+
+ return err_code;
+}
+
+/* ser_phy API function */
+/* Shuts the PHY down: enters DISABLED, drops the callback, releases both
+ * packet buffers, zeroes all transfer bookkeeping and tears down GPIOTE and
+ * the SPI master instance. Safe to call regardless of the current state. */
+void ser_phy_close(void)
+{
+ m_spi_master_state = SER_PHY_STATE_DISABLED;
+
+ m_callback_events_handler = NULL;
+
+ buffer_release(&mp_tx_buffer, &m_tx_buf_len);
+ buffer_release(&mp_rx_buffer, &m_rx_buf_len);
+ m_tx_packet_length = 0;
+ m_accumulated_tx_packet_length = 0;
+ m_current_tx_packet_length = 0;
+ m_rx_packet_length = 0;
+ m_accumulated_rx_packet_length = 0;
+ m_current_rx_packet_length = 0;
+ ser_phy_deinit_gpiote();
+ spi_master_close(SER_PHY_SPI_MASTER);
+}
+
+/* ser_phy API function */
+/* Re-enables the state-machine SW interrupt (pairs with ser_phy_interrupts_disable). */
+void ser_phy_interrupts_enable(void)
+{
+ NVIC_EnableIRQ(SW_IRQn);
+}
+
+/* ser_phy API function */
+/* Masks the state-machine SW interrupt so callers can touch shared state safely. */
+void ser_phy_interrupts_disable(void)
+{
+ NVIC_DisableIRQ(SW_IRQn);
+}
+
+
+#ifdef SER_PHY_DEBUG_APP_ENABLE
+
+static spi_master_raw_callback_t m_spi_master_raw_evt_callback;
+
+/* Forwards a raw-driver debug event to the callback registered via debug_init();
+ * a no-op when no callback is registered. */
+void debug_evt(spi_master_raw_evt_type_t evt, uint32_t data)
+{
+ if (m_spi_master_raw_evt_callback)
+ {
+ spi_master_raw_evt_t e;
+ e.evt = evt;
+ e.data = data;
+ m_spi_master_raw_evt_callback(e);
+ }
+}
+
+/* Registers (or clears, with NULL) the debug event callback used by debug_evt(). */
+void debug_init(spi_master_raw_callback_t spi_master_raw_evt_callback)
+{
+ m_spi_master_raw_evt_callback = spi_master_raw_evt_callback;
+}
+
+#endif
+/** @} */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_5W_slave.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_5W_slave.c
new file mode 100644
index 0000000..88588b4
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_5W_slave.c
@@ -0,0 +1,644 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/**@file
+ *
+ * @defgroup ser_phy_spi_5W_phy_driver_slave ser_phy_spi_5W_slave.c
+ * @{
+ * @ingroup ser_phy_spi_5W_phy_driver_slave
+ *
+ * @brief SPI_5W_RAW PHY slave driver.
+ */
+
+
+#include <stddef.h>
+#include <string.h>
+
+
+#include "boards.h"
+#include "nrf_drv_spis.h"
+#include "ser_phy.h"
+#include "ser_config.h"
+#include "nrf_gpio.h"
+#include "nrf_gpiote.h"
+#include "nrf_soc.h"
+#include "app_error.h"
+#include "app_util.h"
+#include "ser_phy_config_conn.h"
+#include "ser_phy_debug_conn.h"
+#include "app_error.h"
+
+#define _static static
+
+#define SER_PHY_SPI_5W_MTU_SIZE SER_PHY_SPI_MTU_SIZE
+
+#define SER_PHY_SPI_DEF_CHARACTER 0xFF //SPI default character. Character clocked out in case of an ignored transaction
+#define SER_PHY_SPI_ORC_CHARACTER 0xFF //SPI over-read character. Character clocked out after an over-read of the transmit buffer
+
+static nrf_drv_spis_t m_spis = NRF_DRV_SPIS_INSTANCE(SER_PHY_SPI_SLAVE_INSTANCE);
+
+//Compiled as the 5-wire variant: the sections guarded by "#ifndef _SPI_5W_"
+//below (hardware /RDY line via GPIOTE + PPI) are disabled in this file.
+#define _SPI_5W_
+
+//SPI raw peripheral device configuration data
+typedef struct
+{
+ int32_t pin_req; //SPI /REQ pin. -1 for not using
+ int32_t pin_rdy; //SPI /RDY pin. -1 for not using
+ int32_t ppi_rdy_ch; //SPI /RDY ppi ready channel
+ int32_t gpiote_rdy_ch; //SPI /RDY pin ready channel
+} spi_slave_raw_trasp_cfg_t;
+
+/**@brief States of the SPI transaction state machine. */
+typedef enum
+{
+ SPI_RAW_STATE_UNKNOWN,
+ SPI_RAW_STATE_SETUP_HEADER,
+ SPI_RAW_STATE_RX_HEADER,
+ SPI_RAW_STATE_MEM_REQUESTED,
+ SPI_RAW_STATE_RX_PAYLOAD,
+ SPI_RAW_STATE_TX_HEADER,
+ SPI_RAW_STATE_TX_PAYLOAD,
+} trans_state_t;
+
+_static spi_slave_raw_trasp_cfg_t m_spi_slave_raw_config;
+
+//RX bookkeeping: total expected length, bytes consumed so far, size of the frame in flight
+_static uint16_t m_accumulated_rx_packet_length;
+_static uint16_t m_rx_packet_length;
+_static uint16_t m_current_rx_frame_length;
+
+//TX bookkeeping, symmetric to RX
+_static uint16_t m_accumulated_tx_packet_length;
+_static uint16_t m_tx_packet_length;
+_static uint16_t m_current_tx_frame_length;
+
+_static uint8_t m_header_rx_buffer[SER_PHY_HEADER_SIZE + 1]; // + 1 for '0' guard in SPI_5W
+_static uint8_t m_header_tx_buffer[SER_PHY_HEADER_SIZE + 1]; // + 1 for '0' guard in SPI_5W
+
+_static uint8_t m_tx_frame_buffer[SER_PHY_SPI_5W_MTU_SIZE];
+_static uint8_t m_rx_frame_buffer[SER_PHY_SPI_5W_MTU_SIZE];
+_static uint8_t m_zero_buff[SER_PHY_SPI_5W_MTU_SIZE] = { 0 }; //ROM'able declaration - all guard bytes
+
+//Upper-layer packet buffers; volatile because they are shared between the
+//ser_phy API (interrupts masked around updates) and the SPIS event handler.
+_static uint8_t * volatile m_p_rx_buffer = NULL;
+_static const uint8_t * volatile m_p_tx_buffer = NULL;
+
+_static bool m_trash_payload_flag; //true when no RX buffer was provided - payload goes to m_rx_frame_buffer and is dropped
+_static bool m_buffer_reqested_flag; //true while waiting for ser_phy_rx_buf_set() after a memory request
+
+_static trans_state_t m_trans_state = SPI_RAW_STATE_UNKNOWN;
+_static ser_phy_events_handler_t m_ser_phy_callback = NULL;
+
+/* Halts via the application error handler when an internal state-machine invariant fails. */
+static void spi_slave_raw_assert(bool cond)
+{
+ APP_ERROR_CHECK_BOOL(cond);
+}
+
+/* Dispatches an event to the upper layer; silently ignored when no handler is registered. */
+static void callback_ser_phy_event(ser_phy_evt_t event)
+{
+ if (m_ser_phy_callback)
+ {
+ m_ser_phy_callback(event);
+ }
+ return;
+}
+
+/* Asks the upper layer for an RX buffer of 'size' bytes (SER_PHY_EVT_RX_BUF_REQUEST);
+ * the answer arrives later through ser_phy_rx_buf_set(). */
+static void callback_memory_request(uint16_t size)
+{
+ ser_phy_evt_t event;
+
+ event.evt_type = SER_PHY_EVT_RX_BUF_REQUEST;
+ event.evt_params.rx_buf_request.num_of_bytes = size;
+ callback_ser_phy_event(event);
+ return;
+
+}
+
+/* Notifies the upper layer that a complete packet landed in pBuffer (SER_PHY_EVT_RX_PKT_RECEIVED). */
+static void callback_packet_received(uint8_t * pBuffer, uint16_t size)
+{
+ ser_phy_evt_t event;
+
+ event.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
+ event.evt_params.rx_pkt_received.num_of_bytes = size;
+ event.evt_params.rx_pkt_received.p_buffer = pBuffer;
+ callback_ser_phy_event(event);
+ return;
+}
+
+/* Notifies the upper layer that an incoming packet was discarded because no RX buffer was provided. */
+static void callback_packet_dropped()
+{
+ ser_phy_evt_t event;
+
+ event.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
+ callback_ser_phy_event(event);
+ return;
+}
+
+/* Notifies the upper layer that the queued TX packet has been fully clocked out (SER_PHY_EVT_TX_PKT_SENT). */
+static void callback_packet_transmitted(void)
+{
+ ser_phy_evt_t event;
+
+ event.evt_type = SER_PHY_EVT_TX_PKT_SENT;
+ callback_ser_phy_event(event);
+ return;
+}
+
+/* Byte-wise copy of 'len' bytes from p_src to p_dest (memcpy equivalent;
+ * buffers must not overlap). */
+static void copy_buff(uint8_t * const p_dest, uint8_t const * const p_src, uint16_t len)
+{
+ uint16_t index;
+
+ for (index = 0; index < len; index++)
+ {
+ p_dest[index] = p_src[index];
+ }
+ return;
+}
+
+/* Function computes current packet length */
+/* Returns the size of the next SPI frame: the remaining byte count
+ * (packet_length - accumulated_packet_length), clamped to the MTU. */
+static uint16_t compute_current_frame_length(const uint16_t packet_length,
+ const uint16_t accumulated_packet_length)
+{
+ uint16_t current_packet_length = packet_length - accumulated_packet_length;
+
+ if (current_packet_length > SER_PHY_SPI_5W_MTU_SIZE)
+ {
+ current_packet_length = SER_PHY_SPI_5W_MTU_SIZE;
+ }
+
+ return current_packet_length;
+}
+
+/* Arms the SPIS driver to receive a packet header: clocks out guard bytes
+ * (m_zero_buff) while capturing SER_PHY_HEADER_SIZE bytes into m_header_rx_buffer. */
+static uint32_t header_get()
+{
+ uint32_t err_code;
+
+ err_code = nrf_drv_spis_buffers_set(&m_spis,
+ (uint8_t *) m_zero_buff,
+ SER_PHY_HEADER_SIZE,
+ m_header_rx_buffer,
+ SER_PHY_HEADER_SIZE);
+ return err_code;
+}
+
+/* Arms reception of the next payload frame (size from compute_current_frame_length).
+ * Data lands directly in the caller-supplied buffer at the current offset, or in
+ * the scratch m_rx_frame_buffer when the packet is being trashed. Guard bytes
+ * are clocked out on MISO either way. */
+static uint32_t frame_get()
+{
+ uint32_t err_code;
+
+ m_current_rx_frame_length = compute_current_frame_length(m_rx_packet_length,
+ m_accumulated_rx_packet_length);
+
+ if (!m_trash_payload_flag)
+ {
+ err_code =
+ nrf_drv_spis_buffers_set(&m_spis,
+ (uint8_t *) m_zero_buff,
+ m_current_rx_frame_length,
+ &(m_p_rx_buffer[m_accumulated_rx_packet_length]),
+ m_current_rx_frame_length);
+ }
+ else
+ {
+ err_code = nrf_drv_spis_buffers_set(&m_spis,
+ (uint8_t *) m_zero_buff,
+ m_current_rx_frame_length,
+ m_rx_frame_buffer,
+ m_current_rx_frame_length);
+ }
+ return err_code;
+}
+
+/* Arms transmission of a header frame: one '0' guard byte followed by the
+ * little-endian 16-bit length (0 announces "nothing to send"). */
+static uint32_t header_send(uint16_t len)
+{
+ uint32_t err_code;
+
+ m_header_tx_buffer[0] = (uint8_t) 0; //this is guard byte
+ (void)uint16_encode(len, &(m_header_tx_buffer[1]));
+ err_code = nrf_drv_spis_buffers_set(&m_spis,
+ m_header_tx_buffer,
+ SER_PHY_HEADER_SIZE + 1,
+ m_header_rx_buffer,
+ SER_PHY_HEADER_SIZE + 1);
+ return err_code;
+}
+
+/* Arms transmission of the next payload frame: a '0' guard byte followed by up
+ * to MTU-1 packet bytes staged in m_tx_frame_buffer (the guard byte occupies
+ * one slot of the MTU, hence the -1 adjustment). */
+static uint32_t frame_send()
+{
+ uint32_t err_code;
+
+ m_current_tx_frame_length = compute_current_frame_length(m_tx_packet_length,
+ m_accumulated_tx_packet_length);
+
+ if (m_current_tx_frame_length == SER_PHY_SPI_5W_MTU_SIZE)
+ {
+ m_current_tx_frame_length -= 1; //extra space for guard byte must be taken into account for MTU
+ }
+ m_tx_frame_buffer[0] = 0; //guard byte
+ copy_buff(&(m_tx_frame_buffer[1]),
+ &(m_p_tx_buffer[m_accumulated_tx_packet_length]),
+ m_current_tx_frame_length);
+ err_code = nrf_drv_spis_buffers_set(&m_spis,
+ m_tx_frame_buffer,
+ m_current_tx_frame_length + 1,
+ m_rx_frame_buffer,
+ m_current_tx_frame_length + 1);
+
+ return err_code;
+}
+
+/* Signals "buffers armed" on the hardware /RDY line. Compiled to a no-op in
+ * this 5-wire build (_SPI_5W_ is defined above), where no /RDY line exists. */
+static void set_ready_line(void)
+{
+#ifndef _SPI_5W_
+ //toggle - this should go high - but toggle is unsafe
+ uint32_t rdy_task = nrf_drv_gpiote_out_task_addr_get(m_spi_slave_raw_config.gpiote_rdy_ch);
+ *(uint32_t *)rdy_task = 1;
+#endif
+ return;
+}
+
+/* Asserts the /REQ line toward the master (active low: set = drive 0). */
+static void set_request_line(void)
+{
+ //active low logic - set is 0
+ nrf_gpio_pin_clear(m_spi_slave_raw_config.pin_req);
+ DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET(0);
+ return;
+}
+
+/* De-asserts the /REQ line toward the master (active low: clear = drive 1). */
+static void clear_request_line(void)
+{
+ //active low logic - clear is 1
+ nrf_gpio_pin_set(m_spi_slave_raw_config.pin_req);
+ DEBUG_EVT_SPI_SLAVE_RAW_REQ_CLEARED(0);
+ return;
+}
+
+/**
+ * \brief Slave driver main state machine
+ * For UML graph, please refer to SDK documentation
+ *
+ * Driven by three event sources: BUFFERS_SET_DONE / XFER_DONE from the SPIS
+ * driver, and NRF_DRV_SPIS_EVT_TYPE_MAX, which the ser_phy API functions use
+ * as a dummy event to force a state transition from thread context.
+*/
+static void spi_slave_event_handle(nrf_drv_spis_event_t event)
+{
+ static uint32_t err_code = NRF_SUCCESS;
+ static uint16_t packetLength;
+
+ switch (m_trans_state)
+ {
+ case SPI_RAW_STATE_SETUP_HEADER:
+ // Initial dummy event from ser_phy_open() - arm reception of the first header.
+ m_trans_state = SPI_RAW_STATE_RX_HEADER;
+ err_code = header_get();
+ break;
+
+ case SPI_RAW_STATE_RX_HEADER:
+
+ if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
+ {
+ DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
+ set_ready_line();
+ }
+
+ if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
+ {
+ DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(event.rx_amount);
+ spi_slave_raw_assert(event.rx_amount == SER_PHY_HEADER_SIZE);
+ packetLength = uint16_decode(m_header_rx_buffer);
+
+ if (packetLength != 0 )
+ {
+ // Non-zero header: the master announces an incoming packet - ask the upper layer for memory.
+ m_trans_state = SPI_RAW_STATE_MEM_REQUESTED;
+ m_buffer_reqested_flag = true;
+ m_rx_packet_length = packetLength;
+ callback_memory_request(packetLength);
+ }
+ else
+ {
+ // Zero-length header: the master is polling for our pending TX (if any).
+ if (m_p_tx_buffer)
+ {
+ clear_request_line();
+ m_trans_state = SPI_RAW_STATE_TX_HEADER;
+ err_code = header_send(m_tx_packet_length);
+ }
+ else
+ {
+ //there is nothing to send - zero response facilitates polling - but perhaps, it should be assert
+ err_code = header_send(0);
+ }
+ }
+ }
+
+ break;
+
+ case SPI_RAW_STATE_MEM_REQUESTED:
+
+ if (event.evt_type == NRF_DRV_SPIS_EVT_TYPE_MAX) //This is API dummy event
+ {
+ // Dummy event injected by ser_phy_rx_buf_set() - start receiving payload frames.
+ m_buffer_reqested_flag = false;
+ m_trans_state = SPI_RAW_STATE_RX_PAYLOAD;
+ m_accumulated_rx_packet_length = 0;
+ err_code = frame_get();
+ }
+ break;
+
+
+ case SPI_RAW_STATE_RX_PAYLOAD:
+
+ if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
+ {
+ DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
+ set_ready_line();
+ }
+
+ if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
+ {
+ DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(event.rx_amount);
+ spi_slave_raw_assert(event.rx_amount == m_current_rx_frame_length);
+ m_accumulated_rx_packet_length += m_current_rx_frame_length;
+
+ if (m_accumulated_rx_packet_length < m_rx_packet_length )
+ {
+ err_code = frame_get();
+ }
+ else
+ {
+ spi_slave_raw_assert(m_accumulated_rx_packet_length == m_rx_packet_length);
+ // Whole packet received - re-arm header reception before notifying the upper layer.
+ m_trans_state = SPI_RAW_STATE_RX_HEADER;
+ err_code = header_get();
+
+ if (!m_trash_payload_flag)
+ {
+ callback_packet_received(m_p_rx_buffer, m_accumulated_rx_packet_length);
+ }
+ else
+ {
+ callback_packet_dropped();
+ }
+ }
+ }
+ break;
+
+ case SPI_RAW_STATE_TX_HEADER:
+
+ if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
+ {
+ DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
+ set_ready_line();
+ }
+
+ if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
+ {
+ DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(event.tx_amount);
+ spi_slave_raw_assert(event.tx_amount == SER_PHY_HEADER_SIZE + 1);
+ m_trans_state = SPI_RAW_STATE_TX_PAYLOAD;
+ m_accumulated_tx_packet_length = 0;
+ err_code = frame_send();
+ }
+
+ break;
+
+ case SPI_RAW_STATE_TX_PAYLOAD:
+
+ if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
+ {
+ DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
+ set_ready_line();
+ }
+
+ if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
+ {
+ DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(event.tx_amount);
+ spi_slave_raw_assert(event.tx_amount == m_current_tx_frame_length + 1);
+ m_accumulated_tx_packet_length += m_current_tx_frame_length;
+
+ if ( m_accumulated_tx_packet_length < m_tx_packet_length )
+ {
+ err_code = frame_send();
+ }
+ else
+ {
+ spi_slave_raw_assert(m_accumulated_tx_packet_length == m_tx_packet_length);
+ //clear pointer before callback
+ m_p_tx_buffer = NULL;
+ callback_packet_transmitted();
+ //spi slave TX transfer is possible only when RX is ready, so return to waiting for a header
+ m_trans_state = SPI_RAW_STATE_RX_HEADER;
+ err_code = header_get();
+ }
+ }
+ break;
+
+ default:
+ err_code = NRF_ERROR_INVALID_STATE;
+ break;
+ }
+ APP_ERROR_CHECK(err_code);
+}
+
+#ifndef _SPI_5W_
+/* Sets up the GPIOTE task channel that drives the /RDY line.
+ * NOTE(review): compiled out in this file (_SPI_5W_ defined above); also,
+ * nrf_drv_gpiote_out_init is passed gpiote_rdy_ch rather than a pin number -
+ * verify against the 4-wire build before re-enabling. */
+static void spi_slave_gpiote_init(void)
+{
+ if (!nrf_drv_gpiote_is_init())
+ {
+ (void)nrf_drv_gpiote_init();
+ }
+ nrf_drv_gpiote_out_config_t config = GPIOTE_CONFIG_OUT_TASK_TOGGLE(true);
+ (void)nrf_drv_gpiote_out_init(m_spi_slave_raw_config.gpiote_rdy_ch, &config);
+ return;
+}
+
+/* Wires SPIS1 EVENTS_END to the /RDY GPIOTE task via PPI, so the /RDY line is
+ * cleared by hardware at the end of each transfer. Compiled out in this 5-wire
+ * build (_SPI_5W_ defined above). */
+static void spi_slave_ppi_init(void)
+{
+ uint32_t rdy_task = nrf_drv_gpiote_out_task_addr_get(m_spi_slave_raw_config.gpiote_rdy_ch);
+ //Configure PPI channel to clear /RDY line
+ NRF_PPI->CH[m_spi_slave_raw_config.ppi_rdy_ch].EEP = (uint32_t)(&NRF_SPIS1->EVENTS_END);
+ NRF_PPI->CH[m_spi_slave_raw_config.ppi_rdy_ch].TEP = rdy_task;
+
+ //this works only for channels 0..15 - but soft device is using 8-15 anyway
+ NRF_PPI->CHEN |= (1 << m_spi_slave_raw_config.ppi_rdy_ch);
+ return;
+}
+#endif
+
+/* Configures the handshake GPIOs as outputs, initially de-asserted (driven
+ * high - the lines are active low). The /RDY pin is skipped in the 5-wire build. */
+static void spi_slave_gpio_init(void)
+{
+ nrf_gpio_pin_set(m_spi_slave_raw_config.pin_req);
+ nrf_gpio_cfg_output(m_spi_slave_raw_config.pin_req);
+#ifndef _SPI_5W_
+ nrf_gpio_pin_set(m_spi_slave_raw_config.pin_rdy);
+ nrf_gpio_cfg_output(m_spi_slave_raw_config.pin_rdy);
+#endif
+ return;
+}
+
+/* ser_phy API function */
+/* Re-enables the SPIS peripheral interrupt (pairs with ser_phy_interrupts_disable). */
+void ser_phy_interrupts_enable(void)
+{
+ NVIC_EnableIRQ(nrfx_get_irq_number(m_spis.p_reg));
+}
+
+/* ser_phy API function */
+/* Masks the SPIS peripheral interrupt so API calls can touch state shared with the event handler. */
+void ser_phy_interrupts_disable(void)
+{
+ NVIC_DisableIRQ(nrfx_get_irq_number(m_spis.p_reg));
+}
+
+/* ser_phy API function */
+/* Answers a pending memory request. A NULL p_buffer means "drop the packet":
+ * the payload is then received into a scratch buffer and discarded. The state
+ * transition is performed by injecting a dummy EVT_TYPE_MAX event into the
+ * state machine with the SPIS interrupt masked. Returns NRF_ERROR_BUSY when
+ * no memory request is outstanding. */
+uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
+{
+ uint32_t status = NRF_SUCCESS;
+ nrf_drv_spis_event_t event;
+
+ ser_phy_interrupts_disable();
+
+ if (m_buffer_reqested_flag && (m_trans_state == SPI_RAW_STATE_MEM_REQUESTED))
+ {
+ m_p_rx_buffer = p_buffer;
+
+ if (m_p_rx_buffer)
+ {
+ m_trash_payload_flag = false;
+ }
+ else
+ {
+ m_trash_payload_flag = true;
+ }
+
+ event.evt_type = NRF_DRV_SPIS_EVT_TYPE_MAX; //force transition with dummy event
+ event.rx_amount = 0;
+ event.tx_amount = 0;
+ spi_slave_event_handle(event);
+ }
+ else
+ {
+ status = NRF_ERROR_BUSY;
+ }
+ ser_phy_interrupts_enable();
+
+ return status;
+}
+
+/* ser_phy API function */
+/* Queues one packet for transmission and asserts /REQ so the master polls us.
+ * Returns NRF_ERROR_BUSY while a previous packet is still queued.
+ * NOTE(review): num_of_bytes == 0 yields NRF_ERROR_NULL here, whereas the
+ * master-side counterpart returns NRF_ERROR_INVALID_PARAM for that case. */
+uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
+{
+ uint32_t status = NRF_SUCCESS;
+
+ if ( p_buffer == NULL || num_of_bytes == 0)
+ {
+ return NRF_ERROR_NULL;
+ }
+
+ ser_phy_interrupts_disable();
+
+ if ( m_p_tx_buffer == NULL)
+ {
+ m_tx_packet_length = num_of_bytes;
+ m_p_tx_buffer = p_buffer;
+ set_request_line();
+ }
+ else
+ {
+ status = NRF_ERROR_BUSY;
+ }
+ ser_phy_interrupts_enable();
+
+ return status;
+}
+
+/* ser_phy API function */
+/* Brings the slave PHY up: configures handshake GPIOs, initializes the SPIS
+ * driver (mode 0, LSB first), then kick-starts the state machine with a dummy
+ * event so the first header reception is armed. May only be called from the
+ * UNKNOWN (closed) state.
+ * NOTE(review): APP_ERROR_CHECK on the init result makes the subsequent
+ * err_code == NRF_SUCCESS test redundant - on failure execution never returns. */
+uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
+{
+ uint32_t err_code;
+ nrf_drv_spis_config_t spi_slave_config;
+ nrf_drv_spis_event_t event;
+
+ if (m_trans_state != SPI_RAW_STATE_UNKNOWN)
+ {
+ return NRF_ERROR_INVALID_STATE;
+ }
+
+ if (events_handler == NULL)
+ {
+ return NRF_ERROR_NULL;
+ }
+
+ //one ppi channel and one gpiote channel are used to drive RDY line
+ m_spi_slave_raw_config.pin_req = SER_PHY_SPI_SLAVE_REQ_PIN;
+ m_spi_slave_raw_config.pin_rdy = SER_PHY_SPI_SLAVE_RDY_PIN;
+ m_spi_slave_raw_config.ppi_rdy_ch = SER_PHY_SPI_PPI_RDY_CH;
+ m_spi_slave_raw_config.gpiote_rdy_ch = SER_PHY_SPI_GPIOTE_RDY_CH;
+
+ spi_slave_gpio_init();
+#ifndef _SPI_5W_
+ spi_slave_gpiote_init();
+ spi_slave_ppi_init();
+#endif
+
+ spi_slave_config.miso_pin = SER_CON_SPIS_MISO_PIN;
+ spi_slave_config.mosi_pin = SER_CON_SPIS_MOSI_PIN;
+ spi_slave_config.sck_pin = SER_CON_SPIS_SCK_PIN;
+ spi_slave_config.csn_pin = SER_CON_SPIS_CSN_PIN;
+ spi_slave_config.mode = NRF_DRV_SPIS_MODE_0;
+ spi_slave_config.bit_order = NRF_DRV_SPIS_BIT_ORDER_LSB_FIRST;
+ spi_slave_config.def = SER_PHY_SPI_DEF_CHARACTER;
+ spi_slave_config.orc = SER_PHY_SPI_ORC_CHARACTER;
+ spi_slave_config.csn_pullup = NRF_GPIO_PIN_PULLUP;
+ spi_slave_config.irq_priority = APP_IRQ_PRIORITY_LOWEST;
+
+ //keep /CS high when init
+ nrf_gpio_cfg_input(spi_slave_config.csn_pin, NRF_GPIO_PIN_PULLUP);
+
+ err_code = nrf_drv_spis_init(&m_spis, &spi_slave_config, spi_slave_event_handle);
+ APP_ERROR_CHECK(err_code);
+
+ if (err_code == NRF_SUCCESS)
+ {
+ m_ser_phy_callback = events_handler;
+
+ m_trans_state = SPI_RAW_STATE_SETUP_HEADER;
+ event.evt_type = NRF_DRV_SPIS_EVT_TYPE_MAX; //force transition for dummy event
+ event.rx_amount = 0;
+ event.tx_amount = 0;
+ spi_slave_event_handle(event);
+
+ }
+
+ return err_code;
+}
+
+/* ser_phy API function */
+/* Shuts the slave PHY down: uninitializes the SPIS driver, drops the callback
+ * and returns the state machine to UNKNOWN so ser_phy_open() can run again. */
+void ser_phy_close(void)
+{
+ nrf_drv_spis_uninit(&m_spis);
+ m_ser_phy_callback = NULL;
+ m_trans_state = SPI_RAW_STATE_UNKNOWN;
+}
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_master.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_master.c
new file mode 100644
index 0000000..6065446
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_master.c
@@ -0,0 +1,804 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/**@file
+ *
+ * @defgroup ser_phy_spi_phy_driver_master ser_phy_nrf51_spi_master.c
+ * @{
+ * @ingroup ser_phy_spi_phy_driver_master
+ *
+ * @brief SPI_RAW PHY master driver.
+ */
+
+#include <stdio.h>
+#include "nrf_drv_gpiote.h"
+#include "nrf_drv_spi.h"
+#include "ser_phy.h"
+#include "ser_config.h"
+#include "app_util.h"
+#include "app_util_platform.h"
+#include "app_error.h"
+#include "nrf_error.h"
+#include "nrf_gpio.h"
+#include "nrf_gpiote.h"
+#include "boards.h"
+#include "app_error.h"
+#include "ser_phy_config_app.h"
+#include "ser_phy_debug_app.h"
+
+#define notUSE_PendSV // rename to USE_PendSV to dispatch the state machine via PendSV instead of SWI3
+
+#ifdef USE_PendSV
+
+// PendSV variant: the state machine runs in the PendSV exception.
+#define SW_IRQn PendSV_IRQn
+#define SW_IRQ_Handler() PendSV_Handler()
+#define SET_Pend_SW_IRQ() SCB->ICSR = SCB->ICSR | SCB_ICSR_PENDSVSET_Msk //NVIC_SetPendingIRQ(PendSV_IRQn) - PendSV_IRQn is a negative - does not work with CMSIS
+
+#else
+
+// SWI3 variant (default): the state machine runs in the SWI3 software interrupt.
+#define SW_IRQn SWI3_IRQn
+#define SW_IRQ_Handler() SWI3_IRQHandler()
+#define SET_Pend_SW_IRQ() NVIC_SetPendingIRQ(SWI3_IRQn)
+#endif /* USE_PendSV */
+
+/* States of the SPI master PHY state machine (driven in ser_phy_switch_state). */
+typedef enum
+{
+    SER_PHY_STATE_IDLE = 0,        // no transfer in progress; waiting for slave REQ or a TX API call
+    SER_PHY_STATE_TX_HEADER,       // 2-byte header carrying our payload length is being clocked out
+    SER_PHY_STATE_TX_WAIT_FOR_RDY, // TX requested but slave RDY not asserted yet
+    SER_PHY_STATE_TX_PAYLOAD,      // TX payload frames are being clocked out
+    SER_PHY_STATE_RX_WAIT_FOR_RDY, // slave raised REQ; waiting for RDY before sending the zero header
+    SER_PHY_STATE_TX_ZERO_HEADER,  // zero-length header sent to ask the slave for its packet length
+    SER_PHY_STATE_RX_HEADER,       // reading the slave's header (its payload length)
+    SER_PHY_STATE_MEMORY_REQUEST,  // waiting for the upper layer to provide an RX buffer
+    SER_PHY_STATE_RX_PAYLOAD,      // RX payload frames are being clocked in
+    SER_PHY_STATE_DISABLED         // driver closed / not yet opened
+} ser_phy_spi_master_state_t;
+
+/* Sources of events fed into the state machine from the SW IRQ handler. */
+typedef enum
+{
+    SER_PHY_EVT_GPIO_RDY = 0,      // active (low) edge detected on the slave READY line
+    SER_PHY_EVT_GPIO_REQ,          // active (low) edge detected on the slave REQUEST line
+    SER_PHY_EVT_SPI_TRANSFER_DONE, // SPI master driver completed a transfer
+    SER_PHY_EVT_TX_API_CALL,       // ser_phy_tx_pkt_send() deferred into SW IRQ context
+    SER_PHY_EVT_RX_API_CALL        // ser_phy_rx_buf_set() deferred into SW IRQ context
+} ser_phy_event_source_t;
+
+#define _static static // single point to strip 'static' from module state for debugging
+
+_static uint8_t * mp_tx_buffer = NULL; // upper-layer TX packet; NULL means "driver does not own a TX buffer"
+_static uint16_t m_tx_buf_len = 0;     // length of mp_tx_buffer
+
+_static uint8_t * mp_rx_buffer = NULL; // upper-layer RX buffer; NULL means incoming payload is trashed
+_static uint16_t m_rx_buf_len = 0;     // length requested from / granted by the upper layer
+_static uint8_t m_frame_buffer[SER_PHY_SPI_MTU_SIZE];          // scratch sink for dropped RX payload
+_static uint8_t m_header_buffer[SER_PHY_HEADER_SIZE] = { 0 };  // 2-byte length header (TX and RX)
+
+// TX progress bookkeeping (whole packet / bytes done / current frame size).
+_static uint16_t m_tx_packet_length = 0;
+_static uint16_t m_accumulated_tx_packet_length = 0;
+_static uint16_t m_current_tx_packet_length = 0;
+
+// RX progress bookkeeping (whole packet / bytes done / current frame size).
+_static uint16_t m_rx_packet_length = 0;
+_static uint16_t m_accumulated_rx_packet_length = 0;
+_static uint16_t m_current_rx_packet_length = 0;
+
+// One-shot "event pending" flags, set in GPIOTE/SPI/API context and consumed in SW_IRQ_Handler.
+_static volatile bool m_pend_req_flag = 0;
+_static volatile bool m_pend_rdy_flag = 0;
+_static volatile bool m_pend_xfer_flag = 0;
+_static volatile bool m_pend_rx_api_flag = 0;
+_static volatile bool m_pend_tx_api_flag = 0;
+
+// Sampled levels of the slave handshake lines (true == line active/low).
+_static volatile bool m_slave_ready_flag = false;
+_static volatile bool m_slave_request_flag = false;
+
+_static ser_phy_events_handler_t m_callback_events_handler = NULL;                   // upper-layer callback
+_static ser_phy_spi_master_state_t m_spi_master_state = SER_PHY_STATE_DISABLED;      // current FSM state
+
+_static const nrf_drv_spi_t m_spi_master = SER_PHY_SPI_MASTER_INSTANCE; // SPI master driver instance
+
+static void ser_phy_switch_state(ser_phy_event_source_t evt_src); // forward: the main state machine
+
+/* Hard assertion for internal invariants: routes through APP_ERROR_CHECK_BOOL. */
+static void spi_master_raw_assert(bool cond)
+{
+    APP_ERROR_CHECK_BOOL(cond);
+}
+
+/* SW interrupt handler (SWI3 or PendSV, per the SW_IRQ_Handler macro).
+ * Drains every pending event flag and feeds the matching event into
+ * ser_phy_switch_state(). All FSM transitions happen here, at one interrupt
+ * priority, which serializes the state machine. Flags are cleared before
+ * dispatch so a flag re-set during dispatch pends a fresh IRQ round. */
+void SW_IRQ_Handler()
+{
+    if (m_pend_req_flag)
+    {
+        m_pend_req_flag = false;
+        DEBUG_EVT_SPI_MASTER_RAW_REQUEST(0);
+        ser_phy_switch_state(SER_PHY_EVT_GPIO_REQ);
+    }
+
+    if (m_pend_rdy_flag)
+    {
+        m_pend_rdy_flag = false;
+        DEBUG_EVT_SPI_MASTER_RAW_READY(0);
+        ser_phy_switch_state(SER_PHY_EVT_GPIO_RDY);
+    }
+
+    if (m_pend_xfer_flag)
+    {
+        m_pend_xfer_flag = false;
+        DEBUG_EVT_SPI_MASTER_RAW_XFER_DONE(0);
+        ser_phy_switch_state(SER_PHY_EVT_SPI_TRANSFER_DONE);
+    }
+
+    if (m_pend_rx_api_flag)
+    {
+        m_pend_rx_api_flag = false;
+        DEBUG_EVT_SPI_MASTER_RAW_API_CALL(0);
+        ser_phy_switch_state(SER_PHY_EVT_RX_API_CALL);
+    }
+
+    if (m_pend_tx_api_flag)
+    {
+        m_pend_tx_api_flag = false;
+        DEBUG_EVT_SPI_MASTER_RAW_API_CALL(0);
+        ser_phy_switch_state(SER_PHY_EVT_TX_API_CALL);
+    }
+}
+
+/* GPIOTE toggle handler for the slave READY line (active low).
+ * Tracks the line level and, on assertion, schedules an RDY event. */
+static void ser_phy_spi_master_ready(nrf_drv_gpiote_pin_t  pin,
+                                     nrf_gpiote_polarity_t action)
+{
+    bool asserted = (nrf_gpio_pin_read(pin) == 0);
+
+    m_slave_ready_flag = asserted;
+    if (asserted)
+    {
+        m_pend_rdy_flag = true;
+    }
+
+    DEBUG_EVT_SPI_MASTER_RAW_READY_EDGE((uint32_t) !m_slave_ready_flag);
+    SET_Pend_SW_IRQ();
+}
+
+/* GPIOTE toggle handler for the slave REQUEST line (active low).
+ * Tracks the line level and, on assertion, schedules a REQ event. */
+static void ser_phy_spi_master_request(nrf_drv_gpiote_pin_t  pin,
+                                       nrf_gpiote_polarity_t action)
+{
+    bool asserted = (nrf_gpio_pin_read(pin) == 0);
+
+    m_slave_request_flag = asserted;
+    if (asserted)
+    {
+        m_pend_req_flag = true;
+    }
+
+    DEBUG_EVT_SPI_MASTER_RAW_REQUEST_EDGE((uint32_t) !m_slave_request_flag);
+    SET_Pend_SW_IRQ();
+}
+
+/* Notify the upper layer that the whole TX packet has been clocked out. */
+static __INLINE void callback_packet_sent()
+{
+    ser_phy_evt_t evt;
+
+    evt.evt_type = SER_PHY_EVT_TX_PKT_SENT;
+
+    DEBUG_EVT_SPI_MASTER_PHY_TX_PKT_SENT(0);
+    m_callback_events_handler(evt);
+}
+
+/* Notify the upper layer that an incoming packet was received but trashed
+ * (no RX buffer was provided for it). */
+static __INLINE void callback_packet_dropped()
+{
+    ser_phy_evt_t evt;
+
+    evt.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
+
+    DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_DROPPED(0);
+    m_callback_events_handler(evt);
+}
+
+/* Hand a completed RX packet (buffer + length) to the upper layer. */
+static __INLINE void callback_packet_received()
+{
+    ser_phy_evt_t evt;
+
+    evt.evt_type                                 = SER_PHY_EVT_RX_PKT_RECEIVED;
+    evt.evt_params.rx_pkt_received.p_buffer      = mp_rx_buffer;
+    evt.evt_params.rx_pkt_received.num_of_bytes  = m_rx_buf_len;
+
+    DEBUG_EVT_SPI_MASTER_PHY_RX_PKT_RECEIVED(0);
+    m_callback_events_handler(evt);
+}
+
+/* Ask the upper layer to provide an RX buffer of m_rx_buf_len bytes
+ * (it answers through ser_phy_rx_buf_set). */
+static __INLINE void callback_mem_request()
+{
+    ser_phy_evt_t evt;
+
+    evt.evt_type                                = SER_PHY_EVT_RX_BUF_REQUEST;
+    evt.evt_params.rx_buf_request.num_of_bytes  = m_rx_buf_len;
+
+    DEBUG_EVT_SPI_MASTER_PHY_BUF_REQUEST(0);
+    m_callback_events_handler(evt);
+}
+
+/* Forget a buffer: zero its recorded length and drop the pointer. */
+static __INLINE void buffer_release(uint8_t * * const pp_buffer,
+                                    uint16_t * const  p_buf_len)
+{
+    *p_buf_len = 0;
+    *pp_buffer = NULL;
+}
+
+/* Size of the next SPI frame: the bytes still outstanding, clamped to the
+ * per-transfer MTU (SER_PHY_SPI_MTU_SIZE). */
+static uint16_t compute_current_packet_length(const uint16_t packet_length,
+                                              const uint16_t accumulated_packet_length)
+{
+    uint16_t remaining = packet_length - accumulated_packet_length;
+
+    return (remaining > SER_PHY_SPI_MTU_SIZE) ? SER_PHY_SPI_MTU_SIZE : remaining;
+}
+
+/* Encode 'length' into the 2-byte header via uint16_encode and clock it out
+ * (TX-only transfer). Returns the nrf_drv_spi_transfer error code. */
+static __INLINE uint32_t header_send(const uint16_t length)
+{
+    uint8_t buf_len_size = uint16_encode(length, m_header_buffer); // bytes written into the header
+
+    return nrf_drv_spi_transfer(&m_spi_master, m_header_buffer, buf_len_size, NULL, 0);
+}
+
+
+static __INLINE uint32_t frame_send()
+{
+ uint32_t err_code;
+
+ m_current_tx_packet_length = compute_current_packet_length(m_tx_packet_length,
+ m_accumulated_tx_packet_length);
+ err_code =
+ nrf_drv_spi_transfer(&m_spi_master,
+ &mp_tx_buffer[m_accumulated_tx_packet_length],
+ m_current_tx_packet_length,
+ NULL,
+ 0);
+ m_accumulated_tx_packet_length += m_current_tx_packet_length;
+ return err_code;
+}
+
+/* Clock in the slave's 2-byte length header (RX-only transfer). */
+static __INLINE uint32_t header_get()
+{
+    return nrf_drv_spi_transfer(&m_spi_master, NULL, 0, m_header_buffer, SER_PHY_HEADER_SIZE);
+}
+
+/* Clock in the next RX payload frame (RX-only transfer). When the upper layer
+ * supplied no buffer, the frame is received into the local trash buffer so the
+ * bus transaction can still complete. */
+static __INLINE uint32_t frame_get()
+{
+    uint8_t * p_dst;
+
+    m_current_rx_packet_length = compute_current_packet_length(m_rx_packet_length,
+                                                               m_accumulated_rx_packet_length);
+
+    p_dst = (mp_rx_buffer != NULL) ? &(mp_rx_buffer[m_accumulated_rx_packet_length])
+                                   : m_frame_buffer;
+
+    return nrf_drv_spi_transfer(&m_spi_master,
+                                NULL,
+                                0,
+                                p_dst,
+                                m_current_rx_packet_length);
+}
+
+
+/**
+ * \brief Master driver main state machine
+ * Executed only in the context of PendSV_Handler()
+ * For UML graph, please refer to SDK documentation
+*/
+/* Master PHY state machine. Runs only in SW-IRQ context (see SW_IRQ_Handler),
+ * so transitions are serialized. evt_src is one of ser_phy_event_source_t.
+ * Handshake convention visible here: a transfer is only started while the
+ * slave's RDY line is asserted; otherwise m_wait_for_ready_flag defers the
+ * action until the next SER_PHY_EVT_GPIO_RDY. */
+static void ser_phy_switch_state(ser_phy_event_source_t evt_src)
+{
+    uint32_t err_code = NRF_SUCCESS;
+    static bool m_wait_for_ready_flag = false; //local scheduling flag to defer RDY events
+
+    switch (m_spi_master_state)
+    {
+
+        case SER_PHY_STATE_IDLE:
+
+            // Slave-initiated exchange: answer its REQ with a zero-length header.
+            if (evt_src == SER_PHY_EVT_GPIO_REQ)
+            {
+                m_wait_for_ready_flag = false;
+
+                if (m_slave_ready_flag)
+                {
+                    m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
+                    err_code = header_send(0);
+                }
+                else
+                {
+                    m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
+                }
+            }
+            // Host-initiated exchange: start sending our own header.
+            else if (evt_src == SER_PHY_EVT_TX_API_CALL)
+            {
+                spi_master_raw_assert(mp_tx_buffer != NULL); //api event with tx_buffer == NULL has no sense
+                m_wait_for_ready_flag = false;
+
+                if (m_slave_ready_flag)
+                {
+                    m_spi_master_state = SER_PHY_STATE_TX_HEADER;
+                    err_code = header_send(m_tx_buf_len);
+                }
+                else
+                {
+                    m_spi_master_state = SER_PHY_STATE_TX_WAIT_FOR_RDY;
+                }
+            }
+            break;
+
+        case SER_PHY_STATE_TX_WAIT_FOR_RDY:
+
+            if (evt_src == SER_PHY_EVT_GPIO_RDY)
+            {
+                m_spi_master_state = SER_PHY_STATE_TX_HEADER;
+                err_code = header_send(m_tx_buf_len);
+            }
+            break;
+
+        case SER_PHY_STATE_RX_WAIT_FOR_RDY:
+
+            if (evt_src == SER_PHY_EVT_GPIO_RDY)
+            {
+                m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
+                err_code = header_send(0);
+
+            }
+            break;
+
+        case SER_PHY_STATE_TX_HEADER:
+
+            // Header clocked out; start streaming payload frames.
+            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+            {
+                m_tx_packet_length = m_tx_buf_len;
+                m_accumulated_tx_packet_length = 0;
+
+                if (m_slave_ready_flag)
+                {
+                    m_spi_master_state = SER_PHY_STATE_TX_PAYLOAD;
+                    err_code = frame_send();
+
+                }
+                else
+                {
+                    m_wait_for_ready_flag = true; // slave not re-armed yet; resume on next RDY
+                }
+            }
+            else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_wait_for_ready_flag)
+            {
+                m_wait_for_ready_flag = false;
+                m_spi_master_state = SER_PHY_STATE_TX_PAYLOAD;
+                err_code = frame_send();
+            }
+
+            break;
+
+        case SER_PHY_STATE_TX_PAYLOAD:
+
+            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+            {
+                if (m_accumulated_tx_packet_length < m_tx_packet_length)
+                {
+                    // More frames to send in this packet.
+                    if (m_slave_ready_flag)
+                    {
+                        err_code = frame_send();
+                    }
+                    else
+                    {
+                        m_wait_for_ready_flag = true;
+                    }
+                }
+                else
+                {
+                    // Whole packet out: release the buffer, notify, then decide what's next.
+                    spi_master_raw_assert(m_accumulated_tx_packet_length == m_tx_packet_length);
+                    buffer_release(&mp_tx_buffer, &m_tx_buf_len);
+                    callback_packet_sent();
+                    if ( m_slave_request_flag)
+                    {
+                        // Slave still has something for us: go straight to an RX exchange.
+                        if (m_slave_ready_flag)
+                        {
+                            m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
+                            err_code = header_send(0);
+                        }
+                        else
+                        {
+                            m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
+                        }
+                    }
+                    else
+                    {
+                        m_spi_master_state = SER_PHY_STATE_IDLE; //m_Tx_buffer is NULL - have to wait for API event
+                    }
+                }
+            }
+            else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_wait_for_ready_flag )
+            {
+                m_wait_for_ready_flag = false;
+                err_code = frame_send();
+            }
+
+            break;
+
+        case SER_PHY_STATE_TX_ZERO_HEADER:
+
+            // Zero header acknowledged the slave's REQ; now read its header.
+            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+            {
+                if (m_slave_ready_flag)
+                {
+                    m_spi_master_state = SER_PHY_STATE_RX_HEADER;
+                    err_code = header_get();
+                }
+                else
+                {
+                    m_wait_for_ready_flag = true;
+                }
+            }
+            else if ( (evt_src == SER_PHY_EVT_GPIO_RDY) && m_wait_for_ready_flag)
+            {
+                m_wait_for_ready_flag = false;
+                m_spi_master_state = SER_PHY_STATE_RX_HEADER;
+                err_code = header_get();
+            }
+            break;
+
+        case SER_PHY_STATE_RX_HEADER:
+
+            // Slave's length header arrived: ask the upper layer for memory.
+            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+            {
+                m_spi_master_state = SER_PHY_STATE_MEMORY_REQUEST;
+                m_rx_buf_len = uint16_decode(m_header_buffer);
+                m_rx_packet_length = m_rx_buf_len;
+                callback_mem_request();
+
+            }
+            break;
+
+        case SER_PHY_STATE_MEMORY_REQUEST:
+
+            // Upper layer answered via ser_phy_rx_buf_set (buffer may be NULL => trash).
+            if (evt_src == SER_PHY_EVT_RX_API_CALL)
+            {
+                m_accumulated_rx_packet_length = 0;
+
+                if (m_slave_ready_flag)
+                {
+                    m_spi_master_state = SER_PHY_STATE_RX_PAYLOAD;
+                    err_code = frame_get();
+                }
+                else
+                {
+                    m_wait_for_ready_flag = true;
+                }
+            }
+            else if ((evt_src == SER_PHY_EVT_GPIO_RDY) && m_wait_for_ready_flag)
+            {
+                m_wait_for_ready_flag = false;
+                m_spi_master_state = SER_PHY_STATE_RX_PAYLOAD;
+                err_code = frame_get();
+            }
+            break;
+
+        case SER_PHY_STATE_RX_PAYLOAD:
+
+            if (evt_src == SER_PHY_EVT_SPI_TRANSFER_DONE)
+            {
+                m_accumulated_rx_packet_length += m_current_rx_packet_length;
+
+                if (m_accumulated_rx_packet_length < m_rx_packet_length)
+                {
+                    // More frames expected for this packet.
+                    if (m_slave_ready_flag)
+                    {
+                        err_code = frame_get();
+                    }
+                    else
+                    {
+                        m_wait_for_ready_flag = true;
+                    }
+                }
+                else
+                {
+                    spi_master_raw_assert(m_accumulated_rx_packet_length == m_rx_packet_length);
+
+                    // Packet complete: report received or dropped (no buffer was given).
+                    if (mp_rx_buffer == NULL)
+                    {
+                        callback_packet_dropped();
+                    }
+                    else
+                    {
+                        callback_packet_received();
+                    }
+                    buffer_release(&mp_rx_buffer, &m_rx_buf_len);
+                    if (mp_tx_buffer != NULL) //mp_tx_buffer !=NULL, this means that API_EVT was scheduled
+                    {
+                        if (m_slave_ready_flag )
+                        {
+                            err_code = header_send(m_tx_buf_len);
+                            m_spi_master_state = SER_PHY_STATE_TX_HEADER;
+                        }
+                        else
+                        {
+                            m_spi_master_state = SER_PHY_STATE_TX_WAIT_FOR_RDY;
+                        }
+                    }
+                    else if (m_slave_request_flag)
+                    {
+                        // Slave has another packet queued: start the next RX exchange.
+                        if (m_slave_ready_flag)
+                        {
+                            m_spi_master_state = SER_PHY_STATE_TX_ZERO_HEADER;
+                            err_code = header_send(0);
+                        }
+                        else
+                        {
+                            m_spi_master_state = SER_PHY_STATE_RX_WAIT_FOR_RDY;
+                        }
+                    }
+                    else
+                    {
+                        m_spi_master_state = SER_PHY_STATE_IDLE;
+
+                    }
+                }
+
+            }
+            else if ( evt_src == SER_PHY_EVT_GPIO_RDY && m_wait_for_ready_flag)
+            {
+                m_wait_for_ready_flag = false;
+                err_code = frame_get();
+            }
+            break;
+
+        default:
+            break;
+    }
+
+    // NOTE(review): transfer-start errors are deliberately swallowed here;
+    // confirm against upper-layer recovery expectations.
+    if (err_code != NRF_SUCCESS)
+    {
+        (void)err_code;
+    }
+}
+
+/* SPI master driver callback: defer the DONE event into SW-IRQ context,
+ * where the state machine runs. Other event types are ignored. */
+static void ser_phy_spi_master_event_handler(nrf_drv_spi_evt_t const * p_event,
+                                             void *                    p_context)
+{
+    (void)p_context;
+
+    if (p_event->type == NRF_DRV_SPI_EVENT_DONE)
+    {
+        m_pend_xfer_flag = true;
+        SET_Pend_SW_IRQ();
+    }
+}
+
+/* Enable the software interrupt (SWI3 or PendSV) that runs the state machine. */
+static void ser_phy_init_PendSV(void)
+{
+    NVIC_SetPriority(SW_IRQn, APP_IRQ_PRIORITY_MID); // below GPIOTE/SPI so they can preempt and pend events
+    NVIC_EnableIRQ(SW_IRQn);
+}
+
+/* Configure GPIOTE toggle sensing on the slave REQUEST and READY lines and
+ * sample their initial (active-low) levels.
+ * Fix vs. original: if the READY-pin init fails, the REQUEST pin used to stay
+ * initialized/enabled, so a later ser_phy_open() retry would always get
+ * NRF_ERROR_INVALID_STATE from nrf_drv_gpiote_in_init. Roll it back instead. */
+static ret_code_t ser_phy_init_gpiote(void)
+{
+    if (!nrf_drv_gpiote_is_init())
+    {
+        (void)nrf_drv_gpiote_init(); // only fails when already initialized, which the guard excludes
+    }
+    NVIC_SetPriority(GPIOTE_IRQn, APP_IRQ_PRIORITY_HIGH);
+
+    nrf_drv_gpiote_in_config_t config = GPIOTE_CONFIG_IN_SENSE_TOGGLE(true);
+    /* Enable pullup to ensure high state while connectivity device is reset */
+    config.pull = NRF_GPIO_PIN_PULLUP;
+    ret_code_t err_code = nrf_drv_gpiote_in_init(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST,
+                                                 &config, ser_phy_spi_master_request);
+    if (err_code != NRF_SUCCESS)
+    {
+        return err_code;
+    }
+    nrf_drv_gpiote_in_event_enable(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST, true);
+
+    err_code = nrf_drv_gpiote_in_init(SER_PHY_SPI_MASTER_PIN_SLAVE_READY,
+                                      &config, ser_phy_spi_master_ready);
+    if (err_code != NRF_SUCCESS)
+    {
+        /* Roll back the REQUEST pin so a later retry can initialize it again. */
+        nrf_drv_gpiote_in_event_disable(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST);
+        nrf_drv_gpiote_in_uninit(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST);
+        return err_code;
+    }
+    nrf_drv_gpiote_in_event_enable(SER_PHY_SPI_MASTER_PIN_SLAVE_READY, true);
+
+    /* Sample current line levels; both lines are active low. */
+    m_slave_request_flag = !(nrf_gpio_pin_read(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST));
+    m_slave_ready_flag   = !(nrf_gpio_pin_read(SER_PHY_SPI_MASTER_PIN_SLAVE_READY));
+
+    NVIC_ClearPendingIRQ(SW_IRQn); // discard any event pended while sampling
+
+    return NRF_SUCCESS;
+}
+
+/* Release both handshake-line GPIOTE channels (counterpart of ser_phy_init_gpiote). */
+static void ser_phy_deinit_gpiote(void)
+{
+    nrf_drv_gpiote_in_uninit(SER_PHY_SPI_MASTER_PIN_SLAVE_REQUEST);
+    nrf_drv_gpiote_in_uninit(SER_PHY_SPI_MASTER_PIN_SLAVE_READY);
+}
+
+/* ser_phy API function */
+/* Queue a packet for transmission. The call only latches the buffer and pends
+ * the SW IRQ; the state machine later consumes it as SER_PHY_EVT_TX_API_CALL.
+ * The driver owns p_buffer until SER_PHY_EVT_TX_PKT_SENT is delivered.
+ * Returns NRF_ERROR_NULL / NRF_ERROR_INVALID_PARAM on bad arguments and
+ * NRF_ERROR_BUSY while a previous TX packet is still pending. */
+uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
+{
+    if (p_buffer == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    if (num_of_bytes == 0)
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+
+    if (mp_tx_buffer != NULL)
+    {
+        return NRF_ERROR_BUSY;
+    }
+
+    // Latch buffer + flag atomically so the SW IRQ sees a consistent pair.
+    //ser_phy_interrupts_disable();
+    CRITICAL_REGION_ENTER();
+    mp_tx_buffer = (uint8_t *)p_buffer;
+    m_tx_buf_len = num_of_bytes;
+    m_pend_tx_api_flag = true;
+    SET_Pend_SW_IRQ();
+    //ser_phy_interrupts_enable();
+    CRITICAL_REGION_EXIT();
+
+    return NRF_SUCCESS;
+}
+/* ser_phy API function */
+/* Answer a SER_PHY_EVT_RX_BUF_REQUEST: provide the RX buffer (or NULL to have
+ * the incoming packet received into scratch and reported as dropped). Valid
+ * only while the FSM is waiting in SER_PHY_STATE_MEMORY_REQUEST. */
+uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
+{
+    if (m_spi_master_state != SER_PHY_STATE_MEMORY_REQUEST)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+
+    // Latch buffer + flag atomically for the SW IRQ.
+    //ser_phy_interrupts_disable();
+    CRITICAL_REGION_ENTER();
+    mp_rx_buffer = p_buffer;
+    m_pend_rx_api_flag = true;
+    SET_Pend_SW_IRQ();
+    //ser_phy_interrupts_enable();
+    CRITICAL_REGION_EXIT();
+
+    return NRF_SUCCESS;
+}
+
+/* ser_phy API function */
+/* Open the master PHY: bring up the SPI master driver, the handshake-line
+ * GPIOTE channels, and the software IRQ, then enter SER_PHY_STATE_IDLE.
+ * Fix vs. original: a failure in nrf_drv_spi_init or ser_phy_init_gpiote used
+ * to leave the driver half-open (state already IDLE, callback set, SPI
+ * possibly initialized), so every later ser_phy_open() returned
+ * NRF_ERROR_INVALID_STATE. All error paths now roll back to DISABLED. */
+uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
+{
+    if (m_spi_master_state != SER_PHY_STATE_DISABLED)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+
+    if (events_handler == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    uint32_t err_code = NRF_SUCCESS;
+
+    m_spi_master_state        = SER_PHY_STATE_IDLE;
+    m_callback_events_handler = events_handler;
+    nrf_drv_spi_config_t spi_master_config = {
+        .sck_pin      = SER_PHY_SPI_MASTER_PIN_SCK,
+        .mosi_pin     = SER_PHY_SPI_MASTER_PIN_MOSI,
+        .miso_pin     = SER_PHY_SPI_MASTER_PIN_MISO,
+        .ss_pin       = SER_PHY_SPI_MASTER_PIN_SLAVE_SELECT,
+        .irq_priority = APP_IRQ_PRIORITY_MID,
+        .orc          = 0,
+        .frequency    = SER_PHY_SPI_FREQUENCY,
+        .mode         = NRF_DRV_SPI_MODE_0,
+        .bit_order    = NRF_DRV_SPI_BIT_ORDER_LSB_FIRST,
+    };
+    err_code = nrf_drv_spi_init(&m_spi_master,
+                                &spi_master_config,
+                                ser_phy_spi_master_event_handler,
+                                NULL);
+    if (err_code != NRF_SUCCESS)
+    {
+        goto error;
+    }
+
+    err_code = ser_phy_init_gpiote();
+    if (err_code != NRF_SUCCESS)
+    {
+        nrf_drv_spi_uninit(&m_spi_master);
+        goto error;
+    }
+    ser_phy_init_PendSV();
+    return NRF_SUCCESS;
+
+error:
+    /* Leave the driver fully closed so a later ser_phy_open() can succeed. */
+    m_callback_events_handler = NULL;
+    m_spi_master_state        = SER_PHY_STATE_DISABLED;
+    return err_code;
+}
+
+/* ser_phy API function */
+/* Close the master PHY: disable the state machine, drop the callback and any
+ * owned buffers, clear all progress counters, and release GPIOTE and the SPI
+ * master driver. The DISABLED state is set first so late events are ignored. */
+void ser_phy_close(void)
+{
+    m_spi_master_state = SER_PHY_STATE_DISABLED;
+
+    m_callback_events_handler = NULL;
+
+    buffer_release(&mp_tx_buffer, &m_tx_buf_len);
+    buffer_release(&mp_rx_buffer, &m_rx_buf_len);
+
+    m_tx_packet_length             = 0;
+    m_accumulated_tx_packet_length = 0;
+    m_current_tx_packet_length     = 0;
+
+    m_rx_packet_length             = 0;
+    m_accumulated_rx_packet_length = 0;
+    m_current_rx_packet_length     = 0;
+
+    ser_phy_deinit_gpiote();
+    nrf_drv_spi_uninit(&m_spi_master);
+}
+
+/* ser_phy API function */
+/* only PendSV may interact with ser_phy layer, other interrupts are internal */
+/* Re-enable upper-layer callbacks by unmasking the SW IRQ. */
+void ser_phy_interrupts_enable(void)
+{
+    NVIC_EnableIRQ(SW_IRQn);
+}
+
+/* ser_phy API function */
+/* Mask the SW IRQ, deferring state-machine execution and callbacks. */
+void ser_phy_interrupts_disable(void)
+{
+    NVIC_DisableIRQ(SW_IRQn);
+}
+
+/** @} */
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_slave.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_slave.c
new file mode 100644
index 0000000..cde0250
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_spi_slave.c
@@ -0,0 +1,613 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/**@file
+ *
+ * @defgroup ser_phy_spi_phy_driver_slave ser_phy_nrf51_spi_slave.c
+ * @{
+ * @ingroup ser_phy_spi_phy_driver_slave
+ *
+ * @brief SPI_RAW PHY slave driver.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include "app_error.h"
+#include "app_util.h"
+#include "boards.h"
+#include "nrf_gpio.h"
+#include "nrf_drv_gpiote.h"
+#include "nrf_soc.h"
+#include "nrf_drv_spis.h"
+#include "ser_config.h"
+#include "ser_phy.h"
+#include "ser_phy_config_conn.h"
+#include "ser_phy_debug_conn.h"
+
+#define SER_PHY_SPI_DEF_CHARACTER 0xFF //SPI default character. Character clocked out in case of an ignored transaction
+#define SER_PHY_SPI_ORC_CHARACTER 0xFF //SPI over-read character. Character clocked out after an over-read of the transmit buffer
+
+// SPIS driver instance used by this PHY (index comes from project config).
+static nrf_drv_spis_t m_spis = NRF_DRV_SPIS_INSTANCE(SER_PHY_SPI_SLAVE_INSTANCE);
+
+// Register block used by the raw PPI hookup below.
+// NOTE(review): this picks SPIS0 whenever it exists, independently of
+// SER_PHY_SPI_SLAVE_INSTANCE — verify they agree on chips with both SPIS0/1.
+#ifdef NRF_SPIS0
+#define SPI_SLAVE_REG NRF_SPIS0
+#else
+#define SPI_SLAVE_REG NRF_SPIS1
+#endif
+
+//SPI raw peripheral device configuration data
+typedef struct
+{
+    int32_t pin_req;       //SPI /REQ pin. -1 for not using
+    int32_t pin_rdy;       //SPI /RDY pin. -1 for not using
+    int32_t ppi_rdy_ch;    //PPI channel that clears /RDY on SPIS transfer end
+    int32_t gpiote_rdy_ch; //GPIOTE channel driving the /RDY pin
+} spi_slave_raw_trasp_cfg_t;
+
+/**@brief States of the SPI transaction state machine. */
+typedef enum
+{
+    SPI_RAW_STATE_UNKNOWN,       // driver closed / not initialized
+    SPI_RAW_STATE_SETUP_HEADER,  // transient: arm the first header reception
+    SPI_RAW_STATE_RX_HEADER,     // waiting for the master's 2-byte length header
+    SPI_RAW_STATE_MEM_REQUESTED, // asked the upper layer for an RX buffer
+    SPI_RAW_STATE_RX_PAYLOAD,    // receiving payload frames
+    SPI_RAW_STATE_TX_HEADER,     // sending our length header (reply to a zero header)
+    SPI_RAW_STATE_TX_PAYLOAD,    // sending payload frames
+} trans_state_t;
+
+#define _static static // single point to strip 'static' from module state for debugging
+
+static spi_slave_raw_trasp_cfg_t m_spi_slave_raw_config; // REQ/RDY pin + channel assignment
+
+// RX progress bookkeeping (bytes done / whole packet / current frame size).
+_static uint16_t m_accumulated_rx_packet_length;
+_static uint16_t m_rx_packet_length;
+_static uint16_t m_current_rx_frame_length;
+
+// TX progress bookkeeping.
+_static uint16_t m_accumulated_tx_packet_length;
+_static uint16_t m_tx_packet_length;
+_static uint16_t m_current_tx_frame_length;
+
+_static uint8_t m_header_rx_buffer[SER_PHY_HEADER_SIZE]; // incoming 2-byte length header
+_static uint8_t m_header_tx_buffer[SER_PHY_HEADER_SIZE]; // outgoing 2-byte length header
+
+_static uint8_t m_frame_buffer[SER_PHY_SPI_MTU_SIZE];        //trash storage
+_static uint8_t m_zero_buffer[SER_PHY_SPI_MTU_SIZE] = { 0 }; //ROM'able declaration
+
+_static uint8_t * volatile m_p_rx_buffer = NULL;       // upper-layer RX buffer (NULL => trash payload)
+_static const uint8_t * volatile m_p_tx_buffer = NULL; // upper-layer TX packet awaiting transmission
+
+_static bool m_trash_payload_flag;   // true while the current RX payload goes to m_frame_buffer
+_static bool m_buffer_reqested_flag; // true while an RX-buffer request to the upper layer is outstanding
+
+_static trans_state_t m_trans_state = SPI_RAW_STATE_UNKNOWN; // current FSM state
+_static ser_phy_events_handler_t m_ser_phy_callback = NULL;  // upper-layer callback
+
+/* Hard assertion for internal invariants: routes through APP_ERROR_CHECK_BOOL. */
+static void spi_slave_raw_assert(bool cond)
+{
+    APP_ERROR_CHECK_BOOL(cond);
+}
+
+/* Forward an event to the upper layer, if a handler is registered. */
+static void callback_ser_phy_event(ser_phy_evt_t event)
+{
+    if (m_ser_phy_callback == NULL)
+    {
+        return;
+    }
+    m_ser_phy_callback(event);
+}
+
+/* Ask the upper layer for an RX buffer of 'size' bytes
+ * (answered via ser_phy_rx_buf_set). */
+static void callback_memory_request(uint16_t size)
+{
+    ser_phy_evt_t evt;
+
+    evt.evt_type                               = SER_PHY_EVT_RX_BUF_REQUEST;
+    evt.evt_params.rx_buf_request.num_of_bytes = size;
+
+    DEBUG_EVT_SPI_SLAVE_PHY_BUF_REQUEST(0);
+    callback_ser_phy_event(evt);
+}
+
+/* Hand a completed RX packet (buffer + length) to the upper layer. */
+static void callback_packet_received(uint8_t * pBuffer, uint16_t size)
+{
+    ser_phy_evt_t evt;
+
+    evt.evt_type                                = SER_PHY_EVT_RX_PKT_RECEIVED;
+    evt.evt_params.rx_pkt_received.p_buffer     = pBuffer;
+    evt.evt_params.rx_pkt_received.num_of_bytes = size;
+
+    DEBUG_EVT_SPI_SLAVE_PHY_PKT_RECEIVED(0);
+    callback_ser_phy_event(evt);
+}
+
+/* Report that an incoming packet was received into scratch and discarded. */
+static void callback_packet_dropped()
+{
+    ser_phy_evt_t evt;
+
+    evt.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
+
+    DEBUG_EVT_SPI_SLAVE_PHY_PKT_DROPPED(0);
+    callback_ser_phy_event(evt);
+}
+
+/* Report that the queued TX packet has been fully clocked out. */
+static void callback_packet_transmitted(void)
+{
+    ser_phy_evt_t evt;
+
+    evt.evt_type = SER_PHY_EVT_TX_PKT_SENT;
+
+    DEBUG_EVT_SPI_SLAVE_PHY_PKT_SENT(0);
+    callback_ser_phy_event(evt);
+}
+
+/* Size of the next SPIS frame: the bytes still outstanding, clamped to the
+ * per-transfer MTU (SER_PHY_SPI_MTU_SIZE). */
+static uint16_t compute_current_frame_length(const uint16_t packet_length,
+                                             const uint16_t accumulated_packet_length)
+{
+    uint16_t remaining = packet_length - accumulated_packet_length;
+
+    return (remaining > SER_PHY_SPI_MTU_SIZE) ? SER_PHY_SPI_MTU_SIZE : remaining;
+}
+
+/* Arm SPIS to receive the master's 2-byte length header; zeros are clocked
+ * out on MISO meanwhile. */
+static uint32_t header_get()
+{
+    uint32_t err_code;
+
+    err_code = nrf_drv_spis_buffers_set(&m_spis,
+                                        (uint8_t *) m_zero_buffer,
+                                        SER_PHY_HEADER_SIZE,
+                                        m_header_rx_buffer,
+                                        SER_PHY_HEADER_SIZE);
+    return err_code;
+}
+
+/* Arm SPIS for the next RX payload frame; if the upper layer gave no buffer
+ * (m_trash_payload_flag), receive into the local trash buffer instead.
+ * Zeros are clocked out on MISO meanwhile. */
+static uint32_t frame_get()
+{
+    uint8_t * p_dst;
+
+    m_current_rx_frame_length = compute_current_frame_length(m_rx_packet_length,
+                                                             m_accumulated_rx_packet_length);
+
+    p_dst = m_trash_payload_flag ? m_frame_buffer
+                                 : &(m_p_rx_buffer[m_accumulated_rx_packet_length]);
+
+    return nrf_drv_spis_buffers_set(&m_spis,
+                                    (uint8_t *) m_zero_buffer,
+                                    m_current_rx_frame_length,
+                                    p_dst,
+                                    m_current_rx_frame_length);
+}
+
+/* Encode 'len' into the TX header and arm SPIS to clock it out; whatever the
+ * master sends meanwhile lands in m_header_rx_buffer (same size). */
+static uint32_t header_send(uint16_t len)
+{
+    uint32_t err_code;
+
+    (void) uint16_encode(len, m_header_tx_buffer);
+    err_code =
+        nrf_drv_spis_buffers_set(&m_spis,
+                                 m_header_tx_buffer,
+                                 sizeof (m_header_tx_buffer),
+                                 m_header_rx_buffer,
+                                 sizeof (m_header_tx_buffer)); // both headers share SER_PHY_HEADER_SIZE
+    return err_code;
+}
+
+/* Arm SPIS to clock out the next TX payload frame; concurrent master data is
+ * received into the trash buffer. */
+static uint32_t frame_send()
+{
+    uint32_t err_code;
+
+    m_current_tx_frame_length = compute_current_frame_length(m_tx_packet_length,
+                                                             m_accumulated_tx_packet_length);
+    err_code =
+        nrf_drv_spis_buffers_set(&m_spis,
+                                 (uint8_t *) &(m_p_tx_buffer[m_accumulated_tx_packet_length]),
+                                 m_current_tx_frame_length,
+                                 m_frame_buffer,
+                                 m_current_tx_frame_length);
+    return err_code;
+}
+
+/* Signal "buffers armed" to the master on the /RDY line by firing the pin's
+ * GPIOTE OUT task directly (the PPI channel set up in spi_slave_ppi_init
+ * fires the same task again on SPIS EVENTS_END, returning the line). */
+static void set_ready_line(void)
+{
+    //toggle - this should go high - but toggle is unsafe
+    uint32_t rdy_task = nrf_drv_gpiote_out_task_addr_get(m_spi_slave_raw_config.pin_rdy);
+    *(uint32_t *)rdy_task = 1; // writing 1 to the TASKS_OUT register triggers the toggle
+    return;
+}
+
+/* Assert /REQ towards the master to announce a pending TX packet. */
+static void set_request_line(void)
+{
+    //active low logic - set is 0
+    nrf_gpio_pin_clear(m_spi_slave_raw_config.pin_req);
+    DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET(0);
+}
+
+/* Deassert /REQ once the master has started servicing our TX packet.
+ * Fix vs. original: the clear path logged DEBUG_EVT_SPI_SLAVE_RAW_REQ_SET
+ * (copy-paste from set_request_line), making REQ set/clear indistinguishable
+ * in debug traces; log the CLEARED event instead. */
+static void clear_request_line(void)
+{
+    //active low logic - clear is 1
+    nrf_gpio_pin_set(m_spi_slave_raw_config.pin_req);
+    DEBUG_EVT_SPI_SLAVE_RAW_REQ_CLEARED(0);
+}
+
+/**
+ * \brief Slave driver main state machine
+ * For UML graph, please refer to SDK documentation
+*/
+/* Slave PHY state machine, driven by SPIS driver events plus two synthesized
+ * NRF_DRV_SPIS_EVT_TYPE_MAX "dummy" events (from ser_phy_open and
+ * ser_phy_rx_buf_set). BUFFERS_SET_DONE means SPIS is armed, at which point
+ * /RDY is raised via set_ready_line(); XFER_DONE advances the transfer. */
+static void spi_slave_event_handle(nrf_drv_spis_event_t event)
+{
+    uint32_t err_code = NRF_SUCCESS;
+    static uint16_t packetLength; // length decoded from the last RX header
+
+    switch (m_trans_state)
+    {
+        case SPI_RAW_STATE_SETUP_HEADER:
+            // Dummy event from ser_phy_open: arm the very first header reception.
+            m_trans_state = SPI_RAW_STATE_RX_HEADER;
+            err_code = header_get();
+            break;
+
+        case SPI_RAW_STATE_RX_HEADER:
+
+            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
+            {
+                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
+                set_ready_line(); // buffers armed: tell the master it may clock
+            }
+
+            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
+            {
+                DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(event.rx_amount);
+                spi_slave_raw_assert(event.rx_amount == SER_PHY_HEADER_SIZE);
+                packetLength = uint16_decode(m_header_rx_buffer);
+
+                if (packetLength != 0 )
+                {
+                    // Non-zero header: master is sending a packet of this length.
+                    m_trans_state = SPI_RAW_STATE_MEM_REQUESTED;
+                    m_buffer_reqested_flag = true;
+                    m_rx_packet_length = packetLength;
+                    callback_memory_request(packetLength);
+                }
+                else
+                {
+                    // Zero header: master is polling for our pending TX packet.
+                    if (m_p_tx_buffer)
+                    {
+                        clear_request_line();
+                        m_trans_state = SPI_RAW_STATE_TX_HEADER;
+                        err_code = header_send(m_tx_packet_length);
+                    }
+                    else
+                    {
+                        //there is nothing to send - zero response facilitates pooling - but perhaps, it should be assert
+                        err_code = header_send(0);
+                    }
+                }
+            }
+
+            break;
+
+        case SPI_RAW_STATE_MEM_REQUESTED:
+
+            if (event.evt_type == NRF_DRV_SPIS_EVT_TYPE_MAX) //This is API dummy event
+            {
+                // Upper layer answered via ser_phy_rx_buf_set; start receiving payload.
+                m_buffer_reqested_flag = false;
+                m_trans_state = SPI_RAW_STATE_RX_PAYLOAD;
+                m_accumulated_rx_packet_length = 0;
+                err_code = frame_get();
+            }
+            break;
+
+        case SPI_RAW_STATE_RX_PAYLOAD:
+
+            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
+            {
+                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
+                set_ready_line();
+            }
+
+            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
+            {
+                DEBUG_EVT_SPI_SLAVE_RAW_RX_XFER_DONE(event.rx_amount);
+                spi_slave_raw_assert(event.rx_amount == m_current_rx_frame_length);
+                m_accumulated_rx_packet_length += m_current_rx_frame_length;
+
+                if (m_accumulated_rx_packet_length < m_rx_packet_length )
+                {
+                    err_code = frame_get(); // more frames expected
+                }
+                else
+                {
+                    spi_slave_raw_assert(m_accumulated_rx_packet_length == m_rx_packet_length);
+                    // Re-arm for the next header before notifying the upper layer.
+                    m_trans_state = SPI_RAW_STATE_RX_HEADER;
+                    err_code = header_get();
+
+                    if (!m_trash_payload_flag)
+                    {
+                        callback_packet_received(m_p_rx_buffer, m_accumulated_rx_packet_length);
+                    }
+                    else
+                    {
+                        callback_packet_dropped();
+                    }
+                }
+            }
+            break;
+
+        case SPI_RAW_STATE_TX_HEADER:
+
+            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
+            {
+                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
+                set_ready_line();
+            }
+
+            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
+            {
+                DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(event.tx_amount);
+                spi_slave_raw_assert(event.tx_amount == SER_PHY_HEADER_SIZE);
+                m_trans_state = SPI_RAW_STATE_TX_PAYLOAD;
+                m_accumulated_tx_packet_length = 0;
+                err_code = frame_send();
+            }
+
+            break;
+
+        case SPI_RAW_STATE_TX_PAYLOAD:
+
+            if (event.evt_type == NRF_DRV_SPIS_BUFFERS_SET_DONE)
+            {
+                DEBUG_EVT_SPI_SLAVE_RAW_BUFFERS_SET(0);
+                set_ready_line();
+            }
+
+            if (event.evt_type == NRF_DRV_SPIS_XFER_DONE)
+            {
+                DEBUG_EVT_SPI_SLAVE_RAW_TX_XFER_DONE(event.tx_amount);
+                spi_slave_raw_assert(event.tx_amount == m_current_tx_frame_length);
+                m_accumulated_tx_packet_length += m_current_tx_frame_length;
+
+                if ( m_accumulated_tx_packet_length < m_tx_packet_length )
+                {
+                    err_code = frame_send(); // more frames to send
+                }
+                else
+                {
+                    spi_slave_raw_assert(m_accumulated_tx_packet_length == m_tx_packet_length);
+                    //clear pointer before callback
+                    m_p_tx_buffer = NULL;
+                    callback_packet_transmitted();
+                    //spi slave TX transfer is possible only when RX is ready, so return to waiting for a header
+                    m_trans_state = SPI_RAW_STATE_RX_HEADER;
+                    err_code = header_get();
+                }
+            }
+            break;
+
+        default:
+            err_code = NRF_ERROR_INVALID_STATE;
+            break;
+    }
+    APP_ERROR_CHECK(err_code);
+}
+
+/* Put the /RDY pin under GPIOTE control with a toggle OUT task, so it can be
+ * driven both from software (set_ready_line) and from PPI (spi_slave_ppi_init). */
+static void spi_slave_gpiote_init(void)
+{
+    if (!nrf_drv_gpiote_is_init())
+    {
+        (void)nrf_drv_gpiote_init(); // only fails when already initialized, which the guard excludes
+    }
+    nrf_drv_gpiote_out_config_t config = GPIOTE_CONFIG_OUT_TASK_TOGGLE(true);
+    (void) nrf_drv_gpiote_out_init(m_spi_slave_raw_config.pin_rdy, &config);
+    (void) nrf_drv_gpiote_out_task_enable(m_spi_slave_raw_config.pin_rdy);
+    return;
+}
+
+/* Wire the SPIS transfer-end event to the /RDY GPIOTE toggle task through a PPI
+ * channel, so /RDY is deasserted by hardware the moment a transfer completes. */
+static void spi_slave_ppi_init(void)
+{
+    uint32_t rdy_task = nrf_drv_gpiote_out_task_addr_get(m_spi_slave_raw_config.pin_rdy);
+    //Configure PPI channel to clear /RDY line
+    NRF_PPI->CH[m_spi_slave_raw_config.ppi_rdy_ch].EEP = (uint32_t)(&SPI_SLAVE_REG->EVENTS_END);
+    NRF_PPI->CH[m_spi_slave_raw_config.ppi_rdy_ch].TEP = rdy_task;
+
+    //this works only for channels 0..15 - but soft device is using 8-15 anyway
+    NRF_PPI->CHEN |= (1 << m_spi_slave_raw_config.ppi_rdy_ch);
+    return;
+}
+
+/* Drive both handshake lines (/REQ and /RDY) high (inactive) and make them
+ * outputs before the SPIS peripheral and PPI/GPIOTE are configured. */
+static void spi_slave_gpio_init(void)
+{
+    nrf_gpio_pin_set(m_spi_slave_raw_config.pin_req);
+    nrf_gpio_cfg_output(m_spi_slave_raw_config.pin_req);
+    nrf_gpio_pin_set(m_spi_slave_raw_config.pin_rdy);
+    nrf_gpio_cfg_output(m_spi_slave_raw_config.pin_rdy);
+}
+
+/* ser_phy API function */
+/* ser_phy API function */
+/* Enable the SPIS peripheral IRQ via the SoftDevice NVIC wrapper. */
+void ser_phy_interrupts_enable(void)
+{
+    (void)sd_nvic_EnableIRQ(nrfx_get_irq_number(m_spis.p_reg));
+}
+
+/* ser_phy API function */
+/* ser_phy API function */
+/* Disable the SPIS peripheral IRQ via the SoftDevice NVIC wrapper. */
+void ser_phy_interrupts_disable(void)
+{
+    (void)sd_nvic_DisableIRQ(nrfx_get_irq_number(m_spis.p_reg));
+}
+
+/* ser_phy API function */
+/* ser_phy API function */
+/* Supply the RX buffer requested earlier through the BUF_REQUEST callback.
+ * p_buffer == NULL means the upper layer has no memory: the incoming payload
+ * will be received into a trash buffer and reported as dropped.
+ * Returns NRF_SUCCESS, or NRF_ERROR_BUSY when no buffer request is pending
+ * (state is not SPI_RAW_STATE_MEM_REQUESTED). */
+uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
+{
+    uint32_t             status = NRF_SUCCESS;
+    nrf_drv_spis_event_t event;
+
+    // Interrupts are disabled so the state machine cannot run concurrently
+    // while we inspect and advance it from thread context.
+    ser_phy_interrupts_disable();
+
+    if (m_buffer_reqested_flag && (m_trans_state == SPI_RAW_STATE_MEM_REQUESTED))
+    {
+        m_p_rx_buffer = p_buffer;
+
+        if (m_p_rx_buffer)
+        {
+            m_trash_payload_flag = false;
+        }
+        else
+        {
+            m_trash_payload_flag = true;
+        }
+        event.evt_type  = NRF_DRV_SPIS_EVT_TYPE_MAX; //force transition with dummy event
+        event.rx_amount = 0;
+        event.tx_amount = 0;
+        spi_slave_event_handle(event);
+    }
+    else
+    {
+        status = NRF_ERROR_BUSY;
+    }
+    ser_phy_interrupts_enable();
+
+    return status;
+}
+
+/* ser_phy API function */
+/* ser_phy API function */
+/* Queue a packet for transmission and assert the /REQ line; the actual
+ * transfer happens later in the interrupt-driven state machine.
+ * Returns NRF_ERROR_NULL for invalid arguments, NRF_ERROR_BUSY when a
+ * previous TX is still pending, NRF_SUCCESS otherwise.
+ * NOTE(review): num_of_bytes == 0 also yields NRF_ERROR_NULL here, whereas
+ * the UART ser_phy returns NRF_ERROR_INVALID_PARAM for that case — confirm
+ * this inconsistency is intended. */
+uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
+{
+    uint32_t status = NRF_SUCCESS;
+
+    if ( p_buffer == NULL || num_of_bytes == 0)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    // Guard m_p_tx_buffer/m_tx_packet_length against the IRQ state machine.
+    ser_phy_interrupts_disable();
+
+    if ( m_p_tx_buffer == NULL)
+    {
+        m_tx_packet_length = num_of_bytes;
+        m_p_tx_buffer      = p_buffer;
+        set_request_line(); // signal the master that we have data to send
+    }
+    else
+    {
+        status = NRF_ERROR_BUSY; // previous packet not yet transmitted
+    }
+    ser_phy_interrupts_enable();
+
+    return status;
+}
+
+/* ser_phy API function */
+/* ser_phy API function */
+/* Initialize the SPI-slave PHY: handshake GPIOs, GPIOTE+PPI for the /RDY
+ * line, and the SPIS peripheral; then kick the state machine into waiting
+ * for a packet header. events_handler receives all subsequent PHY events.
+ * Returns NRF_ERROR_INVALID_STATE if already open, NRF_ERROR_NULL for a
+ * NULL handler, otherwise the nrf_drv_spis_init() result. */
+uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
+{
+    uint32_t              err_code;
+    nrf_drv_spis_config_t spi_slave_config;
+    nrf_drv_spis_event_t  event;
+
+    if (m_trans_state != SPI_RAW_STATE_UNKNOWN)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+
+    if (events_handler == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    //one ppi channel and one gpiote channel are used to drive RDY line
+    m_spi_slave_raw_config.pin_req       = SER_PHY_SPI_SLAVE_REQ_PIN;
+    m_spi_slave_raw_config.pin_rdy       = SER_PHY_SPI_SLAVE_RDY_PIN;
+    m_spi_slave_raw_config.ppi_rdy_ch    = SER_PHY_SPI_PPI_RDY_CH;
+    m_spi_slave_raw_config.gpiote_rdy_ch = SER_PHY_SPI_GPIOTE_RDY_CH;
+
+    spi_slave_gpio_init();
+    spi_slave_gpiote_init();
+    spi_slave_ppi_init();
+
+    spi_slave_config.miso_pin     = SER_PHY_SPI_SLAVE_MISO_PIN;
+    spi_slave_config.mosi_pin     = SER_PHY_SPI_SLAVE_MOSI_PIN;
+    spi_slave_config.sck_pin      = SER_PHY_SPI_SLAVE_SCK_PIN;
+    spi_slave_config.csn_pin      = SER_PHY_SPI_SLAVE_SS_PIN;
+    spi_slave_config.mode         = NRF_DRV_SPIS_MODE_0;
+    spi_slave_config.bit_order    = NRF_DRV_SPIS_BIT_ORDER_LSB_FIRST;
+    spi_slave_config.def          = SER_PHY_SPI_DEF_CHARACTER;
+    spi_slave_config.orc          = SER_PHY_SPI_ORC_CHARACTER;
+    spi_slave_config.irq_priority = APP_IRQ_PRIORITY_LOWEST;
+    spi_slave_config.miso_drive   = NRF_DRV_SPIS_DEFAULT_MISO_DRIVE;
+    //use /CS pullup because state of the line might be undefined when master redefines PIO lines
+    spi_slave_config.csn_pullup   = NRF_GPIO_PIN_PULLUP;
+
+    //keep /CS high when init
+    nrf_gpio_cfg_input(spi_slave_config.csn_pin, NRF_GPIO_PIN_PULLUP);
+
+    err_code = nrf_drv_spis_init(&m_spis, &spi_slave_config, spi_slave_event_handle);
+    APP_ERROR_CHECK(err_code);
+
+    if (err_code == NRF_SUCCESS)
+    {
+        m_ser_phy_callback = events_handler;
+
+        // Arm the state machine: the dummy event drives the SETUP_HEADER
+        // transition so the first header transfer gets set up immediately.
+        m_trans_state   = SPI_RAW_STATE_SETUP_HEADER;
+        event.evt_type  = NRF_DRV_SPIS_EVT_TYPE_MAX; //force transition with dummy event
+        event.rx_amount = 0;
+        event.tx_amount = 0;
+        spi_slave_event_handle(event);
+
+    }
+    return err_code;
+}
+
+/* ser_phy API function */
+/* ser_phy API function */
+/* Tear down the SPIS peripheral and reset the PHY state machine.
+ * NOTE(review): GPIOTE/PPI resources claimed in ser_phy_open() are not
+ * released here — confirm whether that is intentional. */
+void ser_phy_close(void)
+{
+    nrf_drv_spis_uninit(&m_spis);
+    m_ser_phy_callback = NULL;
+    m_trans_state      = SPI_RAW_STATE_UNKNOWN;
+}
diff --git a/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_uart.c b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_uart.c
new file mode 100644
index 0000000..d690ac7
--- /dev/null
+++ b/thirdparty/nRF5_SDK_15.0.0_a53641a/components/serialization/common/transport/ser_phy/ser_phy_uart.c
@@ -0,0 +1,357 @@
+/**
+ * Copyright (c) 2014 - 2018, Nordic Semiconductor ASA
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form, except as embedded into a Nordic
+ * Semiconductor ASA integrated circuit in a product or a software update for
+ * such product, must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * 4. This software, with or without modification, must only be used with a
+ * Nordic Semiconductor ASA integrated circuit.
+ *
+ * 5. Any software provided in binary form under this license must not be reverse
+ * engineered, decompiled, modified and/or disassembled.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include "ser_phy.h"
+#include "ser_config.h"
+#ifdef SER_CONNECTIVITY
+ #include "ser_phy_config_conn.h"
+#else
+ #include "ser_phy_config_app.h"
+#endif
+#include "nrf_drv_uart.h"
+#include "app_error.h"
+#include "app_util.h"
+#include "app_util_platform.h"
+
+// Largest single nrf_drv_uart transfer; longer packets are sent/received in chunks.
+#define UART_TRANSFER_MAX 255
+
+// Pick the IRQ number matching whichever UART(E) peripheral this chip has.
+#if defined(UARTE_PRESENT) && !defined(UART_PRESENT)
+#define SER_UART_IRQ UARTE0_IRQn
+#else
+#define SER_UART_IRQ UART0_IRQn
+#endif
+
+static const nrf_drv_uart_t m_uart = NRF_DRV_UART_INSTANCE(0);
+static const nrf_drv_uart_config_t m_uart_config = {
+    .pseltxd            = SER_PHY_UART_TX,
+    .pselrxd            = SER_PHY_UART_RX,
+    .pselrts            = SER_PHY_UART_RTS,
+    .pselcts            = SER_PHY_UART_CTS,
+    .p_context          = NULL,
+    .interrupt_priority = UART_IRQ_PRIORITY,
+#if defined(UARTE_PRESENT) && defined(UART_PRESENT)
+    .use_easy_dma       = true,
+#endif
+    // These values are common for application and connectivity, they are
+    // defined in "ser_config.h".
+    .hwfc               = SER_PHY_UART_FLOW_CTRL,
+    .parity             = SER_PHY_UART_PARITY,
+    .baudrate           = (nrf_uart_baudrate_t)SER_PHY_UART_BAUDRATE
+};
+
+// TX state: header buffer, remaining byte count, and the caller's payload pointer.
+static bool volatile          m_tx_in_progress;
+static uint8_t                m_tx_header_buf[SER_PHY_HEADER_SIZE];
+static uint16_t               m_bytes_to_transmit;
+static uint8_t const *        mp_tx_buffer;
+
+// RX state: header buffer, remaining byte count, and a 1-byte sink used to
+// drain packets the upper layer provided no buffer for.
+static uint8_t                m_rx_header_buf[SER_PHY_HEADER_SIZE];
+static uint16_t               m_bytes_to_receive;
+static uint8_t                m_rx_drop_buf[1];
+
+static ser_phy_events_handler_t m_ser_phy_event_handler;
+static ser_phy_evt_t            m_ser_phy_rx_event;
+
+
+/* Notify the upper layer that the queued TX packet has been fully sent. */
+static void packet_sent_callback(void)
+{
+    static ser_phy_evt_t const event = {
+        .evt_type = SER_PHY_EVT_TX_PKT_SENT,
+    };
+    m_ser_phy_event_handler(event);
+}
+
+/* Ask the upper layer for an RX buffer of num_of_bytes; the answer comes
+ * back later through ser_phy_rx_buf_set(). */
+static void buffer_request_callback(uint16_t num_of_bytes)
+{
+    m_ser_phy_rx_event.evt_type = SER_PHY_EVT_RX_BUF_REQUEST;
+    m_ser_phy_rx_event.evt_params.rx_buf_request.num_of_bytes = num_of_bytes;
+    m_ser_phy_event_handler(m_ser_phy_rx_event);
+}
+
+/* Deliver the completed RX packet event (filled in by ser_phy_rx_buf_set()). */
+static void packet_received_callback(void)
+{
+    m_ser_phy_event_handler(m_ser_phy_rx_event);
+}
+
+/* Notify the upper layer that an incoming packet was discarded (no RX buffer). */
+static void packet_dropped_callback(void)
+{
+    static ser_phy_evt_t const event = {
+        .evt_type = SER_PHY_EVT_RX_PKT_DROPPED,
+    };
+    m_ser_phy_event_handler(event);
+}
+
+/* Forward a UART hardware error mask to the upper layer as a HW_ERROR event. */
+static void hardware_error_callback(uint32_t hw_error)
+{
+    ser_phy_evt_t event = {
+        .evt_type = SER_PHY_EVT_HW_ERROR,
+        .evt_params.hw_error.error_code = hw_error,
+    };
+    m_ser_phy_event_handler(event);
+}
+
+/* Start reception of the next packet by reading its length header. */
+static void packet_rx_start(void)
+{
+    APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_header_buf,
+        SER_PHY_HEADER_SIZE));
+}
+
+/* Receive and discard one byte of a packet that has no destination buffer. */
+static void packet_byte_drop(void)
+{
+    APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, m_rx_drop_buf, 1));
+}
+
+/* UART driver event handler — the heart of the PHY. Distinguishes header,
+ * drop-sink, and payload transfers by comparing the event's data pointer
+ * against the module's buffers, and chains further TX/RX requests so that
+ * packets larger than UART_TRANSFER_MAX are moved in chunks. */
+static void uart_event_handler(nrf_drv_uart_event_t * p_event,
+                               void * p_context)
+{
+    (void)p_context;
+
+    switch (p_event->type)
+    {
+        case NRF_DRV_UART_EVT_ERROR:
+            // Process the error only if this is a parity or overrun error.
+            // Break and framing errors will always occur before the other
+            // side becomes active.
+            if (p_event->data.error.error_mask &
+                (NRF_UART_ERROR_PARITY_MASK | NRF_UART_ERROR_OVERRUN_MASK))
+            {
+                // Pass error source to upper layer.
+                hardware_error_callback(p_event->data.error.error_mask);
+            }
+
+            // Resynchronize: wait for the next packet header.
+            packet_rx_start();
+            break;
+
+        case NRF_DRV_UART_EVT_TX_DONE:
+            // Header finished -> start sending the payload.
+            if (p_event->data.rxtx.p_data == m_tx_header_buf)
+            {
+#if (SER_HAL_TRANSPORT_TX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
+                // Payload exceeds one driver transfer: send the first chunk only.
+                if (m_bytes_to_transmit > UART_TRANSFER_MAX)
+                {
+                    APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, mp_tx_buffer,
+                        UART_TRANSFER_MAX));
+                }
+                else
+#endif // (SER_HAL_TRANSPORT_TX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
+                {
+                    APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, mp_tx_buffer,
+                        m_bytes_to_transmit));
+                }
+            }
+            else
+            {
+#if (SER_HAL_TRANSPORT_TX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
+                // Payload chunk finished: account for it and send the next
+                // chunk, or finish the packet.
+                ASSERT(p_event->data.rxtx.bytes <= m_bytes_to_transmit);
+                m_bytes_to_transmit -= p_event->data.rxtx.bytes;
+                if (m_bytes_to_transmit != 0)
+                {
+                    APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart,
+                        p_event->data.rxtx.p_data + p_event->data.rxtx.bytes,
+                        m_bytes_to_transmit < UART_TRANSFER_MAX ?
+                        m_bytes_to_transmit : UART_TRANSFER_MAX));
+                }
+                else
+#endif // (SER_HAL_TRANSPORT_TX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
+                {
+                    m_tx_in_progress = false;
+                    packet_sent_callback();
+                }
+            }
+            break;
+
+        case NRF_DRV_UART_EVT_RX_DONE:
+            if (p_event->data.rxtx.p_data == m_rx_header_buf)
+            {
+                // Header received: decode the payload length (little-endian)
+                // and ask the upper layer for a buffer.
+                m_bytes_to_receive = uint16_decode(m_rx_header_buf);
+                buffer_request_callback(m_bytes_to_receive);
+            }
+            else if (p_event->data.rxtx.p_data == m_rx_drop_buf)
+            {
+                // Draining an unwanted packet one byte at a time.
+                --m_bytes_to_receive;
+                if (m_bytes_to_receive != 0)
+                {
+                    packet_byte_drop();
+                }
+                else
+                {
+                    packet_dropped_callback();
+
+                    packet_rx_start();
+                }
+            }
+            else
+            {
+#if (SER_HAL_TRANSPORT_RX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
+                // Payload chunk received: account for it and request the next
+                // chunk, or complete the packet.
+                ASSERT(p_event->data.rxtx.bytes <= m_bytes_to_receive);
+                m_bytes_to_receive -= p_event->data.rxtx.bytes;
+                if (m_bytes_to_receive != 0)
+                {
+                    APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart,
+                        p_event->data.rxtx.p_data + p_event->data.rxtx.bytes,
+                        m_bytes_to_receive < UART_TRANSFER_MAX ?
+                        m_bytes_to_receive : UART_TRANSFER_MAX));
+                }
+                else
+#endif // (SER_HAL_TRANSPORT_RX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
+                {
+                    packet_received_callback();
+
+                    packet_rx_start();
+                }
+            }
+            break;
+
+        default:
+            // No other event types are expected from this driver.
+            APP_ERROR_CHECK(NRF_ERROR_INTERNAL);
+    }
+}
+
+/** API FUNCTIONS */
+
+/* Open the UART PHY: initialize the driver with the static config, register
+ * the upper-layer event handler, and start listening for a packet header.
+ * Returns NRF_ERROR_NULL for a NULL handler, NRF_ERROR_INVALID_STATE when
+ * already open, NRF_ERROR_INVALID_PARAM when driver init fails.
+ * NOTE(review): the real nrf_drv_uart_init() error code is discarded and
+ * mapped to NRF_ERROR_INVALID_PARAM — confirm callers do not need it. */
+uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
+{
+    uint32_t err_code;
+
+    if (events_handler == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    // Check if function was not called before.
+    if (m_ser_phy_event_handler != NULL)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+
+    err_code = nrf_drv_uart_init(&m_uart, &m_uart_config, uart_event_handler);
+    if (err_code != NRF_SUCCESS)
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+
+    m_ser_phy_event_handler = events_handler;
+
+    packet_rx_start();
+
+    return err_code;
+}
+
+/* Queue a packet for transmission: send the 2-byte length header first; the
+ * payload follows from uart_event_handler() on TX_DONE. The caller's buffer
+ * must stay valid until SER_PHY_EVT_TX_PKT_SENT is delivered.
+ * Returns NRF_ERROR_NULL / NRF_ERROR_INVALID_PARAM for bad arguments,
+ * NRF_ERROR_BUSY while a previous packet is in flight. */
+uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
+{
+    if (p_buffer == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+    else if (num_of_bytes == 0)
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+
+    bool busy;
+
+    // Atomic test-and-set of the TX flag so concurrent callers cannot both
+    // claim the transmitter.
+    CRITICAL_REGION_ENTER();
+    busy = m_tx_in_progress;
+    m_tx_in_progress = true;
+    CRITICAL_REGION_EXIT();
+
+    if (busy)
+    {
+        return NRF_ERROR_BUSY;
+    }
+
+    // Little-endian length header, then kick off the header transfer.
+    (void)uint16_encode(num_of_bytes, m_tx_header_buf);
+    mp_tx_buffer        = p_buffer;
+    m_bytes_to_transmit = num_of_bytes;
+    APP_ERROR_CHECK(nrf_drv_uart_tx(&m_uart, m_tx_header_buf,
+        SER_PHY_HEADER_SIZE));
+
+    return NRF_SUCCESS;
+}
+
+
+/* Answer a pending SER_PHY_EVT_RX_BUF_REQUEST with a destination buffer.
+ * p_buffer == NULL means no memory is available: the payload is drained
+ * byte-by-byte into an internal sink and later reported as dropped.
+ * Returns NRF_ERROR_INVALID_STATE when no buffer request is pending. */
+uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
+{
+
+    if (m_ser_phy_rx_event.evt_type != SER_PHY_EVT_RX_BUF_REQUEST)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+
+    // Pre-fill the RX_PKT_RECEIVED event delivered once the payload is in.
+    m_ser_phy_rx_event.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
+    m_ser_phy_rx_event.evt_params.rx_pkt_received.p_buffer = p_buffer;
+    m_ser_phy_rx_event.evt_params.rx_pkt_received.num_of_bytes =
+        m_bytes_to_receive;
+
+    // If there is not enough memory to receive the packet (no buffer was
+    // provided), drop its data byte by byte (using an internal 1-byte buffer).
+    if (p_buffer == NULL)
+    {
+        packet_byte_drop();
+    }
+#if (SER_HAL_TRANSPORT_RX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
+    else if (m_bytes_to_receive > UART_TRANSFER_MAX)
+    {
+        // Payload exceeds one driver transfer: receive the first chunk only;
+        // uart_event_handler() requests the remainder.
+        APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, p_buffer, UART_TRANSFER_MAX));
+    }
+#endif // (SER_HAL_TRANSPORT_RX_MAX_PKT_SIZE > UART_TRANSFER_MAX)
+    else
+    {
+        APP_ERROR_CHECK(nrf_drv_uart_rx(&m_uart, p_buffer, m_bytes_to_receive));
+    }
+
+    return NRF_SUCCESS;
+}
+
+
+/* Close the UART PHY: uninitialize the driver and clear the handler so
+ * ser_phy_open() can be called again. */
+void ser_phy_close(void)
+{
+    nrf_drv_uart_uninit(&m_uart);
+    m_ser_phy_event_handler = NULL;
+}
+
+
+/* Enable the UART(E) peripheral interrupt directly in the NVIC. */
+void ser_phy_interrupts_enable(void)
+{
+    NVIC_EnableIRQ(SER_UART_IRQ);
+}
+
+
+/* Disable the UART(E) peripheral interrupt directly in the NVIC. */
+void ser_phy_interrupts_disable(void)
+{
+    NVIC_DisableIRQ(SER_UART_IRQ);
+}