fix: serialize ch390 recovery in ethernet poll path

This commit is contained in:
2026-04-23 18:06:03 +08:00
parent c519f90149
commit c9ece65182
+188 -9
View File
@@ -42,11 +42,33 @@ static uint8_t s_rx_buffer[CH390_PKT_MAX];
static uint8_t s_tx_buffer[CH390_PKT_MAX];
static uint8_t s_garp_sent = 0u; /* presumably flags that the gratuitous ARP was already sent -- TODO confirm against the sender */

/* Recovery escalation levels. Higher value wins: a FULL request overrides a
 * pending RX request (see ethernetif_record_event_and_request). */
#define ETH_RECOVERY_NONE 0u
#define ETH_RECOVERY_RX 1u
#define ETH_RECOVERY_FULL 2u

/* Shared recovery bookkeeping, mutated only inside taskENTER_CRITICAL /
 * taskEXIT_CRITICAL sections so event producers and the poll-path consumer
 * stay consistent. */
struct ethernetif_recovery_state
{
    uint8_t pending;              /* highest recovery level requested but not yet claimed */
    uint8_t in_progress;          /* set while a claimed recovery pass is running */
    uint32_t tx_timeout_count;    /* TX timeouts observed (each requests FULL recovery) */
    uint32_t rx_overflow_count;   /* RX FIFO overflow interrupts (each requests RX recovery) */
    uint32_t rx_bad_count;        /* bad/lost RX frames (each requests RX recovery) */
    uint32_t rx_recovery_count;   /* RX-only recovery passes completed */
    uint32_t full_recovery_count; /* full chip recovery passes completed */
};
static struct ethernetif_recovery_state s_recovery = {0};
/* Forward declarations */
static err_t low_level_init(struct netif *netif);
static err_t low_level_output(struct netif *netif, struct pbuf *p);
static struct pbuf *low_level_input(struct netif *netif);
static void ethernetif_update_link(uint8_t link_status);
/* Counter helpers: atomically bump an event counter; the _and_request
 * variant additionally escalates s_recovery.pending. */
static uint32_t ethernetif_record_counter(uint32_t *counter);
static uint32_t ethernetif_record_event_and_request(uint32_t *counter, uint8_t recovery_type);
/* Recovery handshake: claim -> run -> finish, serialized via s_recovery.in_progress. */
static uint8_t ethernetif_claim_recovery(void);
static void ethernetif_finish_recovery(void);
static uint8_t ethernetif_run_pending_recovery(void);
/*---------------------------------------------------------------------------
* Low Level Hardware Functions
@@ -113,6 +135,113 @@ static err_t low_level_init(struct netif *netif)
return ERR_OK;
}
/**
 * @brief Atomically increment an event counter.
 * @param counter Counter to bump; guarded by a FreeRTOS critical section.
 * @return The counter's value after the increment.
 */
static uint32_t ethernetif_record_counter(uint32_t *counter)
{
    uint32_t updated;

    taskENTER_CRITICAL();
    *counter += 1u;
    updated = *counter;
    taskEXIT_CRITICAL();

    return updated;
}
/**
 * @brief Atomically record an error event and request recovery.
 *
 * Bumps @p counter and raises s_recovery.pending to at least
 * @p recovery_type (a higher already-pending level is kept).
 *
 * @param counter       Event counter to increment.
 * @param recovery_type ETH_RECOVERY_RX or ETH_RECOVERY_FULL.
 * @return The counter's value after the increment.
 */
static uint32_t ethernetif_record_event_and_request(uint32_t *counter, uint8_t recovery_type)
{
    uint32_t updated;

    taskENTER_CRITICAL();
    *counter += 1u;
    updated = *counter;
    /* Escalate only; never downgrade a stronger pending request. */
    if (s_recovery.pending < recovery_type)
    {
        s_recovery.pending = recovery_type;
    }
    taskEXIT_CRITICAL();

    return updated;
}
/**
 * @brief Try to take ownership of the pending recovery request.
 *
 * Succeeds only when a request is pending and no other recovery pass is
 * already running; on success the pending slot is cleared and
 * in_progress is set, so exactly one caller executes the recovery.
 *
 * @return The claimed recovery level, or ETH_RECOVERY_NONE.
 */
static uint8_t ethernetif_claim_recovery(void)
{
    uint8_t claimed = ETH_RECOVERY_NONE;

    taskENTER_CRITICAL();
    if ((s_recovery.pending != ETH_RECOVERY_NONE) && (s_recovery.in_progress == 0u))
    {
        claimed = s_recovery.pending;
        s_recovery.pending = ETH_RECOVERY_NONE;
        s_recovery.in_progress = 1u;
    }
    taskEXIT_CRITICAL();

    return claimed;
}
/**
 * @brief Release recovery ownership taken by ethernetif_claim_recovery().
 *
 * Clears in_progress so the next pending request can be claimed. Any
 * request recorded while the pass was running stays in s_recovery.pending.
 */
static void ethernetif_finish_recovery(void)
{
taskENTER_CRITICAL();
s_recovery.in_progress = 0u;
taskEXIT_CRITICAL();
}
/**
 * @brief Claim and execute any pending CH390 recovery request.
 *
 * RX recovery resets only the receive path; FULL recovery performs a
 * software reset, re-applies the chip configuration (MAC address, PHY
 * auto-negotiation, RX enable), re-initializes the interrupt line and
 * resynchronizes the cached link state.
 *
 * @return The recovery level that was executed, or ETH_RECOVERY_NONE if
 *         nothing was pending or another pass was already in progress.
 */
static uint8_t ethernetif_run_pending_recovery(void)
{
const device_config_t *cfg = config_get();
uint8_t recovery_type;
uint8_t link_status = 0u;
uint32_t count;

/* Only one caller wins the claim; everyone else returns immediately. */
recovery_type = ethernetif_claim_recovery();
if (recovery_type == ETH_RECOVERY_NONE)
{
return ETH_RECOVERY_NONE;
}

/* All chip register access below must hold the shared SPI mutex. */
if (spi_mutex != NULL)
{
xSemaphoreTake(spi_mutex, portMAX_DELAY);
}
if (recovery_type == ETH_RECOVERY_FULL)
{
/* Full reset loses chip config: reset, then reprogram MAC/PHY/RX
 * before reading the link state back. */
ch390_software_reset();
ch390_default_config();
ch390_set_mac_address((uint8_t *)cfg->net.mac);
ch390_set_phy_mode(CH390_AUTO);
ch390_rx_enable(1);
link_status = (uint8_t)ch390_get_link_status();
}
else
{
/* RX-only recovery: reset just the receive path. */
ch390_rx_reset();
}
if (spi_mutex != NULL)
{
xSemaphoreGive(spi_mutex);
}

if (recovery_type == ETH_RECOVERY_FULL)
{
/* NOTE(review): interrupt re-init runs outside the SPI mutex --
 * confirm ch390_interrupt_init() touches only MCU-side GPIO/EXTI,
 * not the SPI bus. */
ch390_interrupt_init();
count = ethernetif_record_counter(&s_recovery.full_recovery_count);
debug_log_printf("[ETH] rec full n=%lu link=%u\r\n",
(unsigned long)count,
(unsigned int)link_status);
/* Propagate the freshly-read link state to lwIP. */
ethernetif_update_link(link_status);
}
else
{
count = ethernetif_record_counter(&s_recovery.rx_recovery_count);
debug_log_printf("[ETH] rec rx n=%lu\r\n", (unsigned long)count);
}

/* Release ownership so a request recorded meanwhile can be claimed. */
ethernetif_finish_recovery();
return recovery_type;
}
/**
* @brief Transmit a packet via CH390
* @param netif Network interface structure
@@ -125,6 +254,7 @@ static err_t low_level_output(struct netif *netif, struct pbuf *p)
uint16_t offset;
uint16_t tx_len;
int tx_rc;
uint32_t tx_timeout_count;
(void)netif;
/* Take SPI mutex */
@@ -179,7 +309,8 @@ static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
LINK_STATS_INC(link.drop);
LINK_STATS_INC(link.err);
debug_log_write("[ETH] tx timeout\r\n");
tx_timeout_count = ethernetif_record_event_and_request(&s_recovery.tx_timeout_count, ETH_RECOVERY_FULL);
debug_log_printf("[ETH] tx timeout n=%lu\r\n", (unsigned long)tx_timeout_count);
return ERR_TIMEOUT;
}
@@ -199,9 +330,12 @@ static struct pbuf *low_level_input(struct netif *netif)
struct pbuf *p = NULL;
struct pbuf *q;
uint16_t offset;
uint16_t peek_len;
uint16_t len;
uint32_t rx_len;
uint8_t rx_status;
uint32_t rx_bad_count;
int peek_ready;
/* Take SPI mutex */
if (spi_mutex != NULL)
@@ -209,6 +343,37 @@ static struct pbuf *low_level_input(struct netif *netif)
xSemaphoreTake(spi_mutex, portMAX_DELAY);
}
peek_len = 0u;
peek_ready = ch390_peek_packet(&rx_status, &peek_len);
ethernetif->rx_status = rx_status;
ethernetif->rx_len = peek_len;
if (peek_ready == 0)
{
if (spi_mutex != NULL)
{
xSemaphoreGive(spi_mutex);
}
return NULL;
}
if (((rx_status & 0x3Fu) != 0u) || (peek_len < 14u) || (peek_len > CH390_PKT_MAX))
{
if (spi_mutex != NULL)
{
xSemaphoreGive(spi_mutex);
}
LINK_STATS_INC(link.drop);
LINK_STATS_INC(link.err);
rx_bad_count = ethernetif_record_event_and_request(&s_recovery.rx_bad_count, ETH_RECOVERY_RX);
debug_log_printf("[ETH] rx bad n=%lu st=0x%02X len=%u\r\n",
(unsigned long)rx_bad_count,
(unsigned int)rx_status,
(unsigned int)peek_len);
return NULL;
}
rx_len = ch390_runtime_receive_packet(s_rx_buffer, &rx_status);
ethernetif->rx_status = rx_status;
ethernetif->rx_len = (uint16_t)rx_len;
@@ -219,6 +384,14 @@ static struct pbuf *low_level_input(struct netif *netif)
{
xSemaphoreGive(spi_mutex);
}
LINK_STATS_INC(link.drop);
LINK_STATS_INC(link.err);
rx_bad_count = ethernetif_record_event_and_request(&s_recovery.rx_bad_count, ETH_RECOVERY_RX);
debug_log_printf("[ETH] rx lost n=%lu st=0x%02X len=%u\r\n",
(unsigned long)rx_bad_count,
(unsigned int)rx_status,
(unsigned int)peek_len);
return NULL;
}
@@ -266,7 +439,6 @@ static struct pbuf *low_level_input(struct netif *netif)
else
{
/* No memory - drop packet */
ch390_drop_packet(ethernetif->rx_len);
LINK_STATS_INC(link.memerr);
LINK_STATS_INC(link.drop);
}
@@ -491,6 +663,8 @@ void ethernetif_poll(void)
struct ch390_runtime_status runtime_status;
struct pbuf *p;
err_t input_err;
uint32_t rx_overflow_count;
uint8_t recovery_type;
uint8_t rx_budget = 4u;
/* Take SPI mutex */
@@ -508,17 +682,20 @@ void ethernetif_poll(void)
xSemaphoreGive(spi_mutex);
}
/* Handle link change */
if ((runtime_status.int_status & ISR_LNKCHG) != 0u)
/* Handle RX overflow */
if ((runtime_status.int_status & (ISR_ROS | ISR_ROO)) != 0u)
{
ethernetif_update_link((uint8_t)ch390_runtime_link_up_from_status(&runtime_status));
LINK_STATS_INC(link.err);
rx_overflow_count = ethernetif_record_event_and_request(&s_recovery.rx_overflow_count, ETH_RECOVERY_RX);
debug_log_printf("[ETH] rx ovf n=%lu\r\n", (unsigned long)rx_overflow_count);
}
/* Handle RX overflow */
if ((runtime_status.int_status & ISR_ROS) != 0u)
recovery_type = ethernetif_run_pending_recovery();
/* Handle link change unless a full recovery already resynchronized link state. */
if ((recovery_type != ETH_RECOVERY_FULL) && ((runtime_status.int_status & ISR_LNKCHG) != 0u))
{
/* RX overflow - packets might be corrupted */
LINK_STATS_INC(link.err);
ethernetif_update_link((uint8_t)ch390_runtime_link_up_from_status(&runtime_status));
}
/* Always attempt to drain RX FIFO so packet handling does not depend only on ISR_PR timing. */
@@ -539,6 +716,8 @@ void ethernetif_poll(void)
--rx_budget;
}
(void)ethernetif_run_pending_recovery();
if (rx_budget == 0u)
{
taskYIELD();