This post is older than 2 years and might not be relevant anymore
More Info: Consider searching for newer posts

mcumgr on UART shell / async API and power management

We are trying to use mcumgr/mcuboot with Zephyr on nRF53. During development, we have enabled the Zephyr shell functionality and would like to use mcumgr with it (CONFIG_MCUMGR_SMP_SHELL=y).

The default interrupt-driven UARTE driver implementation on nRF53 does not seem to work reliably in this scenario: received bytes will be lost, causing the mcumgr connection to time out. Are you aware of this issue? It manifests when trying to upload an image at 460800 baud.

We have tried modifying shell_uart.c to use the async API (CONFIG_UART_ASYNC_API=y). This works so far, but now we run into a problem when trying to combine this with power management.

After the serial cable is disconnected, RX will go low and we receive the UART_RX_DISABLED event. We then set the UART to DEVICE_PM_LOW_POWER_STATE and configure an interrupt on RX in order to re-enable it when the cable is connected again. When calling device_set_power_state with DEVICE_PM_ACTIVE_STATE, the new active state is not stored by the driver.

uart_nrfx_uarte.c has the following code in the function uarte_nrfx_set_power_state:

	if (new_state == DEVICE_PM_ACTIVE_STATE) {
		uarte_nrfx_pins_enable(dev, true);
		nrf_uarte_enable(uarte);
#ifdef CONFIG_UART_ASYNC_API
		if (hw_rx_counting_enabled(get_dev_data(dev))) {
			nrfx_timer_enable(&get_dev_config(dev)->timer);
		}
		if (get_dev_data(dev)->async) {
			return;
		}
#endif
		if (nrf_uarte_rx_pin_get(uarte) !=
			NRF_UARTE_PSEL_DISCONNECTED) {
			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
		}

		data->pm_state = new_state;
	} else {

If the async API is used (async != NULL), the function returns early and data->pm_state is never set to the new state.

Is this on purpose and how is the power management supposed to be used with the async API?

  • Our modified copy of shell_uart.c with async support:

    /*
     * Copyright (c) 2018 Nordic Semiconductor ASA
     *
     * SPDX-License-Identifier: Apache-2.0
     */
    
    #include <shell/shell_uart.h>
    #include <drivers/gpio.h>
    #include <drivers/uart.h>
    #include <init.h>
    #include <logging/log.h>
    #include <net/buf.h>
    
    #define LOG_MODULE_NAME shell_uart
    LOG_MODULE_REGISTER(shell_uart);
    
    /* Poll period for the timer-based (non interrupt-driven) RX path. */
    #ifdef CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD
    #define RX_POLL_PERIOD K_MSEC(CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD)
    #else
    #define RX_POLL_PERIOD K_NO_WAIT
    #endif
    
    #ifdef CONFIG_MCUMGR_SMP_SHELL
    /* Buffer pool used to assemble mcumgr SMP frames received via the shell. */
    NET_BUF_POOL_DEFINE(smp_shell_rx_pool, CONFIG_MCUMGR_SMP_SHELL_RX_BUF_COUNT,
    		    SMP_SHELL_RX_BUF_SIZE, 0, NULL);
    #endif /* CONFIG_MCUMGR_SMP_SHELL */
    
    /* Transport context and shell instance for the serial backend. */
    SHELL_UART_DEFINE(shell_transport_uart,
    		  CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE,
    		  CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE);
    SHELL_DEFINE(shell_uart, CONFIG_SHELL_PROMPT_UART, &shell_transport_uart,
    	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_SIZE,
    	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_TIMEOUT,
    	     SHELL_FLAG_OLF_CRLF);
    
    #ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
    /* Copy the bytes of an UART_RX_RDY event into the shell RX ring buffer,
     * first diverting any bytes that are part of an mcumgr SMP frame to the
     * SMP receive path.  Signals SHELL_TRANSPORT_EVT_RX_RDY when new data
     * became available to either consumer.
     */
    static void uart_rx_handle(const struct device *dev,
    			   struct uart_event_rx *evt,
    			   const struct shell_uart *sh_uart)
    {
    	uint8_t *data;
    	uint32_t len;
    	uint32_t rd_len;
    	bool new_data = false;
    #ifdef CONFIG_MCUMGR_SMP_SHELL
    	struct smp_shell_data *const smp = &sh_uart->ctrl_blk->smp;
    #endif
    
    	do {
    		/* Claim as much contiguous space as the ring buffer offers. */
    		len = ring_buf_put_claim(sh_uart->rx_ringbuf, &data,
    					 sh_uart->rx_ringbuf->size);
    
    		if (len > 0) {
    			rd_len = MIN(evt->len, len);
    			memcpy(data, evt->buf + evt->offset, rd_len);
    			evt->offset += rd_len;
    			evt->len -= rd_len;
    
    			/* If there is any new data to be either taken into
    			 * ring buffer or consumed by the SMP, signal the
    			 * shell_thread.
    			 */
    			if (rd_len > 0) {
    				new_data = true;
    			}
    #ifdef CONFIG_MCUMGR_SMP_SHELL
    			/* Divert bytes from shell handling if it is
    			 * part of an mcumgr frame.
    			 */
    			size_t i = smp_shell_rx_bytes(smp, data, rd_len);
    
    			rd_len -= i;
    
    			/* Compact the bytes SMP did not consume to the front
    			 * of the claimed area before committing them.
    			 */
    			if (rd_len) {
    				for (uint32_t j = 0; j < rd_len; j++) {
    					data[j] = data[i + j];
    				}
    			}
    #endif /* CONFIG_MCUMGR_SMP_SHELL */
    			int err = ring_buf_put_finish(sh_uart->rx_ringbuf,
    						      rd_len);
    			(void)err;
    			__ASSERT_NO_MSG(err == 0);
    		} else {
    			uint8_t dummy;
    
    			/* No space in the ring buffer - consume byte. */
    			LOG_WRN("RX ring buffer full.");
    
    			rd_len = MIN(evt->len, 1);
    			memcpy(&dummy, evt->buf + evt->offset, rd_len);
    			evt->offset += rd_len;
    			evt->len -= rd_len;
    #ifdef CONFIG_MCUMGR_SMP_SHELL
    			/* If successful in getting byte from the fifo, try
    			 * feeding it to SMP as a part of mcumgr frame.
    			 */
    			if ((rd_len != 0) &&
    			    (smp_shell_rx_bytes(smp, &dummy, 1) == 1)) {
    				new_data = true;
    			}
    #endif /* CONFIG_MCUMGR_SMP_SHELL */
    		}
    	} while (rd_len && (rd_len == len));
    
    	if (new_data) {
    		sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_RX_RDY,
    					   sh_uart->ctrl_blk->context);
    	}
    }
    
    /* Kick off transmission of the next contiguous chunk of the TX ring
     * buffer, or mark the transmitter idle when the buffer is empty.
     * Always wakes the shell thread so it can refill the buffer.
     */
    static void uart_tx_handle(const struct device *dev,
    			   const struct shell_uart *sh_uart)
    {
    	const uint8_t *chunk;
    	uint32_t chunk_len = ring_buf_get_claim(sh_uart->tx_ringbuf,
    						(uint8_t **)&chunk,
    						sh_uart->tx_ringbuf->size);
    
    	if (chunk_len == 0) {
    		/* Nothing queued - next write() restarts transmission. */
    		sh_uart->ctrl_blk->tx_busy = 0;
    	} else {
    		int err = uart_tx(dev, chunk, chunk_len, SYS_FOREVER_MS);
    
    		(void)err;
    		__ASSERT_NO_MSG(err == 0);
    	}
    
    	sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_TX_RDY,
    				   sh_uart->ctrl_blk->context);
    }
    
    /* Size of each asynchronous RX buffer handed to the UART driver. */
    #define BUF_SIZE 128
    /* Slab providing the RX buffers (3 blocks, 4-byte aligned). */
    static K_MEM_SLAB_DEFINE(uart_slab, BUF_SIZE, 3, 4);
    /* GPIO port owning the UART RX pin; resolved in init(). */
    static const struct device *rx_port;
    #define RX_PIN DT_PROP(DT_NODELABEL(uart0), rx_pin)
    
    /* Asynchronous UART event handler (runs in interrupt context).
     * user_data carries the shell backend instance registered via
     * uart_callback_set() in uart_irq_init().
     */
    static void uart_callback(const struct device *dev,
    			  struct uart_event *evt,
    			  void *user_data)
    {
    	const struct shell_uart *sh_uart = user_data;
    
    	switch (evt->type) {
    	case UART_TX_DONE:
    		LOG_DBG("Tx sent %d bytes", evt->data.tx.len);
    		/* Release the bytes just sent, then start the next chunk. */
    		int err = ring_buf_get_finish(sh_uart->tx_ringbuf, evt->data.tx.len);
    		(void)err;
    		__ASSERT_NO_MSG(err == 0);
    		uart_tx_handle(dev, sh_uart);
    		break;
    
    	case UART_TX_ABORTED:
    		LOG_ERR("Tx aborted");
    		break;
    
    	case UART_RX_RDY:
    		LOG_DBG("Received data %d bytes", evt->data.rx.len);
    		uart_rx_handle(dev, &evt->data.rx, sh_uart);
    		break;
    
    	case UART_RX_BUF_REQUEST:
    		LOG_DBG("rx buf request");
    		{
    			uint8_t *buf;
    
    			/* Allocation can fail when all slab blocks are in
    			 * flight.  __ASSERT is compiled out in release
    			 * builds, so check explicitly instead of handing an
    			 * invalid pointer to the driver.
    			 */
    			int err = k_mem_slab_alloc(&uart_slab, (void **)&buf, K_NO_WAIT);
    			if (err != 0) {
    				LOG_ERR("Failed to allocate slab (%d)", err);
    				break;
    			}
    
    			err = uart_rx_buf_rsp(dev, buf, BUF_SIZE);
    			__ASSERT(err == 0, "Failed to provide new buffer");
    		}
    		break;
    
    	case UART_RX_BUF_RELEASED:
    		LOG_DBG("rx buf released");
    		k_mem_slab_free(&uart_slab, (void **)&evt->data.rx_buf.buf);
    		break;
    
    	case UART_RX_DISABLED:
    		LOG_INF("rx disabled");
    		/* device_set_power_state(dev, DEVICE_PM_LOW_POWER_STATE, NULL, NULL); */
    		/* Arm a level interrupt on the RX pin so reception can be
    		 * restarted when the cable is plugged back in - see
    		 * gpio_callback_handler_f().
    		 */
    		gpio_pin_configure(rx_port, RX_PIN & 31, GPIO_INPUT | GPIO_INT_LEVEL_ACTIVE | GPIO_ACTIVE_HIGH);
    		break;
    
    	case UART_RX_STOPPED:
    		LOG_DBG("rx stopped (%d)", evt->data.rx_stop.reason);
    		break;
    
    	default:
    		LOG_INF("event %d", evt->type);
    		break;
    	}
    }
    #endif /* CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN */
    
    /* Configure the UART for asynchronous reception: allocate the initial
     * RX buffer, register uart_callback() and start RX.  No-op unless the
     * interrupt-driven backend is enabled.
     */
    static void uart_irq_init(const struct shell_uart *sh_uart)
    {
    #ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
    	const struct device *dev = sh_uart->ctrl_blk->dev;
    	uint8_t *buf;
    	int err;
    
    	LOG_DBG("slab alloc %d", BUF_SIZE);
    	err = k_mem_slab_alloc(&uart_slab, (void **)&buf, K_NO_WAIT);
    	if (err != 0) {
    		/* __ASSERT is compiled out in release builds; without this
    		 * check uart_rx_enable() would be fed a bogus pointer.
    		 */
    		LOG_ERR("Failed to allocate slab (%d)", err);
    		return;
    	}
    
    	LOG_DBG("set callback");
    	err = uart_callback_set(dev, uart_callback, (void *)sh_uart);
    	__ASSERT(err == 0, "Failed to set callback");
    
    	LOG_DBG("rx enable");
    	err = uart_rx_enable(dev, buf, BUF_SIZE, 1);
    	__ASSERT(err == 0, "Failed to enable RX");
    #endif
    }
    
    /* Binds the shell backend instance to the GPIO callback used to detect
     * RX line activity while UART RX is disabled.
     */
    typedef struct {
    	const struct shell_uart *sh_uart;
    	struct gpio_callback     callback;
    } cb_t;
    
    /* Single backend instance assumed; registered in init(). */
    static cb_t cb;
    
    /* Fires when the RX pin goes active while UART RX is disabled (cable
     * reconnected).  Disarms the one-shot pin interrupt and restarts
     * asynchronous reception via uart_irq_init().
     */
    static void gpio_callback_handler_f(const struct device *port,
                                        struct gpio_callback *cb,
                                        gpio_port_pins_t pins)
    {
    	const struct shell_uart *sh_uart = CONTAINER_OF(cb, cb_t, callback)->sh_uart;
    	/* Disable first so the level interrupt cannot re-fire. */
    	gpio_pin_interrupt_configure(port, RX_PIN & 31, GPIO_INT_DISABLE);
    	/* device_set_power_state(sh_uart->ctrl_blk->dev, DEVICE_PM_ACTIVE_STATE, NULL, NULL); */
    	uart_irq_init(sh_uart);
    	LOG_INF("rx re-enabled");
    }
    
    /* Polled RX path: drain every character currently available from the
     * UART into the RX ring buffer and notify the shell thread.
     */
    static void timer_handler(struct k_timer *timer)
    {
    	const struct shell_uart *sh_uart = k_timer_user_data_get(timer);
    	uint8_t byte;
    
    	while (uart_poll_in(sh_uart->ctrl_blk->dev, &byte) == 0) {
    		if (ring_buf_put(sh_uart->rx_ringbuf, &byte, 1) == 0U) {
    			/* ring buffer full. */
    			LOG_WRN("RX ring buffer full.");
    		}
    		sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_RX_RDY,
    					   sh_uart->ctrl_blk->context);
    	}
    }
    
    /* Shell transport init: stores device/handler/context, starts RX
     * (async or timer-polled), and registers a GPIO callback on the UART
     * RX pin so reception can be restarted after a cable re-connect.
     */
    static int init(const struct shell_transport *transport,
    		const void *config,
    		shell_transport_handler_t evt_handler,
    		void *context)
    {
    	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
    
    	sh_uart->ctrl_blk->dev = (const struct device *)config;
    	sh_uart->ctrl_blk->handler = evt_handler;
    	sh_uart->ctrl_blk->context = context;
    
    #ifdef CONFIG_MCUMGR_SMP_SHELL
    	sh_uart->ctrl_blk->smp.buf_pool = &smp_shell_rx_pool;
    	k_fifo_init(&sh_uart->ctrl_blk->smp.buf_ready);
    #endif
    
    	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN)) {
    		uart_irq_init(sh_uart);
    	} else {
    		/* Fallback: poll the UART periodically from a k_timer. */
    		k_timer_init(sh_uart->timer, timer_handler, NULL);
    		k_timer_user_data_set(sh_uart->timer, (void *)sh_uart);
    		k_timer_start(sh_uart->timer, RX_POLL_PERIOD, RX_POLL_PERIOD);
    	}
    
    	/* Resolve the GPIO port owning the RX pin (pins 0-31 on GPIO_0,
    	 * 32+ on GPIO_1).  NOTE(review): device_get_binding() result is
    	 * not checked - assumes the port exists on the target board.
    	 */
    #if RX_PIN < 32
    	rx_port = device_get_binding("GPIO_0");
    #else
    	rx_port = device_get_binding("GPIO_1");
    #endif
    	cb.sh_uart = sh_uart;
    	gpio_init_callback(&cb.callback, gpio_callback_handler_f, BIT(RX_PIN & 31));
    	gpio_add_callback(rx_port, &cb.callback);
    
    	return 0;
    }
    
    /* Shell transport uninit: stop reception on the active RX path. */
    static int uninit(const struct shell_transport *transport)
    {
    	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
    
    	if (!IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN)) {
    		/* Polled mode: simply stop the periodic timer. */
    		k_timer_stop(sh_uart->timer);
    	} else {
    		/* Async mode: ask the driver to stop RX. */
    		uart_rx_disable(sh_uart->ctrl_blk->dev);
    	}
    
    	return 0;
    }
    
    /* Shell transport enable: record whether TX must block (e.g. while
     * printing from a fault context); write() then uses polled output.
     */
    static int enable(const struct shell_transport *transport, bool blocking_tx)
    {
    	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
    
    	sh_uart->ctrl_blk->blocking_tx = blocking_tx;
    
    	return 0;
    }
    
    /* Queue data for asynchronous TX.  Bytes are copied into the TX ring
     * buffer; if the transmitter is idle, a uart_tx() transfer of the
     * first contiguous chunk is started.  Completion continues in
     * uart_callback()/UART_TX_DONE via uart_tx_handle().
     */
    static void irq_write(const struct shell_uart *sh_uart, const void *data,
    		     size_t length, size_t *cnt)
    {
    	*cnt = ring_buf_put(sh_uart->tx_ringbuf, data, length);
    
    	/* atomic_set returns the previous value: only the caller that
    	 * flips tx_busy from 0 to 1 starts the transfer.
    	 */
    	if (atomic_set(&sh_uart->ctrl_blk->tx_busy, 1) == 0) {
    #ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
    		uint8_t *buf;
    		uint32_t len = ring_buf_get_claim(sh_uart->tx_ringbuf, &buf, sh_uart->tx_ringbuf->size);
    		int err = uart_tx(sh_uart->ctrl_blk->dev, buf, len, SYS_FOREVER_MS);
    		(void)err;
    		__ASSERT_NO_MSG(err == 0);
    #endif
    	}
    }
    
    /* Shell transport write: asynchronous ring-buffered TX when possible,
     * otherwise byte-by-byte polled output.
     */
    static int write(const struct shell_transport *transport,
    		 const void *data, size_t length, size_t *cnt)
    {
    	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
    
    	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN) &&
    	    !sh_uart->ctrl_blk->blocking_tx) {
    		irq_write(sh_uart, data, length, cnt);
    		return 0;
    	}
    
    	/* Blocking path: poll every byte out and report completion. */
    	const uint8_t *bytes = (const uint8_t *)data;
    
    	for (size_t i = 0; i < length; i++) {
    		uart_poll_out(sh_uart->ctrl_blk->dev, bytes[i]);
    	}
    	*cnt = length;
    
    	sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_TX_RDY,
    				   sh_uart->ctrl_blk->context);
    
    	return 0;
    }
    
    /* Shell transport read: hand out whatever the RX ring buffer holds. */
    static int read(const struct shell_transport *transport,
    		void *data, size_t length, size_t *cnt)
    {
    	struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
    
    	*cnt = ring_buf_get(sh_uart->rx_ringbuf, data, length);
    	return 0;
    }
    
    #ifdef CONFIG_MCUMGR_SMP_SHELL
    /* Periodic transport hook: let the SMP layer process buffered frames. */
    static void update(const struct shell_transport *transport)
    {
    	struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
    
    	smp_shell_process(&sh_uart->ctrl_blk->smp);
    }
    #endif /* CONFIG_MCUMGR_SMP_SHELL */
    
    /* Shell transport vtable for the UART backend. */
    const struct shell_transport_api shell_uart_transport_api = {
    	.init = init,
    	.uninit = uninit,
    	.enable = enable,
    	.write = write,
    	.read = read,
    #ifdef CONFIG_MCUMGR_SMP_SHELL
    	.update = update,
    #endif /* CONFIG_MCUMGR_SMP_SHELL */
    };
    
    /* System-init hook: bind the shell UART device, initialize SMP when
     * configured, and start the shell with the configured log backend.
     */
    static int enable_shell_uart(const struct device *arg)
    {
    	ARG_UNUSED(arg);
    
    	const struct device *dev =
    			device_get_binding(CONFIG_UART_SHELL_ON_DEV_NAME);
    
    	if (dev == NULL) {
    		return -ENODEV;
    	}
    
    	if (IS_ENABLED(CONFIG_MCUMGR_SMP_SHELL)) {
    		smp_shell_init();
    	}
    
    	/* Cap the backend log level at the global maximum. */
    	bool log_backend = CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > 0;
    	uint32_t level =
    		(CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > LOG_LEVEL_DBG) ?
    		CONFIG_LOG_MAX_LEVEL : CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL;
    
    	shell_init(&shell_uart, dev, true, log_backend, level);
    
    	return 0;
    }
    SYS_INIT(enable_shell_uart, POST_KERNEL,
    	 CONFIG_SHELL_BACKEND_SERIAL_INIT_PRIORITY);
    
    /* Returns the UART shell backend instance created by SHELL_DEFINE(). */
    const struct shell *shell_backend_uart_get_ptr(void)
    {
    	return &shell_uart;
    }
    

    This currently works without power management. You can see the commented out calls to device_set_power_state, which we would like to use to reduce power consumption when no cable is connected.

  • Thanks for letting us know about this Stephan, This is great that you came back to report your modifications.

    At least I did not know about this, and if the team also did not know about it, I can create an internal ticket to follow this up. Thanks again.

Related