Always limit the number of NAKs per endpoint per USB frame to save CPU usage. Default to 1 NAK per endpoint

commit bd2210b812
parent 14ce05fea3
Author: hathach
Date: 2024-04-02 12:36:00 +07:00
GPG Key ID: 26FAB84F615C3C52 (no known key found for this signature in database)

6 changed files with 55 additions and 92 deletions
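In outline, the scheme this commit settles on: each endpoint's state field doubles as a per-frame attempt counter that starts at EP_STATE_ATTEMPT_1, is bumped on every NAK, and is reset on the frame (SOF) interrupt, so an endpoint is only retried while it stays within the NAK budget. A minimal self-contained sketch of that bookkeeping (the enum names mirror the diff below, but this is an illustration rather than the actual hcd_max3421.c code, and it omits the packet_size check):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// State encoding from the diff: IDLE = 0, COMPLETE = 1, then the attempt
// counter occupies ATTEMPT_1 = 2 up to ATTEMPT_MAX = 15.
enum { EP_STATE_IDLE = 0, EP_STATE_COMPLETE, EP_STATE_ATTEMPT_1, EP_STATE_ATTEMPT_MAX = 15 };

static uint8_t max_nak = 1; // MAX_NAK_DEFAULT: NAKs tolerated per endpoint per frame

// An endpoint is still schedulable this frame while its attempts have not
// exhausted the NAK budget (mirrors the driver's is_ep_pending()).
static bool is_pending(uint8_t state) {
  return state >= EP_STATE_ATTEMPT_1 && state < EP_STATE_ATTEMPT_1 + max_nak;
}

int main(void) {
  uint8_t state = EP_STATE_ATTEMPT_1;        // transfer queued
  printf("%d\n", is_pending(state));         // 1: attempt it
  if (state < EP_STATE_ATTEMPT_MAX) state++; // device NAKed: one attempt consumed
  printf("%d\n", is_pending(state));         // 0: budget spent, parked for this frame
  state = EP_STATE_ATTEMPT_1;                // FRAME_IRQ handler resets the counter
  printf("%d\n", is_pending(state));         // 1: eligible again next frame
  return 0;
}

With the default budget of one, a NAKed endpoint is parked for the rest of the frame instead of being polled back-to-back, which is where the CPU saving in the commit title comes from.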


@@ -72,7 +72,6 @@
 #if CFG_TUSB_MCU == OPT_MCU_RP2040
 // #define CFG_TUH_RPI_PIO_USB 1 // use pio-usb as host controller
 // #define CFG_TUH_MAX3421 1 // use max3421 as host controller
-// #define CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME 3 // max. transfer attempts per frame
 
 // host roothub port is 1 if using either pio-usb or max3421
 #if (defined(CFG_TUH_RPI_PIO_USB) && CFG_TUH_RPI_PIO_USB) || (defined(CFG_TUH_MAX3421) && CFG_TUH_MAX3421)


@@ -72,7 +72,6 @@
 #if CFG_TUSB_MCU == OPT_MCU_RP2040
 // #define CFG_TUH_RPI_PIO_USB 1 // use pio-usb as host controller
 // #define CFG_TUH_MAX3421 1 // use max3421 as host controller
-// #define CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME 3 // max. transfer attempts per frame
 
 // host roothub port is 1 if using either pio-usb or max3421
 #if (defined(CFG_TUH_RPI_PIO_USB) && CFG_TUH_RPI_PIO_USB) || (defined(CFG_TUH_MAX3421) && CFG_TUH_MAX3421)


@@ -77,7 +77,6 @@
 #if CFG_TUSB_MCU == OPT_MCU_RP2040
 // #define CFG_TUH_RPI_PIO_USB 1 // use pio-usb as host controller
 // #define CFG_TUH_MAX3421 1 // use max3421 as host controller
-// #define CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME 3 // max. transfer attempts per frame
 
 // host roothub port is 1 if using either pio-usb or max3421
 #if (defined(CFG_TUH_RPI_PIO_USB) && CFG_TUH_RPI_PIO_USB) || (defined(CFG_TUH_MAX3421) && CFG_TUH_MAX3421)


@@ -72,7 +72,6 @@
 #if CFG_TUSB_MCU == OPT_MCU_RP2040
 // #define CFG_TUH_RPI_PIO_USB 1 // use pio-usb as host controller
 // #define CFG_TUH_MAX3421 1 // use max3421 as host controller
-// #define CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME 3 // max. transfer attempts per frame
 
 // host roothub port is 1 if using either pio-usb or max3421
 #if (defined(CFG_TUH_RPI_PIO_USB) && CFG_TUH_RPI_PIO_USB) || (defined(CFG_TUH_MAX3421) && CFG_TUH_MAX3421)


@@ -72,7 +72,6 @@
 #if CFG_TUSB_MCU == OPT_MCU_RP2040
 // #define CFG_TUH_RPI_PIO_USB 1 // use pio-usb as host controller
 // #define CFG_TUH_MAX3421 1 // use max3421 as host controller
-// #define CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME 3 // max. transfer attempts per frame
 
 // host roothub port is 1 if using either pio-usb or max3421
 #if (defined(CFG_TUH_RPI_PIO_USB) && CFG_TUH_RPI_PIO_USB) || (defined(CFG_TUH_MAX3421) && CFG_TUH_MAX3421)


@@ -167,33 +167,15 @@ enum {
 };
 
 enum {
-  EP_STATE_IDLE = 0,
-  EP_STATE_ATTEMPT1 = 1, // pending 1st attempt
-#if CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME
-  EP_STATE_ATTEMPT2 = 2, // don't change order
-  EP_STATE_ATTEMPT3 = 3, // need to be incrementable
-  EP_STATE_ATTEMPT4 = 4,
-  EP_STATE_ATTEMPT5 = 5,
-  EP_STATE_ATTEMPT6 = 6,
-  EP_STATE_ATTEMPT7 = 7,
-  EP_STATE_ATTEMPT8 = 8,
-  EP_STATE_ATTEMPT9 = 9,
-  EP_STATE_ATTEMPT10 = 10,
-  EP_STATE_SUSPENDED, // keep next behind last attempt
-#endif
-  EP_STATE_COMPLETE,
-  EP_STATE_QUANTITY // not used. only to get quantity. keep last
+  MAX_NAK_DEFAULT = 1 // Number of NAK per endpoint per usb frame
 };
 
-TU_VERIFY_STATIC(EP_STATE_QUANTITY <= 16, "no more than 16 states possible. ep->state has 4 bits");
-
-#if CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME
-#define EP_STATE_ATTEMPT_QUANTITY (EP_STATE_SUSPENDED - 1)
-TU_VERIFY_STATIC(CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME >= 1 &&
-    CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME <= EP_STATE_ATTEMPT_QUANTITY, "unsupported attempt quantity");
-#else
-#define EP_STATE_ATTEMPT_QUANTITY 1
-#endif
+enum {
+  EP_STATE_IDLE = 0,
+  EP_STATE_COMPLETE,
+  EP_STATE_ATTEMPT_1, // pending 1st attempt
+  EP_STATE_ATTEMPT_MAX = 15
+};
 
 //--------------------------------------------------------------------+
 //
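The EP_STATE_ATTEMPT_MAX = 15 ceiling is presumably inherited from the constraint the removed TU_VERIFY_STATIC spelled out: ep->state is a 4-bit field, so 15 is its largest representable value, and the NAK handler further below clamps instead of blindly incrementing. A hedged sketch of why the clamp matters (the bitfield layout is illustrative; the field names other than state are assumptions, not the driver's exact max3421_ep_t):

#include <stdio.h>

typedef struct {
  unsigned state : 4;    // IDLE/COMPLETE or the attempt counter, 15 at most
  unsigned is_setup : 1; // illustrative neighbors sharing the same byte
  unsigned data_toggle : 1;
} ep_bits_t;

int main(void) {
  ep_bits_t ep = { .state = 15 };      // EP_STATE_ATTEMPT_MAX
  ep.state++;                          // unsigned bitfield wraps: 15 + 1 == 0
  printf("%u\n", (unsigned) ep.state); // prints 0, i.e. EP_STATE_IDLE
  return 0;
}

Without the state < EP_STATE_ATTEMPT_MAX guard in handle_xfer_done(), a 16th NAK would wrap the counter back to EP_STATE_IDLE and the pending transfer would silently vanish.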
@@ -223,6 +205,8 @@ typedef struct {
 TU_VERIFY_STATIC(sizeof(max3421_ep_t) == 12, "size is not correct");
 
 typedef struct {
+  volatile uint16_t frame_count;
+
   // cached register
   uint8_t sndbc;
   uint8_t hirq;
@@ -232,17 +216,17 @@ typedef struct {
   uint8_t hxfr;
 
   atomic_flag busy; // busy transferring
 
-  volatile uint16_t frame_count;
-
-  max3421_ep_t ep[CFG_TUH_MAX3421_ENDPOINT_TOTAL]; // [0] is reserved for addr0
-
-  OSAL_MUTEX_DEF(spi_mutexdef);
 #if OSAL_MUTEX_REQUIRED
+  OSAL_MUTEX_DEF(spi_mutexdef);
   osal_mutex_t spi_mutex;
 #endif
+
+  max3421_ep_t ep[CFG_TUH_MAX3421_ENDPOINT_TOTAL]; // [0] is reserved for addr0
 } max3421_data_t;
 
 static max3421_data_t _hcd_data;
+static uint8_t _max_nak = MAX_NAK_DEFAULT; // max NAK before giving up in a usb frame
 
 //--------------------------------------------------------------------+
 // API: SPI transfer with MAX3421E
@@ -332,7 +316,6 @@ static void fifo_write(uint8_t rhport, uint8_t reg, uint8_t const * buffer, uint
   tuh_max3421_spi_xfer_api(rhport, buffer, NULL, len);
   max3421_spi_unlock(rhport, in_isr);
 }
-
 static void fifo_read(uint8_t rhport, uint8_t * buffer, uint16_t len, bool in_isr) {
@@ -421,18 +404,22 @@ static void free_ep(uint8_t daddr) {
   }
 }
 
+// Check if endpoint has an queued transfer
+TU_ATTR_ALWAYS_INLINE static inline bool is_ep_pending(max3421_ep_t const * ep) {
+  uint8_t const state = ep->state;
+  return ep->packet_size && state >= EP_STATE_ATTEMPT_1 && state < EP_STATE_ATTEMPT_1 + _max_nak;
+}
+
+// Find the next pending endpoint using round-robin scheduling, starting from next endpoint.
+// return NULL if not found
+// TODO respect interrupt endpoint's interval
 static max3421_ep_t * find_next_pending_ep(max3421_ep_t * cur_ep) {
-  // search for next pending endpoint using round-robin scheduling
-  // if no other endpoint is pending and current endpoint is still pending, the current endpoint will be returned
-  // if no other endpoint is pending and current endpoint is not pending, NULL will be returned
-  // TODO maybe prioritisation control/interrupt/bulk/iso
   size_t const idx = (size_t) (cur_ep - _hcd_data.ep);
 
   // starting from next endpoint
   for (size_t i = idx + 1; i < CFG_TUH_MAX3421_ENDPOINT_TOTAL; i++) {
     max3421_ep_t* ep = &_hcd_data.ep[i];
-    if (ep->state >= EP_STATE_ATTEMPT1 && ep->state <= EP_STATE_ATTEMPT_QUANTITY && ep->packet_size) {
-      // TU_LOG3("next pending i = %u\r\n", i);
+    if (is_ep_pending(ep)) {
       return ep;
     }
   }
@@ -440,8 +427,7 @@ static max3421_ep_t * find_next_pending_ep(max3421_ep_t * cur_ep) {
   // wrap around including current endpoint
   for (size_t i = 0; i <= idx; i++) {
     max3421_ep_t* ep = &_hcd_data.ep[i];
-    if (ep->state >= EP_STATE_ATTEMPT1 && ep->state <= EP_STATE_ATTEMPT_QUANTITY && ep->packet_size) {
-      // TU_LOG3("next pending i = %u\r\n", i);
+    if (is_ep_pending(ep)) {
       return ep;
     }
   }
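Taken together, is_ep_pending() and the two loops above give a fair round-robin: scan forward from the endpoint after the current one, then wrap around up to and including the current endpoint, so the current endpoint is retried only after every other pending endpoint has had a turn. A standalone toy version of that scan pattern (the array and flags are stand-ins, not driver types):

#include <stdio.h>

#define EP_TOTAL 4

static int pending[EP_TOTAL] = {0, 1, 0, 1}; // stands in for is_ep_pending()

// Forward from cur+1, then wrap around up to and including cur;
// returns -1 when nothing is pending (the driver returns NULL).
static int find_next_pending(int cur) {
  for (int i = cur + 1; i < EP_TOTAL; i++) {
    if (pending[i]) return i;
  }
  for (int i = 0; i <= cur; i++) {
    if (pending[i]) return i;
  }
  return -1;
}

int main(void) {
  printf("%d\n", find_next_pending(1)); // 3: next pending after endpoint 1
  printf("%d\n", find_next_pending(3)); // 1: wraps around past the end
  pending[1] = pending[3] = 0;
  printf("%d\n", find_next_pending(0)); // -1: nothing left to schedule
  return 0;
}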
@@ -469,7 +455,9 @@ bool hcd_init(uint8_t rhport) {
   tuh_max3421_int_api(rhport, false);
 
   TU_LOG2_INT(sizeof(max3421_ep_t));
+  TU_LOG2_INT(sizeof(atomic_flag));
   TU_LOG2_INT(sizeof(max3421_data_t));
+  TU_LOG2_INT(offsetof(max3421_data_t, ep));
 
   tu_memclr(&_hcd_data, sizeof(_hcd_data));
   _hcd_data.peraddr = 0xff; // invalid
@@ -689,7 +677,7 @@ bool hcd_edpt_xfer(uint8_t rhport, uint8_t daddr, uint8_t ep_addr, uint8_t * buf
   ep->buf = buffer;
   ep->total_len = buflen;
   ep->xferred_len = 0;
-  ep->state = EP_STATE_ATTEMPT1;
+  ep->state = EP_STATE_ATTEMPT_1;
 
   if ( ep_num == 0 ) {
     ep->is_setup = 0;
@@ -726,7 +714,7 @@ bool hcd_setup_send(uint8_t rhport, uint8_t daddr, uint8_t const setup_packet[8]
   ep->buf = (uint8_t*)(uintptr_t) setup_packet;
   ep->total_len = 8;
   ep->xferred_len = 0;
-  ep->state = EP_STATE_ATTEMPT1;
+  ep->state = EP_STATE_ATTEMPT_1;
 
   // carry out transfer if not busy
   if ( !atomic_flag_test_and_set(&_hcd_data.busy) ) {
@@ -839,40 +827,24 @@ static void handle_xfer_done(uint8_t rhport, bool in_isr) {
       break;
 
     case HRSL_NAK:
+      // TODO no retry for iso in current frame
+      // TODO retry limitation over all (not only per frame)
       if (ep_num == 0) {
-        // setup/control => retry immediately
+        // control endpoint -> retry immediately
        hxfr_write(rhport, _hcd_data.hxfr, in_isr);
-#if CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME
-      } else if (ep->state == CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME) {
-        // no more retry this frame if max. attempts are reached => suspend and retry it next frame
-        ep->state = EP_STATE_SUSPENDED;
-        max3421_ep_t * next_ep = find_next_pending_ep(ep);
-        if (next_ep) {
-          // switch to next pending endpoint TODO could have issue with double buffered if not clear previously out data
-          xact_inout(rhport, next_ep, true, in_isr);
-        } else {
-          // no more pending => clear busy
-          atomic_flag_clear(&_hcd_data.busy);
-        }
-#endif
       } else {
-        // another attempt
-#if CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME
-        ep->state++;
-#endif
+        if (ep->state < EP_STATE_ATTEMPT_MAX) {
+          ep->state++;
+        }
         max3421_ep_t * next_ep = find_next_pending_ep(ep);
         if (ep == next_ep) {
-          // this endpoint is only one pending => retry immediately
+          // this endpoint is only one pending -> retry immediately
           hxfr_write(rhport, _hcd_data.hxfr, in_isr);
         } else if (next_ep) {
           // switch to next pending endpoint TODO could have issue with double buffered if not clear previously out data
           xact_inout(rhport, next_ep, true, in_isr);
         } else {
-          TU_ASSERT(false,);
+          // no more pending in this frame -> clear busy
+          atomic_flag_clear(&_hcd_data.busy);
         }
       }
       return;
@@ -957,28 +929,24 @@ void hcd_int_handler(uint8_t rhport, bool in_isr) {
   if (hirq & HIRQ_FRAME_IRQ) {
     _hcd_data.frame_count++;
 
-#if CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME
-    // endpoints retry or restart attempt next frame
-    for (size_t i = 0; i < CFG_TUH_MAX3421_ENDPOINT_TOTAL; i++) {
-      max3421_ep_t * ep = &_hcd_data.ep[i];
-
-      // retry the endpoints that are suspended (NAK) last frame
-      if (ep->state == EP_STATE_SUSPENDED) {
-        // resume and retry suspended endpoint with attempt 1
-        ep->state = EP_STATE_ATTEMPT1;
-
-        if (ep->packet_size) { // first test packet_size before atomic_flag_test_and_set()
-          if (!atomic_flag_test_and_set(&_hcd_data.busy) ) {
-            // trigger endpoint to be retried
-            xact_inout(rhport, ep, true, in_isr);
-          }
-        }
-      } else if (ep->state > EP_STATE_ATTEMPT1 && ep->state <= CFG_TUH_MAX3421_MAX_ATTEMPTS_PER_FRAME) {
-        // restart all other running/pending endpoints
-        ep->state = EP_STATE_ATTEMPT1;
-      }
-    }
-#endif
+    max3421_ep_t* ep_retry = NULL;
+
+    // reset all endpoints attempt counter
+    for (size_t i = 0; i < CFG_TUH_MAX3421_ENDPOINT_TOTAL; i++) {
+      max3421_ep_t* ep = &_hcd_data.ep[i];
+      if (ep->packet_size && ep->state > EP_STATE_ATTEMPT_1) {
+        ep->state = EP_STATE_ATTEMPT_1;
+        if (ep_retry == NULL) {
+          ep_retry = ep;
+        }
+      }
+    }
+
+    // start usb transfer if not busy
+    if (ep_retry != NULL && !atomic_flag_test_and_set(&_hcd_data.busy)) {
+      xact_inout(rhport, ep_retry, true, in_isr);
+    }
   }
 
   if (hirq & HIRQ_CONDET_IRQ) {
@@ -987,17 +955,17 @@ void hcd_int_handler(uint8_t rhport, bool in_isr) {
   // queue more transfer in handle_xfer_done() can cause hirq to be set again while external IRQ may not catch and/or
   // not call this handler again. So we need to loop until all IRQ are cleared
-  while ( hirq & (HIRQ_RCVDAV_IRQ | HIRQ_HXFRDN_IRQ) ) {
-    if ( hirq & HIRQ_RCVDAV_IRQ ) {
+  while (hirq & (HIRQ_RCVDAV_IRQ | HIRQ_HXFRDN_IRQ)) {
+    if (hirq & HIRQ_RCVDAV_IRQ) {
       uint8_t const ep_num = _hcd_data.hxfr & HXFR_EPNUM_MASK;
-      max3421_ep_t *ep = find_opened_ep(_hcd_data.peraddr, ep_num, 1);
+      max3421_ep_t* ep = find_opened_ep(_hcd_data.peraddr, ep_num, 1);
       uint8_t xact_len = 0;
 
       // RCVDAV_IRQ can trigger 2 times (dual buffered)
-      while ( hirq & HIRQ_RCVDAV_IRQ ) {
+      while (hirq & HIRQ_RCVDAV_IRQ) {
         uint8_t rcvbc = reg_read(rhport, RCVBC_ADDR, in_isr);
         xact_len = (uint8_t) tu_min16(rcvbc, ep->total_len - ep->xferred_len);
-        if ( xact_len ) {
+        if (xact_len) {
           fifo_read(rhport, ep->buf, xact_len, in_isr);
           ep->buf += xact_len;
           ep->xferred_len += xact_len;

@@ -1008,12 +976,12 @@ void hcd_int_handler(uint8_t rhport, bool in_isr) {
         hirq = reg_read(rhport, HIRQ_ADDR, in_isr);
       }
 
-      if ( xact_len < ep->packet_size || ep->xferred_len >= ep->total_len ) {
+      if (xact_len < ep->packet_size || ep->xferred_len >= ep->total_len) {
         ep->state = EP_STATE_COMPLETE;
       }
     }
 
-    if ( hirq & HIRQ_HXFRDN_IRQ ) {
+    if (hirq & HIRQ_HXFRDN_IRQ) {
       hirq_write(rhport, HIRQ_HXFRDN_IRQ, in_isr);
       handle_xfer_done(rhport, in_isr);
     }