more progress on dwc2 hcd, initial code for edpt xfer

hathach 2024-10-25 00:20:34 +07:00
parent 8461525d48
commit 063661e3a3
No known key found for this signature in database
GPG Key ID: 26FAB84F615C3C52
9 changed files with 407 additions and 172 deletions


@ -123,11 +123,15 @@ TU_ATTR_ALWAYS_INLINE static inline int tu_memcpy_s(void *dest, size_t destsz, c
//------------- Bytes -------------//
TU_ATTR_ALWAYS_INLINE static inline uint32_t tu_u32(uint8_t b3, uint8_t b2, uint8_t b1, uint8_t b0) {
return ( ((uint32_t) b3) << 24) | ( ((uint32_t) b2) << 16) | ( ((uint32_t) b1) << 8) | b0;
return (((uint32_t)b3) << 24) | (((uint32_t)b2) << 16) | (((uint32_t)b1) << 8) | b0;
}
TU_ATTR_ALWAYS_INLINE static inline uint32_t tu_u32_from_u16(uint16_t high, uint16_t low) {
return (((uint32_t)high) << 16) | low;
}
TU_ATTR_ALWAYS_INLINE static inline uint16_t tu_u16(uint8_t high, uint8_t low) {
return (uint16_t) ((((uint16_t) high) << 8) | low);
return (uint16_t)((((uint16_t)high) << 8) | low);
}
TU_ATTR_ALWAYS_INLINE static inline uint8_t tu_u32_byte3(uint32_t ui32) { return TU_U32_BYTE3(ui32); }
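The new tu_u32_from_u16 helper is used later in this diff by the host FIFO setup to pack a FIFO depth (high half-word) and start address (low half-word) into one register. A minimal illustration with made-up values, not part of the commit:

uint16_t depth = 32;   // FIFO depth in 32-bit words (illustrative)
uint16_t start = 256;  // FIFO start offset in words (illustrative)
uint32_t fsiz  = tu_u32_from_u16(depth, start);  // == 0x00200100, same packing as gnptxfsiz/hptxfsiz below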


@ -56,10 +56,10 @@ typedef enum {
/// defined based on the USB spec's endpoint bmAttributes
typedef enum {
TUSB_XFER_CONTROL = 0 ,
TUSB_XFER_ISOCHRONOUS ,
TUSB_XFER_BULK ,
TUSB_XFER_INTERRUPT
TUSB_XFER_CONTROL = 0,
TUSB_XFER_ISOCHRONOUS = 1,
TUSB_XFER_BULK = 2,
TUSB_XFER_INTERRUPT = 3
} tusb_xfer_type_t;
typedef enum {


@ -56,7 +56,7 @@ static xfer_ctl_t xfer_status[DWC2_EP_MAX][2];
// EP0 transfers are limited to 1 packet - larger sizes have to be split
static uint16_t ep0_pending[2]; // Index determines direction as tusb_dir_t type
static uint16_t _dfifo_top; // top free location in FIFO RAM
static uint16_t _dfifo_top; // top free location in DFIFO in words
// Number of IN endpoints active
static uint8_t _allocated_ep_in_count;
@ -68,6 +68,12 @@ static bool _sof_en;
// DMA
//--------------------------------------------------------------------
TU_ATTR_ALWAYS_INLINE static inline bool dma_device_enabled(const dwc2_regs_t* dwc2) {
(void) dwc2;
// Internal DMA only
return CFG_TUD_DWC2_DMA && dwc2->ghwcfg2_bm.arch == GHWCFG2_ARCH_INTERNAL_DMA;
}
static void dma_setup_prepare(uint8_t rhport) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
@ -103,11 +109,9 @@ static void dma_setup_prepare(uint8_t rhport) {
possible since the free space is located between the RX and TX FIFOs.
---------------- ep_fifo_size
| EPInfo |
| for DMA |
| EPInfo DMA |
|-------------|-- gdfifocfg.EPINFOBASE (max is ghwcfg3.dfifo_depth)
| IN FIFO 0 |
| control |
| IN FIFO 0 | control EP
|-------------|
| IN FIFO 1 |
|-------------|
@ -126,13 +130,13 @@ static void dma_setup_prepare(uint8_t rhport) {
- All EP OUT shared a unique OUT FIFO which uses (for Slave or Buffer DMA, Scatt/Gather DMA use different formula):
- 13 for setup packets + control words (up to 3 setup packets).
- 1 for global NAK (not required/used here).
- Largest-EPsize / 4 + 1. ( FS: 64 bytes, HS: 512 bytes). Recommended is "2 x (Largest-EPsize/4) + 1"
- Largest-EPsize/4 + 1. ( FS: 64 bytes, HS: 512 bytes). Recommended is "2 x (Largest-EPsize/4 + 1)"
- 2 for each used OUT endpoint
Therefore GRXFSIZ = 13 + 1 + 2 x (Largest-EPsize/4 + 1) + 2 x EPOUTnum
*/
TU_ATTR_ALWAYS_INLINE static inline uint16_t calc_grxfsiz(uint16_t largest_ep_size, uint8_t ep_count) {
TU_ATTR_ALWAYS_INLINE static inline uint16_t calc_device_grxfsiz(uint16_t largest_ep_size, uint8_t ep_count) {
return 13 + 1 + 2 * ((largest_ep_size / 4) + 1) + 2 * ep_count;
}
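A quick sanity check of the formula above, with illustrative full-speed numbers (64-byte largest OUT endpoint, 4 OUT endpoints in use); these values are not from the commit:

// GRXFSIZ = 13 + 1 + 2*(64/4 + 1) + 2*4 = 56 words
uint16_t grxfsiz_example = calc_device_grxfsiz(64, 4); // == 56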
@ -148,7 +152,7 @@ static bool dfifo_alloc(uint8_t rhport, uint8_t ep_addr, uint16_t packet_size) {
uint16_t fifo_size = tu_div_ceil(packet_size, 4);
if (dir == TUSB_DIR_OUT) {
// Calculate required size of RX FIFO
uint16_t const new_sz = calc_grxfsiz(4 * fifo_size, ep_count);
uint16_t const new_sz = calc_device_grxfsiz(4 * fifo_size, ep_count);
// If size_rx needs to be extended check if there is enough free space
if (dwc2->grxfsiz < new_sz) {
@ -184,17 +188,18 @@ static bool dfifo_alloc(uint8_t rhport, uint8_t ep_addr, uint16_t packet_size) {
return true;
}
static void dfifo_init(uint8_t rhport) {
static void dfifo_device_init(uint8_t rhport) {
const dwc2_controller_t* dwc2_controller = &_dwc2_controller[rhport];
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
dwc2->grxfsiz = calc_grxfsiz(CFG_TUD_ENDPOINT0_SIZE, dwc2_controller->ep_count);
dwc2->grxfsiz = calc_device_grxfsiz(CFG_TUD_ENDPOINT0_SIZE, dwc2_controller->ep_count);
if(dwc2_dma_enabled(dwc2, TUSB_ROLE_DEVICE)) {
// DMA use last DFIFO to store metadata
_dfifo_top = dma_cal_epfifo_base(rhport);
}else {
_dfifo_top = dwc2_controller->ep_fifo_size / 4;
// Scatter/Gather DMA mode is not yet supported. Buffer DMA only needs 1 word per endpoint direction
const bool is_dma = dma_device_enabled(dwc2);
_dfifo_top = dwc2_controller->ep_fifo_size/4;
if (is_dma) {
_dfifo_top -= 2 * dwc2_controller->ep_count;
}
dwc2->gdfifocfg = (_dfifo_top << GDFIFOCFG_EPINFOBASE_SHIFT) | _dfifo_top;
// Allocate FIFO for EP0 IN
dfifo_alloc(rhport, 0x80, CFG_TUD_ENDPOINT0_SIZE);
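To put numbers on the DMA metadata reservation above (illustrative only; ep_fifo_size and ep_count come from _dwc2_controller):

// e.g. ep_fifo_size = 1280 bytes -> 320 words, ep_count = 6 endpoint pairs (illustrative):
//   slave/IRQ mode : _dfifo_top = 320
//   buffer DMA     : _dfifo_top = 320 - 2*6 = 308  (1 word kept per endpoint direction for metadata)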
@ -357,7 +362,7 @@ static void bus_reset(uint8_t rhport) {
dwc2->diepmsk = DIEPMSK_TOM | DIEPMSK_XFRCM;
// 4. Set up DFIFO
dfifo_init(rhport);
dfifo_device_init(rhport);
// 5. Reset device address
dwc2->dcfg &= ~DCFG_DAD_Msk;
@ -369,7 +374,7 @@ static void bus_reset(uint8_t rhport) {
xfer_status[0][TUSB_DIR_OUT].max_size = 64;
xfer_status[0][TUSB_DIR_IN].max_size = 64;
if(dwc2_dma_enabled(dwc2, TUSB_ROLE_DEVICE)) {
if(dma_device_enabled(dwc2)) {
dma_setup_prepare(rhport);
} else {
dwc2->epout[0].doeptsiz |= (3 << DOEPTSIZ_STUPCNT_Pos);
@ -401,7 +406,7 @@ static void edpt_schedule_packets(uint8_t rhport, uint8_t const epnum, uint8_t c
dep->dieptsiz = (num_packets << DIEPTSIZ_PKTCNT_Pos) |
((total_bytes << DIEPTSIZ_XFRSIZ_Pos) & DIEPTSIZ_XFRSIZ_Msk);
if(dwc2_dma_enabled(dwc2, TUSB_ROLE_DEVICE)) {
if(dma_device_enabled(dwc2)) {
dep->diepdma = (uintptr_t)xfer->buffer;
// For ISO endpoint set correct odd/even bit for next frame.
@ -439,7 +444,7 @@ static void edpt_schedule_packets(uint8_t rhport, uint8_t const epnum, uint8_t c
dep->doepctl |= (odd_frame_now ? DOEPCTL_SD0PID_SEVNFRM_Msk : DOEPCTL_SODDFRM_Msk);
}
if(dwc2_dma_enabled(dwc2, TUSB_ROLE_DEVICE)) {
if(dma_device_enabled(dwc2)) {
dep->doepdma = (uintptr_t)xfer->buffer;
}
@ -451,11 +456,12 @@ static void edpt_schedule_packets(uint8_t rhport, uint8_t const epnum, uint8_t c
// Controller API
//--------------------------------------------------------------------
bool dcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
(void) rh_init;
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
// Core Initialization
const bool is_highspeed = dwc2_core_is_highspeed(dwc2, rh_init);
const bool is_dma = dwc2_dma_enabled(dwc2, TUSB_ROLE_DEVICE);
const bool is_highspeed = dwc2_core_is_highspeed(dwc2, TUSB_ROLE_DEVICE);
const bool is_dma = dma_device_enabled(dwc2);
TU_ASSERT(dwc2_core_init(rhport, is_highspeed, is_dma));
// Device Initialization
@ -463,7 +469,7 @@ bool dcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
// Set device max speed
uint32_t dcfg = dwc2->dcfg & ~DCFG_DSPD_Msk;
if (dwc2_core_is_highspeed(dwc2, rh_init)) {
if (is_highspeed) {
dcfg |= DCFG_DSPD_HS << DCFG_DSPD_Pos;
// XCVRDLY: transceiver delay between xcvr_sel and txvalid during device chirp is required
@ -611,7 +617,7 @@ void dcd_edpt_close_all(uint8_t rhport) {
dfifo_flush_tx(dwc2, 0x10); // all tx fifo
dfifo_flush_rx(dwc2);
dfifo_init(rhport); // re-init dfifo
dfifo_device_init(rhport); // re-init dfifo
}
bool dcd_edpt_iso_alloc(uint8_t rhport, uint8_t ep_addr, uint16_t largest_packet_size) {
@ -690,8 +696,9 @@ void dcd_edpt_close(uint8_t rhport, uint8_t ep_addr) {
}
void dcd_edpt_stall(uint8_t rhport, uint8_t ep_addr) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
edpt_disable(rhport, ep_addr, true);
if((tu_edpt_number(ep_addr) == 0) && dwc2_dma_enabled(DWC2_REG(rhport), TUSB_ROLE_DEVICE)) {
if((tu_edpt_number(ep_addr) == 0) && dma_device_enabled(dwc2)) {
dma_setup_prepare(rhport);
}
}
@ -808,7 +815,7 @@ static void handle_epout_irq(uint8_t rhport) {
if (doepint & DOEPINT_SETUP) {
epout->doepint = DOEPINT_SETUP;
if(dwc2_dma_enabled(dwc2, TUSB_ROLE_DEVICE)) {
if(dma_device_enabled(dwc2)) {
dma_setup_prepare(rhport);
}
@ -824,7 +831,7 @@ static void handle_epout_irq(uint8_t rhport) {
if (!(doepint & (DOEPINT_SETUP | DOEPINT_STPKTRX | DOEPINT_STSPHSRX))) {
xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, TUSB_DIR_OUT);
if(dwc2_dma_enabled(dwc2, TUSB_ROLE_DEVICE)) {
if(dma_device_enabled(dwc2)) {
if ((epnum == 0) && ep0_pending[TUSB_DIR_OUT]) {
// EP0 can only handle one packet. Schedule another packet to be received.
edpt_schedule_packets(rhport, epnum, TUSB_DIR_OUT, 1, ep0_pending[TUSB_DIR_OUT]);
@ -874,7 +881,7 @@ static void handle_epin_irq(uint8_t rhport) {
// Schedule another packet to be transmitted.
edpt_schedule_packets(rhport, n, TUSB_DIR_IN, 1, ep0_pending[TUSB_DIR_IN]);
} else {
if((n == 0) && dwc2_dma_enabled(dwc2, TUSB_ROLE_DEVICE)) {
if((n == 0) && dma_device_enabled(dwc2)) {
dma_setup_prepare(rhport);
}
dcd_event_xfer_complete(rhport, n | TUSB_DIR_IN_MASK, xfer->total_len, XFER_RESULT_SUCCESS, true);


@ -168,16 +168,16 @@ static bool check_dwc2(dwc2_regs_t* dwc2) {
//--------------------------------------------------------------------
//
//--------------------------------------------------------------------
bool dwc2_core_is_highspeed(dwc2_regs_t* dwc2, const tusb_rhport_init_t* rh_init) {
bool dwc2_core_is_highspeed(dwc2_regs_t* dwc2, tusb_role_t role) {
(void)dwc2;
#if CFG_TUD_ENABLED
if (rh_init->role == TUSB_ROLE_DEVICE && !TUD_OPT_HIGH_SPEED) {
if (role == TUSB_ROLE_DEVICE && !TUD_OPT_HIGH_SPEED) {
return false;
}
#endif
#if CFG_TUH_ENABLED
if (rh_init->role == TUSB_ROLE_HOST && !TUH_OPT_HIGH_SPEED) {
if (role == TUSB_ROLE_HOST && !TUH_OPT_HIGH_SPEED) {
return false;
}
#endif
@ -234,15 +234,7 @@ bool dwc2_core_init(uint8_t rhport, bool is_highspeed, bool is_dma) {
dwc2->gintmsk = 0;
// TODO can be enabled with device as well but tested with host for now
// if (rh_init->role == TUSB_ROLE_HOST) {
// dwc2->gintmsk |= OTG_INT_COMMON;
// }
if (is_dma) {
const uint16_t epinfo_base = dma_cal_epfifo_base(rhport);
dwc2->gdfifocfg = (epinfo_base << GDFIFOCFG_EPINFOBASE_SHIFT) | epinfo_base;
// DMA seems to be only settable after a core reset, and not possible to switch on-the-fly
dwc2->gahbcfg |= GAHBCFG_DMAEN | GAHBCFG_HBSTLEN_2;
} else {


@ -61,7 +61,9 @@ enum {
OTG_INT_COMMON = 0 // GINTSTS_DISCINT | GINTSTS_CONIDSTSCHNG
};
//------------- Core -------------//
//--------------------------------------------------------------------+
// Core/Controller
//--------------------------------------------------------------------+
TU_ATTR_ALWAYS_INLINE static inline dwc2_regs_t* DWC2_REG(uint8_t rhport) {
if (rhport >= DWC2_CONTROLLER_COUNT) {
// user mis-configured, ignore and use first controller
@ -70,11 +72,13 @@ TU_ATTR_ALWAYS_INLINE static inline dwc2_regs_t* DWC2_REG(uint8_t rhport) {
return (dwc2_regs_t*)_dwc2_controller[rhport].reg_base;
}
bool dwc2_core_is_highspeed(dwc2_regs_t* dwc2, const tusb_rhport_init_t* rh_init);
bool dwc2_core_is_highspeed(dwc2_regs_t* dwc2, tusb_role_t role);
bool dwc2_core_init(uint8_t rhport, bool is_highspeed, bool is_dma);
void dwc2_core_handle_common_irq(uint8_t rhport, bool in_isr);
//------------- DFIFO -------------//
//--------------------------------------------------------------------+
// DFIFO
//--------------------------------------------------------------------+
TU_ATTR_ALWAYS_INLINE static inline void dfifo_flush_tx(dwc2_regs_t* dwc2, uint8_t fnum) {
// flush TX fifo and wait for it cleared
dwc2->grstctl = GRSTCTL_TXFFLSH | (fnum << GRSTCTL_TXFNUM_Pos);
@ -86,27 +90,8 @@ TU_ATTR_ALWAYS_INLINE static inline void dfifo_flush_rx(dwc2_regs_t* dwc2) {
while (dwc2->grstctl & GRSTCTL_RXFFLSH_Msk) {}
}
//------------- DMA -------------//
TU_ATTR_ALWAYS_INLINE static inline bool dwc2_dma_enabled(const dwc2_regs_t* dwc2, tusb_role_t role) {
(void) dwc2;
if (CFG_TUD_DWC2_DMA == 0 && role == TUSB_ROLE_DEVICE) {
return false;
}
if (CFG_TUH_DWC2_DMA == 0 && role == TUSB_ROLE_HOST) {
return false;
}
// Internal DMA only
return dwc2->ghwcfg2_bm.arch == GHWCFG2_ARCH_INTERNAL_DMA;
}
TU_ATTR_ALWAYS_INLINE static inline uint16_t dma_cal_epfifo_base(uint8_t rhport) {
// Scatter/Gather DMA mode is not yet supported. Buffer DMA only needs 1 word per endpoint direction
const dwc2_controller_t* dwc2_controller = &_dwc2_controller[rhport];
return dwc2_controller->ep_fifo_size/4 - 2*dwc2_controller->ep_count;
}
//--------------------------------------------------------------------+
// DMA
//--------------------------------------------------------------------+
#endif


@ -24,7 +24,6 @@ dwc2_reg_value = {
'ST U5A5 HS': [0x5000, 0x4F54411A, 0, 0x228FE052, 0x03B882E8, 0xE2103E30],
'GD32VF103': [0x1000, 0, 0, 0, 0, 0],
'XMC4500': [0xAEC000, 0x4F54292A, 0, 0x228F5930, 0x027A01E5, 0xDBF08030]
}
# Combine dwc2_info with dwc2_reg_list


@ -138,6 +138,13 @@ enum {
GINTSTS_CMODE_HOST = 1,
};
enum {
HCTSIZ_PID_DATA0 = 0,
HCTSIZ_PID_DATA2 = 1,
HCTSIZ_PID_DATA1 = 2,
HCTSIZ_PID_SETUP = 3,
};
//--------------------------------------------------------------------
// Register bitfield definitions
//--------------------------------------------------------------------
@ -334,17 +341,49 @@ typedef struct TU_ATTR_PACKED {
uint32_t speed : 2; // 17..18 Port speed
uint32_t rsv19_31 :13; // 19..31 Reserved
}dwc2_hprt_t;
TU_VERIFY_STATIC(sizeof(dwc2_hprt_t) == 4, "incorrect size");
typedef struct TU_ATTR_PACKED {
uint32_t ep_size : 11; // 0..10 Maximum packet size
uint32_t ep_num : 4; // 11..14 Endpoint number
uint32_t ep_dir : 1; // 15 Endpoint direction
uint32_t rsv16 : 1; // 16 Reserved
uint32_t low_speed_dev : 1; // 17 Low-speed device
uint32_t ep_type : 2; // 18..19 Endpoint type
uint32_t err_multi_count : 2; // 20..21 Error (splitEn = 1) / Multi (SplitEn = 0) count
uint32_t dev_addr : 7; // 22..28 Device address
uint32_t odd_frame : 1; // 29 Odd frame
uint32_t disable : 1; // 30 Channel disable
uint32_t enable : 1; // 31 Channel enable
} dwc2_channel_char_t;
TU_VERIFY_STATIC(sizeof(dwc2_channel_char_t) == 4, "incorrect size");
typedef struct TU_ATTR_PACKED {
uint32_t hub_port : 7; // 0..6 Hub port number
uint32_t hub_addr : 7; // 7..13 Hub address
uint32_t xact_pos : 2; // 14..15 Transaction position
uint32_t split_compl : 1; // 16 Split completion
uint32_t rsv17_30 : 14; // 17..30 Reserved
uint32_t split_en : 1; // 31 Split enable
} dwc2_channel_split_t;
TU_VERIFY_STATIC(sizeof(dwc2_channel_split_t) == 4, "incorrect size");
// Host Channel
typedef struct {
volatile uint32_t hcchar; // 500 + 20*ch Host Channel Characteristics
volatile uint32_t hcsplt; // 504 + 20*ch Host Channel Split Control
volatile uint32_t hcint; // 508 + 20*ch Host Channel Interrupt
volatile uint32_t hcintmsk; // 50C + 20*ch Host Channel Interrupt Mask
volatile uint32_t hctsiz; // 510 + 20*ch Host Channel Transfer Size
volatile uint32_t hcdma; // 514 + 20*ch Host Channel DMA Address
uint32_t reserved518; // 518 + 20*ch
volatile uint32_t hcdmab; // 51C + 20*ch Host Channel DMA Address
union {
volatile uint32_t hcchar; // 500 + 20*ch Host Channel Characteristics
volatile dwc2_channel_char_t hcchar_bm;
};
union {
volatile uint32_t hcsplt; // 504 + 20*ch Host Channel Split Control
volatile dwc2_channel_split_t hcsplt_bm;
};
volatile uint32_t hcint; // 508 + 20*ch Host Channel Interrupt
volatile uint32_t hcintmsk; // 50C + 20*ch Host Channel Interrupt Mask
volatile uint32_t hctsiz; // 510 + 20*ch Host Channel Transfer Size
volatile uint32_t hcdma; // 514 + 20*ch Host Channel DMA Address
uint32_t reserved518; // 518 + 20*ch
volatile uint32_t hcdmab; // 51C + 20*ch Host Channel DMA Address
} dwc2_channel_t;
// Endpoint IN
@ -1720,15 +1759,15 @@ TU_VERIFY_STATIC(offsetof(dwc2_regs_t, fifo ) == 0x1000, "incorrect size");
#define HCSPLT_SPLITEN HCSPLT_SPLITEN_Msk // Split enable
/******************** Bit definition for HCINT register ********************/
#define HCINT_XFRC_Pos (0U)
#define HCINT_XFRC_Msk (0x1UL << HCINT_XFRC_Pos) // 0x00000001
#define HCINT_XFRC HCINT_XFRC_Msk // Transfer completed
#define HCINT_CHH_Pos (1U)
#define HCINT_CHH_Msk (0x1UL << HCINT_CHH_Pos) // 0x00000002
#define HCINT_CHH HCINT_CHH_Msk // Channel halted
#define HCINT_AHBERR_Pos (2U)
#define HCINT_AHBERR_Msk (0x1UL << HCINT_AHBERR_Pos) // 0x00000004
#define HCINT_AHBERR HCINT_AHBERR_Msk // AHB error
#define HCINT_XFER_COMPLETE_Pos (0U)
#define HCINT_XFER_COMPLETE_Msk (0x1UL << HCINT_XFER_COMPLETE_Pos) // 0x00000001
#define HCINT_XFER_COMPLETE HCINT_XFER_COMPLETE_Msk // Transfer completed
#define HCINT_CHANNEL_HALTED_Pos (1U)
#define HCINT_CHANNEL_HALTED_Msk (0x1UL << HCINT_CHANNEL_HALTED_Pos) // 0x00000002
#define HCINT_CHANNEL_HALTED HCINT_CHANNEL_HALTED_Msk // Channel halted
#define HCINT_AHB_ERR_Pos (2U)
#define HCINT_AHB_ERR_Msk (0x1UL << HCINT_AHB_ERR_Pos) // 0x00000004
#define HCINT_AHB_ERR HCINT_AHB_ERR_Msk // AHB error
#define HCINT_STALL_Pos (3U)
#define HCINT_STALL_Msk (0x1UL << HCINT_STALL_Pos) // 0x00000008
#define HCINT_STALL HCINT_STALL_Msk // STALL response received interrupt
@ -1741,18 +1780,27 @@ TU_VERIFY_STATIC(offsetof(dwc2_regs_t, fifo ) == 0x1000, "incorrect size");
#define HCINT_NYET_Pos (6U)
#define HCINT_NYET_Msk (0x1UL << HCINT_NYET_Pos) // 0x00000040
#define HCINT_NYET HCINT_NYET_Msk // Response received interrupt
#define HCINT_TXERR_Pos (7U)
#define HCINT_TXERR_Msk (0x1UL << HCINT_TXERR_Pos) // 0x00000080
#define HCINT_TXERR HCINT_TXERR_Msk // Transaction error
#define HCINT_BBERR_Pos (8U)
#define HCINT_BBERR_Msk (0x1UL << HCINT_BBERR_Pos) // 0x00000100
#define HCINT_BBERR HCINT_BBERR_Msk // Babble error
#define HCINT_FRMOR_Pos (9U)
#define HCINT_FRMOR_Msk (0x1UL << HCINT_FRMOR_Pos) // 0x00000200
#define HCINT_FRMOR HCINT_FRMOR_Msk // Frame overrun
#define HCINT_DTERR_Pos (10U)
#define HCINT_DTERR_Msk (0x1UL << HCINT_DTERR_Pos) // 0x00000400
#define HCINT_DTERR HCINT_DTERR_Msk // Data toggle error
#define HCINT_XACT_ERR_Pos (7U)
#define HCINT_XACT_ERR_Msk (0x1UL << HCINT_XACT_ERR_Pos) // 0x00000080
#define HCINT_XACT_ERR HCINT_XACT_ERR_Msk // Transaction error
#define HCINT_BABBLE_ERR_Pos (8U)
#define HCINT_BABBLE_ERR_Msk (0x1UL << HCINT_BABBLE_ERR_Pos) // 0x00000100
#define HCINT_BABBLE_ERR HCINT_BABBLE_ERR_Msk // Babble error
#define HCINT_FARME_OVERRUN_Pos (9U)
#define HCINT_FARME_OVERRUN_Msk (0x1UL << HCINT_FARME_OVERRUN_Pos) // 0x00000200
#define HCINT_FARME_OVERRUN HCINT_FARME_OVERRUN_Msk // Frame overrun
#define HCINT_DATATOGGLE_ERR_Pos (10U)
#define HCINT_DATATOGGLE_ERR_Msk (0x1UL << HCINT_DATATOGGLE_ERR_Pos) // 0x00000400
#define HCINT_DATATOGGLE_ERR HCINT_DATATOGGLE_ERR_Msk // Data toggle error
#define HCINT_BUFFER_NAK_Pos (11U)
#define HCINT_BUFFER_NAK_Msk (0x1UL << HCINT_BUFFER_NAK_Pos) // 0x00000800
#define HCINT_BUFFER_NAK HCINT_BUFFER_NAK_Msk // Buffer not available interrupt
#define HCINT_XCS_XACT_ERR_Pos (12U)
#define HCINT_XCS_XACT_ERR_Msk (0x1UL << HCINT_XCS_XACT_ERR_Pos) // 0x00001000
#define HCINT_XCS_XACT_ERR HCINT_XCS_XACT_ERR_Msk // Excessive transaction error
#define HCINT_DESC_ROLLOVER_Pos (13U)
#define HCINT_DESC_ROLLOVER_Msk (0x1UL << HCINT_DESC_ROLLOVER_Pos) // 0x00002000
#define HCINT_DESC_ROLLOVER HCINT_DESC_ROLLOVER_Msk // Descriptor rollover
/******************** Bit definition for DIEPINT register ********************/
#define DIEPINT_XFRC_Pos (0U)
@ -1795,41 +1843,6 @@ TU_VERIFY_STATIC(offsetof(dwc2_regs_t, fifo ) == 0x1000, "incorrect size");
#define DIEPINT_NAK_Msk (0x1UL << DIEPINT_NAK_Pos) // 0x00002000
#define DIEPINT_NAK DIEPINT_NAK_Msk // NAK interrupt
/******************** Bit definition for HCINTMSK register ********************/
#define HCINTMSK_XFRCM_Pos (0U)
#define HCINTMSK_XFRCM_Msk (0x1UL << HCINTMSK_XFRCM_Pos) // 0x00000001
#define HCINTMSK_XFRCM HCINTMSK_XFRCM_Msk // Transfer completed mask
#define HCINTMSK_CHHM_Pos (1U)
#define HCINTMSK_CHHM_Msk (0x1UL << HCINTMSK_CHHM_Pos) // 0x00000002
#define HCINTMSK_CHHM HCINTMSK_CHHM_Msk // Channel halted mask
#define HCINTMSK_AHBERR_Pos (2U)
#define HCINTMSK_AHBERR_Msk (0x1UL << HCINTMSK_AHBERR_Pos) // 0x00000004
#define HCINTMSK_AHBERR HCINTMSK_AHBERR_Msk // AHB error
#define HCINTMSK_STALLM_Pos (3U)
#define HCINTMSK_STALLM_Msk (0x1UL << HCINTMSK_STALLM_Pos) // 0x00000008
#define HCINTMSK_STALLM HCINTMSK_STALLM_Msk // STALL response received interrupt mask
#define HCINTMSK_NAKM_Pos (4U)
#define HCINTMSK_NAKM_Msk (0x1UL << HCINTMSK_NAKM_Pos) // 0x00000010
#define HCINTMSK_NAKM HCINTMSK_NAKM_Msk // NAK response received interrupt mask
#define HCINTMSK_ACKM_Pos (5U)
#define HCINTMSK_ACKM_Msk (0x1UL << HCINTMSK_ACKM_Pos) // 0x00000020
#define HCINTMSK_ACKM HCINTMSK_ACKM_Msk // ACK response received/transmitted interrupt mask
#define HCINTMSK_NYET_Pos (6U)
#define HCINTMSK_NYET_Msk (0x1UL << HCINTMSK_NYET_Pos) // 0x00000040
#define HCINTMSK_NYET HCINTMSK_NYET_Msk // response received interrupt mask
#define HCINTMSK_TXERRM_Pos (7U)
#define HCINTMSK_TXERRM_Msk (0x1UL << HCINTMSK_TXERRM_Pos) // 0x00000080
#define HCINTMSK_TXERRM HCINTMSK_TXERRM_Msk // Transaction error mask
#define HCINTMSK_BBERRM_Pos (8U)
#define HCINTMSK_BBERRM_Msk (0x1UL << HCINTMSK_BBERRM_Pos) // 0x00000100
#define HCINTMSK_BBERRM HCINTMSK_BBERRM_Msk // Babble error mask
#define HCINTMSK_FRMORM_Pos (9U)
#define HCINTMSK_FRMORM_Msk (0x1UL << HCINTMSK_FRMORM_Pos) // 0x00000200
#define HCINTMSK_FRMORM HCINTMSK_FRMORM_Msk // Frame overrun mask
#define HCINTMSK_DTERRM_Pos (10U)
#define HCINTMSK_DTERRM_Msk (0x1UL << HCINTMSK_DTERRM_Pos) // 0x00000400
#define HCINTMSK_DTERRM HCINTMSK_DTERRM_Msk // Data toggle error mask
/******************** Bit definition for DIEPTSIZ register ********************/
#define DIEPTSIZ_XFRSIZ_Pos (0U)
@ -1851,11 +1864,9 @@ TU_VERIFY_STATIC(offsetof(dwc2_regs_t, fifo ) == 0x1000, "incorrect size");
#define HCTSIZ_DOPING_Pos (31U)
#define HCTSIZ_DOPING_Msk (0x1UL << HCTSIZ_DOPING_Pos) // 0x80000000
#define HCTSIZ_DOPING HCTSIZ_DOPING_Msk // Do PING
#define HCTSIZ_DPID_Pos (29U)
#define HCTSIZ_DPID_Msk (0x3UL << HCTSIZ_DPID_Pos) // 0x60000000
#define HCTSIZ_DPID HCTSIZ_DPID_Msk // Data PID
#define HCTSIZ_DPID_0 (0x1UL << HCTSIZ_DPID_Pos) // 0x20000000
#define HCTSIZ_DPID_1 (0x2UL << HCTSIZ_DPID_Pos) // 0x40000000
#define HCTSIZ_PID_Pos (29U)
#define HCTSIZ_PID_Msk (0x3UL << HCTSIZ_PID_Pos) // 0x60000000
#define HCTSIZ_PID HCTSIZ_PID_Msk // Data PID
/******************** Bit definition for DIEPDMA register ********************/
#define DIEPDMA_DMAADDR_Pos (0U)


@ -34,10 +34,37 @@
#include "host/hcd.h"
#include "dwc2_common.h"
// DWC2 has a limited number of channels; to support all endpoints, each pipe's channel char/split settings are stored here and swapped onto a hardware channel later
#ifndef CFG_TUH_DWC2_CHANNEL_MAX
#define CFG_TUH_DWC2_CHANNEL_MAX (CFG_TUH_DEVICE_MAX*CFG_TUH_ENDPOINT_MAX + CFG_TUH_HUB)
#endif
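The default above is only a sizing heuristic; since it is guarded by #ifndef, a board's tusb_config.h can override it. A hedged illustration with made-up config values:

// With CFG_TUH_DEVICE_MAX = 4, CFG_TUH_ENDPOINT_MAX = 8, CFG_TUH_HUB = 1 (illustrative),
// the default evaluates to 4*8 + 1 = 33 software pipes. An application could instead set:
#define CFG_TUH_DWC2_CHANNEL_MAX   16   // example override in tusb_config.h, trading concurrency for RAM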
enum {
HPRT_W1C_MASK = HPRT_CONN_DETECT | HPRT_ENABLE | HPRT_ENABLE_CHANGE | HPRT_OVER_CURRENT_CHANGE
};
typedef struct {
union {
uint32_t hcchar;
dwc2_channel_char_t hcchar_bm;
};
union {
uint32_t hcsplt;
dwc2_channel_split_t hcsplt_bm;
};
uint8_t next_data_toggle;
} hcd_pipe_t;
typedef struct {
hcd_pipe_t pipe[CFG_TUH_DWC2_CHANNEL_MAX];
} dwc2_hcd_t;
dwc2_hcd_t _hcd_data;
//--------------------------------------------------------------------
//
//--------------------------------------------------------------------
TU_ATTR_ALWAYS_INLINE static inline tusb_speed_t convert_hprt_speed(uint32_t hprt_speed) {
tusb_speed_t speed;
switch(hprt_speed) {
@ -49,6 +76,85 @@ TU_ATTR_ALWAYS_INLINE static inline tusb_speed_t convert_hprt_speed(uint32_t hpr
return speed;
}
TU_ATTR_ALWAYS_INLINE static inline bool dma_host_enabled(const dwc2_regs_t* dwc2) {
(void) dwc2;
// Internal DMA only
return CFG_TUH_DWC2_DMA && dwc2->ghwcfg2_bm.arch == GHWCFG2_ARCH_INTERNAL_DMA;
}
/* USB Data FIFO Layout
The FIFO is split up into
- EPInfo: for storing DMA metadata (check dcd_dwc2.c for more details)
- 1 RX FIFO: for receiving data
- 1 TX FIFO for non-periodic (NPTX)
- 1 TX FIFO for periodic (PTX)
TX FIFOs are allocated from top to bottom (using a top pointer); this allows the RX FIFO to grow dynamically, which is
possible since the free space is located between the RX and TX FIFOs.
----------------- ep_fifo_size
| EPInfo DMA |
|--------------|-- gdfifocfg.EPINFOBASE (max is ghwcfg3.dfifo_depth)
| Non-Periodic |
| TX FIFO |
|--------------|--- GNPTXFSIZ.addr (fixed size)
| Periodic |
| TX FIFO |
|--------------|--- HPTXFSIZ.addr (expandable downward)
| FREE |
| |
|--------------|-- GRXFSIZ (expandable upward)
| RX FIFO |
---------------- 0
*/
/* Programming Guide 2.1.2 FIFO RAM allocation
* RX
* - Largest-EPsize/4 + 2 (status info). Recommended x2 if high-bandwidth or multiple ISO endpoints are used.
* - 2 for transfer complete and channel halted status
* - 1 for each Control/Bulk OUT endpoint to handle NAK/NYET (i.e. max is the number of host channels)
*
* TX non-periodic (NPTX)
* - At least largest-EPsize/4, recommended x2
*
* TX periodic (PTX)
* - At least largest-EPsize*MulCount/4 (MulCount up to 3 for high-bandwidth ISO/interrupt)
*/
static void dfifo_host_init(uint8_t rhport) {
const dwc2_controller_t* dwc2_controller = &_dwc2_controller[rhport];
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
// Scatter/Gather DMA mode is not yet supported. Buffer DMA only needs 1 word per channel
const bool is_dma = dma_host_enabled(dwc2);
uint16_t dfifo_top = dwc2_controller->ep_fifo_size/4;
if (is_dma) {
dfifo_top -= dwc2->ghwcfg2_bm.num_host_ch;
}
// fixed allocation for now, improve later:
// - ptx_largest is limited to 256 for FS since most FS cores only have 1024 bytes total
bool is_highspeed = dwc2_core_is_highspeed(dwc2, TUSB_ROLE_HOST);
uint32_t nptx_largest = is_highspeed ? TUSB_EPSIZE_BULK_HS/4 : TUSB_EPSIZE_BULK_FS/4;
uint32_t ptx_largest = is_highspeed ? TUSB_EPSIZE_ISO_HS_MAX/4 : 256/4;
uint16_t nptxfsiz = 2 * nptx_largest;
uint16_t rxfsiz = 2 * (ptx_largest + 2) + dwc2->ghwcfg2_bm.num_host_ch;
TU_ASSERT(dfifo_top >= (nptxfsiz + rxfsiz),);
uint16_t ptxfsiz = dfifo_top - (nptxfsiz + rxfsiz);
dwc2->gdfifocfg = (dfifo_top << GDFIFOCFG_EPINFOBASE_SHIFT) | dfifo_top;
dfifo_top -= rxfsiz;
dwc2->grxfsiz = rxfsiz;
dfifo_top -= nptxfsiz;
dwc2->gnptxfsiz = tu_u32_from_u16(nptxfsiz, dfifo_top);
dfifo_top -= ptxfsiz;
dwc2->hptxfsiz = tu_u32_from_u16(ptxfsiz, dfifo_top);
}
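To make the fixed allocation concrete, a hedged walk-through with illustrative full-speed numbers (1280-byte FIFO RAM, 8 host channels, no DMA); the real values come from the ghwcfg registers and _dwc2_controller:

// dfifo_top    = 1280/4              = 320 words
// nptx_largest = 64/4  = 16  ->  nptxfsiz = 2*16            = 32 words
// ptx_largest  = 256/4 = 64  ->  rxfsiz   = 2*(64 + 2) + 8  = 140 words
// ptxfsiz      = 320 - (32 + 140)                           = 148 words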
//--------------------------------------------------------------------+
// Controller API
//--------------------------------------------------------------------+
@ -64,11 +170,14 @@ bool hcd_configure(uint8_t rhport, uint32_t cfg_id, const void* cfg_param) {
// Initialize controller to host mode
bool hcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
(void) rh_init;
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
tu_memclr(&_hcd_data, sizeof(_hcd_data));
// Core Initialization
const bool is_highspeed = dwc2_core_is_highspeed(dwc2, rh_init);
const bool is_dma = dwc2_dma_enabled(dwc2, TUSB_ROLE_HOST);
const bool is_highspeed = dwc2_core_is_highspeed(dwc2, TUSB_ROLE_HOST);
const bool is_dma = dma_host_enabled(dwc2);
TU_ASSERT(dwc2_core_init(rhport, is_highspeed, is_dma));
//------------- 3.1 Host Initialization -------------//
@ -98,6 +207,9 @@ bool hcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
dwc2->gusbcfg = (dwc2->gusbcfg & ~GUSBCFG_FDMOD) | GUSBCFG_FHMOD;
while( (dwc2->gintsts & GINTSTS_CMOD) != GINTSTS_CMODE_HOST) {}
// configure fixed-allocated fifo scheme
dfifo_host_init(rhport);
dwc2->hprt = HPRT_W1C_MASK; // clear all write-1-clear bits
dwc2->hprt = HPRT_POWER; // turn on VBUS
@ -169,23 +281,108 @@ void hcd_device_close(uint8_t rhport, uint8_t dev_addr) {
//--------------------------------------------------------------------+
// Open an endpoint
bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const * ep_desc) {
// channel0 is reserved for dev0 control endpoint
bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_endpoint_t const * desc_ep) {
(void) rhport;
(void) dev_addr;
(void) ep_desc;
//dwc2_regs_t* dwc2 = DWC2_REG(rhport);
hcd_devtree_info_t devtree_info;
hcd_devtree_get_info(dev_addr, &devtree_info);
// find a free pipe
for (uint32_t i = 0; i < CFG_TUH_DWC2_CHANNEL_MAX; i++) {
hcd_pipe_t* pipe = &_hcd_data.pipe[i];
dwc2_channel_char_t* hcchar_bm = &pipe->hcchar_bm;
dwc2_channel_split_t* hcsplt_bm = &pipe->hcsplt_bm;
if (hcchar_bm->enable == 0) {
hcchar_bm->ep_size = tu_edpt_packet_size(desc_ep);
hcchar_bm->ep_num = tu_edpt_number(desc_ep->bEndpointAddress);
hcchar_bm->ep_dir = tu_edpt_dir(desc_ep->bEndpointAddress);
hcchar_bm->low_speed_dev = (devtree_info.speed == TUSB_SPEED_LOW) ? 1 : 0;
hcchar_bm->ep_type = desc_ep->bmAttributes.xfer;
hcchar_bm->err_multi_count = 0;
hcchar_bm->dev_addr = dev_addr;
hcchar_bm->odd_frame = 0;
hcchar_bm->disable = 0;
hcchar_bm->enable = 1;
hcsplt_bm->hub_port = devtree_info.hub_port;
hcsplt_bm->hub_addr = devtree_info.hub_addr;
// TODO split transactions are not yet supported
hcsplt_bm->xact_pos = 0;
hcsplt_bm->split_compl = 0;
hcsplt_bm->split_en = 0;
pipe->next_data_toggle = HCTSIZ_PID_DATA0;
return true;
}
}
return false;
}
TU_ATTR_ALWAYS_INLINE static inline uint8_t find_free_channel(dwc2_regs_t* dwc2) {
const uint8_t max_channel = tu_min8(dwc2->ghwcfg2_bm.num_host_ch, 16);
for (uint8_t i=0; i<max_channel; i++) {
// haintmsk bit enabled means channel is currently in use
if (!tu_bit_test(dwc2->haintmsk, i)) {
return i;
}
}
return TUSB_INDEX_INVALID_8;
}
TU_ATTR_ALWAYS_INLINE static inline uint8_t find_opened_pipe(uint8_t dev_addr, uint8_t ep_addr) {
for (uint32_t i = 0; i < CFG_TUH_DWC2_CHANNEL_MAX; i++) {
const dwc2_channel_char_t* hcchar_bm = &_hcd_data.pipe[i].hcchar_bm;
if (hcchar_bm->enable && hcchar_bm->dev_addr == dev_addr &&
ep_addr == tu_edpt_addr(hcchar_bm->ep_num, hcchar_bm->ep_dir)) {
return i;
}
}
return TUSB_INDEX_INVALID_8;
}
// Submit a transfer, when complete hcd_event_xfer_complete() must be invoked
bool hcd_edpt_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr, uint8_t * buffer, uint16_t buflen) {
(void) rhport;
(void) dev_addr;
(void) ep_addr;
(void) buffer;
(void) buflen;
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
return false;
uint8_t pipe_id = find_opened_pipe(dev_addr, ep_addr);
TU_ASSERT(pipe_id < CFG_TUH_DWC2_CHANNEL_MAX); // no opened pipe
hcd_pipe_t* pipe = &_hcd_data.pipe[pipe_id];
const dwc2_channel_char_t* hcchar_bm = &pipe->hcchar_bm;
uint8_t ch_id = find_free_channel(dwc2);
TU_ASSERT(ch_id < 16); // all channels are in use
dwc2->haintmsk |= TU_BIT(ch_id);
dwc2_channel_t* channel = &dwc2->channel[ch_id];
channel->hcintmsk = HCINT_XFER_COMPLETE | HCINT_CHANNEL_HALTED | HCINT_STALL |
HCINT_AHB_ERR | HCINT_XACT_ERR | HCINT_BABBLE_ERR | HCINT_DATATOGGLE_ERR;
const uint16_t packet_count = tu_div_ceil(buflen, hcchar_bm->ep_size);
channel->hctsiz = (pipe->next_data_toggle << HCTSIZ_PID_Pos) | (packet_count << HCTSIZ_PKTCNT_Pos) | buflen;
// Control transfers always start with DATA1 for the data and status stages. May have issues with ZLP
if (pipe->next_data_toggle == HCTSIZ_PID_DATA0 || tu_edpt_number(ep_addr) == 0) {
pipe->next_data_toggle = HCTSIZ_PID_DATA1;
} else {
pipe->next_data_toggle = HCTSIZ_PID_DATA0;
}
if (dma_host_enabled(dwc2)) {
channel->hcdma = (uint32_t) buffer;
} else {
TU_ASSERT(false); // not yet supported
}
// TODO support split transaction
channel->hcsplt = pipe->hcsplt;
channel->hcchar = pipe->hcchar; // kick-off transfer
return true;
}
// Abort a queued transfer. Note: it can only abort a transfer that has not yet started
@ -199,12 +396,13 @@ bool hcd_edpt_abort_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr) {
}
// Submit a special transfer to send 8-byte Setup Packet, when complete hcd_event_xfer_complete() must be invoked
bool hcd_setup_send(uint8_t rhport, uint8_t dev_addr, uint8_t const setup_packet[8]) {
(void) rhport;
(void) dev_addr;
(void) setup_packet;
bool hcd_setup_send(uint8_t rhport, uint8_t dev_addr, const uint8_t setup_packet[8]) {
uint8_t pipe_id = find_opened_pipe(dev_addr, 0);
TU_ASSERT(pipe_id < CFG_TUH_DWC2_CHANNEL_MAX); // no opened pipe
hcd_pipe_t* pipe = &_hcd_data.pipe[pipe_id];
pipe->next_data_toggle = HCTSIZ_PID_SETUP;
return false;
return hcd_edpt_xfer(rhport, dev_addr, 0, (uint8_t*)(uintptr_t) setup_packet, 8);
}
// clear stall, data toggle is also reset to DATA0
@ -300,23 +498,55 @@ TU_ATTR_ALWAYS_INLINE static inline void handle_hprt_irq(uint8_t rhport, bool in
dwc2->hprt = hprt; // clear interrupt
}
/* Interrupt Hierarchy
   HCINTn.XferCompl   HCINTMSKn.XferComplMsk
          |                     |
          +--------- AND -------+
                     |
      HAINT.CHn   HAINTMSK.CHn
          |            |
          +---- AND ---+
                |
   GINTSTS.PrtInt   GINTMSK.PrtInt
          |               |
          +------ AND ----+
                  |
         GAHBCFG.GblIntrMsk
                  |
                IRQn
*/
void handle_channel_irq(uint8_t rhport, bool in_isr) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
for(uint8_t ch_id=0; ch_id<32; ch_id++) {
if (tu_bit_test(dwc2->haint, ch_id)) {
dwc2_channel_t* channel = &dwc2->channel[ch_id];
uint32_t hcint = channel->hcint;
hcint &= channel->hcintmsk;
xfer_result_t result = XFER_RESULT_FAILED;
if (hcint & HCINT_XFER_COMPLETE) {
result = XFER_RESULT_SUCCESS;
}
if (hcint & HCINT_STALL) {
result = XFER_RESULT_STALLED;
}
if (hcint & (HCINT_CHANNEL_HALTED | HCINT_AHB_ERR | HCINT_XACT_ERR | HCINT_BABBLE_ERR | HCINT_DATATOGGLE_ERR |
HCINT_BUFFER_NAK | HCINT_XCS_XACT_ERR | HCINT_DESC_ROLLOVER)) {
result = XFER_RESULT_FAILED;
}
const uint8_t ep_addr = tu_edpt_addr(channel->hcchar_bm.ep_num, channel->hcchar_bm.ep_dir);
hcd_event_xfer_complete(channel->hcchar_bm.dev_addr, ep_addr, 0, result, in_isr);
channel->hcint = hcint; // clear all interrupt flags
// de-allocate channel by clearing haintmsk
dwc2->haintmsk &= ~TU_BIT(ch_id);
}
}
}
/* Interrupt Hierarchy
   HCINTn.XferCompl  HCINTMSKn.XferComplMsk       HPRT  ConnDetect  PrtEnChng  OverCurChng
          |                    |                            |           |           |
          +-------- AND -------+                            +---------- OR ---------+
                    |                                                   |
      HAINT.CHn  HAINTMSK.CHn                                           |
          |           |                                                 |
          +--- AND ---+                                                 |
               |                                                        |
    GINTSTS.HCInt  GINTMSK.HCInt                   GINTSTS.PrtInt  GINTMSK.PrtInt
          |              |                                |               |
          +----- AND ----+                                +------ AND ----+
                 |                                                |
                 +---------------------- OR ----------------------+
                                         |
                                GAHBCFG.GblIntrMsk
                                         |
                                       IRQn
*/
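For orientation, a minimal sketch of what the diagram implies when arming a channel: every level must be unmasked for the event to reach the CPU. This is illustrative, not part of the commit; GINTMSK reuses the GINTSTS bit layout, and GAHBCFG_GINT is an assumed macro name for the global interrupt-enable bit:

channel->hcintmsk = HCINT_XFER_COMPLETE;   // per-channel source mask (HCINTMSKn)
dwc2->haintmsk   |= TU_BIT(ch_id);         // per-channel aggregate mask (HAINTMSK.CHn)
dwc2->gintmsk    |= GINTSTS_HCINT;         // core-level host-channel interrupt (GINTMSK.HCInt)
dwc2->gahbcfg    |= GAHBCFG_GINT;          // global interrupt enable (GAHBCFG.GblIntrMsk, assumed name)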
void hcd_int_handler(uint8_t rhport, bool in_isr) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
@ -336,10 +566,17 @@ void hcd_int_handler(uint8_t rhport, bool in_isr) {
}
if (int_status & GINTSTS_HPRTINT) {
// Host port interrupt: source is cleared in HPRT register
TU_LOG1_HEX(dwc2->hprt);
handle_hprt_irq(rhport, in_isr);
}
if (int_status & GINTSTS_HCINT) {
// Host Channel interrupt: source is cleared in HCINT register
TU_LOG1_HEX(dwc2->hprt);
handle_channel_irq(rhport, in_isr);
}
// RxFIFO non-empty interrupt handling.
if (int_status & GINTSTS_RXFLVL) {
// RXFLVL bit is read-only


@ -255,7 +255,7 @@
#endif
#ifndef CFG_TUH_DWC2_DMA
#define CFG_TUH_DWC2_DMA 0
#define CFG_TUH_DWC2_DMA 1
#endif
// Enable PIO-USB software host controller