From d03a4ee1933d27cf041163d930d66ed90badd700 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 7 Sep 2021 15:46:44 -0500 Subject: [PATCH 001/123] Setup: add missing long_description_content_type to setup.cfg. --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 33ebb59dc..47e9339b2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,6 +2,7 @@ name = pyocd description = Cortex-M debugger for Python long_description = file: README.md +long_description_content_type = text/markdown maintainer = Chris Reed maintainer_email = chris.reed@arm.com url = https://github.com/pyocd/pyOCD From 9f8ff3d479b4831d95a1c3998ce6bb161d8f3bce Mon Sep 17 00:00:00 2001 From: ozersa <46590392+ozersa@users.noreply.github.com> Date: Sun, 12 Sep 2021 21:57:34 +0300 Subject: [PATCH 002/123] Add MAX32660 target and MAX32660EVSYS board (#1208) * Add MAX32660 target and MAX32660EVSYS board * PR Review Updates: Update copyright Remove parameters from super function call Signed-off-by: Sadik.Ozer --- pyocd/board/board_ids.py | 1 + pyocd/debug/svd/data/max32660.svd | 10650 ++++++++++++++++++++++ pyocd/target/builtin/__init__.py | 2 + pyocd/target/builtin/target_MAX32660.py | 91 + test/data/binaries/max32660evsys.bin | Bin 0 -> 31472 bytes 5 files changed, 10744 insertions(+) create mode 100644 pyocd/debug/svd/data/max32660.svd create mode 100644 pyocd/target/builtin/target_MAX32660.py create mode 100644 test/data/binaries/max32660evsys.bin diff --git a/pyocd/board/board_ids.py b/pyocd/board/board_ids.py index c1f76dab6..ca431da84 100644 --- a/pyocd/board/board_ids.py +++ b/pyocd/board/board_ids.py @@ -98,6 +98,7 @@ def __init__(self, name, target, binary): "0417": BoardInfo( "MAX32630MBED", "max32630", None ), "0418": BoardInfo( "MAX32620FTHR", "max32620", "max32620fthr.bin", ), "0420": BoardInfo( "MAX32630HSP3", "max32630", None ), + "0421": BoardInfo( "MAX32660EVSYS", "max32660", "max32660evsys.bin", ), "0451": BoardInfo( "MTB MXChip EMW3166", "stm32f412xg", 
"mtb_mxchip_emw3166.bin",), "0459": BoardInfo( "MTB Advantech WISE-1530", "stm32f412xg", "mtb_wise-1530.bin", ), "0462": BoardInfo( "MTB USI WM-BN-BM-22", "stm32f412xg", "mtb_usi_wm-bn-bm-22.bin",), diff --git a/pyocd/debug/svd/data/max32660.svd b/pyocd/debug/svd/data/max32660.svd new file mode 100644 index 000000000..564a9c89c --- /dev/null +++ b/pyocd/debug/svd/data/max32660.svd @@ -0,0 +1,10650 @@ + + + Maxim Integrated + Maxim + max32660 + ARMCM4 + 1.0 + MAX32660 32-bit ARM Cortex-M4 microcontroller with 96KB of system RAM and 256KB of flash memory. + + CM4 + r2p1 + little + true + true + 3 + false + + 8 + 32 + 0x20 + read-write + 0x00000000 + 0xFFFFFFFF + + + BBFC + Battery-Backed Function Control. + 0x40005800 + + 0x00 + 0x400 + registers + + + + BBFCR0 + Function Control Register 0. + 0x00 + read-write + + + CKPDRV + Hyperbus CK Pad Driver Control. + 0 + 4 + + + CKNPDRV + Hyperbus CKN Pad Driver Control. + 4 + 4 + + + RDSDLLEN + Hyperbus RDS DLL Power Up Control. + 8 + 1 + + + dis + Disabled. + 0 + + + en + Enabled. + 1 + + + + + + + + + + BBSIR + Battery-Backed Registers. + 0x40005400 + + 0x00 + 0x400 + registers + + + + rsv0 + RFU + 0x00 + + + BB_SIR2 + System Init. Configuration Register 2. + 0x08 + read-only + + + BB_SIR3 + System Init. Configuration Register 3. + 0x0C + read-only + + + + + + DMA + DMA Controller Fully programmable, chaining capable DMA channels. + 0x40028000 + 32 + + 0x00 + 0x1000 + registers + + + DMA0 + 28 + + + DMA1 + 29 + + + DMA2 + 30 + + + DMA3 + 31 + + + DMA4 + 68 + + + DMA5 + 69 + + + DMA6 + 70 + + + DMA7 + 71 + + + DMA8 + 72 + + + DMA9 + 73 + + + DMA10 + 74 + + + DMA11 + 75 + + + DMA12 + 76 + + + DMA13 + 77 + + + DMA14 + 78 + + + DMA15 + 79 + + + + CN + DMA Control Register. + 0x000 + + + CH0_IEN + Channel 0 Interrupt Enable. + 0 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + CH1_IEN + Channel 1 Interrupt Enable. + 1 + 1 + + + CH2_IEN + Channel 2 Interrupt Enable. 
+ 2 + 1 + + + CH3_IEN + Channel 3 Interrupt Enable. + 3 + 1 + + + + + INTR + DMA Interrupt Register. + 0x004 + read-only + + + CH0_IPEND + Channel Interrupt. To clear an interrupt, all active interrupt bits of the DMA_ST must be cleared. The interrupt bits are set only if their corresponding interrupt enable bits are set in DMA_CN. + 0 + 1 + + ch_ipend_enum + + inactive + No interrupt is pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + CH1_IPEND + 1 + 1 + + + CH2_IPEND + 2 + 1 + + + CH3_IPEND + 3 + 1 + + + + + 4 + 4 + CH[%s] + DMA Channel registers. + dma_ch + 0x100 + read-write + + CFG + DMA Channel Configuration Register. + 0x100 + + + CHEN + Channel Enable. This bit is automatically cleared when DMA_ST.CH_ST changes from 1 to 0. + 0 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + RLDEN + Reload Enable. Setting this bit to 1 enables DMA_SRC, DMA_DST and DMA_CNT to be reloaded with their corresponding reload registers upon count-to-zero. This bit is also writeable in the Count Reload Register. Refer to the description on Buffer Chaining for use of this bit. If buffer chaining is not used this bit must be written with a 0. This bit should be set after the reload registers have been programmed. + 1 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + PRI + DMA Priority. + 2 + 2 + + + high + Highest Priority. + 0 + + + medHigh + Medium High Priority. + 1 + + + medLow + Medium Low Priority. + 2 + + + low + Lowest Priority. + 3 + + + + + REQSEL + Request Select. Select DMA request line for this channel. If memory-to-memory is selected, the channel operates as if the request is always active. 
+ 4 + 6 + + + MEMTOMEM + Memory To Memory + 0x00 + + + SPI0RX + SPI0 RX + 0x01 + + + SPI1RX + SPI1 RX + 0x02 + + + UART0RX + UART0 RX + 0x04 + + + UART1RX + UART1 RX + 0x05 + + + I2C0RX + I2C0 RX + 0x07 + + + I2C1RX + I2C1 RX + 0x08 + + + SPI0TX + SPI0 TX + 0x21 + + + SPI1TX + SPI1 TX + 0x22 + + + UART0TX + UART0 TX + 0x24 + + + UART1TX + UART1 TX + 0x25 + + + I2C0TX + I2C0 TX + 0x27 + + + I2C1TX + I2C1 TX + 0x28 + + + + + REQWAIT + Request Wait Enable. When enabled, delay timer start until DMA request transitions from active to inactive. + 10 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + TOSEL + Time-Out Select. Selects the number of prescale clocks seen by the channel timer before a time-out conditions is generated for this channel. Important note: since the prescaler runs independent of the individual channel timers, the actual number of Pre-Scale clock edges seen has a margin of error equal to a single Pre-Scale clock. + 11 + 3 + + + to4 + Timeout of 3 to 4 prescale clocks. + 0 + + + to8 + Timeout of 7 to 8 prescale clocks. + 1 + + + to16 + Timeout of 15 to 16 prescale clocks. + 2 + + + to32 + Timeout of 31 to 32 prescale clocks. + 3 + + + to64 + Timeout of 63 to 64 prescale clocks. + 4 + + + to128 + Timeout of 127 to 128 prescale clocks. + 5 + + + to256 + Timeout of 255 to 256 prescale clocks. + 6 + + + to512 + Timeout of 511 to 512 prescale clocks. + 7 + + + + + PSSEL + Pre-Scale Select. Selects the Pre-Scale divider for timer clock input. + 14 + 2 + + + dis + Disable timer. + 0 + + + div256 + hclk / 256. + 1 + + + div64k + hclk / 64k. + 2 + + + div16M + hclk / 16M. + 3 + + + + + SRCWD + Source Width. In most cases, this will be the data width of each AHB transactions. However, the width will be reduced in the cases where DMA_CNT indicates a smaller value. + 16 + 2 + + + byte + Byte. + 0 + + + halfWord + Halfword. + 1 + + + word + Word. + 2 + + + + + SRCINC + Source Increment Enable. 
This bit enables DMA_SRC increment upon every AHB transaction. This bit is forced to 0 for DMA receive from peripherals. + 18 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + DSTWD + Destination Width. Indicates the width of the each AHB transactions to the destination peripheral or memory. (The actual width may be less than this if there are insufficient bytes in the DMA FIFO for the full width). + 20 + 2 + + + byte + Byte. + 0 + + + halfWord + Halfword. + 1 + + + word + Word. + 2 + + + + + DSTINC + Destination Increment Enable. This bit enables DMA_DST increment upon every AHB transaction. This bit is forced to 0 for DMA transmit to peripherals. + 22 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + BRST + Burst Size. The number of bytes to be transferred into and out of the DMA FIFO in a single burst. Burst size equals 1 + value stored in this field. + 24 + 5 + + + CHDIEN + Channel Disable Interrupt Enable. When enabled, the IPEND will be set to 1 whenever CH_ST changes from 1 to 0. + 30 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + CTZIEN + Count-to-zero Interrupts Enable. When enabled, the IPEND will be set to 1 whenever a count-to-zero event occurs. + 31 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + + + ST + DMA Channel Status Register. + 0x104 + + + CH_ST + Channel Status. This bit is used to indicate to the programmer when it is safe to change the configuration, address, and count registers for the channel. Whenever this bit is cleared by hardware, the DMA_CFG.CHEN bit is also cleared (if not cleared already). + 0 + 1 + read-only + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + IPEND + Channel Interrupt. + 1 + 1 + read-only + + + inactive + No interrupt is pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + CTZ_ST + Count-to-Zero (CTZ) Status + 2 + 1 + oneToClear + + ctz_st_enum_rd + read + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. 
+ 1 + + + + ctz_st_enum_wr + write + + Clear + Clears the interrupt flag + 1 + + + + + RLD_ST + Reload Status. + 3 + 1 + oneToClear + + read + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + write + + Clear + Clears the interrupt flag + 1 + + + + + BUS_ERR + Bus Error. Indicates that an AHB abort was received and the channel has been disabled. + 4 + 1 + oneToClear + + read + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + write + + Clear + Clears the interrupt flag + 1 + + + + + TO_ST + Time-Out Status. + 6 + 1 + oneToClear + + read + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + write + + Clear + Clears the interrupt flag + 1 + + + + + + + SRC + Source Device Address. If SRCINC=1, the counter bits are incremented by 1,2, or 4, depending on the data width of each AHB cycle. For peripheral transfers, some or all of the actual address bits are fixed. If SRCINC=0, this register remains constant. In the case where a count-to-zero condition occurs while RLDEN=1, the register is reloaded with the contents of DMA_SRC_RLD. + 0x108 + + + ADDR + 0 + 32 + + + + + DST + Destination Device Address. For peripheral transfers, some or all of the actual address bits are fixed. If DSTINC=1, this register is incremented on every AHB write out of the DMA FIFO. They are incremented by 1, 2, or 4, depending on the data width of each AHB cycle. In the case where a count-to-zero condition occurs while RLDEN=1, the register is reloaded with DMA_DST_RLD. + 0x10C + + + ADDR + 0 + 32 + + + + + CNT + DMA Counter. The user loads this register with the number of bytes to transfer. This counter decreases on every AHB cycle into the DMA FIFO. The decrement will be 1, 2, or 4 depending on the data width of each AHB cycle. When the counter reaches 0, a count-to-zero condition is triggered. + 0x110 + + + CNT + DMA Counter. 
+ 0 + 24 + + + + + SRC_RLD + Source Address Reload Value. The value of this register is loaded into DMA0_SRC upon a count-to-zero condition. + 0x114 + + + SRC_RLD + Source Address Reload Value. + 0 + 31 + + + + + DST_RLD + Destination Address Reload Value. The value of this register is loaded into DMA0_DST upon a count-to-zero condition. + 0x118 + + + DST_RLD + Destination Address Reload Value. + 0 + 31 + + + + + CNT_RLD + DMA Channel Count Reload Register. + 0x11C + + + CNT_RLD + Count Reload Value. The value of this register is loaded into DMA0_CNT upon a count-to-zero condition. + 0 + 24 + + + RLDEN + Reload Enable. This bit should be set after the address reload registers have been programmed. This bit is automatically cleared to 0 when reload occurs. + 31 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + + + + + + + FLC + Flash Memory Control. + FLSH_ + 0x40029000 + + 0x00 + 0x1000 + registers + + + Flash_Controller + Flash Controller interrupt. + 23 + + + + ADDR + Flash Write Address. + 0x00 + + + ADDR + Address for next operation. + 0 + 32 + + + + + CLKDIV + Flash Clock Divide. The clock (PLL0) is divided by this value to generate a 1 MHz clock for Flash controller. + 0x04 + 0x00000064 + + + CLKDIV + Flash Clock Divide. The clock is divided by this value to generate a 1MHz clock for flash controller. + 0 + 8 + + + + + CN + Flash Control Register. + 0x08 + + + WR + Write. This bit is automatically cleared after the operation. + 0 + 1 + + + complete + No operation/complete. + 0 + + + start + Start operation. + 1 + + + + + ME + Mass Erase. This bit is automatically cleared after the operation. + 1 + 1 + + + PGE + Page Erase. This bit is automatically cleared after the operation. + 2 + 1 + + + WDTH + Data Width. This bits selects write data width. + 4 + 1 + + + size128 + 128-bit. + 0 + + + size32 + 32-bit. + 1 + + + + + ERASE_CODE + Erase Code. The ERASE_CODE must be set up property before erase operation can be initiated. 
These bits are automatically cleared after the operation is complete. + 8 + 8 + + + nop + No operation. + 0 + + + erasePage + Enable Page Erase. + 0x55 + + + eraseAll + Enable Mass Erase. The debug port must be enabled. + 0xAA + + + + + PEND + Flash Pending. When Flash operation is in progress (busy), Flash reads and writes will fail. When PEND is set, write to all Flash registers, with exception of the Flash interrupt register, are ignored. + 24 + 1 + read-only + + + idle + Idle. + 0 + + + busy + Busy. + 1 + + + + + LVE + Low Voltage Read Enable + 25 + 1 + read-only + + lve_read + read + + dis + Disabled + 0 + + + en + Enabled + 1 + + + + + BRST + Burst Mode Enable. + 27 + 1 + + + disable + Disable + 0 + + + enable + Enable + 1 + + + + + UNLOCK + Flash Unlock. The correct unlock code must be written to these four bits before any Flash write or erase operation is allowed. + 28 + 4 + + + unlocked + Flash Unlocked + 2 + + + locked + Flash Locked + 3 + + + + + + + INTR + Flash Interrupt Register. + 0x024 + + + DONE + Flash Done Interrupt. This bit is set to 1 upon Flash write or erase completion. + 0 + 1 + + + inactive + No interrupt is pending + 0 + + + pending + An interrupt is pending + 1 + + + + + AF + Flash Access Fail. This bit is set when an attempt is made to write the flash while the flash is busy or the flash is locked. This bit can only be set to 1 by hardware. + 1 + 1 + + + noError + No Failure. + 0 + + + error + Failure occurs. + 1 + + + + + DONEIE + Flash Done Interrupt Enable. + 8 + 1 + + + disable + Disable. + 0 + + + enable + Enable. + 1 + + + + + AFIE + 9 + 1 + + + + + 4 + 4 + DATA[%s] + Flash Write Data. + 0x30 + + + DATA + Data next operation. + 0 + 32 + + + + + ACNTL + Access Control Register. Writing the ACNTL register with the following values in the order shown, allows read and write access to the system and user Information block: pflc-acntl = 0x3a7f5ca3; pflc-acntl = 0xa1e34f20; pflc-acntl = 0x9608b2c1. 
When unlocked, a write of any word will disable access to system and user information block. Readback of this register is always zero. + 0x40 + write-only + + + ACNTL + Access control. + 0 + 32 + + + + + + + + GCR + Global Control Registers. + 0x40000000 + + 0 + 0x400 + registers + + + + SCON + System Control. + 0x00 + 0xFFFFFFFE + + + SBUSARB + System bus abritration scheme. These bits are used to select between Fixed-burst abritration and Round-Robin scheme. The Round-Robin scheme is selected by default. These bits are reset by the system reset. + 1 + 2 + + + fix + Fixed Burst abritration. + 0 + + + round + Round-robin scheme. + 1 + + + + + FLASH_PAGE_FLIP + Flips the Flash bottom and top halves. (Depending on the total flash size, each half is either 256K or 512K). Initiating a flash page flip will cause a flush of both the data buffer on the DCODE bus and the internal instruction buffer. + 4 + 1 + + + normal + Physical layout matches logical layout. + 0 + + + swapped + Bottom half mapped to logical top half and vice versa. + 1 + + + + + FPU_DIS + Floating Point Unit Disable + 5 + 1 + + + enable + enable Floating point unit + 0 + + + disable + disable floating point unit + 1 + + + + + CCACHE_FLUSH + Code Cache Flush. This bit is used to flush the code caches and the instruction buffer of the Cortex-M4. + 6 + 1 + + + normal + Normal Code Cache Operation + 0 + + + flush + Code Caches and CPU instruction buffer are flushed + 1 + + + + + SWD_DIS + Serial Wire Debug Disable + 14 + 1 + + + enable + Enable JTAG SWD + 0 + + + disable + Disable JTAG SWD + 1 + + + + + + + RSTR0 + Reset. + 0x04 + + + DMA + DMA Reset. + 0 + 1 + + dma_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + dma_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + WDT + Watchdog Timer Reset. + 1 + 1 + + wdt_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. 
+ 1 + + + + wdt_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + GPIO0 + GPIO0 Reset. Setting this bit to 1 resets GPIO0 pins to their default states. + 2 + 1 + + gpio0_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + gpio0_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + TIMER0 + Timer0 Reset. Setting this bit to 1 resets Timer 0 blocks. + 5 + 1 + + timer0_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + timer0_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + TIMER1 + Timer1 Reset. Setting this bit to 1 resets Timer 1 blocks. + 6 + 1 + + timer1_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + timer1_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + TIMER2 + Timer2 Reset. Setting this bit to 1 resets Timer 2 blocks. + 7 + 1 + + timer2_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + timer2_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + UART0 + UART0 Reset. Setting this bit to 1 resets all UART 0 blocks. + 11 + 1 + + uart0_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + uart0_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + UART1 + UART1 Reset. Setting this bit to 1 resets all UART 1 blocks. + 12 + 1 + + uart1_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + uart1_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + SPI0 + SPI0 Reset. Setting this bit to 1 resets all SPI 0 blocks. + 13 + 1 + + spi0_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. 
+ 1 + + + + spi0_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + SPI1 + SPI1 Reset. Setting this bit to 1 resets all SPI 1 blocks. + 14 + 1 + + spi1_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + xpi1_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + I2C0 + I2C0 Reset. + 16 + 1 + + i2c0_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + i2c0_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + RTC + Real Time Clock Reset. + 17 + 1 + + rtc_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + rtc_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + SRST + Soft Reset.Write 1 to perform a Soft Reset. A soft reset performs a Peripheral Reset and also resets the GPIO peripheral but does not reset the CPU or Watchdog Timer. + 29 + 1 + + srst_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + srst_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + PRST + Peripheral Reset. Setting this bit to 1 resets all peripherals. The CPU core, the watchdog timer, and all GPIO pins are unaffected by this reset. + 30 + 1 + + prst_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + prst_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + SYSTEM + System Reset. Setting this bit to 1 resets the CPU core and all peripherals, including the watchdog timer. + 31 + 1 + + system_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + system_read + read + + Reset_Done + Reset Complete + 0 + + + Busy + Reset Busy + 1 + + + + + + + CLKCN + Clock Control. + 0x08 + 0x00000008 + + + PSC + Prescaler Select. 
This 3 bit field sets the system operating frequency by controlling the prescaler that divides the output of the PLL0. + 6 + 3 + + + div1 + Divide by 1. + 0 + + + div2 + Divide by 2. + 1 + + + div4 + Divide by 4. + 2 + + + div8 + Divide by 8. + 3 + + + div16 + Divide by 16. + 4 + + + div32 + Divide by 32. + 5 + + + div64 + Divide by 64. + 6 + + + div128 + Divide by 128. + 7 + + + + + CLKSEL + Clock Source Select. This 3 bit field selects the source for the system clock. + 9 + 3 + + + HIRC + The internal 96 MHz oscillator is used for the system clock. + 0 + + + nanoRing + The nano-ring output is used for the system clock. + 3 + + + hfxIn + HFXIN is used for the system clock. + 6 + + + + + CKRDY + Clock Ready. This read only bit reflects whether the currently selected system clock source is running. + 13 + 1 + read-only + + + busy + Switchover to the new clock source (as selected by CLKSEL) has not yet occurred. + 0 + + + ready + System clock running from CLKSEL clock source. + 1 + + + + + X32K_EN + 32kHz Crystal Oscillator Enable. + 17 + 1 + + + dis + Is Disabled. + 0 + + + en + Is Enabled. + 1 + + + + + HIRC_EN + 60MHz High Frequency Internal Reference Clock Enable. + 18 + 1 + + + dis + Is Disabled. + 0 + + + en + Is Enabled. + 1 + + + + + X32K_RDY + 32kHz Crystal Oscillator Ready + 25 + 1 + read-only + + + not + Not Ready + 0 + + + Ready + X32K Ready + 1 + + + + + HIRC_RDY + 60MHz HIRC Ready. + 26 + 1 + + + not + Not Ready + 0 + + + ready + HIRC Ready + 1 + + + + + LIRC8K_RDY + 8kHz Low Frequency Reference Clock Ready. + 29 + 1 + + + not + Not Ready + 0 + + + ready + Clock Ready + 1 + + + + + + + PM + Power Management. + 0x0C + + + MODE + Operating Mode. This two bit field selects the current operating mode for the device. Note that code execution only occurs during ACTIVE mode. + 0 + 3 + + + active + Active Mode. + 0 + + + shutdown + Shutdown Mode. + 3 + + + backup + Backup Mode. + 4 + + + + + GPIOWKEN + GPIO Wake Up Enable. 
This bit enables all GPIO pins as potential wakeup sources. Any GPIO configured for wakeup is capable of causing an exit from IDLE or STANDBY modes when this bit is set. + 4 + 1 + + + dis + Wake Up Disable. + 0 + + + en + Wake Up Enable. + 1 + + + + + RTCWKEN + RTC Alarm Wake Up Enable. This bit enables RTC alarm as wakeup source. If enabled, the desired RTC alarm must be configured via the RTC control registers. + 5 + 1 + + + dis + Wake Up Disable. + 0 + + + en + Wake Up Enable. + 1 + + + + + HIRCPD + HIRC Power Down. This bit selects HIRC power state in DEEPSLEEP mode. + 15 + 1 + + + active + Mode is Active. + 0 + + + deepsleep + Powered down in DEEPSLEEP. + 1 + + + + + + + PCKDIV + Peripheral Clock Divider. + 0x18 + 0x00000001 + + + AONCD + Always-ON(AON) domain CLock Divider. These bits define the AON domain clock divider. + 0 + 2 + + + div_4 + PCLK divide by 4. + 0 + + + div_8 + PCLK divide by 8. + 1 + + + div_16 + PCLK divide by 16. + 2 + + + div_32 + PCLK divide by 32. + 3 + + + + + + + PERCKCN0 + Peripheral Clock Disable. + 0x24 + + + GPIO0D + GPIO0 Disable. + 0 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + DMAD + DMA Disable. + 5 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + SPI0D + SPI 0 Disable. + 6 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + SPI1D + SPI 1 Disable. + 7 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + UART0D + UART 0 Disable. + 9 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + UART1D + UART 1 Disable. + 10 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + I2C0D + I2C 0 Disable. + 13 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + T0D + Timer 0 Disable. + 15 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + T1D + Timer 1 Disable. 
+ 16 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + T2D + Timer 2 Disable. + 17 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + I2C1D + I2C 1 Disable. + 28 + 1 + + GPIODisable + + en + enable it. + 0 + + + dis + disable it. + 1 + + + + + + + MEMCKCN + Memory Clock Control Register. + 0x28 + + + FWS + Flash Wait State. These bits define the number of wait-state cycles per Flash data read access. Minimum wait state is 2. + 0 + 3 + + + SYSRAM0LS + System RAM 0 Light Sleep Mode. + 8 + 1 + + + active + Memory is active. + 0 + + + light_sleep + Memory is in Light Sleep mode. + 1 + + + + + SYSRAM1LS + System RAM 1 Light Sleep Mode. + 9 + 1 + + + active + Memory is active. + 0 + + + light_sleep + Memory is in Light Sleep mode. + 1 + + + + + SYSRAM2LS + System RAM 2 Light Sleep Mode. + 10 + 1 + + + active + Memory is active. + 0 + + + light_sleep + Memory is in Light Sleep mode. + 1 + + + + + SYSRAM3LS + System RAM 3 Light Sleep Mode. + 11 + 1 + + + active + Memory is active. + 0 + + + light_sleep + Memory is in Light Sleep mode. + 1 + + + + + ICACHELS + ICache RAM Light Sleep Mode. + 12 + 1 + + + active + Memory is active. + 0 + + + light_sleep + Memory is in Light Sleep mode. + 1 + + + + + + + MEMZCN + Memory Zeroize Control. + 0x2C + + + SRAM0Z + System RAM Block 0. + 0 + 1 + + + nop + No operation/complete. + 0 + + + start + Start operation. + 1 + + + + + ICACHEZ + Instruction Cache. + 1 + 1 + + + nop + No operation/complete. + 0 + + + start + Start operation. + 1 + + + + + + + SCCK + Smart Card Clock Control. + 0x34 + 0x00001414 + + + MPRI0 + Master Priority Control Register 0. + 0x38 + 0x00001414 + + + MPRI1 + Mater Priority Control Register 1. + 0x3C + 0x00001414 + + + SYSST + System Status Register. + 0x40 + + + ICECLOCK + ARM ICE Lock Status. + 0 + 1 + + + unlocked + ICE is unlocked. + 0 + + + locked + ICE is locked. + 1 + + + + + CODEINTERR + Code Integrity Error Flag. 
This bit indicates a code integrity error has occured in XiP interface. + 1 + 1 + + + norm + Normal Operating Condition. + 0 + + + code + Code Integrity Error. + 1 + + + + + SCMEMF + System Cache Memory Fault Flag. This bit indicates a memory fault has occured in the System Cache while receiving data from the Hyperbus Interface. + 5 + 1 + + + norm + Normal Operating Condition. + 0 + + + memory + Memory Fault. + 1 + + + + + + + RSTR1 + Reset 1. + 0x44 + + + I2C1 + I2C1 Reset. + 0 + 1 + + reset_write + write + + RFU + Reserved. Do not use. + 0 + + + reset + Starts reset operation. + 1 + + + + reset_read + read + + reset_done + Reset complete. + 0 + + + busy + Reset in progress. + 1 + + + + + + + PERCKCN1 + Peripheral Clock Disable. + 0x48 + + + FLCD + Secure Flash Controller Disable. + 3 + 1 + + + en + Enable. + 0 + + + dis + Disable. + 1 + + + + + ICACHED + ICache Clock Disable. + 11 + 1 + + + en + Enable. + 0 + + + dis + Disable. + 1 + + + + + + + EVTEN + Event Enable Register. + 0x4C + + + DMAEVENT + Enable DMA event. When this bit is set, a DMA event will cause an RXEV event to wake the CPU from WFE sleep mode. + 0 + 1 + + + RXEVENT + Enable RXEV pin event. When this bit is set, a logic high of GPIO0[24] will cause an RXEV event to wake the CPU from WFE sleep mode. + 1 + 1 + + + + + REVISION + Revision Register. + 0x50 + read-only + + + REVISION + Manufacturer Chip Revision. + 0 + 16 + + + + + SYSSIE + System Status Interrupt Enable Register. + 0x54 + + + ICEULIE + ARM ICE Unlock Interrupt Enable. + 0 + 1 + + + dis + disabled. + 0 + + + en + enabled. + 1 + + + + + CIEIE + Code Integrity Error Interrupt Enable. + 1 + 1 + + + dis + disabled. + 0 + + + en + enabled. + 1 + + + + + SCMFIE + System Cache Memory Fault Interrupt Enable. + 5 + 1 + + + dis + disabled. + 0 + + + en + enabled. + 1 + + + + + + + + + + GPIO0 + Individual I/O for each GPIO + GPIO + 0x40008000 + + 0x00 + 0x1000 + registers + + + GPIO0 + GPIO0 interrupt. 
+ 24 + + + + EN + GPIO Function Enable Register. Each bit controls the GPIO_EN setting for one GPIO pin on the associated port. + 0x00 + + + GPIO_EN + Mask of all of the pins on the port. + 0 + 32 + + + alternate + Alternate function enabled. + 0 + + + GPIO + GPIO function is enabled. + 1 + + + + + + + EN_SET + GPIO Set Function Enable Register. Writing a 1 to one or more bits in this register sets the bits in the same positions in GPIO_EN to 1, without affecting other bits in that register. + 0x04 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + EN_CLR + GPIO Clear Function Enable Register. Writing a 1 to one or more bits in this register clears the bits in the same positions in GPIO_EN to 0, without affecting other bits in that register. + 0x08 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + OUT_EN + GPIO Output Enable Register. Each bit controls the GPIO_OUT_EN setting for one GPIO pin in the associated port. + 0x0C + + + GPIO_OUT_EN + Mask of all of the pins on the port. + 0 + 32 + + + dis + GPIO Output Disable + 0 + + + en + GPIO Output Enable + 1 + + + + + + + OUT_EN_SET + GPIO Output Enable Set Function Enable Register. Writing a 1 to one or more bits in this register sets the bits in the same positions in GPIO_OUT_EN to 1, without affecting other bits in that register. + 0x10 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + OUT_EN_CLR + GPIO Output Enable Clear Function Enable Register. Writing a 1 to one or more bits in this register clears the bits in the same positions in GPIO_OUT_EN to 0, without affecting other bits in that register. + 0x14 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + OUT + GPIO Output Register. Each bit controls the GPIO_OUT setting for one pin in the associated port. This register can be written either directly, or by using the GPIO_OUT_SET and GPIO_OUT_CLR registers. + 0x18 + + + GPIO_OUT + Mask of all of the pins on the port. 
+ 0 + 32 + + + low + Drive Logic 0 (low) on GPIO output. + 0 + + + high + Drive logic 1 (high) on GPIO output. + 1 + + + + + + + OUT_SET + GPIO Output Set. Writing a 1 to one or more bits in this register sets the bits in the same positions in GPIO_OUT to 1, without affecting other bits in that register. + 0x1C + write-only + + + GPIO_OUT_SET + Mask of all of the pins on the port. + 0 + 32 + + + no + No Effect. + 0 + + + set + Set GPIO_OUT bit in this position to '1' + 1 + + + + + + + OUT_CLR + GPIO Output Clear. Writing a 1 to one or more bits in this register clears the bits in the same positions in GPIO_OUT to 0, without affecting other bits in that register. + 0x20 + write-only + + + GPIO_OUT_CLR + Mask of all of the pins on the port. + 0 + 32 + + + + + IN + GPIO Input Register. Read-only register to read from the logic states of the GPIO pins on this port. + 0x24 + read-only + + + GPIO_IN + Mask of all of the pins on the port. + 0 + 32 + + + + + INT_MOD + GPIO Interrupt Mode Register. Each bit in this register controls the interrupt mode setting for the associated GPIO pin on this port. + 0x28 + + + GPIO_INT_MOD + Mask of all of the pins on the port. + 0 + 32 + + + level + Interrupts for this pin are level triggered. + 0 + + + edge + Interrupts for this pin are edge triggered. + 1 + + + + + + + INT_POL + GPIO Interrupt Polarity Register. Each bit in this register controls the interrupt polarity setting for one GPIO pin in the associated port. + 0x2C + + + GPIO_INT_POL + Mask of all of the pins on the port. + 0 + 32 + + + falling + Interrupts are latched on a falling edge or low level condition for this pin. + 0 + + + rising + Interrupts are latched on a rising edge or high condition for this pin. + 1 + + + + + + + INT_EN + GPIO Interrupt Enable Register. Each bit in this register controls the GPIO interrupt enable for the associated pin on the GPIO port. + 0x34 + + + GPIO_INT_EN + Mask of all of the pins on the port. 
+ 0 + 32 + + + dis + Interrupts are disabled for this GPIO pin. + 0 + + + en + Interrupts are enabled for this GPIO pin. + 1 + + + + + + + INT_EN_SET + GPIO Interrupt Enable Set. Writing a 1 to one or more bits in this register sets the bits in the same positions in GPIO_INT_EN to 1, without affecting other bits in that register. + 0x38 + + + GPIO_INT_EN_SET + Mask of all of the pins on the port. + 0 + 32 + + + no + No effect. + 0 + + + set + Set GPIO_INT_EN bit in this position to '1' + 1 + + + + + + + INT_EN_CLR + GPIO Interrupt Enable Clear. Writing a 1 to one or more bits in this register clears the bits in the same positions in GPIO_INT_EN to 0, without affecting other bits in that register. + 0x3C + + + GPIO_INT_EN_CLR + Mask of all of the pins on the port. + 0 + 32 + + + no + No Effect. + 0 + + + clear + Clear GPIO_INT_EN bit in this position to '0' + 1 + + + + + + + INT_STAT + GPIO Interrupt Status Register. Each bit in this register contains the pending interrupt status for the associated GPIO pin in this port. + 0x40 + read-only + + + GPIO_INT_STAT + Mask of all of the pins on the port. + 0 + 32 + + + no + No Interrupt is pending on this GPIO pin. + 0 + + + pending + An Interrupt is pending on this GPIO pin. + 1 + + + + + + + INT_CLR + GPIO Status Clear. Writing a 1 to one or more bits in this register clears the bits in the same positions in GPIO_INT_STAT to 0, without affecting other bits in that register. + 0x48 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + WAKE_EN + GPIO Wake Enable Register. Each bit in this register controls the PMU wakeup enable for the associated GPIO pin in this port. + 0x4C + + + GPIO_WAKE_EN + Mask of all of the pins on the port. + 0 + 32 + + + dis + PMU wakeup for this GPIO is disabled. + 0 + + + en + PMU wakeup for this GPIO is enabled. + 1 + + + + + + + WAKE_EN_SET + GPIO Wake Enable Set. 
Writing a 1 to one or more bits in this register sets the bits in the same positions in GPIO_WAKE_EN to 1, without affecting other bits in that register. + 0x50 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + WAKE_EN_CLR + GPIO Wake Enable Clear. Writing a 1 to one or more bits in this register clears the bits in the same positions in GPIO_WAKE_EN to 0, without affecting other bits in that register. + 0x54 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + INT_DUAL_EDGE + GPIO Interrupt Dual Edge Mode Register. Each bit in this register selects dual edge mode for the associated GPIO pin in this port. + 0x5C + + + GPIO_INT_DUAL_EDGE + Mask of all of the pins on the port. + 0 + 32 + + + no + No Effect. + 0 + + + en + Dual Edge mode is enabled. If edge-triggered interrupts are enabled on this GPIO pin, then both rising and falling edges will trigger interrupts regardless of the GPIO_INT_POL setting. + 1 + + + + + + + PAD_CFG1 + GPIO Input Mode Config 1. Each bit in this register enables the weak pull-up for the associated GPIO pin in this port. + 0x60 + + + GPIO_PAD_CFG1 + The two bits in GPIO_PAD_CFG1 and GPIO_PAD_CFG2 for each GPIO pin work together to determine the pad mode when the GPIO is set to input mode. + 0 + 32 + + + impedance + High Impedance. + 0 + + + pu + Weak pull-up mode. + 1 + + + pd + weak pull-down mode. + 2 + + + + + + + PAD_CFG2 + GPIO Input Mode Config 2. Each bit in this register enables the weak pull-up for the associated GPIO pin in this port. + 0x64 + + + GPIO_PAD_CFG2 + The two bits in GPIO_PAD_CFG1 and GPIO_PAD_CFG2 for each GPIO pin work together to determine the pad mode when the GPIO is set to input mode. + 0 + 32 + + + impedance + High Impedance. + 0 + + + pu + Weak pull-up mode. + 1 + + + pd + weak pull-down mode. + 2 + + + + + + + EN1 + GPIO Alternate Function Enable Register. Each bit in this register selects between primary/secondary functions for the associated GPIO pin in this port. 
+ 0x68 + + + GPIO_EN1 + Mask of all of the pins on the port. + 0 + 32 + + + primary + Primary function selected. + 0 + + + secondary + Secondary function selected. + 1 + + + + + + + EN1_SET + GPIO Alternate Function Set. Writing a 1 to one or more bits in this register sets the bits in the same positions in GPIO_EN1 to 1, without affecting other bits in that register. + 0x6C + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + EN1_CLR + GPIO Alternate Function Clear. Writing a 1 to one or more bits in this register clears the bits in the same positions in GPIO_EN1 to 0, without affecting other bits in that register. + 0x70 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + EN2 + GPIO Alternate Function Enable Register. Each bit in this register selects between primary/secondary functions for the associated GPIO pin in this port. + 0x74 + + + GPIO_EN2 + Mask of all of the pins on the port. + 0 + 32 + + + primary + Primary function selected. + 0 + + + secondary + Secondary function selected. + 1 + + + + + + + EN2_SET + GPIO Alternate Function 2 Set. Writing a 1 to one or more bits in this register sets the bits in the same positions in GPIO_EN2 to 1, without affecting other bits in that register. + 0x78 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + EN2_CLR + GPIO Wake Alternate Function Clear. Writing a 1 to one or more bits in this register clears the bits in the same positions in GPIO_EN2 to 0, without affecting other bits in that register. + 0x7C + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + IS + Input Hysteresis Enable Register + 0xA8 + + + SR + Slew Rate Select Register. + 0xAC + + + DS + GPIO Drive Strength Register. Each bit in this register selects the drive strength for the associated GPIO pin in this port. Refer to the Datasheet for sink/source current of GPIO pins in each mode. + 0xB0 + + + DS + Mask of all of the pins on the port. 
+ 0 + 32 + + + ld + GPIO port pin is in low-drive mode. + 0 + + + hd + GPIO port pin is in high-drive mode. + 1 + + + + + + + DS1 + GPIO Drive Strength 1 Register. Each bit in this register selects the drive strength for the associated GPIO pin in this port. Refer to the Datasheet for sink/source current of GPIO pins in each mode. + 0xB4 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + PS + GPIO Pull Select Mode. + 0xB8 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + VSSEL + GPIO Voltage Select. + 0xC0 + + + ALL + Mask of all of the pins on the port. + 0 + 32 + + + + + + + + I2C0 + Inter-Integrated Circuit. + I2C + 0x4001D000 + 32 + + 0x00 + 0x1000 + registers + + + I2C0 + I2C0 IRQ + 13 + + + + CTRL + Control Register0. + 0x00 + + + I2C_EN + I2C Enable. + [0:0] + read-write + + + dis + Disable I2C. + 0 + + + en + enable I2C. + 1 + + + + + MST + Master Mode Enable. + [1:1] + read-write + + + slave_mode + Slave Mode. + 0 + + + master_mode + Master Mode. + 1 + + + + + GEN_CALL_ADDR + General Call Address Enable. + [2:2] + read-write + + + dis + Ignore Gneral Call Address. + 0 + + + en + Acknowledge general call address. + 1 + + + + + RX_MODE + Interactive Receive Mode. + [3:3] + read-write + + + dis + Disable Interactive Receive Mode. + 0 + + + en + Enable Interactive Receive Mode. + 1 + + + + + RX_MODE_ACK + Data Acknowledge. This bit defines the acknowledge bit returned by the I2C receiver while IRXM = 1 HW forces ACK to 0 when IRXM = 0. + [4:4] + read-write + + + ack + return ACK (pulling SDA LOW). + 0 + + + nack + return NACK (leaving SDA HIGH). + 1 + + + + + SCL_OUT + SCL Output. This bits control SCL output when SWOE =1. + [6:6] + read-write + + + drive_scl_low + Drive SCL low. + 0 + + + release_scl + Release SCL. + 1 + + + + + SDA_OUT + SDA Output. This bits control SDA output when SWOE = 1. + [7:7] + read-write + + + drive_sda_low + Drive SDA low. + 0 + + + release_sda + Release SDA. + 1 + + + + + SCL + SCL status. 
This bit reflects the logic gate of SCL signal. + [8:8] + read-only + + + SDA + SDA status. THis bit reflects the logic gate of SDA signal. + [9:9] + read-only + + + SW_OUT_EN + Software Output Enable. + [10:10] + read-write + + + outputs_disable + I2C Outputs SCLO and SDAO disabled. + 0 + + + outputs_enable + I2C Outputs SCLO and SDAO enabled. + 1 + + + + + READ + Read. This bit reflects the R/W bit of an address match (AMI = 1) or general call match(GCI = 1). This bit is valid 3 cycles after the relevant interrupt bit is set. + [11:11] + read-only + + + write + Write. + 0 + + + read + Read. + 1 + + + + + SCL_CLK_STRECH_DIS + This bit will disable slave clock stretching when set. + [12:12] + read-write + + + en + Slave clock stretching enabled. + 0 + + + dis + Slave clock stretching disabled. + 1 + + + + + SCL_PP_MODE + SCL Push-Pull Mode. This bit controls whether SCL is operated in a the I2C standard open-drain mode, or in a non-standard push-pull mode where the Hi-Z output isreplaced with Drive-1. The non-standard mode should only be used when operating as a master and communicating with slaves that are guaranteed to never drive SCL low. + [13:13] + read-write + + + dis + Standard open-drain operation: drive low for 0, Hi-Z for 1 + 0 + + + en + Non-standard push-pull operation: drive low for 0, drive high for 1 + 1 + + + + + HS_MODE + Hs-mode Enable. + 15 + 1 + + + dis + Hs-mode disabled. + 0 + + + en + Hs-mode enabled. + 1 + + + + + + + STATUS + Status Register. + 0x04 + + + BUS + Bus Status. + [0:0] + read-only + + + idle + I2C Bus Idle. + 0 + + + busy + I2C Bus Busy. + 1 + + + + + RX_EMPTY + RX empty. + [1:1] + read-only + + + not_empty + Not Empty. + 0 + + + empty + Empty. + 1 + + + + + RX_FULL + RX Full. + [2:2] + read-only + + + not_full + Not Full. + 0 + + + full + Full. + 1 + + + + + TX_EMPTY + TX Empty. + [3:3] + + + not_empty + Not Empty. + 0 + + + empty + Empty. + 1 + + + + + TX_FULL + TX Full. + [4:4] + + + not_empty + Not Empty. 
+ 0 + + + empty + Empty. + 1 + + + + + CLK_MODE + Clock Mode. + [5:5] + read-only + + + not_actively_driving_scl_clock + Device not actively driving SCL clock cycles. + 0 + + + actively_driving_scl_clock + Device operating as master and actively driving SCL clock cycles. + 1 + + + + + STATUS + Controller Status. + [11:8] + + + idle + Controller Idle. + 0 + + + mtx_addr + master Transmit address. + 1 + + + mrx_addr_ack + Master Receive address ACK. + 2 + + + mtx_ex_addr + Master Transmit extended address. + 3 + + + mrx_ex_addr + Master Receive extended address ACK. + 4 + + + srx_addr + Slave Receive address. + 5 + + + stx_addr_ack + Slave Transmit address ACK. + 6 + + + srx_ex_addr + Slave Receive extended address. + 7 + + + stx_ex_addr_ack + Slave Transmit extended address ACK. + 8 + + + tx + Transmit data (master or slave). + 9 + + + rx_ack + Receive data ACK (master or slave). + 10 + + + rx + Receive data (master or slave). + 11 + + + tx_ack + Transmit data ACK (master or slave). + 12 + + + nack + NACK stage (master or slave). + 13 + + + by_st + Bystander state (ongoing transaction but not participant- another master addressing another slave). + 15 + + + + + + + INT_FL0 + Interrupt Status Register. + 0x08 + + + DONE + Transfer Done Interrupt. + [0:0] + + INT_FL0_Done + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + RX_MODE + Interactive Receive Interrupt. + [1:1] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + GEN_CALL_ADDR + Slave General Call Address Match Interrupt. + [2:2] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + ADDR_MATCH + Slave Address Match Interrupt. + [3:3] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + RX_THRESH + Receive Threshold Interrupt. This bit is automaticcaly cleared when RX_FIFO is below the threshold level. 
+ [4:4] + + + inactive + No interrupt is pending. + 0 + + + pending + An interrupt is pending. RX_FIFO equal or more bytes than the threshold. + 1 + + + + + TX_THRESH + Transmit Threshold Interrupt. This bit is automaticcaly cleared when TX_FIFO is above the threshold level. + [5:5] + + + inactive + No interrupt is pending. + 0 + + + pending + An interrupt is pending. TX_FIFO has equal or less bytes than the threshold. + 1 + + + + + STOP + STOP Interrupt. + [6:6] + + + inactive + No interrupt is pending. + 0 + + + pending + An interrupt is pending. TX_FIFO has equal or less bytes than the threshold. + 1 + + + + + ADDR_ACK + Address Acknowledge Interrupt. + [7:7] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + ARB_ER + Arbritation error Interrupt. + [8:8] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + TO_ER + timeout Error Interrupt. + [9:9] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + ADDR_NACK_ER + Address NACK Error Interrupt. + [10:10] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + DATA_ER + Data NACK Error Interrupt. + [11:11] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + DO_NOT_RESP_ER + Do Not Respond Error Interrupt. + [12:12] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + START_ER + Start Error Interrupt. + [13:13] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + STOP_ER + Stop Error Interrupt. + [14:14] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + TX_LOCK_OUT + Transmit Lock Out Interrupt. + [15:15] + + + + + INT_EN0 + Interrupt Enable Register. + 0x0C + read-write + + + DONE + Transfer Done Interrupt Enable. 
+ [0:0] + read-write + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled when DONE = 1. + 1 + + + + + RX_MODE + Description not available. + [1:1] + read-write + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled when RX_MODE = 1. + 1 + + + + + GEN_CTRL_ADDR + Slave mode general call address match received input enable. + [2:2] + read-write + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled when GEN_CTRL_ADDR = 1. + 1 + + + + + ADDR_MATCH + Slave mode incoming address match interrupt. + [3:3] + read-write + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled when ADDR_MATCH = 1. + 1 + + + + + RX_THRESH + RX FIFO Above Treshold Level Interrupt Enable. + [4:4] + read-write + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. + 1 + + + + + TX_THRESH + TX FIFO Below Treshold Level Interrupt Enable. + [5:5] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. + 1 + + + + + STOP + Stop Interrupt Enable + [6:6] + read-write + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled when STOP = 1. + 1 + + + + + ADDR_ACK + Received Address ACK from Slave Interrupt. + [7:7] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. + 1 + + + + + ARB_ER + Master Mode Arbitration Lost Interrupt. + [8:8] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. + 1 + + + + + TO_ER + Timeout Error Interrupt Enable. + [9:9] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. + 1 + + + + + ADDR_ER + Master Mode Address NACK Received Interrupt. + [10:10] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. + 1 + + + + + DATA_ER + Master Mode Data NACK Received Interrupt. + [11:11] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. + 1 + + + + + DO_NOT_RESP_ER + Slave Mode Do Not Respond Interrupt. + [12:12] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. 
+ 1 + + + + + START_ER + Out of Sequence START condition detected interrupt. + [13:13] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. + 1 + + + + + STOP_ER + Out of Sequence STOP condition detected interrupt. + [14:14] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled. + 1 + + + + + TX_LOCK_OUT + TX FIFO Locked Out Interrupt. + [15:15] + + + dis + Interrupt disabled. + 0 + + + en + Interrupt enabled when TXLOIE = 1. + 1 + + + + + + + INT_FL1 + Interrupt Status Register 1. + 0x10 + + + RX_OVERFLOW + Receiver Overflow Interrupt. When operating as a slave receiver, this bit is set when you reach the first data bit and the RX FIFO and shift register are both full. + [0:0] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + TX_UNDERFLOW + Transmit Underflow Interrupt. When operating as a slave transmitter, this bit is set when you reach the first data bit and the TX FIFO is empty and the master is still asking for more data (i.e the master hasn't sent a NACK yet). + [1:1] + + + inactive + No Interrupt is Pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + + + INT_EN1 + Interrupt Staus Register 1. + 0x14 + read-write + + + RX_OVERFLOW + Receiver Overflow Interrupt Enable. + [0:0] + + + dis + No Interrupt is Pending. + 0 + + + en + An interrupt is pending. + 1 + + + + + TX_UNDERFLOW + Transmit Underflow Interrupt Enable. + [1:1] + + + dis + No Interrupt is Pending. + 0 + + + en + An interrupt is pending. + 1 + + + + + + + FIFO_LEN + FIFO Configuration Register. + 0x18 + + + RX_LEN + Receive FIFO Length. + [7:0] + read-only + + + TX_LEN + Transmit FIFO Length. + [15:8] + read-only + + + + + RX_CTRL0 + Receive Control Register 0. + 0x1C + + + DNR + Do Not Respond. + [0:0] + + + respond + Always respond to address match. + 0 + + + not_respond_rx_fifo_empty + Do not respond to address match when RX_FIFO is not empty. + 1 + + + + + RX_FLUSH + Receive FIFO Flush. 
This bit is automatically cleared to 0 after the operation. Setting this bit to 1 will affect RX_FIFO status. + [7:7] + + + not_flushed + FIFO not flushed. + 0 + + + flush + Flush RX_FIFO. + 1 + + + + + RX_THRESH + Receive FIFO Threshold. These bits define the RX_FIFO interrupt threshold. + [11:8] + + + + + RX_CTRL1 + Receive Control Register 1. + 0x20 + + + RX_CNT + Receive Count Bits. These bits define the number of bytes to be received in a transaction, except for the case RXCNT = 0. RXCNT = 0 means 256 bytes to be received in a transaction. + [7:0] + + + RX_FIFO + Receive FIFO Count. These bits reflect the number of byte in the RX_FIFO. These bits are flushed when I2CEN = 0. + [11:8] + read-only + + + + + TX_CTRL0 + Transmit Control Register 0. + 0x24 + + + TX_PRELOAD + Transmit FIFO Preaload Mode. Setting this bit will allow for high speed application to preload the transmit FIFO prior to Slave Address Match. + [0:0] + + + TX_READY_MODE + Transmit FIFO Ready Manual Mode. + [1:1] + + + en + HW control of I2CTXRDY enabled. + 0 + + + dis + HW control of I2CTXRDY disabled. + 1 + + + + + TX_FLUSH + Transmit FIFO Flush. This bit is automatically cleared to 0 after the operation. + [7:7] + + + not_flushed + FIFO not flushed. + 0 + + + flush + Flush TX_FIFO. + 1 + + + + + TX_THRESH + Transmit FIFO Threshold. These bits define the TX_FIFO interrupt threshold. + [11:8] + + + + + TX_CTRL1 + Transmit Control Register 1. + 0x28 + + + TX_READY + Transmit FIFO Preload Ready. + [0:0] + + + TX_LAST + Transmit Last. This bit is used in slave mod only. Do not use when preloading (cleared by hardware). + [1:1] + + + hold_scl_low + Hold SCL low on TX_FIFO empty. + 0 + + + end_transaction + End transaction on TX_FIFO empty. + 1 + + + + + TX_FIFO + Transmit FIFO Count. These bits reflect the number of bytes in the TX_FIFO. + [11:8] + read-only + + + + + FIFO + Data Register. + 0x2C + + + DATA + Data is read from or written to this location. 
Transmit and receive FIFO are separate but both are addressed at this location. + 0 + 8 + + + + + MASTER_CTRL + Master Control Register. + 0x30 + + + START + Setting this bit to 1 will start a master transfer. + [0:0] + + + RESTART + Setting this bit to 1 will generate a repeated START. + [1:1] + + + STOP + Setting this bit to 1 will generate a STOP condition. + [2:2] + + + SL_EX_ADDR + Slave Extend Address Select. + [7:7] + + + 7_bits_address + 7-bit address. + 0 + + + 10_bits_address + 10-bit address. + 1 + + + + + MASTER_CODE + Master Code. These bits set the Master Code used in Hs-mode operation. + [10:8] + + + SCL_SPEED_UP + Serial Clock speed Up. Setting this bit disables the master's monitoring of SCL state for other external masters or slaves. + [11:11] + + + en + Master monitors SCL state. + 0 + + + dis + SCL state monitoring disabled. + 1 + + + + + + + CLK_LO + Clock Low Register. + 0x34 + + + CLK_LO + Clock low. In master mode, these bits define the SCL low period. In slave mode, these bits define the time SCL will be held low after data is outputted. + [8:0] + + + + + CLK_HI + Clock high Register. + 0x38 + + + CKH + Clock High. In master mode, these bits define the SCL high period. + [8:0] + + + + + HS_CLK + HS-Mode Clock Control Register + 0x3C + + + HS_CLK_LO + Slave Address. + [7:0] + + + HS_CLK_HI + Slave Address. + [15:8] + + + + + TIMEOUT + Timeout Register + 0x40 + + + TO + Timeout + [15:0] + + + + + SLAVE_ADDR + Slave Address Register. + 0x44 + + + SLAVE_ADDR + Slave Address. + [9:0] + + + SLAVE_ADDR_DIS + Slave Address DIS. + [10:10] + + + SLAVE_ADDR_IDX + Slave Address Index. + [14:11] + + + EX_ADDR + Extended Address Select. + [15:15] + + + 7_bits_address + 7-bit address. + 0 + + + 10_bits_address + 10-bit address. + 1 + + + + + + + DMA + DMA Register. + 0x48 + + + TX_EN + TX channel enable. + [0:0] + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + RX_EN + RX channel enable. + [1:1] + + + dis + Disable. + 0 + + + en + Enable. 
+ 1 + + + + + + + + + + I2C1 + Inter-Integrated Circuit. 1 + 0x4001E000 + + I2C1 + I2C1 IRQ + 36 + + + + + ICC0 + Instruction Cache Controller Registers + 0x4002A000 + + 0x00 + 0x1000 + registers + + + + CACHE_ID + Cache ID Register. + 0x0000 + read-only + + + RELNUM + Release Number. Identifies the RTL release version. + 0 + 6 + + + PARTNUM + Part Number. This field reflects the value of C_ID_PART_NUMBER configuration parameter. + 6 + 4 + + + CCHID + Cache ID. This field reflects the value of the C_ID_CACHEID configuration parameter. + 10 + 6 + + + + + MEMCFG + Memory Configuration Register. + 0x0004 + read-only + 0x00080008 + + + CCHSZ + Cache Size. Indicates total size in Kbytes of cache. + 0 + 16 + + + MEMSZ + Main Memory Size. Indicates the total size, in units of 128 Kbytes, of code memory accessible to the cache controller. + 16 + 16 + + + + + CACHE_CTRL + Cache Control and Status Register. + 0x0100 + + + CACHE_EN + Cache Enable. Controls whether the cache is bypassed or is in use. Changing the state of this bit will cause the instruction cache to be flushed and its contents invalidated. + 0 + 1 + + + dis + Cache Bypassed. Instruction data is stored in the line fill buffer but is not written to main cache memory array. + 0 + + + en + Cache Enabled. + 1 + + + + + CACHE_RDY + Cache Ready flag. Cleared by hardware when at any time the cache as a whole is invalidated (including a system reset). When this bit is 0, the cache is effectively in bypass mode (instruction fetches will come from main memory or from the line fill buffer). Set by hardware when the invalidate operation is complete and the cache is ready. + 16 + 1 + read-only + + + notReady + Not Ready. + 0 + + + ready + Ready. + 1 + + + + + + + INVALIDATE + Invalidate All Registers. + 0x0700 + read-write + + + + + + ICC1 + Instruction Cache Controller Registers 1 + 0x4002F000 + + + + PWRSEQ + Power Sequencer / Low Power Control Register. 
+ 0x40006800 + + 0x00 + 0x800 + registers + + + + LP_CTRL + Low Power Control Register. + 0x00 + + + RAMRET_SEL0 + System RAM 0 Data retention in BACKUP mode. + 0 + 1 + + + dis + Disabled. + 0 + + + en + Enabled. + 1 + + + + + RAMRET_SEL1 + System RAM 1 Data retention in BACKUP mode. + 1 + 1 + + + dis + Disabled. + 0 + + + en + Enabled. + 1 + + + + + RAMRET_SEL2 + System RAM 2 Data retention in BACKUP mode. + 2 + 1 + + + dis + Disabled. + 0 + + + en + Enabled. + 1 + + + + + RAMRET_SEL3 + System RAM 3 Data retention in BACKUP mode. + 3 + 1 + + + dis + Disabled. + 0 + + + en + Enabled. + 1 + + + + + OVR + Operating Voltage Range + 4 + 2 + + + 0_9V + 0.9V 24MHz + 0 + + + 1_0V + 1.0V 48MHz + 1 + + + 1_1V + 1.1V 96MHz + 2 + + + + + VCORE_DET_BYPASS + Bypass V CORE External Supply Detection + 6 + 1 + + + enabled + enable + 0 + + + Disable + disable + 1 + + + + + RETREG_EN + Retention Regulator Enable. This bit controls the retention regulator in BACKUP mode. + 8 + 1 + + + dis + Disabled. + 0 + + + en + Enabled. + 1 + + + + + FAST_WK_EN + Fast Wake-Up Mode. This bit enables fast wake-up from DeepSleep mode. + 10 + 1 + + + dis + Disabled. + 0 + + + en + Enabled. + 1 + + + + + BG_OFF + Band Gap Disable for DEEPSLEEP and BACKUP Mode + 11 + 1 + + + on + Bandgap is always ON. + 0 + + + off + Bandgap is OFF in DeepSleep mode(default). + 1 + + + + + VCORE_POR_DIS + V CORE POR Disable for DEEPSLEEP and BACKUP Mode + 12 + 1 + + + dis + Disabled. + 0 + + + en + Enabled. + 1 + + + + + LDO_DIS + LDO Disable + 16 + 1 + + + en + Enable if Bandgap is ON(default) + 0 + + + dis + Disabled. + 1 + + + + + VCORE_SVM_DIS + V CORE Supply Voltage Monitor Disable + 20 + 1 + + + en + Enable if Bandgap is ON(default) + 0 + + + dis + Disabled. + 1 + + + + + VDDIO_POR_DIS + VDDIO Power-On Reset Monitor Disable. This bit controls the Power-On Reset monitor on VDDIO supply in all operating mods. + 25 + 1 + + + en + Enabled. + 0 + + + dis + Disabled. 
+ 1 + + + + + + + LP_WAKEFL + Low Power Mode Wakeup Flags for GPIO0 + 0x04 + + + WAKEST + Wakeup IRQ flags (write ones to clear). One or more of these bits will be set when the corresponding dedicated GPIO pin(s) transition(s) from low to high or high to low. If GPIO wakeup source is selected, using PM.GPIOWKEN register, and the corresponding bit is also selected in LPWKEN register, an interrupt will be gnerated to wake up the CPU from a low power mode. + 0 + 14 + + + + + LPWK_EN + Low Power I/O Wakeup Enable Register 0. This register enables low power wakeup functionality for GPIO0. + 0x08 + + + WAKEEN + Enable wakeup. These bits allow wakeup from the corresponding GPIO pin(s) on transition(s) from low to high or high to low when PM.GPIOWKEN is set. Wakeup status is indicated in PPWKST register. + 0 + 14 + + + + + LPMEMSD + Low Power Memory Shutdown Control. + 0x40 + + + SRAM0_OFF + System RAM block 0 Shut Down. + 0 + 1 + + + normal + Normal Operating Mode. + 0 + + + shutdown + Shutdown Mode. + 1 + + + + + SRAM1_OFF + System RAM block 1 Shut Down. + 1 + 1 + + + normal + Normal Operating Mode. + 0 + + + shutdown + Shutdown Mode. + 1 + + + + + SRAM2_OFF + System RAM block 2 Shut Down. + 2 + 1 + + + normal + Normal Operating Mode. + 0 + + + shutdown + Shutdown Mode. + 1 + + + + + SRAM3_OFF + System RAM block 3 Shut Down. + 3 + 1 + + + normal + Normal Operating Mode. + 0 + + + shutdown + Shutdown Mode. + 1 + + + + + + + + + + RTC + Real Time Clock and Alarm. + 0x40006000 + + 0x00 + 0x400 + registers + + + RTC + RTC interrupt. + 3 + + + + SEC + RTC Second Counter. This register contains the 32-bit second counter. + 0x00 + 0x00000000 + + + SSEC + RTC Sub-second Counter. This counter increments at 256Hz. RTC_SEC is incremented when this register rolls over from 0xFF to 0x00. + 0x04 + 0x00000000 + + + RTSS + RTC Sub-second Counter. + 0 + 8 + + + + + RAS + Time-of-day Alarm. + 0x08 + 0x00000000 + + + RAS + Time-of-day Alarm. + 0 + 20 + + + + + RSSA + RTC sub-second alarm. 
This register contains the reload value for the sub-second alarm. + 0x0C + 0x00000000 + + + RSSA + This register contains the reload value for the sub-second alarm. + 0 + 32 + + + + + CTRL + RTC Control Register. + 0x10 + 0x00000008 + 0xFFFFFF38 + + + RTCE + Real Time Clock Enable. This bit enables the Real Time Clock. This bit can only be written when WE=1 and BUSY =0. Change to this bit is effective only after BUSY is cleared from 1 to 0. + 0 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + ADE + Alarm Time-of-Day Interrupt Enable. Change to this bit is effective only after BUSY is cleared from 1 to 0. + 1 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + ASE + Alarm Sub-second Interrupt Enable. Change to this bit is effective only after BUSY is cleared from 1 to 0. + 2 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + BUSY + RTC Busy. This bit is set to 1 by hardware when changes to RTC registers required a synchronized version of the register to be in place. This bit is automatically cleared by hardware. + 3 + 1 + read-only + + + idle + Idle. + 0 + + + busy + Busy. + 1 + + + + + RDY + RTC Ready. This bit is set to 1 by hardware when the RTC count registers update. It can be cleared to 0 by software at any time. It will also be cleared to 0 by hardware just prior to an update of the RTC count register. + 4 + 1 + + + busy + Register has not updated. + 0 + + + ready + Ready. + 1 + + + + + RDYE + RTC Ready Interrupt Enable. + 5 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + ALDF + Time-of-Day Alarm Interrupt Flag. This alarm is qualified as wake-up source to the processor. + 6 + 1 + read-only + + + inactive + Not active + 0 + + + Pending + Active + 1 + + + + + ALSF + Sub-second Alarm Interrupt Flag. This alarm is qualified as wake-up source to the processor. + 7 + 1 + read-only + + + inactive + Not active + 0 + + + Pending + Active + 1 + + + + + SQE + Square Wave Output Enable. 
+ 8 + 1 + + + inactive + Not active + 0 + + + Pending + Active + 1 + + + + + FT + Frequency Output Selection. When SQE=1, these bits specify the output frequency on the SQW pin. + 9 + 2 + + + freq1Hz + 1 Hz (Compensated). + 0 + + + freq512Hz + 512 Hz (Compensated). + 1 + + + freq4KHz + 4 KHz. + 2 + + + clkDiv8 + RTC Input Clock / 8. + 3 + + + + + X32KMD + 32KHz Oscillator Mode. + 11 + 2 + + + noiseImmuneMode + Always operate in Noise Immune Mode. Oscillator warm-up required. + 0 + + + quietMode + Always operate in Quiet Mode. No oscillator warm-up required. + 1 + + + quietInStopWithWarmup + Operate in Noise Immune Mode normally, switch to Quiet Mode on Stop Mode entry. Will wait for 32K oscillator warm-up before code execution on Stop Mode exit. + 2 + + + quietInStopNoWarmup + Operate in Noise Immune Mode normally, switch to Quiet Mode on Stop Mode entry. Will not wait for 32K oscillator warm-up before code execution on Stop Mode exit. + 3 + + + + + WE + Write Enable. This register bit serves as a protection mechanism against unintentional writes to critical RTC bits. + 15 + 1 + + + inactive + Not active + 0 + + + Pending + Active + 1 + + + + + + + TRIM + RTC Trim Register. + 0x14 + 0x00000000 + + + TRIM + RTC Trim. This register contains the 2's complement value that specifies the trim resolution. Each increment or decrement of the bit adds or subtracts 1ppm at each 4KHz clock value, with a maximum correction of +/- 127ppm. + 0 + 8 + + + VBATTMR + VBAT Timer Value. When RTC is running off of VBAT, this field is incremented every 32 seconds. + 8 + 24 + + + + + OSCCTRL + RTC Oscillator Control Register. 
+ 0x18 + 0x00000000 + + + FLITER_EN + RTC Oscillator Filter Enable + 0 + 1 + + + IBIAS_SEL + RTC Oscillator 4X Bias Current Select + 1 + 1 + + + 2X + Selects 2X bias current for RTC oscillator + 0 + + + 4X + Selects 4X bias current for RTC oscillator + 1 + + + + + HYST_EN + RTC Oscillator Hysteresis Buffer Enable + 2 + 1 + + + IBIAS_EN + RTC Oscillator Bias Current Enable + 3 + 1 + + + BYPASS + RTC Crystal Bypass + 4 + 1 + + + OUT32K + RTC 32kHz Square Wave Output + 5 + 1 + + + + + + + + SIR + System Initialization Registers. + 0x40000400 + read-only + + 0x00 + 0x400 + registers + + + + SISTAT + System Initialization Status Register. + 0x00 + read-only + + + MAGIC + Magic Word Validation. This bit is set by the system initialization block following power-up. + 0 + 1 + read-only + + read + + magicNotSet + Magic word was not set (OTP has not been initialized properly). + 0 + + + magicSet + Magic word was set (OTP contains valid settings). + 1 + + + + + CRCERR + CRC Error Status. This bit is set by the system initialization block following power-up. + 1 + 1 + read-only + + read + + noError + No CRC errors occurred during the read of the OTP memory block. + 0 + + + error + A CRC error occurred while reading the OTP. The address of the failure location in the OTP memory is stored in the ERRADDR register. + 1 + + + + + + + ERRADDR + Read-only field set by the SIB block if a CRC error occurs during the read of the OTP memory. Contains the failing address in OTP memory (when CRCERR equals 1). + 0x04 + read-only + + + ERRADDR + 0 + 32 + + + + + FSTAT + funcstat register. + 0x100 + read-only + + + FPU + FPU Function. + 0 + 1 + + + no + 0 + + + yes + 1 + + + + + USB + USB Device. + 1 + 1 + + + no + 0 + + + yes + 1 + + + + + ADC + 10-bit Sigma Delta ADC. + 2 + 1 + + + no + 0 + + + yes + 1 + + + + + XIP + XiP function. + 3 + 1 + + + no + 0 + + + yes + 1 + + + + + PBM + PBM function. + 4 + 1 + + + no + 0 + + + yes + 1 + + + + + HBC + HBC function. 
+ 5 + 1 + + + no + 0 + + + yes + 1 + + + + + SDHC + SDHC function. + 6 + 1 + + + no + 0 + + + yes + 1 + + + + + SMPHR + SMPHR function. + 7 + 1 + + + no + 0 + + + yes + 1 + + + + + SCACHE + System Cache function. + 8 + 1 + + + no + 0 + + + yes + 1 + + + + + + + SFSTAT + secfuncstat register. + 0x104 + read-only + + + TRNG + TRNG function. + 2 + 1 + + + no + 0 + + + yes + 1 + + + + + AES + AES function. + 3 + 1 + + + no + 0 + + + yes + 1 + + + + + SHA + SHA function. + 4 + 1 + + + no + 0 + + + yes + 1 + + + + + MAA + MAA function. + 5 + 1 + + + no + 0 + + + yes + 1 + + + + + + + + + + SMON + The Security Monitor block used to monitor system threat conditions. + 0x40004000 + + 0x00 + 0x1000 + registers + + + + EXTSCN + External Sensor Control Register. + 0x00 + 0x3800FFC0 + + + EXTS_EN0 + External Sensor Enable for input/output pair 0. + 0 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + EXTS_EN1 + External Sensor Enable for input/output pair 1. + 1 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + EXTS_EN2 + External Sensor Enable for input/output pair 2. + 2 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + EXTS_EN3 + External Sensor Enable for input/output pair 3. + 3 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + EXTS_EN4 + External Sensor Enable for input/output pair 4. + 4 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + EXTS_EN5 + External Sensor Enable for input/output pair 5. + 5 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + EXTCNT + External Sensor Error Counter. These bits set the number of external sensor accepted mismatches that have to occur within a single bit period before an external sensor alarm is triggered. + 16 + 5 + + + EXTFRQ + External Sensor Frequency. These bits define the frequency at which the external sensors are clocked to/from the EXTS_IN and EXTS_OUT pair. + 21 + 3 + + + freq2000Hz + Div 4 (2000Hz). + 0 + + + freq1000Hz + Div 8 (1000Hz). 
+ 1 + + + freq500Hz + Div 16 (500Hz). + 2 + + + freq250Hz + Div 32 (250Hz). + 3 + + + freq125Hz + Div 64 (125Hz). + 4 + + + freq63Hz + Div 128 (63Hz). + 5 + + + freq31Hz + Div 256 (31Hz). + 6 + + + RFU + Reserved. Do not use. + 7 + + + + + DIVCLK + Clock Divide. These bits are used to divide the 8KHz input clock. The resulting divided clock is used for all logic within the Security Monitor Block. Note: If the input clock is divided with these bits, the error count threshold table and output frequency will be affected accordingly with the same divide factor. + 24 + 3 + + + div1 + Divide by 1 (8000 Hz). + 0 + + + div2 + Divide by 2 (4000 Hz). + 1 + + + div4 + Divide by 4 (2000 Hz). + 2 + + + div8 + Divide by 8 (1000 Hz). + 3 + + + div16 + Divide by 16 (500 Hz). + 4 + + + div32 + Divide by 32 (250 Hz). + 5 + + + div64 + Divide by 64 (125 Hz). + 6 + + + + + BUSY + Busy. This bit is set to 1 by hardware after EXTSCN register is written to. This bit is automatically cleared to 0 after this register information has been transferred to the security monitor domain. + 30 + 1 + read-only + + + idle + Idle. + 0 + + + busy + Update in Progress. + 1 + + + + + LOCK + Lock Register. Once locked, the EXTSCN register can no longer be modified. Only a battery disconnect will clear this bit. VBAT powers this register. + 31 + 1 + + + unlocked + Unlocked. + 0 + + + locked + Locked. + 1 + + + + + + + INTSCN + Internal Sensor Control Register. + 0x04 + 0x7F00FFF7 + + + SHIELD_EN + Die Shield Enable. + 0 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + TEMP_EN + Temperature Sensor Enable. + 1 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + VBAT_EN + Battery Monitor Enable. + 2 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + LOTEMP_SEL + Low Temperature Detection Select. + 16 + 1 + + + neg50C + -50 degrees C. + 0 + + + neg30C + -30 degrees C. + 1 + + + + + VCORELOEN + VCORE Undervoltage Detect Enable. + 18 + 1 + + + dis + Disable. 
+ 0 + + + en + Enable. + 1 + + + + + VCOREHIEN + VCORE Overvoltage Detect Enable. + 19 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + VDDLOEN + VDD Undervoltage Detect Enable. + 20 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + VDDHIEN + VDD Overvoltage Detect Enable. + 21 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + VGLEN + Voltage Glitch Detection Enable. + 22 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + LOCK + Lock Register. Once locked, the INTSCN register can no longer be modified. Only a battery disconnect will clear this bit. VBAT powers this register. + 31 + 1 + + + unlocked + Unlocked. + 0 + + + locked + Locked. + 1 + + + + + + + SECALM + Security Alarm Register. + 0x08 + 0x00000000 + 0x00000000 + + + DRS + Destructive Reset Trigger. Setting this bit will generate a DRS. This bit is self-cleared by hardware. + 0 + 1 + + + complete + No operation/complete. + 0 + + + start + Start operation. + 1 + + + + + KEYWIPE + Key Wipe Trigger. Set to 1 to initiate a wipe of the AES key register. It does not reset the part, or log a timestamp. AES and DES registers are not affected by this bit. This bit is automatically cleared to 0 after the keys have been wiped. + 1 + 1 + + + complete + No operation/complete. + 0 + + + start + Start operation. + 1 + + + + + SHIELDF + Die Shield Flag. + 2 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + LOTEMP + Low Temperature Detect. + 3 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + HITEMP + High Temperature Detect. + 4 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + BATLO + Battery Undervoltage Detect. + 5 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + BATHI + Battery Overvoltage Detect. 
+ 6 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTF + External Sensor Flag. This bit is set to 1 when any of the EXTSTAT bits are set. + 7 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + VDDLO + VDD Undervoltage Detect Flag. + 8 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + VCORELO + VCORE Undervoltage Detect Flag. + 9 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + VCOREHI + VCORE Overvoltage Detect Flag. + 10 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + VDDHI + VDD Overvoltage Flag. + 11 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + VGL + Voltage Glitch Detection Flag. + 12 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSTAT0 + External Sensor 0 Detect. The tamper detect is only active when it is enabled. This bits needs to be cleared in software after a tamper event to re-arm the sensor. + 16 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSTAT1 + External Sensor 1 Detect. The tamper detect is only active when it is enabled. This bits needs to be cleared in software after a tamper event to re-arm the sensor. + 17 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSTAT2 + External Sensor 2 Detect. The tamper detect is only active when it is enabled. This bits needs to be cleared in software after a tamper event to re-arm the sensor. + 18 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSTAT3 + External Sensor 3 Detect. 
The tamper detect is only active when it is enabled. This bits needs to be cleared in software after a tamper event to re-arm the sensor. + 19 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSTAT4 + External Sensor 4 Detect. The tamper detect is only active when it is enabled. This bits needs to be cleared in software after a tamper event to re-arm the sensor. + 20 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSTAT5 + External Sensor 5 Detect. The tamper detect is only active when it is enabled. This bits needs to be cleared in software after a tamper event to re-arm the sensor. + 21 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSWARN0 + External Sensor 0 Warning Ready flag. The tamper detect warning flags are set, regardless of whether the external sensors are enabled. + 24 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSWARN1 + External Sensor 1 Warning Ready flag. The tamper detect warning flags are set, regardless of whether the external sensors are enabled. + 25 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSWARN2 + External Sensor 2 Warning Ready flag. The tamper detect warning flags are set, regardless of whether the external sensors are enabled. + 26 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSWARN3 + External Sensor 3 Warning Ready flag. The tamper detect warning flags are set, regardless of whether the external sensors are enabled. + 27 + 1 + + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + EXTSWARN4 + External Sensor 4 Warning Ready flag. 
Battery-On-Reset Flag. This bit is set once the backup battery is connected.
DRS Log RTC Value. This register contains the 32 bit value in the RTC second register when the last DRS event occurred.
Master Initiates a transaction, this bit is self clearing when transactions are done. If a transaction completes, and the TX FIFO is empty, the Master halts, if a transaction completes, and the TX FIFO is not empty, the Master initiates another transaction.
Number of Characters to receive.
TX DMA requests are disabled, any pending DMA requests are cleared.
+ 0 + + + en + Receive FIFO is enabled. + 1 + + + + + RX_FIFO_CLEAR + Clear RX FIFO, clear is accomplished by resetting the read and write pointers. This should be done when FIFO is not being accessed on the SPI side. + 23 + 1 + + + CLEAR + Clear the Receive FIFO, clears any pending RX FIFO status. + 1 + + + + + RX_FIFO_CNT + Count of entries in RX FIFO. + 24 + 6 + read-only + + + RX_DMA_EN + RX DMA Enable. + 31 + 1 + + + dis + RX DMA requests are disabled, any pending DMA requests are cleared. + 0 + + + en + RX DMA requests are enabled. + 1 + + + + + + + INT_FL + Register for reading and clearing interrupt flags. All bits are write 1 to clear. + 0x20 + read-write + + + TX_THRESH + TX FIFO Threshold Crossed. + 0 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + TX_EMPTY + TX FIFO Empty. + 1 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + RX_THRESH + RX FIFO Threshold Crossed. + 2 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + RX_FULL + RX FIFO FULL. + 3 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + SSA + Slave Select Asserted. + 4 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + SSD + Slave Select Deasserted. + 5 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + FAULT + Multi-Master Mode Fault. + 8 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + ABORT + Slave Abort Detected. + 9 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + M_DONE + Master Done, set when SPI Master has completed any transactions. + 11 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + TX_OVR + Transmit FIFO Overrun, set when the AMBA side attempts to write data to a full transmit FIFO. 
+ 12 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + TX_UND + Transmit FIFO Underrun, set when the SPI side attempts to read data from an empty transmit FIFO. + 13 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + RX_OVR + Receive FIFO Overrun, set when the SPI side attempts to write to a full receive FIFO. + 14 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + RX_UND + Receive FIFO Underrun, set when the AMBA side attempts to read data from an empty receive FIFO. + 15 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + + + INT_EN + Register for enabling interrupts. + 0x24 + read-write + + + TX_THRESH + TX FIFO Threshold interrupt enable. + 0 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + TX_EMPTY + TX FIFO Empty interrupt enable. + 1 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + RX_THRESH + RX FIFO Threshold Crossed interrupt enable. + 2 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + RX_FULL + RX FIFO FULL interrupt enable. + 3 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + SSA + Slave Select Asserted interrupt enable. + 4 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + SSD + Slave Select Deasserted interrupt enable. + 5 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + FAULT + Multi-Master Mode Fault interrupt enable. + 8 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + ABORT + Slave Abort Detected interrupt enable. + 9 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + M_DONE + Master Done interrupt enable. + 11 + 1 + + + dis + Interrupt is disabled. 
+ 0 + + + en + Interrupt is enabled. + 1 + + + + + TX_OVR + Transmit FIFO Overrun interrupt enable. + 12 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + TX_UND + Transmit FIFO Underrun interrupt enable. + 13 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + RX_OVR + Receive FIFO Overrun interrupt enable. + 14 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + RX_UND + Receive FIFO Underrun interrupt enable. + 15 + 1 + + + dis + Interrupt is disabled. + 0 + + + en + Interrupt is enabled. + 1 + + + + + + + WAKE_FL + Register for wake up flags. All bits in this register are write 1 to clear. + 0x28 + read-write + + + TX_THRESH + Wake on TX FIFO Threshold Crossed. + 0 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + TX_EMPTY + Wake on TX FIFO Empty. + 1 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + RX_THRESH + Wake on RX FIFO Threshold Crossed. + 2 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + RX_FULL + Wake on RX FIFO Full. + 3 + 1 + + + clear + Flag is set when value read is 1. Write 1 to clear this flag. + 1 + + + + + + + WAKE_EN + Register for wake up enable. + 0x2C + read-write + + + TX_THRESH + Wake on TX FIFO Threshold Crossed Enable. + 0 + 1 + + + dis + Wakeup source disabled. + 0 + + + en + Wakeup source enabled. + 1 + + + + + TX_EMPTY + Wake on TX FIFO Empty Enable. + 1 + 1 + + + dis + Wakeup source disabled. + 0 + + + en + Wakeup source enabled. + 1 + + + + + RX_THRESH + Wake on RX FIFO Threshold Crossed Enable. + 2 + 1 + + + dis + Wakeup source disabled. + 0 + + + en + Wakeup source enabled. + 1 + + + + + RX_FULL + Wake on RX FIFO Full Enable. + 3 + 1 + + + dis + Wakeup source disabled. + 0 + + + en + Wakeup source enabled. + 1 + + + + + + + STAT + SPI Status register. 
+ 0x30 + read-only + + + BUSY + SPI active status. In Master mode, set when transaction starts, cleared when last bit of last character is acted upon and Slave Select de-assertion would occur. In Slave mode, set when Slave Select is asserted, cleared when Slave Select is de-asserted. Not used in Timer mode. + 0 + 1 + + + not + SPI not active. + 0 + + + active + SPI active. + 1 + + + + + + + + + + SPIMSS + Serial Peripheral Interface. + SPIMSS0_ + 0x40018000 + + 0x00 + 0x1000 + registers + + + + DATA16 + SPI 16-bit Data Access + 0x00 + 16 + read-write + + + DATA + SPI data. + 0 + 16 + + + + + 2 + 1 + DATA8[%s] + SPI Data 8-bit access + DATA16 + 0x00 + 8 + read-write + + + DATA + SPI data. + 0 + 8 + + + + + CTRL + SPI Control Register. + 0x04 + + + SPIEN + SPI Enable. + 0 + 1 + + dis_en_enum + + disable + 0 + + + enable + 1 + + + + + MMEN + SPI Master Mode Enable. + 1 + 1 + + slv_mst_enum + + slave + 0 + + + master + 1 + + + + + WOR + Wired OR (open drain) Enable. + 2 + 1 + + dis_en_enum + + disable + 0 + + + enable + 1 + + + + + CLKPOL + Clock Polarity. + 3 + 1 + + spi_pol_enum + + idleLo + SCLK idles Low (0) after character transmission/reception. + 0 + + + idleHi + SCLK idles High (1) after character transmission/reception. + 1 + + + + + PHASE + Phase Select. + 4 + 1 + + spi_phase_enum + + activeEdge + Transmit on active edge of SCLK. + 0 + + + inactiveEdge + Transmit on inactive edge of SCLK. + 1 + + + + + BIRQ + Baud Rate Generator Timer Interrupt Request. + 5 + 1 + + dis_en_enum + + disable + 0 + + + enable + 1 + + + + + STR + Start SPI Interrupt. + 6 + 1 + + start_op_enum + + complete + No operation/complete. + 0 + + + start + Start operation. + 1 + + + + + IRQE + Interrupt Request Enable. + 7 + 1 + + dis_en_enum + + disable + 0 + + + enable + 1 + + + + + + + STATUS + SPI Status Register. + 0x08 + 0x00000001 + + + SLAS + Slave Select. If the SPI is in slave mode, this bit indicates if the SPI is selected. If the SPI is in master mode this bit has no meaning. 
+ 0 + 1 + read-only + + sel_enum + + selected + 0 + + + notSelected + 1 + + + + + TXST + Transmit Status. + 1 + 1 + read-only + + busy_enum + + idle + 0 + + + busy + 1 + + + + + TUND + Transmit Underrun. + 2 + 1 + oneToClear + + event_flag_enum + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + ROVR + Receive Overrun. + 3 + 1 + + event_flag_enum + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + ABT + Slave Mode Transaction Abort. + 4 + 1 + + event_flag_enum + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + COL + Collision. + 5 + 1 + + event_flag_enum + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + TOVR + Transmit Overrun. + 6 + 1 + + event_flag_enum + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + IRQ + SPI Interrupt Request. + 7 + 1 + oneToClear + + flag_enum + + inactive + No interrupt is pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + + + MOD + SPI Mode Register. + 0x0C + + + SSV + Slave Select Value. + 0 + 1 + + lo_hi_enum + + lo + The SSEL pin will be driven low. + 0 + + + hi + The SSEL pin will be driven high. + 1 + + + + + SSIO + Slave Select I/O. + 1 + 1 + + input_output_enum + + input + 0 + + + output + 1 + + + + + NUMBITS + 2 + 4 + + spi_bits_enum + + bits16 + 0 + + + bits1 + 1 + + + bits2 + 2 + + + bits3 + 3 + + + bits4 + 4 + + + bits5 + 5 + + + bits6 + 6 + + + bits7 + 7 + + + bits8 + 8 + + + bits9 + 9 + + + bits10 + 10 + + + bits11 + 11 + + + bits12 + 12 + + + bits13 + 13 + + + bits14 + 14 + + + bits15 + 15 + + + + + TX_LJ + Transmit Left Justify. + 7 + 1 + + dis_en_enum + + disable + 0 + + + enable + 1 + + + + + SSL1 + Slave Select 1. If SPI is enabled and in master mode, the SSEL_1 is driven according to this bit. + 8 + 1 + + hi_lo_enum + + hi + High. + 0 + + + lo + Low. 
+ 1 + + + + + SSL2 + Slave Select 2. If SPI is enabled and in master mode, the SSEL_2 is driven according to this bit. + 9 + 1 + + hi_lo_enum + + hi + High. + 0 + + + lo + Low. + 1 + + + + + SSL3 + Slave Select 3. If SPI is enabled and in master mode, the SSEL_3 is driven according to this bit. + 10 + 1 + + hi_lo_enum + + hi + High. + 0 + + + lo + Low. + 1 + + + + + + + BRG + Baud Rate Reload Value. The SPI Baud Rate register is a 16-bit reload value for the SPI Baud Rate Generator. The reload value must be greater than or equal to 0002H for proper SPI operation (maximum baud rate is PCLK frequency divided by 4). + 0x14 + 0x0000FFFF + + + BRG + Baud Rate Reload Value. + 0 + 16 + + + + + DMA + SPI DMA Register. + 0x18 + 0x00070007 + + + TX_FIFO_LEVEL + Transmit FIFO Level. Set the number of free entries in the TxFIFO when a TxDMA request occurs. + 0 + 3 + + fifo_level_enum + + entry1 + 0 + + + entries2 + 1 + + + entries3 + 2 + + + entries4 + 3 + + + entries5 + 4 + + + entries6 + 5 + + + entries7 + 6 + + + entries8 + 7 + + + + + TX_FIFO_CLEAR + Transmit FIFO Clear. + 4 + 1 + write-only + + start_op_enum + + complete + No operation/complete. + 0 + + + start + Start operation. + 1 + + + + + TX_FIFO_CNT + Transmit FIFO Count. + 8 + 4 + read-only + + + TX_DMA_EN + Transmit DMA Enable. + 15 + 1 + + dis_en_enum + + disable + 0 + + + enable + 1 + + + + + RX_FIFO_LEVEL + Receive FIFO Level. Sets the RX FIFO DMA request threshold. This configures the number of filled RxFIFO entries before activating an RxDMA request. + 16 + 3 + + fifo_level_enum + + entry1 + 0 + + + entries2 + 1 + + + entries3 + 2 + + + entries4 + 3 + + + entries5 + 4 + + + entries6 + 5 + + + entries7 + 6 + + + entries8 + 7 + + + + + RX_FIFO_CLEAR + Receive FIFO Clear. + 20 + 1 + + start_op_enum + + complete + No operation/complete. + 0 + + + start + Start operation. + 1 + + + + + RX_FIFO_CNT + Receive FIFO Count. + 24 + 4 + read-only + + + RX_DMA_EN + Receive DMA Enable. 
+ 31 + 1 + + dis_en_enum + + disable + 0 + + + enable + 1 + + + + + + + I2S_CTRL + I2S Control Register. + 0x1C + + + I2S_EN + I2S Mode Enable. + 0 + 1 + + dis_en_enum + + disable + 0 + + + enable + 1 + + + + + I2S_MUTE + I2S Mute transmit. + 1 + 1 + + + normal + Normal Transmit. + 0 + + + replaced + Transmit data is replaced with 0. + 1 + + + + + I2S_PAUSE + I2S Pause transmit/receive. + 2 + 1 + + + normal + Normal Transmit. + 0 + + + halt + Halt transmit and receive FIFO and DMA access, transmit 0's. + 1 + + + + + I2S_MONO + I2S Monophonic Audio Mode. + 3 + 1 + + + stereophonic + Stereophonic audio. + 0 + + + monophonic + Monophonic audio format.Each transmit data word is replicated on both left/right channels. Receive data is taken from left channel, right channel receive data is ignored. + 1 + + + + + I2S_LJ + I2S Left Justify. + 4 + 1 + + + normal + Normal I2S audio protocol. + 0 + + + replaced + Audio data is synchronized with SSEL. + 1 + + + + + + + + + + TMR0 + 32-bit reloadable timer that can be used for timing and event counting. + Timers + 0x40010000 + + 0x00 + 0x1000 + registers + + + TMR0 + TMR0 IRQ + 5 + + + + CNT + Count. This register stores the current timer count. + 0x00 + 0x00000001 + + + CMP + Compare. This register stores the compare value, which is used to set the maximum count value to initiate a reload of the timer to 0x0001. + 0x04 + 0x0000FFFF + + + PWM + PWM. This register stores the value that is compared to the current timer count. + 0x08 + + + INTR + Clear Interrupt. Writing a value (0 or 1) to a bit in this register clears the associated interrupt. + 0x0C + oneToClear + + + IRQ_CLR + Clear Interrupt. + 0 + 1 + + + + + CN + Timer Control Register. + 0x10 + + + TMODE + Timer Mode. + 0 + 3 + + + oneShot + One Shot Mode. + 0 + + + continuous + Continuous Mode. + 1 + + + counter + Counter Mode. + 2 + + + pwm + PWM Mode. + 3 + + + capture + Capture Mode. + 4 + + + compare + Compare Mode. + 5 + + + gated + Gated Mode. 
+ 6 + + + captureCompare + Capture/Compare Mode. + 7 + + + + + PRES + Prescaler. Set the Timer's prescaler value. The prescaler divides the PCLK input to the timer and sets the Timer's Count Clock, F_CNT_CLK = PCLK(HZ)/prescaler. The Timer's prescaler setting is a 4-bit value with pres3:pres[2:0]. + 3 + 3 + + + div1 + Divide by 1. + 0 + + + div2 + Divide by 2. + 1 + + + div4 + Divide by 4. + 2 + + + div8 + Divide by 8. + 3 + + + div16 + Divide by 16. + 4 + + + div32 + Divide by 32. + 5 + + + div64 + Divide by 64. + 6 + + + div128 + Divide by 128. + 7 + + + + + TPOL + Timer input/output polarity bit. + 6 + 1 + + + activeHi + Active High. + 0 + + + activeLo + Active Low. + 1 + + + + + TEN + Timer Enable. + 7 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + PRES3 + MSB of prescaler value. + 8 + 1 + + + PWMSYNC + Timer PWM Synchronization Mode Enable. + 9 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + NOLHPOL + Timer PWM output 0A polarity bit. + 10 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + NOLLPOL + Timer PWM output 0A' polarity bit. + 11 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + PWMCKBD + Timer PWM output 0A Mode Disable. + 12 + 1 + + + dis + Disable. + 1 + + + en + Enable. + 0 + + + + + + + NOLCMP + Timer Non-Overlapping Compare Register. + 0x14 + + + NOLLCMP + Non-overlapping Low Compare. The 8-bit timer count value of non-overlapping time between falling edge of PWM output 0A and next rising edge of PWM output 0A'. + 0 + 8 + + + NOLHCMP + Non-overlapping High Compare. The 8-bit timer count value of non-overlapping time between falling edge of PWM output 0A' and next rising edge of PWM output 0A. + 8 + 8 + + + + + + + + TMR1 + 32-bit reloadable timer that can be used for timing and event counting. 1 + 0x40011000 + + TMR1 + TMR1 IRQ + 6 + + + + + TMR2 + 32-bit reloadable timer that can be used for timing and event counting. 
2 + 0x40012000 + + TMR2 + TMR2 IRQ + 7 + + + + + UART0 + UART + 0x40042000 + + 0 + 0x1000 + registers + + + UART0 + UART0 IRQ + 14 + + + + CTRL + Control Register. + 0x00 + 32 + + + ENABLE + UART enabled, to enable UART block, it is used to drive a gated clock in order to save power consumption when UART is not used. FIFOs are flushed when UART is disabled. + 0 + 1 + + + dis + UART disabled. FIFOs are flushed. Clock is gated off for power savings. + 0 + + + en + UART enabled. + 1 + + + + + PARITY_EN + Enable/disable Parity bit (9th character). + 1 + 1 + + + dis + No Parity + 0 + + + en + Parity enabled as 9th bit + 1 + + + + + PARITY + When PARITY_EN=1, selects odd, even, Mark or Space parity. + Mark parity = always 1; Space parity = always 0. + 2 + 2 + + + Even + Even parity selected. + 0 + + + ODD + Odd parity selected. + 1 + + + MARK + Mark parity selected. + 2 + + + SPACE + Space parity selected. + 3 + + + + + PARMD + Selects parity based on 1s or 0s count (when PARITY_EN=1). + 4 + 1 + + + 1 + Parity calculation is based on number of 1s in frame. + 0 + + + 0 + Parity calculation is based on number of 0s in frame. + 1 + + + + + TX_FLUSH + Flushes the TX FIFO buffer. + 5 + 1 + + + RX_FLUSH + Flushes the RX FIFO buffer. + 6 + 1 + + + BITACC + If set, bit accuracy is selected, in this case the bit duration is the same for all the bits with the optimal accuracy. But the frame duration can have a significant deviation from the expected baudrate.If clear, frame accuracy is selected, therefore bits can have different duration in order to guarantee the minimum frame deviation. + 7 + 1 + + + FRAME + Frame accuracy. + 0 + + + BIT + Bit accuracy. + 1 + + + + + CHAR_SIZE + Selects UART character size. + 8 + 2 + + + 5 + 5 bits. + 0 + + + 6 + 6 bits. + 1 + + + 7 + 7 bits. + 2 + + + 8 + 8 bits. + 3 + + + + + STOPBITS + Selects the number of stop bits that will be generated. + 10 + 1 + + + 1 + 1 stop bit. + 0 + + + 1_5 + 1.5 stop bits. 
+ 1 + + + + + FLOW_CTRL + Enables/disables hardware flow control. + 11 + 1 + + + en + HW Flow Control with RTS/CTS enabled + 1 + + + dis + HW Flow Control disabled + 0 + + + + + FLOW_POL + RTS/CTS polarity. + 12 + 1 + + + 0 + RTS/CTS asserted is logic 0. + 0 + + + 1 + RTS/CTS asserted is logic 1. + 1 + + + + + NULL_MODEM + NULL Modem Support (RTS/CTS and TXD/RXD swap). + 13 + 1 + + + DIS + Direct convention. + 0 + + + EN + Null Modem Mode. + 1 + + + + + BREAK + Break control bit. It causes a break condition to be transmitted to receiving UART. + 14 + 1 + + + DIS + Break characters are not generated. + 0 + + + EN + Break characters are sent(all the bits are at '0' including start/parity/stop). + 1 + + + + + CLKSEL + Baud Rate Clock Source Select. Selects the baud rate clock. + 15 + 1 + + + SYSTEM + System clock. + 0 + + + ALTERNATE + Alternate 7.3727MHz internal clock. Useful in low power modes when the system clock is slow. + 1 + + + + + RX_TO + RX Time Out. RX time out interrupt will occur after RXTO Uart + characters if RX-FIFO is not empty and RX FIFO has not been read. + 16 + 8 + + + + + THRESH_CTRL + Threshold Control register. + 0x04 + 32 + + + RX_FIFO_THRESH + RX FIFO Threshold Level.When the RX FIFO reaches this many bytes or higher, UARTn_INFTL.rx_fifo_level is set. + 0 + 6 + + + TX_FIFO_THRESH + TX FIFO Threshold Level. When the TX FIFO reaches this many bytes or higher, UARTn_INTFL.tx_fifo_level is set. + 8 + 6 + + + RTS_FIFO_THRESH + RTS threshold control. When the RX FIFO reaches this many bytes or higher, the RTS output signal is deasserted, informing the transmitting UART to stop sending data to this UART. + 16 + 6 + + + + + STATUS + Status Register. + 0x08 + 32 + read-only + + + TX_BUSY + Read-only flag indicating the UART transmit status. + 0 + 1 + read-only + + + RX_BUSY + Read-only flag indicating the UARTreceiver status. + 1 + 1 + read-only + + + PARITY + 9th Received bit state. This bit identifies the state of the 9th bit of received data. 
Only available for UART_CTRL.SIZE[1:0]=3. + 2 + 1 + read-only + + + BREAK + Received BREAK status. BREAKS is cleared when UART_STAT register is read. Received data input is held in spacing (logic 0) state for longer than a full word transmission time (that is, the total time of Start bit + data bits + Parity + Stop bits). + 3 + 1 + read-only + + + RX_EMPTY + Read-only flag indicating the RX FIFO state. + 4 + 1 + read-only + + + RX_FULL + Read-only flag indicating the RX FIFO state. + 5 + 1 + read-only + + + TX_EMPTY + Read-only flag indicating the TX FIFO state. + 6 + 1 + read-only + + + TX_FULL + Read-only flag indicating the TX FIFO state. + 7 + 1 + read-only + + + RX_FIFO_CNT + Indicates the number of bytes currently in the RX FIFO. + 8 + 6 + read-only + + + TX_FIFO_CNT + Indicates the number of bytes currently in the TX FIFO. + 16 + 6 + read-only + + + RX_TO + RX Timeout status. + 24 + 1 + read-only + + + + + INT_EN + Interrupt Enable Register. + 0x0C + 32 + + + RX_FRAME_ERROR + Enable for RX Frame Error Interrupt. + 0 + 1 + + + RX_PARITY_ERROR + Enable for RX Parity Error interrupt. + 1 + 1 + + + CTS_CHANGE + Enable for CTS signal change interrupt. + 2 + 1 + + + RX_OVERRUN + Enable for RX FIFO OVerrun interrupt. + 3 + 1 + + + RX_FIFO_THRESH + Enable for interrupt when RX FIFO reaches the number of bytes configured by the RXTHD field. + 4 + 1 + + + TX_FIFO_ALMOST_EMPTY + Enable for interrupt when TX FIFO has only one byte remaining. + 5 + 1 + + + TX_FIFO_THRESH + Enable for interrupt when TX FIFO reaches the number of bytes configured by the TXTHD field. + 6 + 1 + + + BREAK + Enable for received BREAK character interrupt. + 7 + 1 + + + RX_TIMEOUT + Enable for RX Timeout Interrupt. Trigger if there is no RX communication during n UART characters (n=UART_CN.RXTO). + 8 + 1 + + + LAST_BREAK + Enable for Last break character interrupt. + 9 + 1 + + + + + INT_FL + Interrupt Status Flags. 
+ 0x10 + 32 + oneToClear + + + RX_FRAME_ERROR + FLAG for RX Frame Error Interrupt. + 0 + 1 + + + RX_PARITY_ERROR + FLAG for RX Parity Error interrupt. + 1 + 1 + + + CTS_CHANGE + FLAG for CTS signal change interrupt. + 2 + 1 + + + RX_OVERRUN + FLAG for RX FIFO Overrun interrupt. + 3 + 1 + + + RX_FIFO_THRESH + FLAG for interrupt when RX FIFO reaches the number of bytes configured by the RXTHD field. + 4 + 1 + + + TX_FIFO_ALMOST_EMPTY + FLAG for interrupt when TX FIFO has only one byte remaining. + 5 + 1 + + + TX_FIFO_THRESH + FLAG for interrupt when TX FIFO reaches the number of bytes configured by the TXTHD field. + 6 + 1 + + + BREAK + FLAG for received BREAK character interrupt. + 7 + 1 + + + RX_TIMEOUT + FLAG for RX Timeout Interrupt. Trigger if there is no RX communication during n UART characters (n=UART_CN.RXTO). + 8 + 1 + + + LAST_BREAK + FLAG for Last break character interrupt. + 9 + 1 + + + + + BAUD0 + Baud rate register. Integer portion. + 0x14 + 32 + + + IBAUD + Integer portion of baud rate divisor value. IBAUD = InputClock / (factor * Baud Rate Frequency). + 0 + 12 + + + FACTOR + FACTOR must be chosen to have IDIV>0. factor used in calculation = 128 >> FACTOR. + 16 + 2 + + + 128 + Baud Factor 128 + 0 + + + 64 + Baud Factor 64 + 1 + + + 32 + Baud Factor 32 + 2 + + + 16 + Baud Factor 16 + 3 + + + + + + + BAUD1 + Baud rate register. Decimal Setting. + 0x18 + 32 + + + DBAUD + Decimal portion of baud rate divisor value. DIV = InputClock/(factor*Baud Rate Frequency). DDIV=(DIV-IDIV)*128. + 0 + 12 + + + + + FIFO + FIFO Data buffer. + 0x1C + 32 + + + FIFO + Load/unload location for TX and RX FIFO buffers. + 0 + 8 + + + + + DMA + DMA Configuration. + 0x20 + 32 + + + TDMA_EN + TX DMA channel enable. + 0 + 1 + + + dis + DMA is disabled + 0 + + + en + DMA is enabled + 1 + + + + + RXDMA_EN + RX DMA channel enable. + 1 + 1 + + + dis + DMA is disabled + 0 + + + en + DMA is enabled + 1 + + + + + TXDMA_LEVEL + TX threshold for DMA transmission. 
+ 8 + 6 + + + RXDMA_LEVEL + RX threshold for DMA transmission. + 16 + 6 + + + + + TX_FIFO + Transmit FIFO Status register. + 0x24 + 32 + + + DATA + Reading from this field returns the next character available at the + output of the TX FIFO (if one is available, otherwise 00h is returned). + 0 + 7 + + + + + + + + UART1 + UART 1 + 0x40043000 + + UART1 + UART1 IRQ + 15 + + + + + WDT0 + Watchdog Timer 0 + 0x40003000 + + 0x00 + 0x0400 + registers + + + WDT0 + 1 + + + + CTRL + Watchdog Timer Control Register. + 0x00 + 0x7FFFF000 + + + INT_PERIOD + Watchdog Interrupt Period. The watchdog timer will assert an interrupt, if enabled, if the CPU does not write the watchdog reset sequence to the WDT_RST register before the watchdog timer has counted this time period since the last timer reset. + 0 + 4 + + + wdt2pow31 + 2**31 clock cycles. + 0 + + + wdt2pow30 + 2**30 clock cycles. + 1 + + + wdt2pow29 + 2**29 clock cycles. + 2 + + + wdt2pow28 + 2**28 clock cycles. + 3 + + + wdt2pow27 + 2^27 clock cycles. + 4 + + + wdt2pow26 + 2**26 clock cycles. + 5 + + + wdt2pow25 + 2**25 clock cycles. + 6 + + + wdt2pow24 + 2**24 clock cycles. + 7 + + + wdt2pow23 + 2**23 clock cycles. + 8 + + + wdt2pow22 + 2**22 clock cycles. + 9 + + + wdt2pow21 + 2**21 clock cycles. + 10 + + + wdt2pow20 + 2**20 clock cycles. + 11 + + + wdt2pow19 + 2**19 clock cycles. + 12 + + + wdt2pow18 + 2**18 clock cycles. + 13 + + + wdt2pow17 + 2**17 clock cycles. + 14 + + + wdt2pow16 + 2**16 clock cycles. + 15 + + + + + RST_PERIOD + Watchdog Reset Period. The watchdog timer will assert a reset, if enabled, if the CPU does not write the watchdog reset sequence to the WDT_RST register before the watchdog timer has counted this time period since the last timer reset. + 4 + 4 + + + wdt2pow31 + 2**31 clock cycles. + 0 + + + wdt2pow30 + 2**30 clock cycles. + 1 + + + wdt2pow29 + 2**29 clock cycles. + 2 + + + wdt2pow28 + 2**28 clock cycles. + 3 + + + wdt2pow27 + 2^27 clock cycles. + 4 + + + wdt2pow26 + 2**26 clock cycles. 
+ 5 + + + wdt2pow25 + 2**25 clock cycles. + 6 + + + wdt2pow24 + 2**24 clock cycles. + 7 + + + wdt2pow23 + 2**23 clock cycles. + 8 + + + wdt2pow22 + 2**22 clock cycles. + 9 + + + wdt2pow21 + 2**21 clock cycles. + 10 + + + wdt2pow20 + 2**20 clock cycles. + 11 + + + wdt2pow19 + 2**19 clock cycles. + 12 + + + wdt2pow18 + 2**18 clock cycles. + 13 + + + wdt2pow17 + 2**17 clock cycles. + 14 + + + wdt2pow16 + 2**16 clock cycles. + 15 + + + + + WDT_EN + Watchdog Timer Enable. + 8 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + INT_FLAG + Watchdog Timer Interrupt Flag. + 9 + 1 + oneToClear + + + inactive + No interrupt is pending. + 0 + + + pending + An interrupt is pending. + 1 + + + + + INT_EN + Watchdog Timer Interrupt Enable. + 10 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + RST_EN + Watchdog Timer Reset Enable. + 11 + 1 + + + dis + Disable. + 0 + + + en + Enable. + 1 + + + + + RST_FLAG + Watchdog Timer Reset Flag. + 31 + 1 + + read-write + + noEvent + The event has not occurred. + 0 + + + occurred + The event has occurred. + 1 + + + + + + + RST + Watchdog Timer Reset Register. + 0x04 + write-only + + + WDT_RST + Writing the watchdog counter 'reset sequence' to this register resets the watchdog counter. If the watchdog count exceeds INT_PERIOD then a watchdog interrupt will occur, if enabled. If the watchdog count exceeds RST_PERIOD then a watchdog reset will occur, if enabled. + 0 + 8 + + + seq0 + The first value to be written to reset the WDT. + 0x000000A5 + + + seq1 + The second value to be written to reset the WDT. + 0x0000005A + + + + + + + + + + diff --git a/pyocd/target/builtin/__init__.py b/pyocd/target/builtin/__init__.py index 126cc5567..a57ba1bfc 100644 --- a/pyocd/target/builtin/__init__.py +++ b/pyocd/target/builtin/__init__.py @@ -74,6 +74,7 @@ from . import target_MAX32620 from . import target_MAX32625 from . import target_MAX32630 +from . import target_MAX32660 from . import target_w7500 from . import target_s5js100 from . 
import target_LPC1114FN28_102 @@ -164,6 +165,7 @@ 'max32620': target_MAX32620.MAX32620, 'max32625': target_MAX32625.MAX32625, 'max32630': target_MAX32630.MAX32630, + 'max32660': target_MAX32660.MAX32660, 'mimxrt1010': target_MIMXRT1011xxxxx.MIMXRT1011xxxxx, 'mimxrt1015': target_MIMXRT1015xxxxx.MIMXRT1015xxxxx, 'mimxrt1020': target_MIMXRT1021xxxxx.MIMXRT1021xxxxx, diff --git a/pyocd/target/builtin/target_MAX32660.py b/pyocd/target/builtin/target_MAX32660.py new file mode 100644 index 000000000..83be597e5 --- /dev/null +++ b/pyocd/target/builtin/target_MAX32660.py @@ -0,0 +1,91 @@ +# pyOCD debugger +# Copyright (c) 2017-2021 Maxim Integrated (Part of Analog Devices) +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ...coresight.coresight_target import CoreSightTarget +from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap) +from ...debug.svd.loader import SVDFile + +FLASH_ALGO = { + 'load_address' : 0x20000000, + + # Flash algorithm as a hex string + 'instructions': [ + 0xe00abe00, + 0x20604989, 0x6a4a6048, 0x44484888, 0x22006082, 0x688a624a, 0x68426042, 0x4270f022, 0x68426042, + 0x5200f042, 0x68406042, 0x47706088, 0x688a497e, 0x4448487e, 0x68426042, 0x4270f022, 0x68426042, + 0x6880608a, 0x47706248, 0x47702000, 0x47702000, 0xf7ffb500, 0x4b74ffd5, 0x48746899, 0x60414448, + 0xf4216841, 0x6041417f, 0xf4416841, 0x6041412a, 0x60996841, 0xf0416841, 0x60410102, 0x60986840, + 0x01c06898, 0xf7ffd4fc, 0x6a58ffd1, 0xf04f0780, 0xd5010000, 0x20016258, 0xb500bd00, 0x035b0b43, + 0xffaef7ff, 0x600b4960, 0x4860688a, 0x60424448, 0xf4226842, 0x6042427f, 0xf4426842, 0x604242aa, + 0x608a6842, 0xf0426842, 0x60420204, 0x60886840, 0x01c06888, 0x6a48d4fc, 0xd5050780, 0x62482000, + 0xffa4f7ff, 0xbd002001, 0xffa0f7ff, 0xbd002000, 0x4613b5f8, 0x4605460c, 0xff82f7ff, 0x6881484a, + 0x444a4a4a, 0x68516051, 0x6100f021, 0x68516051, 0x0110f041, 0x68516051, 0xe00e6081, 0x68196005, + 0x68516301, 0x0101f041, 0x68516051, 0x68816081, 0xd4fc01c9, 0x1f241d1b, 0x2c041d2d, 0x06e9d301, + 0x6811d1ec, 0xd1202980, 0xd31e2c10, 0x60516881, 0xf0216851, 0x60510110, 0x60816851, 0x68196005, + 0x68596301, 0x68996341, 0x68d96381, 0x685163c1, 0x0101f041, 0x68516051, 0x68816081, 0xd4fc01c9, + 0x3c103310, 0x2c103510, 0x2c04d2e8, 0x6881d31c, 0x68516051, 0x6100f021, 0x68516051, 0x0110f041, + 0x68516051, 0x60056081, 0x63016819, 0xf0416851, 0x60510101, 0x60816851, 0x01c96881, 0x1d1bd4fc, + 0x1d2d1f24, 0xd2ee2c04, 0xa119b314, 0x91006809, 0x21006886, 0x68566056, 0x6600f026, 0x68566056, + 0x0610f046, 0x68566056, 0x466e6086, 0x7b01f813, 0x1c495477, 0xd1f91e64, 0x99006005, 0x68516301, + 0x0101f041, 0x68516051, 0x68816081, 0xd4fc01c9, 0x07806a40, 0xf7ffd503, 0x2001ff09, 0xf7ffbdf8, + 0x2000ff05, 0x0000bdf8, 0x40029000, 
0x00000004, 0xffffffff, 0x00000000, 0x00000020, 0x00000000, + 0x00000000 + ], + + # Relative function addresses + 'pc_init': 0x2000004d, + 'pc_unInit': 0x20000051, + 'pc_program_page': 0x200000f5, + 'pc_erase_sector': 0x2000009f, + 'pc_eraseAll': 0x20000055, + + 'static_base' : 0x20000000 + 0x00000004 + 0x00000234, + 'begin_stack' : 0x20000448, + 'begin_data' : 0x20000000 + 0x1000, + 'page_size' : 0x400, + 'analyzer_supported' : False, + 'analyzer_address' : 0x00000000, + 'page_buffers' : [0x20001000, 0x20001400], # Enable double buffering + 'min_program_length' : 0x400, + + # Relative region addresses and sizes + 'ro_start': 0x0, + 'ro_size': 0x234, + 'rw_start': 0x234, + 'rw_size': 0x10, + 'zi_start': 0x244, + 'zi_size': 0x0, + + # Flash information + 'flash_start': 0x0, + 'flash_size': 0x40000, + 'sector_sizes': ( + (0x0, 0x2000), + ) +} + +class MAX32660(CoreSightTarget): + + VENDOR = "Maxim" + + MEMORY_MAP = MemoryMap( + FlashRegion( start=0, length=0x40000, blocksize=0x2000, is_boot_memory=True, algo=FLASH_ALGO), + RamRegion( start=0x20000000, length=0x18000), + ) + + def __init__(self, session): + super().__init__(session, self.MEMORY_MAP) + self._svd_location = SVDFile.from_builtin("max32660.svd") diff --git a/test/data/binaries/max32660evsys.bin b/test/data/binaries/max32660evsys.bin new file mode 100644 index 0000000000000000000000000000000000000000..46f06adda40c655202784354102ac98081c35270 GIT binary patch literal 31472 zcmc$`dwf$x`aeGBl1r1cO=*GBR!EbAr3DNvC<02Fq^CfEa#Im>34)LUN>_GQR`+9* z77)ZsX#}AW1*@x2xwOE_#crywyDIB$!ezHyx>kjTwx}mrk(MAWMS2ox1yUi>Dx{~7t^v1S z;<^^;*GSJI{TAtYr2j!GMsgyRB5go=5ot5h7Nl2@UPIc7gw zAXOs0hx9(uUZg4{?NQkb!@BI_e13d2KlNZcaWDo$k+$};D59gZ(fyJT--$i+{<3sGu4hF`s$jzN zv%Qmr8-C&uR|EbY1&AQkAuxbb*xT#ZM4qQ})A;;EI}ua6l(p-T_bVj)>6AwcM2Fe6 z(jxMl>k0Oz*Boy*h(^vL>bQ6tdsCI=!ZTUxvJr7hXE=Bq@oI3U!n z(<`PariO#3!oSn=(`2~&W7w%@r?I#@9wwf2)U}${=PAl75W~iw`=xEaetfVX6G+D3 z28|FqYqvn#o7%+)x*~dK=t>ANlcFVA6-wmjP-@!;GO14~?KwU_UOPIEnAKip 
z+;G9LPLzFa*2<Q-b4L+-3Rh2i!E2uOHLo!@XXntqTi2z-?{B+J`29_{1HWs#QF9Ry*Wl`~ z5pflLo=wCR^lBpFQkrih;$r%Y>!R)g(Si4mbcciA-2x|kM;aw68i}b0V<1L|6NcyS zFhAhsZaU`eFz5IfiMN(^lk_Z~Zn`-%oD7qG*KVrW6mMxm%Gw-nao1$6Gm8uxZ!y>s zF^(1~+w6zC+uAQhCHX3l79u4@aj9EOm93w~n_C$36HBg+_ z+S7hUxbA0CoBV9*RsR~1CFmU;Z@P~D5s@ejOYQNZHn!VWS6Ek5lt-T&c=}{XQ63?j zon+phpXeb5_BmNbVf|OvS&@rRPQ81S*}iZIk}YY4*?wt-`8g)zJ};XgcvXjKEk$`| zd(uzdfA2TjD{RPHacRZ%zWYndFRl0n_Y0AKX?cT`duch!t@x)u>FFH1zJ+0vmM`qP zTe$pO=$I~&dkS~7*J&x0<-g0QEI%!A2ad|}b5J7q5)W|@6KyXmwHG*13j9Z2(i4%q zqN6gwc04XWW%ZlOZdCuN^-o;vEP3lGdCW{%>nWKUeC&EgGc6j@?z0|ZX!Y~=^5d5& zmEHuAi4A!(QOixQ(Y_*C*Kq<%18ONwOv6r`Qvq!4jqu!|rZ|k&Ax_>8z$?k{XwPs+lm_ ze85hncT_r4NW4!rJ>G5+8R)SIwQT+(JCn^}v|aRT8%bjk@Npnf{xF$ii5>Urkd#NG z^UpOCL%c=nQ%c$`9NZgL_T@!+F1HZNU>Gu>ak5P|g<8R|rlt=5L z?`Rq2Q7y%MBj07+Iz^)imcoB&~HJAefvmrCq3D?Mdqa#mRz4*osdE#XgL zQT!t~hlA(B%GwEl9l?DdU=Oj!O) zoz#N8sAIP|`fR+^)*D`0ik6AI{R{5C@rQ$U)PF&$`)ps`{q@qeO@N#!=yp*?BaBwk7{qlAx+ijfYg^wK%2jA^MDNBW$WuzuahGAePtP`?AS(arpnv)Z&0XhNpxwA)Bi0u z0(;2Mz=&;RGVWlZ@z4=RM>2)NG*Wq{t)=yMgLk4xNcz3$CpKczie!z#E46I3P9!$1 zWydmVZ#vBLRwi0zL%PutN?O9~l1dm_2r0W=hCWH9j%tKq&~Ln#{>FPTf@=$)LBqkX z!iIJ8P9{O}eciKk9h1r6o#-Eo=Zig^eyUx=!7ou}Q8kkp?PW6u`{MBKj8rxXWiR<- z@O-N0M?aOGo}M3zVpA|71bVtOs1)>??r)^j=T77K+EdISPd7@ zDF#KYuyS}dH}emfo7so7StraP-!mx*q9&GWWe4zLtUTVEJ+>Mv?ithAmtkt2Rwls8U@7s#k~U&x;zaKb14Bw>Wm=#M=@jw+^=9^$K%_IV*b z(`jd1M7$c}4m}>?TlqtG=php}9a0J!*t<`L^wX}sxOXoYK#>%jBkf*_=T;!{QMJ7V_eVEcaXW2&L@_q3(US$ffECrOP?m` z2_DQc#;b;3Qn2SMH@ju>3+aOLP*E-BB%&$w5C86`IPtmeNFKd^raJ=ttp9iDH#VOU z67AvOeov(DB)jVW)>nM4v$ zl9H|&ZSQuy+*aB(qy3$(m)rAC`kGk-&82UD`sUl?zGnVs-#m{uasw04F+cYm$^5gI zWbhu|m?R81QPWk@cI!HuOIpvuMxvPTA}2c?z|6koPqGA&kENJtx@0$>Xpylgo~11^ z?g@cnzSmDlfO%lS=)R?b1ukrSi&ve;8!hDNywnadrz)f|p24%b<}QUP@P*@Y zNUI|mm|Z(KF^N^-?r#oac>BB3=f7+=WPjNlk2Ek_;~L=NUBj#Nt{uXpb+3FE^L*6z z+7q5QfM|12>VvxCB39T}+uH`ADNbA;@(HJY$~3U*#-EW%Pu1S`A^H-`G1`8 zk;6u2_(v=gP1xxq>WN1r}n 
zN=Ur)Wt+lBO31qbBS-XuLQFo}TEcRzW|rneW!}6fD}U8L6+QQTr^P8Tmv&0@>KCU|6Z9qx}wQ|IZj&bYO`X5CZM!e@1NzV~~ReBOV7U`Sfj;wz6BnE*RCZK@jXXFvmuMg z&af&n(XUybg-Fq7A%3>}iZY;7@)d@48CJf0gn!8Q+9dtHsRA#q?4h;KTpt~HUHQGl zlaw!`wv$mWc!?niyvnFv37t35o>i1{<*M@Na&`Hap~T(e53+7{XFPOOqDK{ARdM2a zwah2mxu;bnCu2za`O>sq&r};Km@H;|bYK->QWY7%Y4~TbhDf_?J;i8MwZY9~v*VNZ zvU&;Q@pwCcw9BQpCj*XHB5p6?-13UIk|@mj64w1M83`Cm49KIcW3mULt!dlPN}a() z+DDgCxUBmzh3taNUD>MQ$tco(qqL!!&zhV2p~sy#&&s>VXp{Iu2{9O43U_j~V&_zW zg>Q+{AnNT&=%M0{O$XB4pb~iBB!*t3C5iaj$4Yn9c11R|xeni8pQq!ap*iMZrE7#p zEUNFzCaS*IE?jgYsp?q%Z1fUFL6%H(cTpQ_?uNs#?Za6#UnZ`V@}_PedSXZy#v;dt zoE)*jxhu3vx36GdKD&dY7KofmCpw5?D_fQ~>NC$UfzqAZs@fX8RlSvDOzC*=pen$q zCW8A|@%dYUYpi?RItzSRtjpR&aiux~y@$DiO6o}^9iiGJzj9a9c4*D#$p}2#$UQ_{ zzmcnm7np1!j(Kq`d_|o2dAFjBq%-0-PKBG>ekoaAo-OEGzIjf$jpC?qsa(-6wdxn5SGbj>x($|G&#(YAkF8@CR4&pwOxMs2r< zTG>I=KXQG4{q5J;Dy54t5IsuixQaM|8T&up;i7|Kx1wIgB>u95WSGQ_8%gFV-#}Pe za`f)1^+v-hq5H;0x7|0`*%+zKf!f}_mL&|psLvS1h=G#Gm>(qU@DM%IR_^>cdBBx# z?CS{z-xps4y&$`bJ7fnWym$Ww__OZSqlozF24IgePxX~o;(kSct8mY{cgd&RoQIKC zj~4!j7E;P6T?H-QJin-qw&jB7$2kYN3U=3}08z!EpGR}6TJzP+ShU`V@yj`V3?y{7 zlg*rUaIJ5^j)In?c$FB-SX$I;3tEx~R0uA?@|YsTJV1Np2j8r!%gO9E(8R#)?C)o} zoA&w^^D+2!DulWw;t7X;D0V=R<%q1T9IYX0*03%uQR1_=~8~abt+26Oo79d4D?Oj*iJ@#xwmg z3$ll_=WL*oC&jI)aG4!#ic*6+j!Gz<7QZ~oN8}WhQ8kiu)3_fSN=6k2_wLt@GPp6f ztTDA4H!K#kBdFYwHBxBW$y(r%wt>mkj=wD*e-miX@1r_^hmV@dmIq59^&w+lS2p=2 znFsk>99*_vJH}A)Rj8wSWWxesq);gU?n14BhaT5Hs2xM$CpIuy+Od*cPKD&Pv;p!+ z3Gy!_$l4NG63|mA^aAi|(-ngdEj)&i&18+Y_DS>*IfORaNc(;3`(-tSUK~vC!=dQ^ z7Y->B4u5n0f5TyL39Y;3>Kz>yLf=j4Q0}L6egA9p%TDx*R*PR=%VbAO`h)h%S*Zs} zMqS%qUer`XdJGzi>ZE6igXccH1M%;G_)lNjE=rZpu4>$dHu=OpY$rRPLI73&axEKvoFDu&9rJ(Sa_ ztW!5ddU#~RV3&5dMNkUIgu5xPOpovh7f^D8z$d4tX^m2U>)@{$NO>?-;*@Ei1tfTB zUoYu~S6#m{S;!N12!q`Wxa1F0K?(Bf#C~3(e6ppKN)y@=_*+l*({-?m7xjQv+>voG zZP#EorS4DP^J2X8HW_at@4i1t$BwQ65T4VH0KOP66zbpxCVR*9pNyFi6yN)SFXl9o z_hDTrE!#hC{8yOlm!)w%JYqSGN6(pxgIBBiapS}b_^!C4;XvB1aIpSlN(D4X`v^z~ 
zb(YKgfd{j<%oHKI!QiI-&SX8raTXT)-%AY@|Hh|T;M0t>ZH98v-Yh}WRD7KHZmXItx*KQ~s2gJlQqP&`GV zc-jHfmP`A)wCNW^sbq5;(2n^kwCRwk{o9f+Lz>;%yYbDwK8Xo=Woo}r!i+=jN5)|@ z`H1%Z9I5x)dMUS_*w7EN>9#G*iC6KixZ}qDe(w7GgtFp$`P>kntSqN`^cUyt@<%TW z>vDEctpxAn$vnCq=RZgBs38u|}12~s4rPu@dDk#@JFF-S&n$L{?sGzwp3i3buj7bK5*EM+2M6JDR z8**+Wst3CiE&*kBug@I=n``}{@29H7@9iqit=s!}%3W1oHj}X#LP`~zIu(A4>aaqP zSyk?akn#}gnuG7D=H-CXLW z!JoG7VKJv;DqC}yp@MPj(lP0REX1V53$j3X{EaKd>`OJJL*L3BNpnY;+ByN@IgV+J zga3y5a9+*r^hF0YbI4_rlj5yI_qmf2TpBl|bY_xOUT$hXxIymLR>WJWO_kZc=y)Lyg-@!J|uk7m0Z z*ne|kV1Cs00kB`+mM_2XAGTnHSE0HT#KpZ0n$%L-nK+I@v!vnN~N`0%kxAr~`*$K_Ao#-U%z%=rlxFGyc+tm>!7;S(o?k=IUQhDS9M)e@3rBWbzi zAB2NjPe$y8@AguS+iyw7*{Edt^|6SyFjX&W$bfzNIJFK=@V710N1$!BS$!UdX7Vt# zBB_1KiIXHOzW)%k$f6S5@1G#_1`~R-1#UYl$jidPe;jLo1ZT#KK z8Q(#Q`{|tESq%P@NTwo5(|qUXi>&cy0y_eBK>%r7CMGX?o&COg||)B|$nVfe?} zA@}?BLhnyTlLmcL+>x*k+%-O>B4QEhPDJ!;R;V{v_{aY7#ECzm*5Z!dz5U}M=Oon} zmr~fv9Oi3$13MYCh998*DlEFOTg-KBo1Mw9Wr+B3NknodcR1E?Uf6Kxt7l8(?iFcN zZX?x;n@`aA!CvN%F5e z(6wqESIvy?m%L;IFP`e0a82W;UY=hFpxhz#8{LVV%Z&R4yW`(hQIFJ1=>nDAW4t7D zm5-UC4umHe05#6WWY4BHXW6GAez(E3n zty5;*u?rKf42Axs(7isAIoTKg257OiJMk^^VPKHByA11<(mifypS>p_5q1u3&<=;K zAX(jPK*UeI6g_1WCU;rp7e3=szB$J_E#C_c3OeA_$; zpP|-|{VF#zN#!c{>-R;z9^7R*nYLs2zHPU2V!A@dC#|D-Z}~?{exBao(m&F1Mu^cw zNaGYb2l`3>V=jsQ{~)EJp92=0JcX9*KA|YXh%;~jVo}0_H`<{5UYjx4HKXayxAoGu zjUV)XnS7!juQPaa*%!fMsMY4z?USsUto~8SRrFgo=I*>P3Sp!VmLmB4RfnpAEzi@t z1$nv79S)Y?o&#~>9;vpyAEdbZ>rwr?Z55h2lKbcsperq81AXBQETvF#{gBHe+_d9=?@GG0!tlo!fb0zFC{~ zRp`T-)Js37e-i#z{nIVY7Z2>cqksNG@;8$s#lcnk?)3FH_~M0*7D(ZUN2NE|<#{xP z`sp|HDqs;~rIi|w19**dMhoJ*{u9A_@ z_>34g&*`EA&$GsC7OM@1R-J?08V0H1)$79Gtj!KG zTwQ_4Un65=#)dPMUaTIwbT%m;Uh|ti^*UyJhd+L+x;(l}wM|tK?UwJ1w+^acySaF| zi}Fx7_|gfjn{DM(bFA{M%hSGWigjlfkjx{V(R#|CWAl%Bs2uGLzVI>SK?}8sPK1NL zWBoG%POOvWX|EpO+|;&Dacyp*l6p$VoPFV7{t22#t#0b8UXeED8nwz_^>c3Uw*281 zj+J4J0HdPgV0qfGYuXAH9`t#z%qe!u(*|8rx_`9}wjE|igM^%xub<({dxLMCU=5Tq z15Z$FDCbh|C5^F#gX@n)bij*i|DsnVIagAot*qzlogO{^uJ2}iTA21kb2 
zaC~*eDjMmg(b+^rMUCb{YT@4M0U}(SURxmh;Q!h{G&g0)tqWRU}Cpo#Q|c?;_*%jhv@Z7QwvjVME``%Af|Dj`02M% z`V1TWy3#qJmd5^BtT$$-!U8&m=r{i2Anh?j5t4*Ncp=g}G^WZG#uOelmmaEXD{(Ln zGJyoh+yvDlp%KP~p%L)KmI{Y3nW!+US~n6!>&7Uitfsp?bniJbJVW?cm@PaO;`2HC zu&(vJ%I=tl3R(uvO4BI2`K$YD9!h?v=F`@A^T4jscDB`EcGp;1Sj*SBCaggyXi3N| zXwjR$I!oLOL!5E#amLM0s>tQfDH_M+*xqeNRkoso(XAf3xMyt(2JRul>tUM zTOfu9Z~L{kj$wrBG{_KXzCv@b&mW!WuJ91*J=yS_slNk8YtU^OhD@7U)#QS5OD zj_Rli24CDw?Ip3kWaZREn`J2DVs;Ec#FCdHmb<=V)r0m8-m61xm%0|W40)(TFo_k2 zVGi1mYn8QSnRMx<_5-DL0TU-{8(@m+;yOok5pfz;t#IPyPR77IuqP1urPehMRx2mI z9-3B3*MD(hURVF!%OSatF2urWJ=ZDpy`9iSZCRsmKXQME_w?<*ASp}H1CiXFn75e3 zqLPz=L9wLm7bTPPxz;lrg$>=raN=gX|CN^*K4_K6VqL3-QY%d1B7r%u;Z-hbK|OUz z1v^f6sIpbfsa$6Qgo}aiA}SL$vf5rRgZ4C)@{`~X7%q3SlQ_i2U*1S#$xeA zSjC?}ZV?iiqFY1Ab=+S?qU~HYl(x{bKDJC%rf@H^e%Y*;L=2HuZNG>SIQ+|I&3KH| zd*v?mc9ObeS=@FV#ZQJ((-{?DqG&HZ zrKPjCAAFMpsw=Y4t50<)giS)M+fw~LIAns5gS;xt8cgE64Rn6`d1#_w!S$Zb-FVN6 zzYgyS%wGFt^8h9u+MRMPJoN5?xKD6JOV58^pHNYrzik@DdLGu6U}T9CyBeAY7+7(> zG)JPfSZ*X{a^jJ08DPH=qTEJvS*}QCvCk1qrJe8^9k*thv~S z`PuX@LsJjU5Kh7Np1u3g8;M)e8qej~9vS@???AWG zwTI63VIh@GXWVS#&RimXyMC51B*cuqA99SzI$NW3ZpSwi#(n7fyqCo9SG+_rlP^(9Ctfj$kCaj=VnW+EakjLJztavOuU6=>z4E+f# zTQ3hdALOb5iHIvp<0XuvL)6xaP zn~1TO0!`M4YZ|Tzrf5VMEM9mvVVjqgsNIW)#<))+vS=@famT#To#eKXyD8=B+8!@e zAjYk#7-AhHBv}<(Rpm@79eLVbnJ~POxhuHu^q_x}m@kqUlDg=^n6s)6#tlovS6 z|LF{A<9;g3O0N9{a^k15I_7QFThY777)^r$cttg+cGj5Vg%3R0!skgpCY?)CZB@8p z+~+Wlr=#X0C%H%4FcNE3C?4y2=L!twj`ZGpYYw~$^e*+*r$UVT64nsyaYpbxc8f|* z2Vx_~9dh)Lsr_vyxtr0^T=?uAiVCe&D-MH@+UckZ=r|rFZj`Frb%7MZSy`*fCATWd zRlq>jFbDPkt>vZO58G z_b~UCVC>Gu+~sWdtX!-;p`Ot*bEwCID-czzPH4TXhOAfCk<8C9J7(PVu*(>=+lmrV za|A|D@eq1*;45Lo4{7|NeY-OmvjN6UY0Wn-!kF4Yb1|O3#zk zbPbryyEm8fPLuLVdqzsv;+{Fu_4%GCH_>MmVMgJO=-eqC(?4W(4E>3QNYSa>9B72q z99GcDwbIWFue%C55;=70!&ez$p#*EsElPWbU}`@D>eq*~MrCU$msrS%J9=p8Nq%T7 z_(1ffaa6;Vsa+AO(e*j;=y%K)OYRYPSpLjgh*Pl9XqMoYx^Dt7W^l!nS4-kp401O6l8XC;&-b!}x% zd4;(pDk{2SZc7wP#7$1{Uwb_wG1)~bw;Y<`u=xz43zwX8pxsqcUwq$PYc4@V{7w&Q 
zy^Q#6_x*G~R$rMb-LnLR@V=kc``2#9@E1QLqW@7sA-w6QzGzAXQ~L%7<)Pw^Bl~j% znFQU_&49xx=6|3*>EQeOnYSkiWA!vrpsZkAm~TWydUkg=G#AIbg|#YgkkQ2*0}l@N zQ~EICPJHS4@z8-gIKSSlbaR35{n73c(8eUrE{T(P_t6ry>-SAD6%jr-)l(uYXgY$i z%VkqPE>=AaAR~)A9y>s713EHW9sJu5H8F;0#3z)9NW{4iZOR-fs9ncA%m|V?c+8Sz zss={S8x|2RkU$QZ;{;&L1}`pIrhaSCRq4yO$e4Q?;v@?+Q}Z7aINX9)A{&1Cu^AV+a-27-h>=enK>9-VFk-arKj@lqtJ?5M{ zi|TxQrJ*gwx}#XFFhq#*{sVRlD}L8Q?U+ajM)Q=HnFl5p3^tK_;O*WI-&r;tZTM}ie^IhjSkd1_wW?&fhtQdk! zo6_<6hs}s^ZvBvfKkJ8+g?2h}qPj8*+3X4;M0eDXOfPj`Z2-u6P;O{;C4=9T2C=^U@t~&u<1Sh z))uwFSg{Q{kQFa4jrURb)IMUx$GQ*?eE#uV3gMcMT0VP)#rTq1bgWp=MfEBxt_5|L z`6#s(NcL7vU;YM(YEMpO+=U1)yN+^SIQT0> z5NTaw_2J;qlZS=-^?L=3(#!{hnyV4ce&^a9+b@FGEaA^rsZB`x_|}Qyj-~_DpILnK zn*$dSles_hvX@HbDZzgpsK*%Zm!;>b`tIux<`>y^H3Wyjj+CrIW#=i?0@t|X%#!Tha0L~MwK8C^94IvjH! z@-c?yLX)sN&yup-TebPam5Z^8bfrCWl_N=A-MXqSskx9vt7wUw6q65MV_n~5?_HUU zcTna-zN*a`g&8(O2U$w=RnC>Rk*fgPHsr7j^Vgkb)@yj@rK>8d;f*23s+T@}S;3rl$_8S+e=4NU!{Mb*Us7N^NxJ}Ikc-_^;*=0AP15~7 zLStS@r30{^q2$u~Scn8ueW2>E`3{jZ67do1N~_-fd^}QSfLU4>#Ib{Q%q@2{VRD z^o7<$_m2+&ywli+wG$jfpJVY{k7s(X?z^9GRGF`wxhVNft-{Msy35K;#wyzTcjS># zoxa-Z6HJ^~bnGA>~QQ4$RA?pC|+t1W4X$k zj`>G!Iuj7}_Dn0?heP*(FT-xHQ0)>kee<^-#u)9@PCsssw~tQF6&aaP zNKT$(M5w68i^7 z{=$p>))Iyz!}CM@?+4siid33=jVr%zpG2u z1eAQKz0!QYwb1*>5N}A+SZqIq-@k>F#%`=$+V03fB*9{fvsJ@en0@w)@(Lo&O!o9B zCUvVR)uL%V#Xc6wHvJW=Ta**)npyLVP&R3_8R^b*^tk_z^M)+4f2D{yKhjF;a-puZ z-t+b=Ho#>oZDw1}Y@fd ztukIY`Azeq-i3C($gwY(*^6%bo93YJiXHWj+ik1AUHb=8?UxnH&!5E%f20+)@aJq5 zHY;lF$8h@}FDph`6_g&6>yn08y(me`jD)Y1;<&|Zw*57vB-`qT=Np9~;J{$-YyN~x z)y4Ulsr70#7g!@<_cVLZKhg^Sbl@@Rc@g`*pSJ3+%{&2KRtxG}>j(9;C0!?m;9!-`E%)R?k(7%^gmMW@7QAO(t6?7FX*3_;L<+H z_fJLt9|0dwesE#;DiL3DP~TvgK&7Sz60`J>Q*hM`zo0KnI*c{`3doPyqrGGVjl$d~ z>=PE}PJ#`W?pcKB9=WH$`*q=yD?i91>5Cfb!2`)ja_f|&&u~XFn|=M>N4j3HH~5j{ zTf4^zsiC7#T{RTG&xe&`s;Rz*g)xUKWbfNeftMX0nNJF&?ITBgN7)*ac*dc#&%qpp zh|6#7u^a54Coi(zC+x8qY-9wj^K$YDYox|MOCD}_4HVc**kxI-;XTRUHDDOeEb^`1W^2XDLNqV-IROsyI zoB8+}bGj!>*ktA}?y+;NjCx_N?~2R3^2*nRD^_Lz>an~-0ir(FK-`Dn10=48y$sr` 
zG*XY?a#99ZOhN}JwBGQamlXb?s{*S*gJ`hCJ-VVXaZ+kxClZxN`*loN4p z4^u&DzZt8XDCJ$kYktsTu->j{%~QMsDkxh2`7GwJq*!aT9y3oA)_WAFcabdlLLB83 z@te?s!wZGfLQ+P=>)r2cCT@L4*%M~b!M@{szf}$ESxr_fF)Y#9-my_UWZ2ulh$BNB zyg$ISkoML)w`gB!^w>wheU5pu!T(GODbMz640tlqcS(NQi>I1MPq>(#iy>LyHz)iC zc1_ejnwj~`198w{OeS65rM!5gGsehd^WIeSb6p<1%TcRR_<)`N&Ude!jA`;sjhu%u zwJ2{g>-fPk)3K>!^EfslRbacz+U5b)J8hAUIfQ4i9pI{%0 z+Q#BrrfMZ*H}+J9$xa!(OhgZD zg{gi~ zNe|3Rj)lKq4ZH@QHS>RUwAq|B&?gR2>pZ8#ii#*kUt+2c@nk_LyXbwJvh`F9YdHY@ z*a*9k*G+{E>#=RoGP)h6GV}a;E$UAdImSe^W+uQhU0!ADCs8wmR|UWQW3$4S@HNSF zv?V*kcp_qq^SRqNv^D&*nZ5ekAyl*fkIjPLzlAg=qi{N;Yy@U;pER?7ooPFXGb$eK zRzM@NjT%F?jimL{4P3R>lse_^XZ=^8E0rF0Y;i~K!AN`Ur^(NyUz?flNt@X@+nyk7$2kQr znJ0LL*tXriD{FjW2kgDK`)edy zW3FMzxdia!2?sPdsUwTBm90dv04v^=Q(iPM;OeKr)vl(P024D*zLQqUQ#RWVO$owx@QKZ{ zy(U4(u0Ka10OqsVgZpf%?D{HdQvvGl0QEF{64ZX8w)BTchx%+OE9@z1TknIVan4%R z%Brcxd8-M1$bfQ~t6IBg&BSf953wI^g1t7o)U2v^QP`;OciCSx!CG5eochAu<=1dm z^n&3ZO_VN=`{@jq5yOl&Bl9x$LB}e+T6jsAiCfI0z4Y09_(z|3B&UT*Mat1*r*yGM z6vQB2XCGEh-q6CxHnuSG4PALRE4yC2xv?wn$M?Ek{PES}-(&wdh0BTxHmf=Uv$O^4 z%d11M+=}v;3D9<<<(}~ZH$gFhdaV>b#yGIUQOtViwQdSy1O!^jvmPrZ2p11Y|CG-( zG3VSIM8#=j{QuO|G_m%qF)Qx>5fAOe@6KwnWVg%l6FblHSxWC&MT!TjyeAP#>2vB$ zoWmy2b}BRC&uvk>0=jeDOWsofo;#a#`6p!^=f;NENjjwG{rtp>=a`B8P&&`5*KtMp z>nufE)|D1fXq(L9k?4zC62S)-*C%FG>jkVHnu+x|Gk4-V19~<}$w{pD6n@x2q!a+p{0*uAc}+nOB31hhN}Xl3(5+SR#8^Ftb3HMlBq zRpQFy%CFXr``9c)xfUOjJpktyFs)o13pk{eiy!Reu{M&^yr+EAnlda#co_ReG2bd) zhE?yYQ@Gak9MifU+{3Z^{qu)%!hXMA_yBWFW@=W zUMP(?s#&`wl%^wwx@IoBs5Vv4rxNi+?4Q2AjHG`@Ik1*VWxDtupSdetC?`1Q34XJ7 zZ(TDZT=kEwKDBx}PDaoZrOgu3YiZw5doQ+d6QT-t30V0-NUGd^q=xP0=cQ=IYuIjC zE;IJc8opI#ea#Hs%*X1E-0!Z;tj4(1oiHmKl&vqa%zY0n`7ER@ikyUo@u!6qIyM~K z6NX0Bu~|xZGkIV`*Z*wjy%?GS%bwTH4W$xzQ;0c~>eww@0u4A#GJozk;k5>>SX{t+ z2&>H2ibeE};c;ieorAF1BwdCv?I_?74^Vn^hD6F`hO2%Kr<5pIMo@S!DRk#|DD)Q= zDb+RxqjE>h2i003UJS?bo@ABMn54ipiZ@o^>Q=JGOA7A%A|-((F88W!MR~~@trNG+2u#9boGwLf)OwYBVp1Qd$OjchIDfVt~JH-zPX?!e#Z6s z_FM{y;&ikg-qR0`L4$BtGlt*Wx}_L(1i@_0G|L$-W>iz^)Kz+tCwl#g;I4{ 
z;OJDFTxh{rchrNIfru3?bx%0mRu>`;8O{fh3m5%}W>23RGGGK@9@{GyKEwH7v|N6# zsqL2r%Mu1rX?BTBIJS(l{SiEQ9llcFmKRf~5q#)PdFvB{ex0(*Tn1bVTAmofH!LE% zV7)R-*A)uq?q2HCMK3e44N(CZThLMxqZSCxKtNkG=<74|RFebf;~`dkA?@o#ahQ^8 zEzykfeiqP0y-I1WO0u(E$~JS0ik$`SpnE6Y%++^Ab+y%h*vfPL9Ki<$vajb-o;ZOz z6Fp;uoDjFWd-W4``4WtBZ7Ln*CQ#!={(cGrC-L3$W^QLaPNnh~kEja~zbmXuit{Cm zPZ-Ozma;NLE}6z%W+h#BH4{6z?t0lA^m1qE?9d3w%IN5?Br^)sg$adpZ{AD7R$+*b zt{Gzk2}%Xzo3eF1&osVm{vQhEDR9c;2LMHLb|RwtdBPV$nXpq}(qxTKUhe;jq+NnM z2nRRcx`_DcrI2>^X&WfWBa#~RuApe$5cA+L#G4GzizgvZ4-V_v(iY$KQag5I=T{`1 zX-)dcs;|wtGtg4H>dRlhrbWq2v(l9XpX6emA(sbuIjA+_J}aGdOz@D@Kjx0GYOI4V z9>J;uK+liIDzf+l4(rGwYsg-`4ef{em_T^k)nv$aewjg)T~ zFZN8tlTnOosMju#*TcbWx9D6x+Xovz^_eAPWP*p)Gy3W!Sxad7!QQ=X+2$nJu%`m! zVLKAtB%3rdEM-0j2cNygxYtsSM)W^Y-tUvsom*{L7G)tyLmubes%tfCC%`t2e203N zQvn^zz_Mg57fsC8XO^4-zd%nE3>L^p8g)`&E~`PDhn^EPX!PQGE!F`ZG~4PylN3tl z?PmByqa1A;9iQi)L!>{dRZi9;YJbAv#ZHJ(u*Es+BKu{mGTh=A1zjm`)nM7aqivA$ zj3QR@5$?r`SMY=pl8<@Wdc+DYzqB@ymeD%zhp#^zd?=(48mm#`WCP&kTX$pj?7|G9 z%aJuKErYb(<&4Mg-yEEv7vk(gY;)|7+RAYXA3YhhH<);f!pQHLKz$Xj4X3l{(HX*< zmox@S6>@hZpL!`D5vZT?k&trOC9GdPW24Y_+DMxc@J(%;(_sGs@@^@9HO6GrnCw)J zQ^Bs#+mmch;B>!tQ0H>NCrlRBV%Dgyzlc_Gz&P773-dhQxK^5bthJf2i#Qx)`!M=k zvE`?9&{BSV!4F&u;(+}>J%FN%YL&r`VAxq!I(r#t0kqlqwg^1sSPHQh{d1OjLkXRk z$XnGKSS{#@nU_9Ce0b48*flA8TNz4esfI1q!`214Q-aeEJ|dvCTC_Trw)Yd;Cbalt z2W|03j#S!W?GoBzEp2g~LuxUbYLBxmvCpyXv2V4hfE(sPk{4vBZMu#2C2jRx7L)J= z@C*lE>7L;?*g4x$yTz8AF|8gpKZ{d`xO6{l*M~p(miBS~x6oIQg@bFl?-oY;(K`c% z3SZc^+UEFYOMJj_L#_G#|Hl39`uGgP#ec{9p8EgBdZ^S-8Sv#HO4ilfRB11@eQQ5u zOUp>}@;JK+wjo#FG!d2*^?lLO;owJIlZ80{Btb6a(VoWHO~_OD_kcoL?brBC(57YR zy;|@^BKv2*p^#g$w9vAI#}f%8atE8JU%8T&HhxP>8`CmUBc-{^sBNwUIUFqM8kj%V zuP^)}l&E_GzbU#zK-96j{-Y+uJ*1lMMNKGcTuRFtrLx@NrvLt4@2B<8yP?f2Iyz{U zOfZNfhv=%!>9*7>+V8b1V=Q-h<7N#tr3lN94}racJXK6S5l7u(Vdbn5$CFfiktT+% zQ3KOD8u>kKy<@G|SH)PdZ!mtO%|ap9((^B1jv~#rOgO5TiJeT?JD*|e3eLJ^Ng>_| zwVCj&cY6&ukMRm@U}|egenRmbthC}T8=m5Q~-q)N6x4iPJkQCL`is3+pAF8JwYw?L-dsl(b;x!If&MOWg6^rV+Kyd!I?hL3)CW0Mv=@*m?7No_XgkWzEcu(IP$tZrTTH`zi_bDqZ8)a 
z=zURM5&A$mGfp5QV2xr0${1UU&=q{5oAy`H?fxo~`it5~UxggI<&YeN>_CivE94J< zD+v*vbD^m@X@{S*4;ww=$}f&rr7+`ZnI~=5kfEqI2%kBP)V|2KZh&VMC)Spo4{b`_ zYf@R*F{g2NDx`E?^_>sBJa&lXbm--{fBCtazyH-Kz`Wq6aC(F1a9Uy5c*VaCaqJ1; zW$jV6n+fy=Ew^~VUJcnWih5-EiD?;$kOcC28kNR=E1cJ;6TnA?%5Z?22(8n73cBNd%uKv0(T|itk{TSV&aBt=2B>ANCsjG76s(&dKPRstW6D!P@ zE?J#gO`kr()6!+V!5n(#oc^-n9p~K_c0bh{oYvDDe4x{aQ=2O{@06W6PxEr5eFr4- zv~T&-gRrkdiO3W!(HjhfC{{<#Uq}48H+T;G1n7uT&!$zV)Gpj(MIk zYZ^vb9W5;DY~i>P?8AVkkm-NEcl0KYu{KU51qCh4*n$p6%8Zt{v8>l*UV@Vq;-!2p zdqzuqHtU^hUfOEmXw5`?-|;SF{e^%oCi*hl71N{dlC?!n2IjhAaA)a~wLh8@)A(LM zCyPNTCIk6B>7E%PtD*1e66dsj`%6Y7xrz&zYsOp&5X}p%ggo8KFx595!;-NsfDF@| zo2FdcV#LXelQmSg6tr094!G%vbzsK{yut-a&*GNZn%OwJF{hQElhYz&jJ1nfW@t<~ zIe^1VPWMn)&1biYIFHf82(T!+obB+|WLE~%++o-)noF82Wdr6+nok27%r`VR&z}uw z8j-`+dp-^5G?;?~bX}UBE1sMd4O3>in)Hr`L2V-~#@aP4JU?u5$|cRXm@Ak?^QQq( z*TswYrrp%Sk<>1}J-3AeR;L4+zPGTJkxSo>6pXbfA=2yXz_1jvF1g3q;fTXID-X=_ z)Ba5K&>kJ>z0=<^FYRyMGepqhyR9u;>`=@hiH^2>{$nzqiA>8$_vCGq;5sPa)@YV1)|&OJx%M zNNQ0YIIzR9j1Oe-fPwuTgqe+T{d4cNsBfyrWTs^vL>V)B@f)U~Wh@d~Gz|WhVv7qN zHm7KtQylt*)T53M&+m+e;+?^LuV1u%deOG^O@)UIm0^9&BoWiLdywxAEg z#VuPOo6=E#0J~7w#VuDfi5@Pi|H`&rVbQxB>Ee2HU9?ZQUQI;ACU{#~AwmiYgxy|a&P;=JSdKw%0=fbdq#IQwiTp@|(x zXxW-Jge0JBdGYc>gT#bsGnGNm+ASTbluoG2+D$MrN`itEX5Pvs6<87|BNAn>0Ul?j zC1AkBf$aM`KS_D7w10-GKGMhE^K;ML{qFZX&)swP+;g|GK@ms34_MEIV4@B6Nk2u& zTpq7v=D?D9F{Te> zhc7&AaifGXoyj~8WqIOYS&m?+sp0Y*af8;0l-4Oo9pyjmM#;Tx_3JfnD6$bXN;&PO zb*QBr%jvLO1eNr`<HsOgUY~M)rkB-Om43|sM&UN*8R@#-(qh%$!5VdEj zT^Tjju0$2P6gJ%mTXDC{`zx!h_nuCNQY3V}W^gHUg#EF_7-1fsb_G09=x(>@ZP_mud?r^KIUD;2#lro*K~z_M{QE=51ir5x4D$_l)%@CfQ( zT->2f%BK;X=xrklIZ7w<$2KnQ}MLNB{JcoAZ_i?(Lzt zslKnu#iqb#d+-mvy5DI=Nx-;$|DI@iV#Tb-xfwm7BAug2vx2&lI7{PfT$+b7HI6zo zHL*kUM=URjRLVp;whHRmo9^yM;T{^rtn!B6qo3$KH@-soryHX(7~Txd*QJ&^b7Uv2 zlh$~aY&D?YwoJD9US{8Yi648k{k^$eAVbWkc^X42_ zc7V+^c-f3Nx_+4rQdm#;fH{4%J!i2i_hdJ8vQPZ&Pb!DBQw<89AY z>v%_6H$C%X-HS5=OTY-5_oN*XuDZnr$)$`>j~ClR!P}Gj-oXgtaff9^BkM!P-Vdz9 zxyKSeZ#;^tZut;G-$Z*If)em2C2utVtDh9_~ZdFn9E>o=e74c+fChN4EnUK{c} 
zxYHiVZ4kP4;yI9RPmH6bpf{APgP zbbUgA`_Am?+3q3VZS{nGw;8@u?N;}Xg$#WE;5%~__V35t!oo$i&tu^^N3HKT+G*N8 zePC*zcr#8$YweDvPmeqEW1Lg^#NNsBVzS^a#)iX!31op~Sjf<~)h_huZ!n}ZbQi?n zS~4ZQYpdPS{rg^F*c2PyiLCJsc`r@Tq``_{8@%VE9Gsoh)n-qG6)z(9hDo3J4UstC z|IB4Sj&TODr^k1^zIUJBD6I2qA|ugyydhw+DBz30qr5G_vyu@ zr4^=ED=JHirqJ^Sg%vgB)utt;X+^_X)+#yQl9OX!k!zY(Ws09x^pa`Dw5plI)pRc< zXBE_zlqEY#D$Cw1s4Py-vaFgrXa4+@q5|m1;0l$LzB#e=O6HI5OqdWIHK@>*?v;epb4eLWB@`* zQLV{rDz7QaM^qKQ%N90qox7w$P2jGmtVRNIh3rVnPs`45XIy^9npoV*8OdVs>LTylA7{&3RdX9)~7EZ{iq?f3@Tk|O!)}RW3kHIKDr|K}fPFUP)Y z`^6Vo`oH#gY`2$P`m->r)zT=9|cd|8oeoGH^Ufqsy*d#|!31Jco9EYtpwK_$GRw4fl%& z_lx@N2=zHiXeK8YO5|~hphPLB8cJ;B)Io`QP6L!^?WIjK;>%&|dwmpN+fbpp@f-Z zgA$pXTqu#pDS{HEoN6esky8gH>NyQiqLI@CC3bUKphP?87?e1{IRhomb1p-P>zn|T z=;!DnvHw8i;Qdo3B94;)B@#KQP{PcyL5WOGE|kdQ6hVnnPBoO+$f<)8^_&JM(a33n z61zDqP@3 Ezc5NK8UO$Q literal 0 HcmV?d00001 From 756251366295004a9c4b55781d42746f7f62af3e Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Thu, 23 Sep 2021 16:54:04 -0500 Subject: [PATCH 003/123] Board IDs: add some missing NXP boards. 
(#1219) --- pyocd/board/board_ids.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pyocd/board/board_ids.py b/pyocd/board/board_ids.py index ca431da84..dde7089d2 100644 --- a/pyocd/board/board_ids.py +++ b/pyocd/board/board_ids.py @@ -46,8 +46,6 @@ def __init__(self, name, target, binary): "0223": BoardInfo( "FRDM-KE02Z", "mke02z64vlh4", None, ), "0224": BoardInfo( "FRDM-K28F", "k28f15", "l1_k28f.bin", ), "0225": BoardInfo( "FRDM-K32W042", "k32w042s", "l1_k32w042s.bin", ), - "0242": BoardInfo( "MIMXRT1010-EVK", "mimxrt1010", "l1_mimxrt1010-evk.bin",), - "0243": BoardInfo( "MIMXRT1015-EVK", "mimxrt1015", "l1_mimxrt1015-evk.bin",), "0226": BoardInfo( "MIMXRT1020-EVK", "mimxrt1020", "l1_mimxrt1020-evk.bin",), "0227": BoardInfo( "MIMXRT1050-EVKB", "mimxrt1050_hyperflash", "l1_mimxrt1050-evkb_hyperflash.bin",), "0228": BoardInfo( "Rapid-IoT-K64F", "k64f", None, ), @@ -59,13 +57,21 @@ def __init__(self, name, target, binary): "0234": BoardInfo( "Rapid-IoT-KW41Z", "kw41z4", "l1_kw41z4.bin", ), "0235": BoardInfo( "LPC54018IoTModule", "lpc54018jet180", None, ), "0236": BoardInfo( "LPCXpresso55S69", "lpc55s69", "lpcxpresso55s69.bin", ), + "0237": BoardInfo( "FRDM-K32L3A6", "k32l3a60vpj1a", None, ), "0238": BoardInfo( "MIMXRT1024-EVK", "mimxrt1024", "evkmimxrt1024.bin", ), "0239": BoardInfo( "FRDM-K32L2B3", "k32l2b3", "l1_frdm_k32l2b3.bin", ), "0240": BoardInfo( "FRDM-K64F", "k64f", "l1_k64f.bin", ), + "0241": BoardInfo( "TWR-KM35Z75M", "mkm35z512vll7", None, ), + "0242": BoardInfo( "MIMXRT1010-EVK", "mimxrt1010", "l1_mimxrt1010-evk.bin",), + "0243": BoardInfo( "MIMXRT1015-EVK", "mimxrt1015", "l1_mimxrt1015-evk.bin",), "0244": BoardInfo( "MIMXRT1170-EVK", "mimxrt1170_cm7", "l1_rt1170.bin", ), "0245": BoardInfo( "IBMEthernetKit", "k64f", "l1_k64f.bin" ), + "0246": BoardInfo( "MIMXRT1160-EVK", "mimxrt1160_cm7", None, ), "0250": BoardInfo( "FRDM-KW24D512", "kw24d5", "l1_kw24d5.bin" ), "0251": BoardInfo( "FRDM-KW36", "kw36z4", "l1_kw36z.bin", ), + 
"0252": BoardInfo( "FRDM-KW38", "kw38z4", None, ), + "0253": BoardInfo( "USB-KW38", "kw38z4", None, ), + "0254": BoardInfo( "KW38-ER-RD", "kw38z4", None, ), "0260": BoardInfo( "FRDM-KL26Z", "kl26z", "l1_kl26z.bin", ), "0261": BoardInfo( "FRDM-KL27Z", "kl27z4", "l1_kl27z.bin", ), "0262": BoardInfo( "FRDM-KL43Z", "kl43z4", "l1_kl26z.bin", ), From 0e1b14f30ca636727ac7e3373431bd88b85ad9fb Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 21 Sep 2021 16:09:40 -0500 Subject: [PATCH 004/123] cmdline: handle float values of session options. (#1216) --- pyocd/utility/cmdline.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pyocd/utility/cmdline.py b/pyocd/utility/cmdline.py index d13c9c391..3e56bd5c9 100644 --- a/pyocd/utility/cmdline.py +++ b/pyocd/utility/cmdline.py @@ -115,6 +115,12 @@ def convert_session_options(option_list: Iterable[str]) -> Dict[str, Any]: except ValueError: LOG.warning("invalid value for option '%s'", name) continue + elif info.type is float: + try: + value = float(value) + except ValueError: + LOG.warning("invalid value for option '%s'", name) + continue options[name] = value return options From 36e77a8cee193357f201954c6924526954ba70ab Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 21 Sep 2021 16:53:48 -0500 Subject: [PATCH 005/123] Board: log description of exception during uninit. 
(#1217) --- pyocd/board/board.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyocd/board/board.py b/pyocd/board/board.py index 5af925595..0bc4ab12e 100644 --- a/pyocd/board/board.py +++ b/pyocd/board/board.py @@ -111,8 +111,8 @@ def uninit(self): resume = self.session.options.get('resume_on_disconnect') self.target.disconnect(resume) self._inited = False - except exceptions.Error: - LOG.error("link exception during target disconnect:", exc_info=self._session.log_tracebacks) + except exceptions.Error as err: + LOG.error("link exception during target disconnect: %s", err, exc_info=self._session.log_tracebacks) @property def session(self): From b19e20a01449bf78c00e88d5a7a68804ef463fdb Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 24 Sep 2021 14:56:42 -0500 Subject: [PATCH 006/123] JLinkProbe: only connect if jlink.device is set; non-interactive option. (#1220) - The JLink DLL will still allow CoreSight operations if not connected, so in most cases there is no need for pyocd to connect. - Added a jlink.non_interactive option that disabled JLink dialogs. --- docs/options.md | 31 +++++++++++++++++++++---------- pyocd/probe/jlink_probe.py | 22 ++++++++++++++++++---- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/docs/options.md b/docs/options.md index 944ab0236..fa3d4703e 100644 --- a/docs/options.md +++ b/docs/options.md @@ -532,22 +532,33 @@ These session options are available when the SEGGER J-Link debug probe plugin is Option NameTypeDefaultDescription -jlink.power +jlink.device +str +No default + +If this option is set to a supported J-Link device name, then the J-Link will be asked connect +using this name. Otherwise, when unset, the J-Link is configured for only the low-level CoreSight operations +required by pyOCD. Ordinarily, it does not need to be set. + + +jlink.non_interactive bool True -Enable target power when connecting via a JLink probe, and disable power when disconnecting. -Default is True. 
+Controls whether the J-Link DLL is allowed to present UI dialog boxes and its control +panel. Note that dialog boxes will actually still be visible, but the default option +will be chosen automatically after 5 seconds. + +Note: This has the effect of also silencing dialog boxes that appear when +updating firmware / to confirm updating firmware. -jlink.device -str -No default +jlink.power +bool +True -Set the device name passed to the J-Link. Normally, it doesn't matter because pyOCD does has its own -device support, and so when this option is unset, "Cortex-M4" is used just to supply something -valid. (For non-M4-based devices, you might see a warning about unexpected core type if you look at -the J-Link logs, but this is harmless. J-Link does not support a "none" or "unknown" device type.) +Enable target power when connecting via a JLink probe, and disable power when disconnecting. +Default is True. diff --git a/pyocd/probe/jlink_probe.py b/pyocd/probe/jlink_probe.py index d5aac7c6f..9b43198fa 100644 --- a/pyocd/probe/jlink_probe.py +++ b/pyocd/probe/jlink_probe.py @@ -160,6 +160,11 @@ def capabilities(self): def open(self): try: + # Configure UI usage. We must do this here rather than in the ctor because the ctor + # doesn't have access to the session. + if self.session.options.get('jlink.non_interactive'): + self._link.disable_dialog_boxes() + self._link.open(self._serial_number_int) self._is_open = True @@ -212,8 +217,12 @@ def connect(self, protocol=None): self._link.set_tif(iface) if self.session.options.get('jlink.power'): self._link.power_on() - device_name = self.session.options.get('jlink.device') or "Cortex-M4" - self._link.connect(device_name) + + # Connect if a device name was supplied. + device_name = self.session.options.get('jlink.device') + if device_name is not None: + self._link.connect(device_name) + self._link.coresight_configure() self._protocol = protocol except JLinkException as exc: @@ -383,9 +392,14 @@ def options(self): """! 
@brief Returns J-Link probe options.""" return [ OptionInfo('jlink.device', str, None, - "Set the device name passed to the J-Link. Normally, it doesn't matter because pyOCD " - "has its own device support, and \"Cortex-M4\" is used."), + "If this option is set to a supported J-Link device name, then the J-Link will be asked connect " + "using this name. Otherwise, the J-Link is configured for only the low-level CoreSight operations " + "required by pyOCD. Ordinarily, it does not need to be set."), OptionInfo('jlink.power', bool, True, "Enable target power when connecting via a JLink probe, and disable power when " "disconnecting. Default is True."), + OptionInfo('jlink.non_interactive', bool, True, + "Controls whether the J-Link DLL is allowed to present UI dialog boxes and its control " + "panel. Note that dialog boxes will actually still be visible, but the default option " + "will be chosen automatically after 5 seconds. Default is True."), ] From 35f47b4ab4fc4fdd89215445162d94b5d9daf805 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 24 Sep 2021 17:58:15 -0500 Subject: [PATCH 007/123] CortexM: fixed missing format args for warnings from post reset checks. (#1221) --- pyocd/coresight/cortex_m.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyocd/coresight/cortex_m.py b/pyocd/coresight/cortex_m.py index da4373d6a..8a1d934c8 100644 --- a/pyocd/coresight/cortex_m.py +++ b/pyocd/coresight/cortex_m.py @@ -738,9 +738,9 @@ def _post_reset_core_accessibility_test(self): else: # If dhcsr is None then we know that we never were able to read the register. if dhcsr is None: - LOG.warning("Core #%d is not accessible after reset") + LOG.warning("Core #%d is not accessible after reset", self.core_number) else: - LOG.debug("Core #%d did not come out of reset within timeout") + LOG.debug("Core #%d did not come out of reset within timeout", self.core_number) def reset(self, reset_type=None): """! @brief Reset the core. 
From d524a25da61107a52bf706065c6a06967b6c56e5 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 19 Sep 2020 17:35:45 -0500 Subject: [PATCH 008/123] Docs: prepare docs for site. - Add yaml front matter. - Set class attr on some tables in docs. - Created new GDB setup and installing docs pages from readme content. - Link to latest Python 3 logging docs instead of 3.8. - Changed links to work with Jekyll. --- docs/README.md | 4 +- docs/adding_new_targets.md | 7 +-- docs/api_examples.md | 6 +-- docs/architecture.md | 5 ++- docs/automated_tests.md | 5 ++- docs/command_reference.md | 4 +- docs/configuration.md | 5 ++- docs/configuring_logging.md | 13 +++--- docs/developers_guide.md | 9 ++-- docs/gdb_setup.md | 54 +++++++++++++++++++++++ docs/how_to_build.md | 5 ++- docs/installing.md | 83 +++++++++++++++++++++++++++++++++++ docs/installing_on_non_x86.md | 9 ++-- docs/multicore_debug.md | 5 ++- docs/options.md | 9 ++-- docs/python_api.md | 7 +-- docs/remote_probe_access.md | 7 +-- docs/remote_probe_protocol.md | 5 ++- docs/security.md | 6 ++- docs/target_support.md | 9 ++-- docs/terminology.md | 5 ++- docs/user_scripts.md | 5 ++- 22 files changed, 211 insertions(+), 56 deletions(-) create mode 100644 docs/gdb_setup.md create mode 100644 docs/installing.md diff --git a/docs/README.md b/docs/README.md index 75df7f63e..3afbc6ca1 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,4 +1,6 @@ -# pyOCD Documentation +--- +title: Contents +--- ## Table of Contents diff --git a/docs/adding_new_targets.md b/docs/adding_new_targets.md index 26d2853ba..ef3f54b84 100644 --- a/docs/adding_new_targets.md +++ b/docs/adding_new_targets.md @@ -1,11 +1,12 @@ -Adding a new builtin target -=========================== +--- +title: Adding a new built-in target +--- This guide describes how to manually add support for a new target and/or board to pyOCD. In most cases you do not need to add a builtin target anymore, and can use pyOCD's support for CMSIS Device Family Packs. 
-For background information, review the [architecture overview](architecture.md) document first. The +For background information, review the [architecture overview]({% link _docs/architecture.md %}) document first. The [CMSIS Pack documentation](https://arm-software.github.io/CMSIS_5/Pack/html/index.html) may also be helpful. diff --git a/docs/api_examples.md b/docs/api_examples.md index ce020f2b1..4767d55f9 100644 --- a/docs/api_examples.md +++ b/docs/api_examples.md @@ -1,6 +1,6 @@ -Python API Examples -=================== - +--- +title: Python API examples +--- ### Hello World example code diff --git a/docs/architecture.md b/docs/architecture.md index 87a8812e2..ee587de4b 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -1,5 +1,6 @@ -Architecture -============ +--- +title: Architecture +--- ## Object graph diff --git a/docs/automated_tests.md b/docs/automated_tests.md index 6b50c7d08..e1eb7311c 100644 --- a/docs/automated_tests.md +++ b/docs/automated_tests.md @@ -1,5 +1,6 @@ -Automated Tests -=============== +--- +title: Automated tests +--- Both unit tests and functional tests are used to verify pyOCD. diff --git a/docs/command_reference.md b/docs/command_reference.md index 7c615ccd3..9a7020e2b 100644 --- a/docs/command_reference.md +++ b/docs/command_reference.md @@ -1,8 +1,6 @@ --- title: Command reference --- -Command reference -================= PyOCD has a simple command processor that is accessible from the console via the commander subcommand, or from gdb as remote monitor commands. @@ -23,7 +21,7 @@ precedence even when it is a prefix of multiple other commands. All commands ------------ - +
diff --git a/docs/configuration.md b/docs/configuration.md index 99a94e125..009cce77e 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,5 +1,6 @@ -Configuration -============= +--- +title: Configuration +--- This guide documents how to configure pyOCD. diff --git a/docs/configuring_logging.md b/docs/configuring_logging.md index cf9ed480e..b7b40546f 100644 --- a/docs/configuring_logging.md +++ b/docs/configuring_logging.md @@ -1,10 +1,11 @@ -Configuring Logging -=================== +--- +title: Configuring logging +--- ## Overview pyOCD provides extensive control over log output. It uses the standard Python -[logging](https://docs.python.org/3.8/library/logging.html) package for all its logging. There are several ways +[logging](https://docs.python.org/3/library/logging.html) package for all its logging. There are several ways to set log levels, both globally and with precise control. - Verbosity controls @@ -121,7 +122,7 @@ package supports loading a configuration dictionary to control almost all aspect The `logging` session option is used to specify the logging configuration. It can be set to either a logging configuration dictionary or the path to a YAML file containing a configuration dictionary. Usually it is easiest to include the configuration directly in a `pyocd.yaml` config file. See the -[configuration documentation](configuration.md) for more on config files. The file path is most +[configuration documentation]({% link _docs/configuration.md %}) for more on config files. The file path is most useful when passing the `logging` option via the command line, since you can't provide a dictionary this way. @@ -152,9 +153,9 @@ for all of pyOCD. ### Full control The full schema for the logging configuration dictionary is documented in the -[logging.config module documentation](https://docs.python.org/3.8/library/logging.config.html#logging-config-dictschema). 
+[logging.config module documentation](https://docs.python.org/3/library/logging.config.html#logging-config-dictschema). The logging module's -[advanced tutorial](https://docs.python.org/3.8/howto/logging.html#logging-advanced-tutorial) +[advanced tutorial](https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial) has a good introduction to the features and log output flow, so you can better understand the configuration schema. diff --git a/docs/developers_guide.md b/docs/developers_guide.md index df8fdfb3f..1422c0aab 100644 --- a/docs/developers_guide.md +++ b/docs/developers_guide.md @@ -1,5 +1,6 @@ -pyOCD Developers' Guide -======================= +--- +title: Developers' Guide +--- ## Setup @@ -47,8 +48,8 @@ $ pip install -e .[test] **Step 3.** Develop -See the [porting guide](adding_new_targets.md) for how to add new devices. Of course, we welcome -all improvements and changes. See the [contributor statement](../CONTRIBUTING.md) for some guidelines. +See the [porting guide]({% link _docs/adding_new_targets.md %}) for how to add new devices. Of course, we welcome +all improvements and changes. See the [contributor statement](https://github.com/pyocd/pyOCD/blob/main/CONTRIBUTING.md) for some guidelines. Normally you should work from the `develop` branch. See the [branch policy](#branch-configuration-policy) below for more information about branches. diff --git a/docs/gdb_setup.md b/docs/gdb_setup.md new file mode 100644 index 000000000..a230048ab --- /dev/null +++ b/docs/gdb_setup.md @@ -0,0 +1,54 @@ +--- +title: GDB setup +--- + +Most users will want to set up the GNU GDB debugger in order to use pyOCD for debugging applications. Either +the command-line GDB or a full IDE can be used. 
+ + +Standalone GDB server +--------------------- + +After you install pyOCD via pip or setup.py, you will be able to execute the following in order to +start a GDB server powered by pyOCD: + +``` +$ pyocd gdbserver +``` + +You can get additional help by running ``pyocd gdbserver --help``. + +Example command line GDB session showing how to connect to a running `pyocd gdbserver` and load +firmware: + +``` +$ arm-none-eabi-gdb application.elf + + target remote localhost:3333 + load + monitor reset +``` + +The `pyocd gdbserver` subcommand is also usable as a drop in place replacement for OpenOCD in +existing setups. The primary difference is the set of gdb monitor commands. + + +Recommended GDB and IDE setup +----------------------------- + +The recommended toolchain for embedded Arm Cortex-M development is [GNU Arm +Embedded](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm), +provided by Arm. GDB is included with this toolchain. + +For [Visual Studio Code](https://code.visualstudio.com), the +[cortex-debug](https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug) plugin is available +that supports pyOCD. + +The GDB server also works well with [Eclipse Embedded CDT](https://projects.eclipse.org/projects/iot.embed-cdt), +previously known as [GNU MCU/ARM Eclipse](https://gnu-mcu-eclipse.github.io/). It fully supports pyOCD with +an included pyOCD debugging plugin. + +To view peripheral register values either the built-in Eclipse Embedded CDT register view can be used, or +the Embedded System Register Viewer plugin can be installed. The latter can be installed from inside +Eclipse adding `http://embsysregview.sourceforge.net/update` as a software update server URL +under the "Help -> Install New Software..." menu item. 
diff --git a/docs/how_to_build.md b/docs/how_to_build.md index a93a2caa4..accc5d942 100644 --- a/docs/how_to_build.md +++ b/docs/how_to_build.md @@ -1,5 +1,6 @@ -How to Build pyOCD into Single Executable File -============================================== +--- +title: How to build binary +--- This manual provides a step-by-step guide on how to build a single file executable using diff --git a/docs/installing.md b/docs/installing.md new file mode 100644 index 000000000..ae8f25d1c --- /dev/null +++ b/docs/installing.md @@ -0,0 +1,83 @@ +--- +title: Installing +--- + +The latest stable version of pyOCD may be installed via [pip](https://pip.pypa.io/en/stable/index.html) +as follows: + +``` +$ python3 -mpip install -U pyocd +``` + +_Note: depending on your system, you may need to use `python` instead of `python3`._ + +The latest pyOCD package is available [on PyPI](https://pypi.python.org/pypi/pyOCD/) as well as +[on GitHub](https://github.com/pyocd/pyOCD/releases). + +To install the latest prerelease version from the HEAD of the master branch, you can do +the following: + +``` +$ python3 -mpip install --pre -U git+https://github.com/pyocd/pyOCD.git +``` + +You can also install directly from the source by cloning the git repository and running: + +``` +$ python3 setup.py install +``` + +Note that, depending on your operating system, you may run into permissions issues running these commands. +You have a few options here: + +1. Under Linux, run with `sudo -H` to install pyOCD and dependencies globally. On macOS, installing with sudo + should never be required, although sometimes permissions can become modified such that installing without + using sudo fails. +3. Specify the `--user` option to install local to your user account. +4. Run the command in a [virtualenv](https://virtualenv.pypa.io/en/latest/) + local to a specific project working set. 
+ +For notes about installing and using on non-x86 systems such as Raspberry Pi, see the +[relevant documentation]({% link _docs/installing_on_non_x86.md %}). + + +libusb installation +------------------- + +[pyusb](https://github.com/pyusb/pyusb) and its backend library [libusb](https://libusb.info/) are +dependencies on all supported operating systems. pyusb is a regular Python package and will be +installed along with pyOCD. However, libusb is a binary shared library that does not get installed +automatically via pip dependency management. + +How to install libusb depends on your OS: + +- macOS: use Homebrew: `brew install libusb` +- Linux: should already be installed. +- Windows: download libusb from [libusb.info](https://libusb.info/) and place the .dll file in your Python + installation folder next to python.exe. Make sure to use the same 32- or 64-bit architecture as + your Python installation. The latest release is [available on GitHub](https://github.com/libusb/libusb/releases); + download the .7z archive under Assets. Use the library from the VS2019 folder in the archive. + + +udev rules on Linux +------------------- + +On Linux, particularly Ubuntu 16.04+, you must configure udev rules to allow pyOCD to access debug +probes from user space. Otherwise you will need to run pyOCD as root, using sudo, which is very +highly discouraged. (You should _never_ run pyOCD as root on any OS.) + +To help with this, example udev rules files are included with pyOCD in the +[udev](https://github.com/pyocd/pyOCD/tree/master/udev) folder. The +[readme](https://github.com/pyocd/pyOCD/tree/master/udev/README.md) in this folder has detailed +instructions. + + +Target support +-------------- + +See the [target support documentation]({% link _docs/target_support.md %}) for information on how to check if +the MCU(s) you are using have built-in support, and how to install support for additional MCUs via +CMSIS-Packs. 
+ + + diff --git a/docs/installing_on_non_x86.md b/docs/installing_on_non_x86.md index 8cd5de8f2..097e3a874 100644 --- a/docs/installing_on_non_x86.md +++ b/docs/installing_on_non_x86.md @@ -1,5 +1,6 @@ -Installing on non-x86 platforms -=============================== +--- +title: Installing on non-x86 platforms +--- pyOCD itself is pure Python and should run on any platform with a modern Python installation. However, it has several dependencies with binary backends that cause trouble on non-x86 platforms or @@ -11,7 +12,7 @@ single-board computers. ## cmsis-pack-manager The main dependency that causes trouble is -[cmsis-pack-manager](https://github.com/armmbed/cmsis-pack-manager/). It has a backend written in +[cmsis-pack-manager](https://github.com/pyocd/cmsis-pack-manager/). It has a backend written in the Rust language to greatly improve performance. Unfortunately, wheels are not available for non-x86 systems. And, worse, the Rust compiler runs out of memory and dies when attempting to build on small platforms such as the Raspberry Pi. (Cross-compilation may be an option but has not been @@ -21,7 +22,7 @@ The good news is that cmsis-pack-manager is optional for pyOCD. Although it is l dependency in `setup.py` (so the vast majority of users benefit from it), pyOCD can run without it with minimal loss of functionality. The `pack` subcommand is disabled, which removes automatic CMSIS-Pack download and management. But you can still use CMSIS-Packs manually with the `--pack` -option, as described in [Target support](target_support.md). +option, as described in [Target support]({% link _docs/target_support.md %}). To install pyOCD on such a system, you need to use the `pip install --no-deps` option. This will install the core pyOCD package only, so all additional requirements must be manually installed. 
diff --git a/docs/multicore_debug.md b/docs/multicore_debug.md index 2c6fd0948..20cbb43ce 100644 --- a/docs/multicore_debug.md +++ b/docs/multicore_debug.md @@ -1,5 +1,6 @@ -Multicore Debug -=============== +--- +title: Multicore debug +--- pyOCD supports debugging multicore devices. It does this by serving one gdb server per core, to which you connect independant gdb instances. This is the most reliable method of debugging multicore diff --git a/docs/options.md b/docs/options.md index fa3d4703e..27c8f11de 100644 --- a/docs/options.md +++ b/docs/options.md @@ -1,5 +1,6 @@ -Session options list -================= +--- +title: Session options list +--- _**Note:** The names of these options are expected to change before the 1.0 release of pyOCD, so they will be better normalized and grouped._ @@ -10,7 +11,7 @@ Note that the `project_dir`, `no_config`, and `config` options must come from ei argument or the _options_ parameter passed to the `Session` constructor due to how early they are processed. The consequence of this is that these options cannot be set in a YAML config file. -
CommandArgumentsDescription
+
@@ -227,7 +228,7 @@ with new data. diff --git a/docs/python_api.md b/docs/python_api.md index e4b685857..f0cfac451 100644 --- a/docs/python_api.md +++ b/docs/python_api.md @@ -1,5 +1,6 @@ -Introduction to pyOCD API -========================= +--- +title: Introduction to the API +--- Using pyOCD's Python API, you have extreme flexibility and precise control, and can do anything SWD allows, at the expense of more complexity compared to `pyocd commander`. Using pyOCD like this is @@ -8,7 +9,7 @@ board bringup, or automated testing. This document assumes familiarity with the Arm CoreSight debug architecture. -See the [architecture](architecture.md) documentation for an overview of the classes and how +See the [architecture]({% link _docs/architecture.md %}) documentation for an overview of the classes and how they are connected. ## Connecting diff --git a/docs/remote_probe_access.md b/docs/remote_probe_access.md index a92f418dc..076f13233 100644 --- a/docs/remote_probe_access.md +++ b/docs/remote_probe_access.md @@ -1,5 +1,6 @@ -Remote probe access -=================== +--- +title: Remote probe access +--- PyOCD provides a server and client for sharing and accessing debug probes across a TCP/IP network connection. This can be used to provide shared debug access for multiple developers, to @@ -62,7 +63,7 @@ would be `--uid=remote:localhost:1234`. **Important:** Currently you must always specify the target type for the remote device, even in cases where the target type is automatically detected when you use the probe directly. To do this, -pass the `--target` argument followed by the target type. See [Target support](target_support.md) +pass the `--target` argument followed by the target type. See [Target support]({% link _docs/target_support.md %}) for more information about target types. Note that remote probes will not appear in the list when you run `pyocd list --probes`. 
diff --git a/docs/remote_probe_protocol.md b/docs/remote_probe_protocol.md index 092274fc6..521d5ba04 100644 --- a/docs/remote_probe_protocol.md +++ b/docs/remote_probe_protocol.md @@ -1,5 +1,6 @@ -Remote Probe Protocol -===================== +--- +title: Remote probe protocol +--- PyOCD provides a server and client for sharing and accessing debug probes across a TCP/IP network connection. This document describes the protocol design, available commands, and semantics. diff --git a/docs/security.md b/docs/security.md index 15e6a2cba..c45fc8c87 100644 --- a/docs/security.md +++ b/docs/security.md @@ -1,4 +1,6 @@ -# Security and Protection Features +--- +title: Security and protection +--- Many targets support some way of disabling JTAG/SWD access or protecting the flash from read-back or write. In the text below, this is called a "locked" target, though each silicon vendor tends to have their @@ -47,7 +49,7 @@ features, pyOCD can perform this unlock procedure for you. ***WARNING:** Unlocking a locked device will erase all data on the chip!* -You can add the option `auto_unlock` to your [configuration](/configuration.md): +You can add the option `auto_unlock` to your [configuration]({% link _docs/configuration.md %}): ```bash (venv) ~/devel/contrib/pyocd$ pyocd commander --target nrf52 -O auto_unlock diff --git a/docs/target_support.md b/docs/target_support.md index f2514f0cc..0c18aaa49 100644 --- a/docs/target_support.md +++ b/docs/target_support.md @@ -1,5 +1,6 @@ -Target support -============== +--- +title: Target support +--- Through both built-in support and CMSIS-Packs, pyOCD supports nearly every Cortex-M MCU that is available on the market. @@ -48,7 +49,7 @@ type name. This argument must be passed every time you run `pyocd` with a subcom to the target. Another method is to set the `target_override` session option in a `pyocd.yaml` configuration file. 
The -[configuration file documentation](configuration.md) describes how to do this for a specific debug +[configuration file documentation]({% link _docs/configuration.md %}) describes how to do this for a specific debug probe instead of globally. @@ -172,7 +173,7 @@ passed for every invocation in addition to the other arguments. For instance, to you might execute `pyocd gdbserver --pack=Keil.STM32L4xx_DFP.2.2.0.pack`. Note that you can pass multiple `--pack` arguments to pyOCD, which might be useful in a scripted execution of pyOCD. -For a more permanent solution, use a [`pyocd.yaml` configuration file](configuration.md). In the +For a more permanent solution, use a [`pyocd.yaml` configuration file]({% link _docs/configuration.md %}). In the config file, set the `pack` session option to either a single .pack file path or a list of paths. Now when you run the `pyocd` tool, it will automatically pick up the pack file(s) to use. diff --git a/docs/terminology.md b/docs/terminology.md index c85c6173a..634fbf573 100644 --- a/docs/terminology.md +++ b/docs/terminology.md @@ -1,5 +1,6 @@ -Terminology -=========== +--- +title: Terminology +--- These are the key terms used by pyOCD and its documentation. diff --git a/docs/user_scripts.md b/docs/user_scripts.md index 9d8f9d57f..2ec04a2fa 100644 --- a/docs/user_scripts.md +++ b/docs/user_scripts.md @@ -1,5 +1,6 @@ -User Scripts -============ +--- +title: User Scripts +--- ## Introduction From 927922a90adb5e00f8adffbdba3046bc38a65eda Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 6 Oct 2021 14:20:55 -0500 Subject: [PATCH 009/123] Readme: update with new community resources. 
--- README.md | 102 ++++++++++++++++++++++-------------------------------- 1 file changed, 41 insertions(+), 61 deletions(-) diff --git a/README.md b/README.md index 999d06f26..315c46eab 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,15 @@ pyOCD ===== +[\[pyocd.io\]](https://pyocd.io/) [\[Docs\]](https://pyocd.io/docs) [\[Slack\]](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg) [\[Mailing list\]](https://groups.google.com/g/pyocd) +
Option NameTypeDefaultDescription
No default Either a dictionary with logging configuration, or a path to a separate yaml logging configuration -file. See the logging configuration documentation for details of how to +file. See the logging configuration documentation for details of how to use this option.
+ + + + + diff --git a/pyocd/core/options.py b/pyocd/core/options.py index e294243ee..0392117c6 100644 --- a/pyocd/core/options.py +++ b/pyocd/core/options.py @@ -133,6 +133,10 @@ "Whether to enable SWV printf output over the semihosting console. Requires the " "swv_system_clock option to be set. The SWO baud rate can be controlled with the " "swv_clock option."), + OptionInfo('debug.status_fault_retry_timeout', float, 1.0, + "Duration in seconds that a failed target status check will be retried before an error is raised. " + "Only applies while the target is running after a resume operation in the debugger and pyOCD is waiting " + "for it to halt again."), OptionInfo('gdbserver_port', int, 3333, "Base TCP port for the gdbserver."), OptionInfo('persist', bool, False, diff --git a/pyocd/gdbserver/gdbserver.py b/pyocd/gdbserver/gdbserver.py index 977fd769d..35a916ee8 100644 --- a/pyocd/gdbserver/gdbserver.py +++ b/pyocd/gdbserver/gdbserver.py @@ -23,6 +23,7 @@ import six import io from xml.etree.ElementTree import (Element, SubElement, tostring) +from typing import (Dict, Optional) from ..core import exceptions from ..core.target import Target @@ -31,6 +32,7 @@ from ..utility.conversion import (hex_to_byte_list, hex_encode, hex_decode, hex8_to_u32le) from ..utility.compatibility import (iter_single_bytes, to_bytes_safe, to_str_safe) from ..utility.server import StreamServer +from ..utility.timeout import Timeout from ..trace.swv import SWVReader from ..utility.sockets import ListenerSocket from .syscall import GDBSyscallIOHandler @@ -39,7 +41,6 @@ from .symbols import GDBSymbolProvider from ..rtos import RTOS from . import signals -from . 
import gdbserver_commands # lgtm[py/unused-import] from .packet_io import ( checksum, ConnectionClosedException, @@ -48,6 +49,9 @@ from ..commands.execution_context import CommandExecutionContext from ..commands.commander import ToolExitException +# Import this module, even though it's not used below, to ensure the gdbserver commands get loaded. +from . import gdbserver_commands # noqa + LOG = logging.getLogger(__name__) TRACE_MEM = LOG.getChild("trace.mem") @@ -318,17 +322,17 @@ def run(self): args=(self.GDBSERVER_START_LISTENING_EVENT, self)) notify_timer.start() - while not self.shutdown_event.isSet() and not self.detach_event.isSet(): + while not self.shutdown_event.is_set() and not self.detach_event.is_set(): connected = self.abstract_socket.connect() if connected != None: self.packet_io = GDBServerPacketIOThread(self.abstract_socket) break - if self.shutdown_event.isSet(): + if self.shutdown_event.is_set(): self._cleanup() return - if self.detach_event.isSet(): + if self.detach_event.is_set(): continue # Make sure the target is halted. Otherwise gdb gets easily confused. 
@@ -345,14 +349,14 @@ def run(self): def _run_connection(self): while True: try: - if self.shutdown_event.isSet(): + if self.shutdown_event.is_set(): self._cleanup() return - if self.detach_event.isSet(): + if self.detach_event.is_set(): break - if self.packet_io.interrupt_event.isSet(): + if self.packet_io.interrupt_event.is_set(): if self.non_stop: self.target.halt() self.is_target_running = False @@ -376,18 +380,18 @@ def _run_connection(self): except ConnectionClosedException: break - if self.shutdown_event.isSet(): + if self.shutdown_event.is_set(): self._cleanup() return - if self.detach_event.isSet(): + if self.detach_event.is_set(): break if self.non_stop and packet is None: sleep(0.1) continue - if len(packet) != 0: + if packet is not None and len(packet) != 0: # decode and prepare resp resp, detach = self.handle_message(packet) @@ -572,6 +576,8 @@ def _get_resume_step_addr(self, data): # Csig[;addr] elif data[0:1] in (b'C', b'S'): addr = int(data[1:].split(b';')[1], base=16) + else: + raise exceptions.DebugError("invalid step address received from gdb") return addr def resume(self, data): @@ -586,8 +592,12 @@ def resume(self, data): val = b'' - while True: - if self.shutdown_event.isSet(): + # Timeout used only if the target starts returning faults. The is_running property of this timeout + # also serves as a flag that a fault occurred and we're attempting to retry. + fault_retry_timeout = Timeout(self.session.options.get('debug.status_fault_retry_timeout')) + + while fault_retry_timeout.check(): + if self.shutdown_event.is_set(): self.packet_io.interrupt_event.clear() return self.create_rsp_packet(val) @@ -598,14 +608,32 @@ def resume(self, data): self.lock.acquire() LOG.debug("receive CTRL-C") self.packet_io.interrupt_event.clear() - self.target.halt() - val = self.get_t_response(forceSignal=signals.SIGINT) + + # Be careful about reading the target state. If we previously got a fault (the timeout + # is running) then ignore the error. 
In all cases we still return SIGINT. + try: + self.target.halt() + val = self.get_t_response(forceSignal=signals.SIGINT) + except exceptions.TransferError as e: + # Note: if the target is not actually halted, gdb can get confused from this point on. + # But there's not much we can do if we're getting faults attempting to control it. + if not fault_retry_timeout.is_running: + LOG.error('Exception reading target status: %s', e, exc_info=self.session.log_tracebacks) + val = ('S%02x' % signals.SIGINT).encode() break self.lock.acquire() try: - if self.target.get_state() == Target.State.HALTED: + state = self.target.get_state() + + # If we were able to successfully read the target state after previously receiving a fault, + # then clear the timeout. + if fault_retry_timeout.is_running: + LOG.info("Target control reestablished.") + fault_retry_timeout.clear() + + if state == Target.State.HALTED: # Handle semihosting if self.enable_semihosting: was_semihost = self.semihost.check_and_handle_semihost_request() @@ -618,14 +646,28 @@ def resume(self, data): LOG.debug("state halted; pc=0x%08x", pc) val = self.get_t_response() break + except exceptions.TransferError as e: + # If we get any sort of transfer error or fault while checking target status, then start + # a timeout running. Upon a later successful status check, the timeout is cleared. In the event + # that the timeout expires, this loop is exited and an error raised to gdb. + if not fault_retry_timeout.is_running: + LOG.warning("Transfer error while checking target status; retrying: %s", e, + exc_info=self.session.log_tracebacks) + fault_retry_timeout.start() except exceptions.Error as e: try: self.target.halt() except exceptions.Error: pass LOG.warning('Exception while target was running: %s', e, exc_info=self.session.log_tracebacks) + # This exception was not a transfer error, so reading the target state should be ok. 
val = ('S%02x' % self.target_facade.get_signal_value()).encode() break + + # Check if we exited the above loop due to a timeout after a fault. + if fault_retry_timeout.did_time_out: + LOG.error("Timed out while attempting to reestablish control over target.") + val = ('S%02x' % signals.SIGSEGV).encode() return self.create_rsp_packet(val) @@ -686,14 +728,16 @@ def v_cont(self, cmd): if not ops: return self.create_rsp_packet(b"OK") + # Maps the thread unique ID to an action char (byte). + thread_actions: Dict[int, Optional[bytes]] = {} + if self.is_threading_enabled(): - thread_actions = {} threads = self.thread_provider.get_threads() for k in threads: thread_actions[k.unique_id] = None currentThread = self.thread_provider.get_current_thread_id() else: - thread_actions = { 1 : None } # our only thread + thread_actions[1] = None # our only thread currentThread = 1 default_action = None @@ -745,7 +789,7 @@ def v_cont(self, cmd): self.is_target_running = False self.send_stop_notification(forceSignal=0) else: - LOG.error("Unsupported v_cont action '%s'" % thread_actions[1:2]) + LOG.error("Unsupported v_cont action '%s'" % thread_actions[1]) def flash_op(self, data): ops = data.split(b':')[0] From e96350b0f9cf67e72f67d7c0c6afc785e93f274e Mon Sep 17 00:00:00 2001 From: Tim Gates Date: Thu, 25 Nov 2021 09:12:54 +1100 Subject: [PATCH 028/123] docs: Fix a few typos (#1245) There are small typos in: - pyocd/coresight/sdc600.py - pyocd/probe/debug_probe.py - pyocd/probe/picoprobe.py - test/gdb_test_script.py Fixes: - Should read `retrieving` rather than `retreiving`. - Should read `repeatedly` rather than `repreatedly`. - Should read `passing` rather than `pasing`. - Should read `lengths` rather than `leghts`. - Should read `defined` rather than `defiend`. 
--- pyocd/coresight/sdc600.py | 2 +- pyocd/probe/debug_probe.py | 4 ++-- pyocd/probe/picoprobe.py | 2 +- test/gdb_test_script.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyocd/coresight/sdc600.py b/pyocd/coresight/sdc600.py index 4cde6014c..495fe9df1 100644 --- a/pyocd/coresight/sdc600.py +++ b/pyocd/coresight/sdc600.py @@ -57,7 +57,7 @@ class LinkPhase(Enum): """! @brief COM Port link phases.""" ## Hardware-defined link phase. PHASE1 = 1 - ## Software-defiend link phase. + ## Software-defined link phase. PHASE2 = 2 class Register: diff --git a/pyocd/probe/debug_probe.py b/pyocd/probe/debug_probe.py index de827ed6a..e95fb96c7 100644 --- a/pyocd/probe/debug_probe.py +++ b/pyocd/probe/debug_probe.py @@ -116,7 +116,7 @@ def get_all_connected_probes(cls, unique_id=None, is_explicit=False): @param cls The class instance. @param unique_id String. Optional partial unique ID value used to filter available probes. May be used by the - probe to optimize retreiving the probe list; there is no requirement to filter the results. + probe to optimize retrieving the probe list; there is no requirement to filter the results. @param is_explicit Boolean. Whether the probe type was explicitly specified in the unique ID. This can be used, for instance, to specially interpret the unique ID as an IP address or domain name when the probe class was specifically requested but not for general lists @@ -315,7 +315,7 @@ def assert_reset(self, asserted): """! @brief Assert or de-assert target's nRESET signal. Because nRESET is negative logic and usually open drain, passing True will drive it low, and - pasing False will stop driving so nRESET will be pulled up. + passing False will stop driving so nRESET will be pulled up. 
""" raise NotImplementedError() diff --git a/pyocd/probe/picoprobe.py b/pyocd/probe/picoprobe.py index 87f29898a..1b729dea4 100644 --- a/pyocd/probe/picoprobe.py +++ b/pyocd/probe/picoprobe.py @@ -439,7 +439,7 @@ def swd_sequence(self, sequences): @return A 2-tuple of the response status, and a sequence of bytes objects, one for each input sequence. The length of the bytes object is ( + 7) / 8. Bits are in LSB first order. """ - # Init leghts to pack and cmd queue + # Init lengths to pack and cmd queue reads_lengths = [] self._link.start_queue() # Take each sequence 'seq' in sequences diff --git a/test/gdb_test_script.py b/test/gdb_test_script.py index d2a567ebc..77084d3f4 100644 --- a/test/gdb_test_script.py +++ b/test/gdb_test_script.py @@ -498,7 +498,7 @@ def interrupt_task(): gdb.post_event(interrupt_task) -# Run the main test by repreatedly calling the generator +# Run the main test by repeatedly calling the generator # This must only run on GDB's queue def run_generator(event): global ignore_events From 87671193ae3fa00fa8b37db044f80fe2a1d1c4ae Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 23 Oct 2021 16:38:33 -0500 Subject: [PATCH 029/123] editorconfig: enable trim_trailing_whitespace, adjust file settings. --- .editorconfig | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/.editorconfig b/.editorconfig index b33dd8777..2285061ca 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,14 +1,27 @@ +# EditorConfig: https://editorconfig.org/ + # top-most EditorConfig file root = true # Unix-style newlines with a newline ending every file +# 4 space indentation for all files +# No trailing whitespace [*] -end_of_line = lf -insert_final_newline = true charset = utf-8 - -# 4 space indentation for Python -[*.py] indent_style = space indent_size = 4 -trim_trailing_whitespace = false +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +max_line_length = 120 + +# Exceptions to the rule... 
+ +[Makefile] +indent_style = tab + +[*.bat] +end_of_line = crlf + +[*.{yaml,yml,xml,svd}] +indent_size = 2 From 7db4eeff6aac92fec0c50cba5a78f3bc47962e17 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 26 Nov 2021 12:22:28 -0600 Subject: [PATCH 030/123] cleanup: remove trailing whitespace from all source files. --- pyocd/__main__.py | 24 +- pyocd/board/board.py | 36 +-- pyocd/board/mbed_board.py | 6 +- pyocd/cache/memory.py | 14 +- pyocd/cache/register.py | 8 +- pyocd/commands/base.py | 22 +- pyocd/commands/commander.py | 46 ++-- pyocd/commands/commands.py | 142 +++++----- pyocd/commands/execution_context.py | 92 +++---- pyocd/commands/repl.py | 14 +- pyocd/commands/values.py | 18 +- pyocd/core/core_registers.py | 44 +-- pyocd/core/exceptions.py | 20 +- pyocd/core/helpers.py | 40 +-- pyocd/core/memory_interface.py | 10 +- pyocd/core/memory_map.py | 146 +++++----- pyocd/core/options_manager.py | 30 +-- pyocd/core/plugin.py | 34 +-- pyocd/core/session.py | 136 +++++----- pyocd/core/soc_target.py | 32 +-- pyocd/core/target.py | 22 +- pyocd/core/target_delegate.py | 14 +- pyocd/coresight/ap.py | 254 +++++++++--------- pyocd/coresight/component.py | 14 +- pyocd/coresight/component_ids.py | 12 +- pyocd/coresight/core_ids.py | 2 +- pyocd/coresight/coresight_target.py | 40 +-- pyocd/coresight/cortex_m.py | 192 ++++++------- pyocd/coresight/cortex_m_core_registers.py | 10 +- pyocd/coresight/cortex_m_v8m.py | 38 +-- pyocd/coresight/dap.py | 176 ++++++------ pyocd/coresight/discovery.py | 52 ++-- pyocd/coresight/dwt.py | 34 +-- pyocd/coresight/fpb.py | 8 +- pyocd/coresight/generic_mem_ap.py | 8 +- pyocd/coresight/gpr.py | 22 +- pyocd/coresight/itm.py | 10 +- pyocd/coresight/rom_table.py | 112 ++++---- pyocd/coresight/sdc600.py | 102 +++---- pyocd/coresight/tpiu.py | 14 +- pyocd/debug/breakpoints/manager.py | 10 +- pyocd/debug/context.py | 36 +-- pyocd/debug/elf/decoder.py | 4 +- pyocd/debug/elf/elf.py | 20 +- pyocd/debug/semihost.py | 34 +-- pyocd/debug/svd/loader.py | 2 
+- pyocd/flash/builder.py | 112 ++++---- pyocd/flash/eraser.py | 38 +-- pyocd/flash/file_programmer.py | 38 +-- pyocd/flash/flash.py | 66 ++--- pyocd/flash/loader.py | 62 ++--- pyocd/gdbserver/context_facade.py | 46 ++-- pyocd/gdbserver/gdbserver.py | 44 +-- pyocd/gdbserver/gdbserver_commands.py | 12 +- pyocd/gdbserver/packet_io.py | 4 +- pyocd/probe/aggregator.py | 14 +- pyocd/probe/cmsis_dap_probe.py | 90 +++---- pyocd/probe/debug_probe.py | 122 ++++----- pyocd/probe/jlink_probe.py | 58 ++-- pyocd/probe/pydapaccess/cmsis_dap_core.py | 20 +- pyocd/probe/pydapaccess/dap_access_api.py | 34 +-- .../probe/pydapaccess/dap_access_cmsis_dap.py | 54 ++-- pyocd/probe/pydapaccess/dap_settings.py | 2 +- pyocd/probe/pydapaccess/interface/common.py | 12 +- .../pydapaccess/interface/hidapi_backend.py | 6 +- .../probe/pydapaccess/interface/interface.py | 2 +- .../pydapaccess/interface/pyusb_backend.py | 22 +- .../pydapaccess/interface/pyusb_v2_backend.py | 24 +- pyocd/probe/shared_probe_proxy.py | 14 +- pyocd/probe/stlink/constants.py | 16 +- pyocd/probe/stlink/stlink.py | 88 +++--- pyocd/probe/stlink/usb.py | 38 +-- pyocd/probe/stlink_probe.py | 58 ++-- pyocd/probe/swj.py | 86 +++--- pyocd/probe/tcp_client_probe.py | 88 +++--- pyocd/probe/tcp_probe_server.py | 118 ++++---- pyocd/rtos/argon.py | 30 +-- pyocd/rtos/common.py | 2 +- pyocd/rtos/freertos.py | 8 +- pyocd/rtos/rtx5.py | 18 +- pyocd/rtos/zephyr.py | 8 +- pyocd/subcommands/base.py | 20 +- pyocd/subcommands/commander_cmd.py | 6 +- pyocd/subcommands/erase_cmd.py | 14 +- pyocd/subcommands/gdbserver_cmd.py | 28 +- pyocd/subcommands/json_cmd.py | 18 +- pyocd/subcommands/list_cmd.py | 20 +- pyocd/subcommands/load_cmd.py | 16 +- pyocd/subcommands/pack_cmd.py | 86 +++--- pyocd/subcommands/reset_cmd.py | 14 +- pyocd/subcommands/server_cmd.py | 16 +- .../target/builtin/cypress/target_CY8C64xx.py | 8 +- .../target/builtin/cypress/target_CY8C6xx7.py | 2 +- pyocd/target/builtin/target_CC3220SF.py | 4 +- 
pyocd/target/builtin/target_HC32L07x.py | 2 +- pyocd/target/builtin/target_HC32L110.py | 4 +- pyocd/target/builtin/target_HC32L13x.py | 2 +- pyocd/target/builtin/target_HC32L19x.py | 2 +- .../target/builtin/target_LPC1114FN28_102.py | 2 +- .../builtin/target_LPC11U24FBD64_401.py | 2 +- pyocd/target/builtin/target_LPC1768.py | 10 +- pyocd/target/builtin/target_LPC4088FBD144.py | 2 +- pyocd/target/builtin/target_LPC4330.py | 2 +- .../target/builtin/target_LPC54114J256BD64.py | 2 +- .../builtin/target_LPC54608J512ET180.py | 2 +- .../target/builtin/target_LPC824M201JHI33.py | 2 +- pyocd/target/builtin/target_MAX32600.py | 2 +- pyocd/target/builtin/target_MAX32625.py | 2 +- .../target/builtin/target_MIMXRT1015xxxxx.py | 2 +- .../target/builtin/target_MIMXRT1176xxxxx.py | 6 +- pyocd/target/builtin/target_MPS3_AN522.py | 2 +- pyocd/target/builtin/target_MPS3_AN540.py | 2 +- pyocd/target/builtin/target_RP2040.py | 28 +- pyocd/target/builtin/target_RTL8195AM.py | 2 +- pyocd/target/builtin/target_STM32F051T8.py | 2 +- pyocd/target/builtin/target_STM32F103RC.py | 2 +- pyocd/target/builtin/target_STM32F439xx.py | 6 +- pyocd/target/builtin/target_STM32L031x6.py | 2 +- pyocd/target/builtin/target_STM32L432xx.py | 2 +- pyocd/target/builtin/target_STM32L475xx.py | 2 +- pyocd/target/builtin/target_lpc800.py | 2 +- pyocd/target/builtin/target_musca_a1.py | 4 +- pyocd/target/builtin/target_musca_b1.py | 16 +- pyocd/target/builtin/target_nRF51822_xxAA.py | 2 +- pyocd/target/builtin/target_ncs36510.py | 2 +- pyocd/target/builtin/target_w7500.py | 2 +- pyocd/target/family/flash_kinetis.py | 10 +- pyocd/target/family/target_lpc5500.py | 42 +-- pyocd/target/pack/cmsis_pack.py | 150 +++++------ pyocd/target/pack/flash_algo.py | 12 +- pyocd/target/pack/pack_target.py | 4 +- pyocd/tools/gdb_server.py | 6 +- pyocd/tools/lists.py | 36 +-- pyocd/trace/events.py | 48 ++-- pyocd/trace/sink.py | 18 +- pyocd/trace/swo.py | 44 +-- pyocd/trace/swv.py | 46 ++-- pyocd/utility/autoflush.py | 10 +- 
pyocd/utility/cmdline.py | 8 +- pyocd/utility/columns.py | 18 +- pyocd/utility/concurrency.py | 2 +- pyocd/utility/conversion.py | 10 +- pyocd/utility/graph.py | 38 +-- pyocd/utility/hex.py | 16 +- pyocd/utility/mask.py | 20 +- pyocd/utility/notification.py | 30 +-- pyocd/utility/progress.py | 24 +- pyocd/utility/sequencer.py | 70 ++--- pyocd/utility/server.py | 12 +- pyocd/utility/sockets.py | 12 +- pyocd/utility/strings.py | 14 +- pyocd/utility/timeout.py | 26 +- src/gdb_test_program/linker_script.ld | 4 +- src/gdb_test_program/main.c | 10 +- test/automated_test.py | 50 ++-- test/basic_test.py | 6 +- test/commander_test.py | 10 +- test/commands_test.py | 14 +- test/concurrency_test.py | 30 +-- test/cortex_test.py | 20 +- test/debug_context_test.py | 22 +- test/flash_loader_test.py | 22 +- test/flash_test.py | 8 +- test/gdb_test.py | 2 +- test/gdb_test_script.py | 2 +- test/import_all.py | 6 +- test/probeserver_test.py | 6 +- test/speed_test.py | 20 +- test/test_util.py | 12 +- test/unit/mockcore.py | 2 +- test/unit/test_cmdline.py | 16 +- test/unit/test_compatibility.py | 4 +- test/unit/test_conversion.py | 22 +- test/unit/test_exceptions.py | 8 +- test/unit/test_graph.py | 12 +- test/unit/test_memcache.py | 6 +- test/unit/test_memory_map.py | 66 ++--- test/unit/test_mockcore.py | 2 +- test/unit/test_notification.py | 4 +- test/unit/test_options_manager.py | 6 +- test/unit/test_pack.py | 24 +- test/unit/test_regcache.py | 18 +- test/unit/test_rom_table.py | 54 ++-- test/unit/test_sdc600.py | 2 +- test/unit/test_semihosting.py | 30 +-- test/unit/test_sequencer.py | 14 +- test/unit/test_strings_utility.py | 18 +- test/unit/test_timeout.py | 6 +- test/user_script_test.py | 6 +- 189 files changed, 2638 insertions(+), 2638 deletions(-) diff --git a/pyocd/__main__.py b/pyocd/__main__.py index 140252c8b..15fe028e8 100644 --- a/pyocd/__main__.py +++ b/pyocd/__main__.py @@ -51,9 +51,9 @@ class PyOCDTool(SubcommandBase): """! 
@brief Main class for the pyocd tool and subcommands. """ - + HELP = "PyOCD debug tools for Arm Cortex devices" - + ## List of subcommand classes. SUBCOMMANDS = [ CommanderSubcommand, @@ -67,7 +67,7 @@ class PyOCDTool(SubcommandBase): ServerSubcommand, RTTSubcommand, ] - + ## @brief Logging level names. LOG_LEVEL_NAMES = { 'debug': logging.DEBUG, @@ -91,21 +91,21 @@ def build_parser(self) -> argparse.ArgumentParser: parser.add_argument('-V', '--version', action='version', version=__version__) parser.add_argument('--help-options', action='store_true', help="Display available session options.") - + self.add_subcommands(parser) return parser def _setup_logging(self) -> None: """! @brief Configure the logging module. - + The quiet and verbose argument counts are used to set the log verbosity level. - + Log level for specific loggers are also configured here. """ level = max(1, self._args.command_class.DEFAULT_LOG_LEVEL + self._get_log_level_delta()) logging.basicConfig(level=level, format=LOG_FORMAT) - + # Handle settings for individual loggers from --log-level arguments. for logger_setting in self._args.log_level: try: @@ -131,7 +131,7 @@ def invoke(self) -> int: else: self._parser.print_help() return 0 - + def __call__(self, *args: Any, **kwds: Any) -> "PyOCDTool": """! @brief Hack to allow the root command object instance to be used as default command class.""" return self @@ -140,13 +140,13 @@ def run(self, args: Optional[Sequence[str]] = None) -> int: """! @brief Main entry point for command line processing.""" try: self._args = self._parser.parse_args(args) - + self._setup_logging() - + # Pass any options to DAPAccess. if hasattr(self._args, 'daparg'): DAPAccess.set_args(self._args.daparg) - + # Create an instance of the subcommand and invoke it. 
cmd = self._args.command_class(self._args) status = cmd.invoke() @@ -161,7 +161,7 @@ def run(self, args: Optional[Sequence[str]] = None) -> int: except Exception as e: LOG.critical("uncaught exception: %s", e, exc_info=Session.get_current().log_tracebacks) return 1 - + def show_options_help(self) -> None: """! @brief Display help for session options.""" for info_name in sorted(options.OPTIONS_INFO.keys()): diff --git a/pyocd/board/board.py b/pyocd/board/board.py index 0bc4ab12e..3be879f3d 100644 --- a/pyocd/board/board.py +++ b/pyocd/board/board.py @@ -30,15 +30,15 @@ class Board(GraphNode): """ def __init__(self, session, target=None): super(Board, self).__init__() - + # Use the session option if no target type was given to us. if target is None: target = session.options.get('target_override') - + # As a last resort, default the target to 'cortex_m'. if target is None: target = 'cortex_m' - + # Log a helpful warning when defaulting to the generic cortex_m target. if session.options.get('warning.cortex_m_default'): LOG.warning("Generic 'cortex_m' target type is selected by default; is this " @@ -48,10 +48,10 @@ def __init__(self, session, target=None): "targets types.") assert target is not None - + # Convert dashes to underscores in the target type, and convert to lower case. target = target.replace('-', '_').lower() - + # Write the effective target type back to options if it's different. if target != session.options.get('target_override'): session.options['target_override'] = target @@ -61,7 +61,7 @@ def __init__(self, session, target=None): self._test_binary = session.options.get('test_binary') self._delegate = None self._inited = False - + # Create targets from provided CMSIS pack. if session.options['pack'] is not None: pack_target.PackTargets.populate_targets_from_pack(session.options['pack']) @@ -79,10 +79,10 @@ def __init__(self, session, target=None): "available target types. 
" "See " "for how to install additional target support.") from exc - + # Tell the user what target type is selected. LOG.info("Target type is %s", self._target_type) - + self.add_child(self.target) def init(self): @@ -90,15 +90,15 @@ def init(self): # If we don't have a delegate set yet, see if there is a session delegate. if (self.delegate is None) and (self.session.delegate is not None): self.delegate = self.session.delegate - + # Delegate pre-init hook. if (self.delegate is not None) and hasattr(self.delegate, 'will_connect'): self.delegate.will_connect(board=self) - + # Init the target. self.target.init() self._inited = True - + # Delegate post-init hook. if (self.delegate is not None) and hasattr(self.delegate, 'did_connect'): self.delegate.did_connect(board=self) @@ -117,31 +117,31 @@ def uninit(self): @property def session(self): return self._session - + @property def delegate(self): return self._delegate - + @delegate.setter def delegate(self, the_delegate): self._delegate = the_delegate - + @property def unique_id(self): return self.session.probe.unique_id - + @property def target_type(self): return self._target_type - + @property def test_binary(self): return self._test_binary - + @property def name(self): return "generic" - + @property def description(self): return "Generic board via " + self.session.probe.vendor_name + " " \ diff --git a/pyocd/board/mbed_board.py b/pyocd/board/mbed_board.py index cf8caacd7..6487ad7b4 100644 --- a/pyocd/board/mbed_board.py +++ b/pyocd/board/mbed_board.py @@ -22,7 +22,7 @@ class MbedBoard(Board): """! @brief Mbed board class. - + This class inherits from Board and is specific to mbed boards. Particularly, this class will dynamically determine the type of connected board based on the board ID encoded in the debug probe's serial number. If the board ID is all "0" characters, it indicates the @@ -30,7 +30,7 @@ class MbedBoard(Board): """ def __init__(self, session, target=None, board_id=None): """! @brief Constructor. 
- + This constructor attempts to use the board ID from the serial number to determine the target type. See #BOARD_ID_TO_INFO. """ @@ -38,7 +38,7 @@ def __init__(self, session, target=None, board_id=None): unique_id = session.probe.unique_id if board_id is None: board_id = unique_id[0:4] - + # Check for null board ID. This indicates a standalone probe or generic firmware. if board_id == "0000": board_info = None diff --git a/pyocd/cache/memory.py b/pyocd/cache/memory.py index 7557dd85c..f9e68ee9b 100644 --- a/pyocd/cache/memory.py +++ b/pyocd/cache/memory.py @@ -26,19 +26,19 @@ class MemoryCache(object): """! @brief Memory cache. - + Maintains a cache of target memory. The constructor is passed a backing DebugContext object that will be used to fill the cache. - + The cache is invalidated whenever the target has run since the last cache operation (based on run tokens). If the target is currently running, all accesses cause the cache to be invalidated. - + The target's memory map is referenced. All memory accesses must be fully contained within a single memory region, or a TransferFaultError will be raised. However, if an access is outside of all regions, the access is passed to the underlying context unmodified. When an access is within a region, that region's cacheability flag is honoured. """ - + def __init__(self, context, core): self._context = context self._core = core @@ -143,11 +143,11 @@ def _read(self, addr, size): def _merge_data(self, combined, addr, size): """! @brief Extracts data from the intersection of an address range across a list of interval objects. - + The range represented by @a addr and @a size are assumed to overlap the intervals. The first and last interval in the list may have ragged edges not fully contained in the address range, in which case the correct slice of those intervals is extracted. - + @param self @param combined List of Interval objects forming a contiguous range. The @a data attribute of each interval must be a bytearray. 
@@ -165,7 +165,7 @@ def _merge_data(self, combined, addr, size): endOffset = offset + size result = combined[0].data[offset:endOffset] return result - + # Take slice of leading ragged edge. if len(combined) and combined[0].begin < addr: offset = addr - combined[0].begin diff --git a/pyocd/cache/register.py b/pyocd/cache/register.py index 32cefe9cd..c8a489ef8 100644 --- a/pyocd/cache/register.py +++ b/pyocd/cache/register.py @@ -24,20 +24,20 @@ class RegisterCache(object): """! @brief Cache of a core's register values. - + The only interesting part of this cache is how it handles the special registers: CONTROL, FAULTMASK, BASEPRI, PRIMASK, and CFBP. The values of the first four registers are read and written all at once as the CFBP register through the hardware DCRSR register. On reads of any of these registers, or the combined CFBP, the cache will ask the underlying context to read CFBP. It will then update the cache entries for all five registers. Writes to any of these registers just invalidate all five. - + Same logic applies for XPSR submasks. """ CFBP_INDEX = index_for_reg('cfbp') XPSR_INDEX = index_for_reg('xpsr') - + CFBP_REGS = [index_for_reg(name) for name in [ 'cfbp', 'control', @@ -116,7 +116,7 @@ def read_core_registers_raw(self, reg_list): read_list.append(self.XPSR_INDEX) xpsr_index = read_list.index(self.XPSR_INDEX) self._metrics.misses += len(read_list) - + # Read registers not in the cache from the target. if read_list: try: diff --git a/pyocd/commands/base.py b/pyocd/commands/base.py index b0000b694..8301aad38 100755 --- a/pyocd/commands/base.py +++ b/pyocd/commands/base.py @@ -29,7 +29,7 @@ class CommandMeta(type): """! @brief Metaclass for commands. - + Examines the `INFO` attribute of the command class and builds the @ref pyocd.commands.commands.ALL_COMMANDS "ALL_COMMANDS" table. """ @@ -37,11 +37,11 @@ class CommandMeta(type): def __new__(mcs, name, bases, dict): # Create the new type. 
new_type = type.__new__(mcs, name, bases, dict) - + # The Command base class won't have an INFO. if 'INFO' in dict: info = dict['INFO'] - + # Validate the INFO dict. assert (('names' in info) and ('group' in info) @@ -49,14 +49,14 @@ def __new__(mcs, name, bases, dict): and ('help' in info) and ((('nargs' in info) and ('usage' in info)) # Required for commands. or ('access' in info))) # Required for values. - + # Add this command to our table of commands by group. ALL_COMMANDS.setdefault(info['group'], set()).add(new_type) return new_type class CommandBase(metaclass=CommandMeta): """! @brief Base class for a command. - + Each command class must have an `INFO` attribute with the following keys: - `names`: List of names for the info. The first element is the primary name. - `group`: Optional name for the command group. The group is meant to be a context group, not type of @@ -72,7 +72,7 @@ class CommandBase(metaclass=CommandMeta): def __init__(self, context): """! @brief Constructor.""" self._context = context - + @property def context(self): """! @brief The command execution context.""" @@ -116,7 +116,7 @@ def _format_core_register(self, info, value): def _convert_value(self, arg): """! @brief Convert an argument to a 32-bit integer. - + Handles the usual decimal, binary, and hex numbers with the appropriate prefix. Also recognizes register names and address dereferencing. Dereferencing using the ARM assembler syntax. To dereference, put the value in brackets, i.e. '[r0]' or @@ -179,12 +179,12 @@ def format_help(cls, context, max_width=72): class ValueBase(CommandBase): """! @brief Base class for value commands. - + Value commands are special commands representing a value that can be read and/or written. They are used through the `show` and `set` commands. A value command has an associated access mode of read-only, write-only, or read-write. The access mode sets which of the `show` and `set` commands may be used with the value. 
- + Each value class must have an `INFO` attribute with the following keys: - `names`: List of names for the value. The first element is the primary name. - `group`: Optional name for the command group. The group is meant to be a context group, not type of @@ -195,11 +195,11 @@ class ValueBase(CommandBase): - `help`: String for the short help. Typically should be no more than one sentence. - `extra_help`: Optional key for a string with more detailed help. """ - + def display(self, args): """! @brief Output the value of the info.""" raise NotImplementedError() - + def modify(self, args): """! @brief Change the info to a new value.""" raise NotImplementedError() diff --git a/pyocd/commands/commander.py b/pyocd/commands/commander.py index 55045af48..5f9ac220b 100755 --- a/pyocd/commands/commander.py +++ b/pyocd/commands/commander.py @@ -33,30 +33,30 @@ class PyOCDCommander(object): """! @brief Manages the commander interface. - + Responsible for connecting the execution context, REPL, and commands, and handles connection. - + Exit codes: - 0 = no errors - 1 = command error - 2 = transfer error - 3 = failed to create session (probe might not exist) - 4 = failed to open probe - + @todo Replace use of args from argparse with something cleaner. """ - + def __init__(self, args, cmds=None): """! @brief Constructor.""" # Read command-line arguments. self.args = args self.cmds = cmds - + self.context = CommandExecutionContext(no_init=self.args.no_init) self.context.command_set.add_command_group('commander') self.session = None self.exit_code = 0 - + def run(self): """! @brief Main entry point.""" try: @@ -64,7 +64,7 @@ def run(self): if self.cmds is None: if not self.connect(): return self.exit_code - + # Print connected message, unless not initing. if not self.args.no_init: try: @@ -90,7 +90,7 @@ def run(self): # Run the REPL interface. console = PyocdRepl(self.context) console.run() - + # Otherwise, run the list of commands we were given and exit. 
We only connect when # there is a command that requires a connection (most do). else: @@ -113,7 +113,7 @@ def run(self): self.session.close() return self.exit_code - + def run_commands(self): """! @brief Run commands specified on the command line.""" did_connect = False @@ -121,7 +121,7 @@ def run_commands(self): for args in self.cmds: # Extract the command name. cmd = args[0].lower() - + # Handle certain commands without connecting. needs_connect = (cmd not in ('list', 'help', 'exit')) @@ -130,11 +130,11 @@ def run_commands(self): if not self.connect(): return self.exit_code did_connect = True - + # Merge commands args back to one string. # FIXME this is overly complicated cmdline = " ".join('"{}"'.format(a) for a in args) - + # Invoke action handler. result = self.context.process_command_line(cmdline) if result is not None: @@ -147,7 +147,7 @@ def connect(self): self.context.writei("Setting SWD clock to %d kHz", self.args.frequency // 1000) options = convert_session_options(self.args.options) - + # Set connect mode. The --connect option takes precedence when set. Then, if --halt is set # then the connect mode is halt. If connect_mode is set through -O then use that. # Otherwise default to attach. @@ -159,7 +159,7 @@ def connect(self): connect_mode = None else: connect_mode = 'attach' - + # Connect to board. probe = ConnectHelper.choose_probe( blocking=(not self.args.no_wait), @@ -168,10 +168,10 @@ def connect(self): if probe is None: self.exit_code = 3 return False - + # Create a proxy so the probe can be shared between the session and a possible probe server. probe_proxy = SharedDebugProbeProxy(probe) - + # Create the session. 
self.session = session.Session(probe_proxy, project_dir=self.args.project_dir, @@ -188,11 +188,11 @@ def connect(self): resume_on_disconnect=False, ) ) - + if not self._post_connect(): self.exit_code = 4 return False - + result = self.context.attach_session(self.session) if not result: self.exit_code = 1 @@ -200,12 +200,12 @@ def connect(self): def _post_connect(self): """! @brief Finish the connect process. - + The session is opened. The `no_init` parameter passed to the constructor determines whether the board and target are initialized. - + If an ELF file was provided on the command line, it is set on the target. - + @param self This object. @param session A @ref pyocd.core.session.Session "Session" instance. @retval True Session attached and context state inited successfully. @@ -213,7 +213,7 @@ def _post_connect(self): """ assert self.session is not None assert not self.session.is_open - + # Open the session. try: self.session.open(init_board=not self.args.no_init) @@ -237,6 +237,6 @@ def _post_connect(self): if not self.args.no_init and self.session.target.is_locked(): self.context.write("Warning: Target is locked, limited operations available. 
Use 'unlock' " "command to mass erase and unlock, then execute 'reinit'.") - + return True diff --git a/pyocd/commands/commands.py b/pyocd/commands/commands.py index 529c216cc..50e3d4a59 100755 --- a/pyocd/commands/commands.py +++ b/pyocd/commands/commands.py @@ -74,7 +74,7 @@ class ListCommand(CommandBase): 'usage': "", 'help': "Show available targets.", } - + def execute(self): ConnectHelper.list_connected_probes() @@ -87,7 +87,7 @@ class ExitCommand(CommandBase): 'usage': "", 'help': "Quit pyocd commander.", } - + def execute(self): from .repl import ToolExitException raise ToolExitException() @@ -101,7 +101,7 @@ class StatusCommand(CommandBase): 'usage': "", 'help': "Show the target's current state.", } - + def execute(self): if not self.context.target.is_locked(): for i, c in enumerate(self.context.target.cores): @@ -119,12 +119,12 @@ def dump_register_group(self, group_name): regs = natsort(self.context.selected_core.core_registers.iter_matching( lambda r: r.group == group_name), key=lambda r: r.name) reg_values = self.context.selected_core.read_core_registers_raw(r.name for r in regs) - + col_printer = ColumnFormatter() for info, value in zip(regs, reg_values): value_str = self._format_core_register(info, value) col_printer.add_items([(info.name, value_str)]) - + col_printer.write() def dump_registers(self, show_all=False, show_group=None): @@ -141,7 +141,7 @@ def dump_registers(self, show_all=False, show_group=None): groups_to_show = [show_group] else: groups_to_show = ['general'] - + for group in groups_to_show: self.context.writei("%s registers:", group) self.dump_register_group(group) @@ -196,12 +196,12 @@ class RegCommand(RegisterCommandBase): "be printed. 
If the -f option is passed, then individual fields of " "peripheral registers will be printed in addition to the full value.", } - + def parse(self, args): self.show_all = False self.reg = None self.show_fields = False - + if len(args) == 0: self.reg = "general" else: @@ -217,7 +217,7 @@ def execute(self): if self.show_all: self.dump_registers(show_all=True) return - + # Check register names first. if self.reg in self.context.selected_core.core_registers.by_name: if not self.context.selected_core.is_halted(): @@ -229,14 +229,14 @@ def execute(self): value_str = self._format_core_register(info, value) self.context.writei("%s = %s", self.reg, value_str) return - + # Now look for matching group name. matcher = UniquePrefixMatcher(self.context.selected_core.core_registers.groups) group_matches = matcher.find_all(self.reg) if len(group_matches) == 1: self.dump_registers(show_group=group_matches[0]) return - + # And finally check for peripherals. subargs = self.reg.split('.') if subargs[0] in self.context.peripherals: @@ -265,7 +265,7 @@ class WriteRegCommand(RegisterCommandBase): "When a peripheral register is written, if the -r option is passed then " "it is read back and the updated value printed.", } - + def parse(self, args): idx = 0 if len(args) == 3: @@ -338,7 +338,7 @@ class ResetCommand(CommandBase): "or 'emulated'.", } - + def parse(self, args): self.do_halt = False self.reset_type = None @@ -374,7 +374,7 @@ class DisassembleCommand(CommandBase): 'extra_help': "Only available if the capstone library is installed. To install " "capstone, run 'pip install capstone'.", } - + def parse(self, args): self.center = (len(args) > 1) and (args[0] in ('-c', '--center')) if self.center: @@ -505,7 +505,7 @@ def execute(self): if not region.is_flash: raise exceptions.CommandError("address 0x%08x is not in flash", self.addr) assert region.flash is not None - + # Program phrase to flash. 
region.flash.init(region.flash.Operation.PROGRAM) region.flash.program_phrase(self.addr, self.data) @@ -582,7 +582,7 @@ class SavememCommand(CommandBase): 'usage': "ADDR LEN FILENAME", 'help': "Save a range of memory to a binary file.", } - + def parse(self, args): self.addr = self._convert_value(args[0]) self.count = self._convert_value(args[1]) @@ -616,7 +616,7 @@ class LoadmemCommand(CommandBase): 'help': "Load a binary file to an address in memory (RAM or flash).", 'extra_help': "This command is deprecated in favour of the more flexible 'load'.", } - + def parse(self, args): self.addr = self._convert_value(args[0]) self.filename = args[1] @@ -639,7 +639,7 @@ class LoadCommand(CommandBase): 'usage': "FILENAME [ADDR]", 'help': "Load a binary, hex, or elf file with optional base address.", } - + def parse(self, args): self.filename = args[0] if len(args) > 1: @@ -661,7 +661,7 @@ class CompareCommand(CommandBase): 'help': "Compare a memory range against a binary file.", 'extra_help': "If the length is not provided, then the length of the file is used.", } - + def parse(self, args): self.addr = self._convert_value(args[0]) if len(args) < 3: @@ -685,7 +685,7 @@ def execute(self): file_data = bytearray(f.read()) else: file_data = bytearray(f.read(self.length)) - + if self.length is None: length = len(file_data) elif len(file_data) < self.length: @@ -693,36 +693,36 @@ def execute(self): length = len(file_data) else: length = self.length - + # Divide into 32 kB chunks. CHUNK_SIZE = 32 * 1024 chunk_count = (length + CHUNK_SIZE - 1) // CHUNK_SIZE - + addr = self.addr end_addr = addr + length offset = 0 mismatch = False - + for chunk in range(chunk_count): # Get this chunk's size. 
chunk_size = min(end_addr - addr, CHUNK_SIZE) self.context.writei("Comparing %d bytes @ 0x%08x", chunk_size, addr) - + data = bytearray(self.context.selected_ap.read_memory_block8(addr, chunk_size)) - + for i in range(chunk_size): if data[i] != file_data[offset+i]: mismatch = True self.context.writei("Mismatched byte at 0x%08x (offset 0x%x): 0x%02x (memory) != 0x%02x (file)", addr + i, offset + i, data[i], file_data[offset+i]) break - + if mismatch: break - + offset += chunk_size addr += chunk_size - + if not mismatch: self.context.writei("All %d bytes match.", length) @@ -741,7 +741,7 @@ class FillCommand(CommandBase): "provided, the size is determined by the pattern value's most " "significant set bit. Only RAM regions may be filled.", } - + def parse(self, args): if len(args) == 3: self.size = None @@ -755,7 +755,7 @@ def parse(self, args): self.addr = self._convert_value(args[1]) self.length = self._convert_value(args[2]) self.pattern = self._convert_value(args[3]) - + # Determine size by the highest set bit in the pattern. if self.size is None: highest = msb(self.pattern) @@ -779,20 +779,20 @@ def execute(self): elif self.size == 32: pattern_str = "0x%08x" % (self.pattern & 0xffffffff) self.pattern = conversion.u32le_list_to_byte_list([self.pattern]) - + # Divide into 32 kB chunks. CHUNK_SIZE = 32 * 1024 chunk_count = (self.length + CHUNK_SIZE - 1) // CHUNK_SIZE - + addr = self.addr end_addr = addr + self.length self.context.writei("Filling 0x%08x-0x%08x with pattern %s", addr, end_addr - 1, pattern_str) - + for chunk in range(chunk_count): # Get this chunk's size. chunk_size = min(end_addr - addr, CHUNK_SIZE) self.context.writei("Wrote %d bytes @ 0x%08x", chunk_size, addr) - + # Construct data for the chunk. 
if self.size == 8: data = self.pattern * chunk_size @@ -800,7 +800,7 @@ def execute(self): data = (self.pattern * ((chunk_size + 1) // 2))[:chunk_size] elif self.size == 32: data = (self.pattern * ((chunk_size + 3) // 4))[:chunk_size] - + # Write to target. self.context.selected_ap.write_memory_block8(addr, data) addr += chunk_size @@ -816,7 +816,7 @@ class FindCommand(CommandBase): 'extra_help': "A pattern of any number of bytes can be searched for. Each BYTE " "parameter must be an 8-bit value.", } - + def parse(self, args): if len(args) < 3: raise exceptions.CommandError("missing argument") @@ -826,32 +826,32 @@ def parse(self, args): for p in args[2:]: self.pattern += bytearray([self._convert_value(p)]) self.pattern_str = " ".join("%02x" % p for p in self.pattern) - + def execute(self): # Divide into 32 kB chunks. CHUNK_SIZE = 32 * 1024 chunk_count = (self.length + CHUNK_SIZE - 1) // CHUNK_SIZE - + addr = self.addr end_addr = addr + self.length self.context.writei("Searching 0x%08x-0x%08x for pattern [%s]", addr, end_addr - 1, self.pattern_str) - + match = False for chunk in range(chunk_count): # Get this chunk's size. 
chunk_size = min(end_addr - addr, CHUNK_SIZE) self.context.writei("Read %d bytes @ 0x%08x", chunk_size, addr) - + data = bytearray(self.context.selected_ap.read_memory_block8(addr, chunk_size)) - + offset = data.find(self.pattern) if offset != -1: match = True self.context.writei("Found pattern at address 0x%08x", addr + offset) break - + addr += chunk_size - len(self.pattern) - + if not match: self.context.writei("Failed to find pattern in range 0x%08x-0x%08x", self.addr, end_addr - 1) @@ -864,7 +864,7 @@ class EraseCommand(CommandBase): 'usage': "[ADDR] [COUNT]", 'help': "Erase all internal flash or a range of sectors.", } - + def parse(self, args): if len(args) == 0: self.erase_chip = True @@ -908,7 +908,7 @@ class UnlockCommand(CommandBase): 'usage': "", 'help': "Unlock security on the target.", } - + def execute(self): self.context.target.mass_erase() @@ -924,7 +924,7 @@ class ContinueCommand(CommandBase): "then it's state is reported. For instance, if the target is halted immediately " "after resuming, a debug event such as a breakpoint most likely occurred.", } - + def execute(self): self.context.selected_core.resume() status = self.context.selected_core.get_state() @@ -950,13 +950,13 @@ class StepCommand(CommandBase): 'usage': "[COUNT]", 'help': "Step one or more instructions.", } - + def parse(self, args): if len(args) == 1: self.count = self._convert_value(args[0]) else: self.count = 1 - + def execute(self): if not self.context.selected_core.is_halted(): self.context.write("Core is not halted; cannot step") @@ -981,7 +981,7 @@ class HaltCommand(CommandBase): 'usage': "", 'help': "Halt the target.", } - + def execute(self): self.context.selected_core.halt() @@ -1001,7 +1001,7 @@ class BreakpointCommand(CommandBase): 'usage': "ADDR", 'help': "Set a breakpoint address.", } - + def parse(self, args): self.addr = self._convert_value(args[0]) @@ -1021,7 +1021,7 @@ class RemoveBreakpointCommand(CommandBase): 'usage': "ADDR", 'help': "Remove a breakpoint.", } - + 
def parse(self, args): self.addr = self._convert_value(args[0]) @@ -1042,7 +1042,7 @@ class ListBreakpointsCommand(CommandBase): 'usage': "", 'help': "List breakpoints.", } - + def execute(self): availableBpCount = self.context.selected_core.available_breakpoint_count self.context.writei("%d hardware breakpoints available", availableBpCount) @@ -1062,7 +1062,7 @@ class WatchpointCommand(CommandBase): 'usage': "ADDR [r|w|rw] [1|2|4]", 'help': "Set a watchpoint address, and optional access type (default rw) and size (4).", } - + def parse(self, args): self.addr = self._convert_value(args[0]) if len(args) > 1: @@ -1096,7 +1096,7 @@ class RemoveWatchpointCommand(CommandBase): 'usage': "ADDR", 'help': "Remove a watchpoint.", } - + def parse(self, args): self.addr = self._convert_value(args[0]) @@ -1118,7 +1118,7 @@ class ListWatchpointsCommand(CommandBase): 'usage': "", 'help': "List watchpoints.", } - + def execute(self): if self.context.selected_core.dwt is None: raise exceptions.CommandError("DWT not present") @@ -1131,7 +1131,7 @@ def execute(self): for i, wp in enumerate(wps): # TODO fix requirement to access WATCH_TYPE_TO_FUNCT self.context.writei("%d: 0x%08x, %d bytes, %s", - i, wp.addr, wp.size, + i, wp.addr, wp.size, WATCHPOINT_FUNCTION_NAME_MAP[self.context.selected_core.dwt.WATCH_TYPE_TO_FUNCT[wp.func]]) class SelectCoreCommand(CommandBase): @@ -1143,7 +1143,7 @@ class SelectCoreCommand(CommandBase): 'usage': "[NUM]", 'help': "Select CPU core by number or print selected core.", } - + def parse(self, args): if len(args) == 0: self.show_core = True @@ -1170,7 +1170,7 @@ class ReadDpCommand(CommandBase): 'usage': "ADDR", 'help': "Read DP register.", } - + def parse(self, args): self.addr = self._convert_value(args[0]) @@ -1187,7 +1187,7 @@ class WriteDpCommand(CommandBase): 'usage': "ADDR DATA", 'help': "Write DP register.", } - + def parse(self, args): self.addr = self._convert_value(args[0]) self.data = self._convert_value(args[1]) @@ -1314,7 +1314,7 @@ def 
execute(self): if self.context.elf is None: self.context.write("No ELF available") return - + lineInfo = self.context.elf.address_decoder.get_line_for_address(self.addr) if lineInfo is not None: path = os.path.join(lineInfo.dirname, lineInfo.filename).decode() @@ -1322,7 +1322,7 @@ def execute(self): pathline = "{}:{}".format(path, line) else: pathline = "" - + fnInfo = self.context.elf.address_decoder.get_function_for_address(self.addr) if fnInfo is not None: name = fnInfo.name.decode() @@ -1330,7 +1330,7 @@ def execute(self): else: name = "" offset = 0 - + self.context.writef("{addr:#10x} : {fn}+{offset} : {pathline}", addr=self.addr, fn=name, offset=offset, pathline=pathline) @@ -1352,7 +1352,7 @@ def execute(self): if self.context.elf is None: self.context.write("No ELF available") return - + sym = self.context.elf.symbol_decoder.get_symbol_for_name(self.name) if sym is not None: if sym.type == 'STT_FUNC': @@ -1394,7 +1394,7 @@ def execute(self): if self.context.session.gdbservers[core_number] is not None: server = self.context.session.gdbservers[core_number] del self.context.session.gdbservers[core_number] - + # Stop the server and wait for it to terminate. server.stop() while server.is_alive(): @@ -1404,7 +1404,7 @@ def execute(self): elif self.action == 'status': if core_number in self.context.session.gdbservers: self.context.writef("gdbserver for core {0} is running", core_number) - else: + else: self.context.writef("gdbserver for core {0} is not running", core_number) class ProbeserverCommand(CommandBase): @@ -1469,7 +1469,7 @@ def execute(self): # Check readability. if 'r' not in value_class.INFO['access']: raise exceptions.CommandError("value '%s' is not readable" % self.name) - + # Execute show operation. value_object = value_class(self.context) value_object.display(self.args) @@ -1515,7 +1515,7 @@ def execute(self): # Check writability. 
if 'w' not in value_class.INFO['access']: raise exceptions.CommandError("value '%s' is not modifiable" % self.name) - + # Execute set operation. value_object = value_class(self.context) value_object.modify(self.args) @@ -1544,7 +1544,7 @@ class HelpCommand(CommandBase): 'usage': "[CMD]", 'help': "Show help for commands.", } - + HELP_ADDENDUM = """ All register names are also available as commands that print the register's value. Any ADDR or LEN argument will accept a register name. @@ -1591,19 +1591,19 @@ def execute(self): return except IndexError: pass - + self.context.write(cmd_class.format_help(self.context, self.term_width)) def _list_commands(self, title, command_list, help_format): cmds = {} nominal_cmds = [] - + for klass in command_list: cmd_name = klass.INFO['names'][0] cmds[cmd_name] = klass nominal_cmds.append(cmd_name) nominal_cmds.sort() - + self.context.write(title + ":\n" + ("-" * len(title))) for cmd_name in nominal_cmds: cmd_klass = cmds[cmd_name] diff --git a/pyocd/commands/execution_context.py b/pyocd/commands/execution_context.py index 2725b191c..905104ab5 100755 --- a/pyocd/commands/execution_context.py +++ b/pyocd/commands/execution_context.py @@ -33,7 +33,7 @@ class CommandSet(object): """! @brief Holds a set of command classes.""" - + ## Whether command and infos modules have been loaded yet. DID_LOAD_COMMAND_MODULES = False @@ -44,10 +44,10 @@ def __init__(self): self._values = {} self._value_classes = set() self._value_matcher = UniquePrefixMatcher() - + # Ensure these modules get loaded so the ALL_COMMANDS dicts are filled. self._load_modules() - + @classmethod def _load_modules(cls): # Load these modules in order to populate the dicts with standard commands. This must be @@ -57,31 +57,31 @@ def _load_modules(cls): from . import commands from . 
import values cls.DID_LOAD_COMMAND_MODULES = True - + @property def commands(self): return self._commands - + @property def command_classes(self): return self._command_classes - + @property def command_matcher(self): return self._command_matcher - + @property def values(self): return self._values - + @property def value_classes(self): return self._value_classes - + @property def value_matcher(self): return self._value_matcher - + def add_command_group(self, group_name): """! @brief Add commands belonging to a group to the command set. @param self The command set. @@ -89,7 +89,7 @@ def add_command_group(self, group_name): """ from .base import ALL_COMMANDS self.add_commands(ALL_COMMANDS.get(group_name, set())) - + def add_commands(self, commands): """! @brief Add some commands to the command set. @param self The command set. @@ -102,7 +102,7 @@ def add_commands(self, commands): self._commands.update(cmd_names) self._command_classes.update(cmd_classes) self._command_matcher.add_items(cmd_names.keys()) - + value_names = {name: klass for klass in value_classes for name in klass.INFO['names']} self._values.update(value_names) self._value_classes.update(value_classes) @@ -117,11 +117,11 @@ def add_commands(self, commands): class CommandExecutionContext(object): """! @brief Manages command execution. - + This class holds persistent state for command execution, and provides the interface for executing commands and command lines. """ - + def __init__(self, no_init=False, output_stream=None): """! @brief Constructor. @param self This object. @@ -142,18 +142,18 @@ def __init__(self, no_init=False, output_stream=None): self._selected_ap_address = None self._peripherals = {} self._loaded_peripherals = False - + # Add in the standard commands. self._command_set.add_command_group('standard') - + def write(self, message='', **kwargs): """! @brief Write a fixed message to the output stream. 
- + The message is written to the output stream passed to the constructor, terminated with a newline by default. The `end` keyword argument can be passed to change the terminator. No formatting is applied to the message. If formatting is required, use the writei() or writef() methods instead. - + @param self This object. @param message The text to write to the output. If not a string object, it is run through str(). """ @@ -163,26 +163,26 @@ def write(self, message='', **kwargs): if not isinstance(message, str): message = str(message) self._output.write(message + end) - + def writei(self, fmt, *args, **kwargs): """! @brief Write an interpolated string to the output stream. - + The formatted string is written to the output stream passed to the constructor, terminated with a newline by default. The `end` keyword argument can be passed to change the terminator. - + @param self This object. @param fmt Format string using printf-style "%" formatters. """ assert isinstance(fmt, str) message = fmt % args self.write(message, **kwargs) - + def writef(self, fmt, *args, **kwargs): """! @brief Write a formatted string to the output stream. - + The formatted string is written to the output stream passed to the constructor, terminated with a newline by default. The `end` keyword argument can be passed to change the terminator. - + @param self This object. @param fmt Format string using the format() mini-language. """ @@ -192,10 +192,10 @@ def writef(self, fmt, *args, **kwargs): def attach_session(self, session): """! @brief Associate a session with the command context. - + Various data for the context are initialized. This includes selecting the initially selected core and MEM-AP, and getting an ELF file that was set on the target. - + @param self This object. @param session A @ref pyocd.core.session.Session "Session" instance. @retval True Session attached and context state inited successfully. 
@@ -211,47 +211,47 @@ def attach_session(self, session): # Selected core defaults to the target's default selected core. if self.selected_core is None: self.selected_core = self.target.selected_core - + # Get the AP for the selected core. if self.selected_core is not None: self.selected_ap_address = self.selected_core.ap.address except IndexError: pass - + # Fall back to the first MEM-AP. if self.selected_ap_address is None: for ap_num in sorted(self.target.aps.keys()): if isinstance(self.target.aps[ap_num], MEM_AP): self.selected_ap_address = ap_num break - + return True - + @property def session(self): return self._session - + @property def board(self): return self._session and self._session.board - + @property def target(self): return self._session and self._session.target - + @property def probe(self): return self._session and self._session.probe - + @property def elf(self): return self.target and self.target.elf - + @property def command_set(self): """! @brief CommandSet with commands available in this context.""" return self._command_set - + @property def peripherals(self): """! @brief Dict of SVD peripherals.""" @@ -260,24 +260,24 @@ def peripherals(self): self._peripherals[p.name.lower()] = p self._loaded_peripherals = True return self._peripherals - + @property def output_stream(self): return self._output - + @output_stream.setter def output_stream(self, stream): self._output = stream - + @property def selected_core(self): """! 
@brief The Target instance for the selected core.""" return self._selected_core - + @selected_core.setter def selected_core(self, value): self._selected_core = value - + @property def selected_ap_address(self): return self._selected_ap_address @@ -285,7 +285,7 @@ def selected_ap_address(self): @selected_ap_address.setter def selected_ap_address(self, value): self._selected_ap_address = value - + @property def selected_ap(self): if self.selected_ap_address is None: @@ -327,7 +327,7 @@ def _split_commands(self, line): def parse_command(self, cmdline): """! @brief Create a CommandInvocation from a single command.""" cmdline = cmdline.strip() - + # Check for Python or shell command lines. first_char = cmdline[0] if first_char in '$!': @@ -353,7 +353,7 @@ def parse_command(self, cmdline): ", ".join("'%s'" % c for c in all_matches))) else: raise exceptions.CommandError("unrecognized command '%s'" % cmd) - + return CommandInvocation(matched_command, args, self.execute_command) def execute_command(self, invocation): @@ -389,7 +389,7 @@ def handle_python(self, invocation): # Lazily build the python environment. if self._python_namespace is None: self._build_python_namespace() - + result = eval(invocation.cmd, globals(), self._python_namespace) if result is not None: if isinstance(result, int): @@ -402,7 +402,7 @@ def handle_python(self, invocation): if self.session.log_tracebacks: LOG.error("Exception while executing expression: %s", e, exc_info=True) raise exceptions.CommandError("exception while executing expression: %s" % e) - + def handle_system(self, invocation): """! @brief Evaluate a system call command.""" try: diff --git a/pyocd/commands/repl.py b/pyocd/commands/repl.py index 30bf8502f..2579b8b30 100755 --- a/pyocd/commands/repl.py +++ b/pyocd/commands/repl.py @@ -26,23 +26,23 @@ class ToolExitException(Exception): """! @brief Special exception indicating the tool should exit. - + This exception is only raised by the `exit` command. 
""" pass class PyocdRepl(object): """! @brief Read-Eval-Print-Loop for pyOCD commander.""" - + PROMPT = 'pyocd> ' - + PYOCD_HISTORY_ENV_VAR = 'PYOCD_HISTORY' PYOCD_HISTORY_LENGTH_ENV_VAR = 'PYOCD_HISTORY_LENGTH' DEFAULT_HISTORY_FILE = ".pyocd_history" def __init__(self, command_context): self.context = command_context - + # Attempt to import readline. If we import readline from the module level, it may be imported # when initing non-interactive subcommands such as gdbserver, and it causes the process to be # stopped if started in the background (& suffix in sh). @@ -52,11 +52,11 @@ def __init__(self, command_context): # Enable readline history. self._history_path = os.environ.get(self.PYOCD_HISTORY_ENV_VAR, os.path.join(os.path.expanduser("~"), self.DEFAULT_HISTORY_FILE)) - + # Read command history and set history length. try: readline.read_history_file(self._history_path) - + history_len = int(os.environ.get(self.PYOCD_HISTORY_LENGTH_ENV_VAR, session.Session.get_current().options.get('commander.history_length'))) readline.set_history_length(history_len) @@ -87,7 +87,7 @@ def run(self): print() except ToolExitException: pass - + def run_one_command(self, line): """! 
@brief Execute a single command line and handle exceptions.""" try: diff --git a/pyocd/commands/values.py b/pyocd/commands/values.py index 2ed1c07cb..c0f94be7b 100755 --- a/pyocd/commands/values.py +++ b/pyocd/commands/values.py @@ -173,14 +173,14 @@ class FaultValue(ValueBase): def display(self, args): showAll = ('-a' in args) - + CFSR = 0xe000ed28 HFSR = 0xe000ed2c DFSR = 0xe000ed30 MMFAR = 0xe000ed34 BFAR = 0xe000ed38 # AFSR = 0xe000ed3c - + MMFSR_fields = [ ('IACCVIOL', 0), ('DACCVIOL', 1), @@ -218,7 +218,7 @@ def display(self, args): ('VCATCH', 3), ('EXTERNAL', 4), ] - + def print_fields(regname, value, fields, showAll): if value == 0 and not showAll: return @@ -227,7 +227,7 @@ def print_fields(regname, value, fields, showAll): bit = (value >> bitpos) & 1 if showAll or bit != 0: self.context.writei(" %s = 0x%x", name, bit) - + if self.context.selected_core is None: self.context.write("No core is selected") return @@ -240,7 +240,7 @@ def print_fields(regname, value, fields, showAll): dfsr = self.context.selected_core.read32(DFSR) mmfar = self.context.selected_core.read32(MMFAR) bfar = self.context.selected_core.read32(BFAR) - + print_fields('MMFSR', mmfsr, MMFSR_fields, showAll) if showAll or mmfsr & (1 << 7): # MMFARVALID self.context.writei(" MMFAR = 0x%08x", mmfar) @@ -270,7 +270,7 @@ def modify(self, args): raise exceptions.CommandError("missing reset state") state = int(args[0], base=0) self.context.writef("nRESET = {}", state) - + # Use the probe to assert reset if the DP doesn't exist for some reason, otherwise # use the DP so reset notifications are sent. 
if self.context.target.dp is None: @@ -327,7 +327,7 @@ def display(self, args): def modify(self, args): if len(args) == 0: raise exceptions.CommandError("missing argument") - + ap_num = int(args[0], base=0) if self.context.target.dp.adi_version == coresight.dap.ADIVersion.ADIv5: ap_addr = coresight.ap.APv1Address(ap_num) @@ -415,7 +415,7 @@ class TargetGraphValue(ValueBase): def display(self, args): self.context.board.dump() - + class LockedValue(ValueBase): INFO = { 'names': ['locked'], @@ -479,7 +479,7 @@ def modify(self, args): if self.context.selected_core is None: self.context.write("No core is selected") return - + try: self.context.selected_core.set_vector_catch(convert_vector_catch(args[0])) except ValueError as e: diff --git a/pyocd/core/core_registers.py b/pyocd/core/core_registers.py index ba0c90a8f..ad6372544 100644 --- a/pyocd/core/core_registers.py +++ b/pyocd/core/core_registers.py @@ -24,10 +24,10 @@ class CoreRegisterInfo(object): """! @brief Useful information about a core register. - + Provides properties for classification of the register, and utilities to convert to and from the raw integer representation of the register value. - + Each core register has both a name (string), which is always lowercase, and an integer index. The index is a unique identifier with an architecture-specified meaning. """ @@ -37,7 +37,7 @@ class CoreRegisterInfo(object): # This is just a placeholder. The architecture-specific subclass should override the definition. Its # value is set to None to cause an exception if used. _NAME_MAP = None - + ## Map of register index to info. # # This is just a placeholder. The architecture-specific subclass should override the definition. Its @@ -50,7 +50,7 @@ def add_to_map(cls, all_regs): for reg in all_regs: cls._NAME_MAP[reg.name] = reg cls._INDEX_MAP[reg.index] = reg - + @classmethod def get(cls, reg): """! @brief Return the CoreRegisterInfo instance for a register. 
@@ -81,32 +81,32 @@ def __init__(self, name, index, bitsize, reg_type, reg_group, reg_num=None, feat def name(self): """! @brief Name of the register. Always lowercase.""" return self._name - + @property def index(self): """! @brief Integer index of the register.""" return self._index - + @property def bitsize(self): """! @brief Bit width of the register..""" return self._bitsize - + @property def group(self): """! @brief Named group the register is contained within.""" return self._group - + @property def gdb_type(self): """! @brief Value type specific to gdb.""" return self._gdb_type - + @property def gdb_regnum(self): """! @brief Register number specific to gdb.""" return self._gdb_regnum - + @property def gdb_feature(self): """! @brief GDB architecture feature to which the register belongs.""" @@ -126,7 +126,7 @@ def is_single_float_register(self): def is_double_float_register(self): """! @brief Returns true for registers holding double-precision float values""" return self.gdb_type == 'ieee_double' - + def from_raw(self, value): """! @brief Convert register value from raw (integer) to canonical type.""" # Convert int to float. @@ -135,7 +135,7 @@ def from_raw(self, value): elif self.is_double_float_register: value = conversion.u64_to_float64(value) return value - + def to_raw(self, value): """! @brief Convert register value from canonical type to raw (integer).""" # Convert float to int. @@ -147,23 +147,23 @@ def to_raw(self, value): else: raise TypeError("non-float register value has float type") return value - + def clone(self): """! @brief Return a copy of the register info.""" return copy(self) - + def __eq__(self, other): return isinstance(other, CoreRegisterInfo) and (self.index == other.index) - + def __hash__(self): return hash(self.index) - + def __repr__(self): return "<{}@{:#x} {}={} {}-bit>".format(self.__class__.__name__, id(self), self.name, self.index, self.bitsize) class CoreRegistersIndex(object): """! 
@brief Class to hold indexes of available core registers. - + This class is meant to be used by a core to hold the set of core registers that are actually present on a particular device, as determined by runtime inspection of the core. A number of properties are made available to access the core registers by various keys. @@ -175,17 +175,17 @@ def __init__(self): self._by_name = {} self._by_index = {} self._by_feature = {} - + @property def groups(self): """! @brief Set of unique register group names.""" return self._groups - + @property def as_set(self): """! @brief Set of available registers as CoreRegisterInfo objects.""" return self._all - + @property def by_name(self): """! @brief Dict of (register name) -> CoreRegisterInfo.""" @@ -200,7 +200,7 @@ def by_index(self): def by_feature(self): """! @brief Dict of (register gdb feature) -> List[CoreRegisterInfo].""" return self._by_feature - + def iter_matching(self, predicate): """! @brief Iterate over registers matching a given predicate callable. @param self The object. @@ -210,7 +210,7 @@ def iter_matching(self, predicate): for reg in self._all: if predicate(reg): yield reg - + def add_group(self, regs): """! @brief Add a list of registers. @param self The object. diff --git a/pyocd/core/exceptions.py b/pyocd/core/exceptions.py index d8a334334..d407afabd 100644 --- a/pyocd/core/exceptions.py +++ b/pyocd/core/exceptions.py @@ -20,7 +20,7 @@ class Error(RuntimeError): class InternalError(Error): """! @brief Internal consistency or logic error. - + This error indicates that something has happened that shouldn't be possible. """ pass @@ -63,11 +63,11 @@ class TransferTimeoutError(TransferError): class TransferFaultError(TransferError): """! @brief A memory fault occurred. - + This exception class is extended to optionally record the start address and an optional length of the attempted memory access that caused the fault. 
The address and length, if available, will be included in the description of the exception when it is converted to a string. - + Positional arguments passed to the constructor are passed through to the superclass' constructor, and thus operate like any other standard exception class. Keyword arguments of 'fault_address' and 'length' can optionally be passed to the constructor to initialize the fault @@ -86,15 +86,15 @@ def fault_address(self): @fault_address.setter def fault_address(self, addr): self._address = addr - + @property def fault_end_address(self): return (self._address + self._length - 1) if (self._length is not None) else self._address - + @property def fault_length(self): return self._length - + @fault_length.setter def fault_length(self, length): self._length = length @@ -111,10 +111,10 @@ def __str__(self): if self._length is not None: desc += "-0x%08x" % self.fault_end_address return desc - + class FlashFailure(TargetError): """! @brief Exception raised when flashing fails for some reason. - + Positional arguments passed to the constructor are passed through to the superclass' constructor, and thus operate like any other standard exception class. The flash address that failed and/or result code from the algorithm can optionally be recorded in the exception, if @@ -124,11 +124,11 @@ def __init__(self, *args, **kwargs): super(FlashFailure, self).__init__(*args) self._address = kwargs.get('address', None) self._result_code = kwargs.get('result_code', None) - + @property def address(self): return self._address - + @property def result_code(self): return self._result_code diff --git a/pyocd/core/helpers.py b/pyocd/core/helpers.py index ea40a6ef7..5c6815d7c 100644 --- a/pyocd/core/helpers.py +++ b/pyocd/core/helpers.py @@ -27,29 +27,29 @@ class ConnectHelper(object): """! @brief Helper class for streamlining the probe discovery and session creation process. 
- + This class provides several static methods that wrap the DebugProbeAggregator methods with a simple command-line user interface, or provide a single method that performs a common access pattern. """ - + @staticmethod def get_sessions_for_all_connected_probes(blocking=True, unique_id=None, options=None, **kwargs): """! @brief Return a list of Session objects for all connected debug probes. - + This method is useful for listing detailed information about connected probes, especially those that have associated boards, as the Session object will have a Board instance. - + The returned list of sessions is sorted by the combination of the debug probe's description and unique ID. - + @param blocking Specifies whether to wait for a probe to be connected if there are no available probes. @param unique_id String to match against probes' unique IDs using a contains match. If the default of None is passed, then all available probes are matched. @param options Dictionary of session options. @param kwargs Session options passed as keyword arguments. - + @return A list of Session objects. The returned Session objects are not yet active, in that open() has not yet been called. If _blocking_ is True, the list will contain at least one session. If _blocking_ is False and there are no probes connected then an empty list @@ -62,17 +62,17 @@ def get_sessions_for_all_connected_probes(blocking=True, unique_id=None, options @staticmethod def get_all_connected_probes(blocking=True, unique_id=None, print_wait_message=True): """! @brief Return a list of DebugProbe objects for all connected debug probes. - + The returned list of debug probes is always sorted by the combination of the probe's description and unique ID. - + @param blocking Specifies whether to wait for a probe to be connected if there are no available probes. A message will be printed @param unique_id String to match against probes' unique IDs using a contains match. 
If the default of None is passed, then all available probes are matched. @param print_wait_message Whether to print a message to the command line when waiting for a probe to be connected and _blocking_ is True. - + @return A list of DebugProbe instances. If _blocking_ is True, the list will contain at least one probe. If _blocking_ is False and there are no probes connected then an empty list will be returned. @@ -101,8 +101,8 @@ def get_all_connected_probes(blocking=True, unique_id=None, print_wait_message=T @staticmethod def list_connected_probes(): - """! @brief List the connected debug probes. - + """! @brief List the connected debug probes. + Prints a list of all connected probes to stdout. If no probes are connected, a message saying as much is printed instead. """ @@ -116,22 +116,22 @@ def list_connected_probes(): @staticmethod def choose_probe(blocking=True, return_first=False, unique_id=None): """! @brief Return a debug probe possibly chosen by the user. - + This method provides an easy to use command line interface for selecting one of the connected debug probes. It has parameters that control filtering of probes by unique ID and automatic selection of the first discovered probe. - + If, after application of the _unique_id_ and _return_first_ parameters, there are still multiple debug probes to choose from, the user is presented with a simple command-line UI to select a probe (or abort the selection process). - + @param blocking Specifies whether to wait for a probe to be connected if there are no available probes. @param return_first If more than one probe is connected, a _return_first_ of True will select the first discovered probe rather than present a selection choice to the user. @param unique_id String to match against probes' unique IDs using a contains match. If the default of None is passed, then all available probes are matched. - + @return Either None or a DebugProbe instance. """ # Get all matching probes, sorted by name. 
@@ -191,17 +191,17 @@ def choose_probe(blocking=True, return_first=False, unique_id=None): def session_with_chosen_probe(blocking=True, return_first=False, unique_id=None, auto_open=True, options=None, **kwargs): """! @brief Create a session with a probe possibly chosen by the user. - + This method provides an easy to use command line interface for selecting one of the connected debug probes, then creating and opening a Session instance. It has several parameters that control filtering of probes by unique ID and automatic selection of the first discovered probe. In addition, you can pass session options to the Session either with the _options_ parameter or directly as keyword arguments. - + If, after application of the _unique_id_ and _return_first_ parameter, there are still multiple debug probes to choose from, the user is presented with a simple command-line UI to select a probe (or abort the selection process). - + Most commonly, this method will be used directly in a **with** statement: @code with ConnectHelper.session_with_chosen_probe() as session: @@ -216,7 +216,7 @@ def session_with_chosen_probe(blocking=True, return_first=False, unique_id=None, with session: # the session is open and ready for use @endcode - + @param blocking Specifies whether to wait for a probe to be connected if there are no available probes. @param return_first If more than one probe is connected, a _return_first_ of True will select @@ -227,7 +227,7 @@ def session_with_chosen_probe(blocking=True, return_first=False, unique_id=None, context manager. @param options Dictionary of session options. @param kwargs Session options passed as keyword arguments. - + @return Either None or a Session instance. """ # Choose a probe. 
diff --git a/pyocd/core/memory_interface.py b/pyocd/core/memory_interface.py index 0bd655b77..f5251cf27 100644 --- a/pyocd/core/memory_interface.py +++ b/pyocd/core/memory_interface.py @@ -21,13 +21,13 @@ class MemoryInterface(object): def write_memory(self, addr, data, transfer_size=32): """! @brief Write a single memory location. - + By default the transfer size is a word.""" raise NotImplementedError() - + def read_memory(self, addr, transfer_size=32, now=True): """! @brief Read a memory location. - + By default, a word will be read.""" raise NotImplementedError() @@ -38,11 +38,11 @@ def write_memory_block32(self, addr, data): def read_memory_block32(self, addr, size): """! @brief Read an aligned block of 32-bit words.""" raise NotImplementedError() - + def write64(self, addr, value): """! @brief Shorthand to write a 64-bit word.""" self.write_memory(addr, value, 64) - + def write32(self, addr, value): """! @brief Shorthand to write a 32-bit word.""" self.write_memory(addr, value, 32) diff --git a/pyocd/core/memory_map.py b/pyocd/core/memory_map.py index c8c53c82f..dbbcfb564 100644 --- a/pyocd/core/memory_map.py +++ b/pyocd/core/memory_map.py @@ -45,7 +45,7 @@ def check_range(start, end=None, length=None, range=None): @total_ordering class MemoryRangeBase(object): """! @brief Base class for a range of memory. - + This base class provides the basic address range support and methods to test for containment or intersection with another range. 
""" @@ -87,13 +87,13 @@ def intersects_range(self, start, end=None, length=None, range=None): start, end = check_range(start, end, length, range) return (start <= self.start and end >= self.start) or (start <= self.end and end >= self.end) \ or (start >= self.start and end <= self.end) - + def __hash__(self): return hash("%08x%08x%08x" % (self.start, self.end, self.length)) - + def __eq__(self, other): return self.start == other.start and self.length == other.length - + def __lt__(self, other): return self.start < other.start or (self.start == other.start and self.length == other.length) @@ -106,13 +106,13 @@ def __init__(self, start=0, end=0, length=None, region=None): @property def region(self): return self._region - + def __hash__(self): h = super(MemoryRange, self).__hash__() if self.region is not None: h ^= hash(self.region) return h - + def __eq__(self, other): return self.start == other.start and self.length == other.length and self.region == other.region @@ -122,9 +122,9 @@ def __repr__(self): class MemoryRegion(MemoryRangeBase): """! @brief One contiguous range of memory. - + Memory regions have attributes accessible via the normal dot syntax. - + - `name`: Name of the region, which defaults to the region type in lowercase. - `access`: Composition of r, w, x, s. - `alias`: If set, this is the name of another region that of which this region is an alias. @@ -141,7 +141,7 @@ class MemoryRegion(MemoryRangeBase): as memory-mapped OTP or configuration flash. - `is_testable`: Whether pyOCD should consider the region in its functional tests. - `is_external`: If true, the region is backed by an external memory device such as SDRAM or QSPI. - + Several attributes are available whose values are computed from other attributes. These should not be set when creating the region. - `is_ram` @@ -154,7 +154,7 @@ class MemoryRegion(MemoryRangeBase): - `is_secure` - `is_nonsecure` """ - + ## Default attribute values for all memory region types. 
DEFAULT_ATTRS = { 'name': lambda r: r.type.name.lower(), @@ -166,7 +166,7 @@ class MemoryRegion(MemoryRangeBase): 'is_cacheable': True, 'invalidate_cache_on_run': True, 'is_testable': True, - 'is_external': False, + 'is_external': False, 'is_ram': lambda r: r.type == MemoryType.RAM, 'is_rom': lambda r: r.type == MemoryType.ROM, 'is_flash': lambda r: r.type == MemoryType.FLASH, @@ -177,12 +177,12 @@ class MemoryRegion(MemoryRangeBase): 'is_secure': lambda r: 's' in r.access, 'is_nonsecure': lambda r: not r.is_secure, } - + def __init__(self, type=MemoryType.OTHER, start=0, end=0, length=None, **attrs): """! Memory region constructor. - + Memory regions are required to have non-zero lengths, unlike memory ranges. - + Some common optional region attributes passed as keyword arguments: - name: If a name is not provided, the name is set to the region type in lowercase. - access: composition of r, w, x, s @@ -197,7 +197,7 @@ def __init__(self, type=MemoryType.OTHER, start=0, end=0, length=None, **attrs): self._map = None self._type = type self._attributes = attrs - + # Assign default values to any attributes missing from kw args. for k, v in self.DEFAULT_ATTRS.items(): if k not in self._attributes: @@ -210,15 +210,15 @@ def map(self): @map.setter def map(self, the_map): self._map = the_map - + @property def type(self): return self._type - + @property def attributes(self): return self._attributes - + @property def alias(self): # Resolve alias reference. @@ -231,7 +231,7 @@ def alias(self): return referent else: return alias_value - + def __getattr__(self, name): try: v = self._attributes[name] @@ -245,7 +245,7 @@ def __getattr__(self, name): def _get_attributes_for_clone(self): """@brief Return a dict containing all the attributes of this region. - + This method must be overridden by subclasses to include in the returned dict any instance attributes not present in the `_attributes` attribute. 
""" @@ -255,7 +255,7 @@ def clone_with_changes(self, **modified_attrs): """@brief Create a duplicate this region with some of its attributes modified.""" new_attrs = self._get_attributes_for_clone() new_attrs.update(modified_attrs) - + return self.__class__(**new_attrs) def __copy__(self): @@ -263,7 +263,7 @@ def __copy__(self): # Need to redefine __hash__ since we redefine __eq__. __hash__ = MemoryRangeBase.__hash__ - + def __eq__(self, other): # Include type and attributes in equality comparison. return self.start == other.start and self.length == other.length \ @@ -299,7 +299,7 @@ class DefaultFlashWeights: class FlashRegion(MemoryRegion): """! @brief Contiguous region of flash memory. - + Flash regions have a number of attributes in addition to those available in all region types. - `blocksize`: Erase sector size in bytes. - `sector_size`: Alias for `blocksize`. @@ -317,7 +317,7 @@ class FlashRegion(MemoryRegion): - `flash`: After connection, this attribute holds the instance of `flash_class` for this region. - `are_erased_sectors_readable`: Specifies whether the flash controller allows reads of erased sectors, or will fault such reads. Default is True. - + `sector_size` and `blocksize` are aliases of each other. If one is set via the constructor, the other will have the same value. """ @@ -347,13 +347,13 @@ def __init__(self, start=0, end=0, length=None, **attrs): self._algo = attrs.get('algo', None) self._flm = attrs.get('flm', None) self._flash = None - + if ('flash_class' in attrs) and (attrs['flash_class'] is not None): self._flash_class = attrs['flash_class'] assert issubclass(self._flash_class, Flash) else: self._flash_class = Flash - + # Remove writable region attributes from attributes dict so there is only one copy. 
try: del self._attributes['algo'] @@ -367,39 +367,39 @@ def __init__(self, start=0, end=0, length=None, **attrs): del self._attributes['flm'] except KeyError: pass - + @property def algo(self): return self._algo - + @algo.setter def algo(self, flash_algo): self._algo = flash_algo - + @property def flm(self): return self._flm - + @flm.setter def flm(self, flm_path): self._flm = flm_path - + @property def flash_class(self): return self._flash_class - + @flash_class.setter def flash_class(self, klass): self._flash_class = klass - + @property def flash(self): return self._flash - + @flash.setter def flash(self, flash_instance): self._flash = flash_instance - + def is_data_erased(self, d): """! @brief Helper method to check if a block of data is erased. @param self @@ -425,7 +425,7 @@ def _get_attributes_for_clone(self): # Need to redefine __hash__ since we redefine __eq__. __hash__ = MemoryRegion.__hash__ - + def __eq__(self, other): # Include flash algo, class, and flm in equality test. return super(FlashRegion, self).__eq__(other) and self.algo == other.algo and \ @@ -451,7 +451,7 @@ def __init__(self, start=0, end=0, length=None, **attrs): attrs['type'] = MemoryType.DEVICE super(DeviceRegion, self).__init__(start=start, end=end, length=length, **attrs) -## @brief Map from memory type to class. +## @brief Map from memory type to class. MEMORY_TYPE_CLASS_MAP = { MemoryType.OTHER: MemoryRegion, MemoryType.RAM: RamRegion, @@ -462,10 +462,10 @@ def __init__(self, start=0, end=0, length=None, **attrs): class MemoryMap(collections.abc.Sequence): """! @brief Memory map consisting of memory regions. - + The normal way to create a memory map is to instantiate regions directly in the call to the constructor. 
- + @code map = MemoryMap( FlashRegion( start=0, @@ -473,25 +473,25 @@ class MemoryMap(collections.abc.Sequence): blocksize=0x400, is_boot_memory=True, algo=FLASH_ALGO), - + RamRegion( start=0x10000000, length=0x1000) ) @endcode - + The memory map can also be modified by adding and removing regions at runtime. Regardless of the order regions are added, the list of regions contained in the memory map is always maintained sorted by start address. MemoryMap objects implement the collections.abc.Sequence interface. """ - + def __init__(self, *more_regions): """! @brief Constructor. - + All parameters passed to the constructor are assumed to be MemoryRegion instances, and are passed to add_regions(). The resulting memory map is sorted by region start address. - + @param self @param more_regions Zero or more MemoryRegion objects passed as separate parameters. """ @@ -501,7 +501,7 @@ def __init__(self, *more_regions): @property def regions(self): """! @brief List of all memory regions. - + Regions in the returned list are sorted by start address. """ return self._regions @@ -513,7 +513,7 @@ def region_count(self): def clone(self): """! @brief Create a duplicate of the memory map. - + The duplicate memory map contains shallow copies of each of the regions. This is intended to be used so that `Target` objects in different but simultaneously live sessions have independant copies of the target's memory map. @@ -522,13 +522,13 @@ def clone(self): def add_regions(self, *more_regions): """! @brief Add multiple regions to the memory map. - + There are two options for passing the list of regions to be added. The first is to pass each region as a separate parameter, similar to how the constructor is intended to be used. The second option is to pass either a list or tuple of regions. - + The region list is kept sorted. If no regions are provided, the call is a no-op. 
- + @param self @param more_regions Either a single tuple or list, or one or more MemoryRegion objects passed as separate parameters. @@ -538,15 +538,15 @@ def add_regions(self, *more_regions): regionsToAdd = more_regions[0] else: regionsToAdd = more_regions - + for newRegion in regionsToAdd: self.add_region(newRegion) def add_region(self, new_region): """! @brief Add one new region to the map. - + The region list is resorted after adding the provided region. - + @param self @param new_region An instance of MemoryRegion to add. A new instance that is a copy of this argument may be added to the memory map in order to guarantee unique region names. @@ -555,11 +555,11 @@ def add_region(self, new_region): existing_names = [r.name for r in self._regions if r] if new_region.name and (new_region.name in existing_names): new_region = new_region.clone_with_changes(name=uniquify_name(new_region.name, existing_names)) - + new_region.map = self self._regions.append(new_region) self._regions.sort() - + def remove_region(self, region): """! @brief Removes a memory region from the map. @param self @@ -572,7 +572,7 @@ def remove_region(self, region): def get_boot_memory(self): """! @brief Returns the first region marked as boot memory. - + @param self @return MemoryRegion or None. """ @@ -583,7 +583,7 @@ def get_boot_memory(self): def get_region_for_address(self, address): """! @brief Returns the first region containing the given address. - + @param self @param address An integer target address. @return MemoryRegion or None. @@ -595,7 +595,7 @@ def get_region_for_address(self, address): def is_valid_address(self, address): """! @brief Determines whether an address is contained by any region. - + @param self @param address An integer target address. @return Boolean indicating whether the address was contained by a region. @@ -604,7 +604,7 @@ def is_valid_address(self, address): def get_contained_regions(self, start, end=None, length=None, range=None): """! 
@brief Get all regions fully contained by an address range. - + @param self @param start The start address or a MemoryRange object. @param end Optional end address. @@ -618,7 +618,7 @@ def get_contained_regions(self, start, end=None, length=None, range=None): def get_intersecting_regions(self, start, end=None, length=None, range=None): """! @brief Get all regions intersected by an address range. - + @param self @param start The start address or a MemoryRange object. @param end Optional end address. @@ -629,12 +629,12 @@ def get_intersecting_regions(self, start, end=None, length=None, range=None): """ start, end = check_range(start, end, length, range) return [r for r in self._regions if r.intersects_range(start, end)] - + def iter_matching_regions(self, **kwargs): """! @brief Iterate over regions matching given criteria. - + Useful attributes to match on include 'type', 'name', 'is_default', and others. - + @param self @param kwargs Values for region attributes that must match. """ @@ -651,15 +651,15 @@ def iter_matching_regions(self, **kwargs): mismatch = True if mismatch: continue - + yield r - + def get_first_matching_region(self, **kwargs): """! @brief Get the first region matching a given memory type. - + The region of given type with the lowest start address is returned. If there are no regions with that type, None is returned instead. - + @param self @param type One of the MemoryType enums. @return A MemoryRegion object or None. @@ -667,14 +667,14 @@ def get_first_matching_region(self, **kwargs): for r in self.iter_matching_regions(**kwargs): return r return None - + def get_default_region_of_type(self, type): """! @brief Get the default region of a given memory type. - + If there are multiple regions of the specified type marked as default, then the one with the lowest start address will be returned. None is returned if there are no default regions of the type. - + @param self @param type One of the MemoryType enums. @return A MemoryRegion object or None. 
@@ -687,22 +687,22 @@ def __eq__(self, other): def __iter__(self): """! @brief Enable iteration over the memory map.""" return iter(self._regions) - + def __reversed__(self): """! @brief Reverse iteration over the memory map.""" return reversed(self._regions) - + def __getitem__(self, key): """! @brief Return a region indexed by name or number.""" if isinstance(key, str): return self.get_first_matching_region(name=key) else: return self._regions[key] - + def __len__(self): """! @brief Return the number of regions.""" return len(self._regions) - + def __contains__(self, key): if isinstance(key, int): return self.is_valid_address(key) diff --git a/pyocd/core/options_manager.py b/pyocd/core/options_manager.py index 9b84e6f6b..162f76891 100644 --- a/pyocd/core/options_manager.py +++ b/pyocd/core/options_manager.py @@ -37,12 +37,12 @@ class OptionsManager(Notifier): """! @brief Handles session option management for a session. - + The options manager supports multiple layers of option priority. When an option's value is accessed, the highest priority layer that contains a value for the option is used. This design makes it easy to load options from multiple sources. The default value specified for an option in the OPTIONS_INFO dictionary provides a layer with an infinitely low priority. - + Users can subscribe to notifications for changes to option values by calling the subscribe() method. The notification events are the option names themselves. The source for notifications is always the options manager instance. The notification data is an instance of OptionChangeInfo @@ -55,10 +55,10 @@ def __init__(self): """ super(OptionsManager, self).__init__() self._layers = [] - + def _update_layers(self, new_options, update_operation): """! @brief Internal method to add a new layer dictionary. - + @param self @param new_options Dictionary of option values. @param update_operation Callable to add the layer. 
Must accept a single parameter, which is @@ -74,23 +74,23 @@ def _update_layers(self, new_options, update_operation): def add_front(self, new_options): """! @brief Add a new highest priority layer of option values. - + @param self @param new_options Dictionary of option values. """ self._update_layers(new_options, partial(self._layers.insert, 0)) - + def add_back(self, new_options): """! @brief Add a new lowest priority layer of option values. - + @param self @param new_options Dictionary of option values. """ self._update_layers(new_options, self._layers.append) - + def _convert_options(self, new_options): """! @brief Prepare a dictionary of session options for use by the manager. - + 1. Strip dictionary entries with a value of None. 2. Replace double-underscores ("__") with a dot ("."). 3. Convert option names to all-lowercase. @@ -106,7 +106,7 @@ def _convert_options(self, new_options): def is_set(self, key): """! @brief Return whether a value is set for the specified option. - + This method returns True as long as any layer has a value set for the option, even if the value is the same as the default value. If the option is not set in any layer, then False is returned regardless of whether the default value is None. @@ -129,18 +129,18 @@ def get(self, key): if key in layer: return layer[key] return self.get_default(key) - + def set(self, key, value): """! @brief Set an option in the current highest priority layer.""" self.update({key: value}) - + def update(self, new_options): """! @brief Set multiple options in the current highest priority layer.""" filtered_options = self._convert_options(new_options) previous_values = {name: self.get(name) for name in filtered_options.keys()} self._layers[0].update(filtered_options) self._notify_changes(previous_values, filtered_options) - + def _notify_changes(self, previous, options): """! 
@brief Send notifications that the specified options have changed.""" for name, new_value in options.items(): @@ -151,11 +151,11 @@ def _notify_changes(self, previous, options): def __contains__(self, key): """! @brief Returns whether the named option has a non-default value.""" return self.is_set(key) - + def __getitem__(self, key): """! @brief Return the highest priority value for the option, or its default.""" return self.get(key) - + def __setitem__(self, key, value): """! @brief Set an option in the current highest priority layer.""" self.set(key, value) diff --git a/pyocd/core/plugin.py b/pyocd/core/plugin.py index 7a9b993c8..5ec856684 100644 --- a/pyocd/core/plugin.py +++ b/pyocd/core/plugin.py @@ -24,50 +24,50 @@ class Plugin(object): """! @brief Class that describes a plugin for pyOCD. - + Each plugin vends a subclass of Plugin that describes itself and provides meta-actions. - + An instance is created and queried for whether the plugin can be loaded by calling should_load(). If this method returns True, then load() is called. The default implementation will always load, and does nothing when loaded. """ - + def should_load(self): """! @brief Whether the plugin should be loaded.""" return True - + def load(self): """! @brief Load the plugin and return the plugin implementation. - + This method can perform any actions required to load the plugin beyond simply returning the implementation. - + @return An object appropriate for the plugin type, which normally would be a class object. """ pass - + @property def options(self): """! @brief A list of options added by the plugin. @return List of @ref pyocd.core.options.OptionInfo "OptionInfo" objects. """ return [] - + @property def version(self): """! @brief Current version of the plugin. - + The default implementation returns pyOCD's version. - + @return String with the plugin's version, such as '2.13.4'. """ return pyocd_version - + @property def name(self): """! 
@brief Name of the plugin.""" raise NotImplementedError() - + @property def description(self): """! @brief Short description of the plugin.""" @@ -75,10 +75,10 @@ def description(self): def load_plugin_classes_of_type(plugin_group, plugin_dict, base_class): """! @brief Helper method to load plugins. - + Plugins are expected to return an implementation class from their Plugin.load() method. This class must be derived from `base_class`. - + @param plugin_group String of the plugin group, e.g. 'pyocd.probe'. @param plugin_dict Dictionary to fill with loaded plugin classes. @param base_class The required superclass for plugin implementation classes. @@ -90,7 +90,7 @@ class must be derived from `base_class`. LOG.warning("Plugin '%s' of type '%s' has an invalid plugin object", entry_point.name, plugin_group) continue - + # Ask the plugin whether it should be loaded. if plugin.should_load(): # Load the plugin and stuff the implementation class it gives @@ -100,7 +100,7 @@ class must be derived from `base_class`. plugin.name, plugin_group) continue plugin_dict[plugin.name] = impl_class - + # Add any plugin options. add_option_set(plugin.options) - + diff --git a/pyocd/core/session.py b/pyocd/core/session.py index d4f4e9d4f..c9e4531e6 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -45,33 +45,33 @@ class Session(Notifier): """! @brief Top-level object for a debug session. - + This class represents a debug session with a single debug probe. It is the root of the object graph, where it owns the debug probe and the board objects. - + Another important function of this class is that it contains a dictionary of session-scope options. These would normally be passed in from the command line. Options can also be loaded from a config file. Precedence for session options: - + 1. Keyword arguments to constructor. 2. _options_ parameter to constructor. 3. Probe-specific options from a config file. 4. General options from a config file. 5. 
_option_defaults_ parameter to constructor. - + The session also tracks several other objects: - @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances created for any cores. - @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer". - The user script proxy. - + See the @ref pyocd.core.helpers.ConnectHelper "ConnectHelper" class for several methods that make it easy to create new sessions, with or without user interaction in the case of multiple - available debug probes. A common pattern is to combine @ref + available debug probes. A common pattern is to combine @ref pyocd.core.helpers.ConnectHelper.session_with_chosen_probe() "ConnectHelper.session_with_chosen_probe()" and a **with** block. - + A Session instance can be used as a context manager. The session will, by default, be automatically opened when the context is entered. And, of course, it will be closed when the **with** block is exited (which is harmless if the session was never opened). If you wish to @@ -79,18 +79,18 @@ class Session(Notifier): exception is raised while opening a session inside a **with** statement, the session will be closed for you to undo any partial initialisation. """ - + ## @brief Weak reference to the most recently created session. _current_session = None - + @classmethod def get_current(cls): """! @brief Return the most recently created Session instance or a default Session. - + By default this method will return the most recently created Session object that is still alive. If no live session exists, a new default session will be created and returned. That at least provides access to the user's config file(s). - + Used primarily so code that doesn't have a session reference can access session options. This method should only be used to access options that are unlikely to differ between sessions, or for debug or other purposes. 
@@ -102,18 +102,18 @@ def get_current(cls): def __init__(self, probe, auto_open=True, options=None, option_defaults=None, **kwargs): """! @brief Session constructor. - + Creates a new session using the provided debug probe. Session options are merged from the _options_ parameter and any keyword arguments. Normally a board instance is created that can either be a generic board or a board associated with the debug probe. - + Note that the 'project_dir' and 'config' options must be set in either keyword arguments or the _options_ parameter. - + Passing in a _probe_ that is None is allowed. This is useful to create a session that operates only as a container for session options. In this case, the board instance is not created, so the #board attribute will be None. Such a Session cannot be opened. - + @param self @param probe The @ref pyocd.probe.debug_probe. "DebugProbe" instance. May be None. @param auto_open Whether to automatically open the session when used as a context manager. @@ -124,9 +124,9 @@ def __init__(self, probe, auto_open=True, options=None, option_defaults=None, ** @param kwargs Session options passed as keyword arguments. """ super(Session, self).__init__() - + Session._current_session = weakref.ref(self) - + self._probe = probe self._closed = True self._inited = False @@ -137,22 +137,22 @@ def __init__(self, probe, auto_open=True, options=None, option_defaults=None, ** self._options = OptionsManager() self._gdbservers = {} self._probeserver = None - + # Set this session on the probe, if we were given a probe. if probe is not None: probe.session = self - + # Update options. self._options.add_front(kwargs) self._options.add_back(options) - + # Init project directory. 
if self.options.get('project_dir') is None: self._project_dir = os.environ.get('PYOCD_PROJECT_DIR') or os.getcwd() else: self._project_dir = os.path.abspath(os.path.expanduser(self.options.get('project_dir'))) LOG.debug("Project directory: %s", self.project_dir) - + # Apply common configuration settings from the config file. config = self._get_config() probesConfig = config.pop('probes', None) @@ -164,29 +164,29 @@ def __init__(self, probe, auto_open=True, options=None, option_defaults=None, ** if str(uid).lower() in probe.unique_id.lower(): LOG.info("Using config settings for probe %s" % (probe.unique_id)) self._options.add_back(settings) - + # Merge in lowest priority options. self._options.add_back(option_defaults) - + # Logging config. self._configure_logging() - + # Bail early if we weren't provided a probe. if probe is None: self._board = None return - + # Load the user script. self._load_user_script() - + # Ask the probe if it has an associated board, and if not then we create a generic one. self._board = probe.create_associated_board() or Board(self) - + def _get_config(self): # Load config file if one was provided via options, and no_config option was not set. if not self.options.get('no_config'): configPath = self.find_user_file('config_file', _CONFIG_FILE_NAMES) - + if configPath is not None: try: with open(configPath, 'r') as configFile: @@ -202,12 +202,12 @@ def _get_config(self): return config except IOError as err: LOG.warning("Error attempting to access config file '%s': %s", configPath, err) - + return {} - + def find_user_file(self, option_name, filename_list): """! @brief Search the project directory for a file. - + @retval None No matching file was found. @retval string An absolute path to the requested file. """ @@ -215,7 +215,7 @@ def find_user_file(self, option_name, filename_list): filePath = self.options.get(option_name) else: filePath = None - + # Look for default filenames if a path wasn't provided. 
if filePath is None: for filename in filename_list: @@ -229,18 +229,18 @@ def find_user_file(self, option_name, filename_list): filePath = os.path.expanduser(filePath) if not os.path.isabs(filePath): filePath = os.path.join(self.project_dir, filePath) - + return filePath - + def _configure_logging(self): """! @brief Load a logging config dict or file.""" # Get logging config that could have been loaded from the config file. config = self.options.get('logging') - + # Allow logging setting to refer to another file. if isinstance(config, str): loggingConfigPath = self.find_user_file(None, [config]) - + if loggingConfigPath is not None: try: with open(loggingConfigPath, 'r') as configFile: @@ -260,75 +260,75 @@ def _configure_logging(self): # Remove an empty 'loggers' key. if ('loggers' in config) and (config['loggers'] is None): del config['loggers'] - + try: logging.config.dictConfig(config) except (ValueError, TypeError, AttributeError, ImportError) as err: LOG.warning("Error applying logging configuration: %s", err) - + @property def is_open(self): """! @brief Boolean of whether the session has been opened.""" return self._inited and not self._closed - + @property def probe(self): """! @brief The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" instance.""" return self._probe - + @property def board(self): """! @brief The @ref pyocd.board.board.Board "Board" object.""" return self._board - + @property def target(self): """! @brief The @ref pyocd.core.target.soc_target "SoCTarget" object representing the SoC. - + This is the @ref pyocd.core.target.soc_target "SoCTarget" instance owned by the board. """ return self.board.target - + @property def options(self): """! @brief The @ref pyocd.core.options_manager.OptionsManager "OptionsManager" object.""" return self._options - + @property def project_dir(self): """! @brief Path to the project directory.""" return self._project_dir - + @property def delegate(self): """! 
@brief An optional delegate object for customizing behaviour.""" return self._delegate - + @delegate.setter def delegate(self, new_delegate): """! @brief Setter for the `delegate` property.""" self._delegate = new_delegate - + @property def user_script_proxy(self): """! @brief The UserScriptDelegateProxy object for a loaded user script.""" return self._user_script_proxy - + @property def gdbservers(self): """! @brief Dictionary of core numbers to @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances.""" return self._gdbservers - + @property def probeserver(self): """! @brief A @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer" instance.""" return self._probeserver - + @probeserver.setter def probeserver(self, server): """! @brief Setter for the `probeserver` property.""" self._probeserver = server - + @property def log_tracebacks(self): """! @brief Quick access to debug.traceback option since it is widely used.""" @@ -347,10 +347,10 @@ def __enter__(self): def __exit__(self, type, value, traceback): self.close() return False - + def _init_user_script_namespace(self, user_script_path): """! @brief Create the namespace dict used for user scripts. - + This initial namespace has only those objects that are available very early in the session init process. For instance, the Target instance isn't available yet. The _update_user_script_namespace() method is used to add such objects to the namespace @@ -392,7 +392,7 @@ def _init_user_script_namespace(self, user_script_path): 'options': self.options, 'LOG': logging.getLogger('pyocd.user_script'), } - + def _update_user_script_namespace(self): """! 
@brief Add objects available only after init to the user script namespace.""" if self._user_script_namespace is not None: @@ -403,7 +403,7 @@ def _update_user_script_namespace(self): 'dp': self.target.dp, 'aps': self.target.aps, }) - + def _load_user_script(self): scriptPath = self.find_user_file('user_script', _USER_SCRIPT_NAMES) @@ -413,16 +413,16 @@ def _load_user_script(self): with open(scriptPath, 'r') as scriptFile: LOG.debug("Loading user script: %s", scriptPath) scriptSource = scriptFile.read() - + self._init_user_script_namespace(scriptPath) - + scriptCode = compile(scriptSource, scriptPath, 'exec') # Executing the code will create definitions in the namespace for any # functions or classes. A single namespace is shared for both globals and # locals so that script-level definitions are available within the # script functions. exec(scriptCode, self._user_script_namespace, self._user_script_namespace) - + # Create the proxy for the user script. It becomes the delegate unless # another delegate was already set. self._user_script_proxy = UserScriptDelegateProxy(self._user_script_namespace) @@ -433,13 +433,13 @@ def _load_user_script(self): def open(self, init_board=True): """! @brief Open the session. - + This method does everything necessary to begin a debug session. It first loads the user script, if there is one. The user script will be available via the _user_script_proxy_ property. Then it opens the debug probe and sets the clock rate from the `frequency` user option. Finally, it inits the board (which will init the target, which performs the full target init sequence). - + @param self @param init_board This parameter lets you prevent the board from being inited, which can be useful in board bringup situations. It's also used by pyocd commander's "no init" @@ -448,10 +448,10 @@ def open(self, init_board=True): if not self._inited: assert self._probe is not None, "Cannot open a session without a probe." 
assert self._board is not None, "Must have a board to open a session." - + # Add in the full set of objects for the user script. self._update_user_script_namespace() - + self._probe.open() self._closed = False self._probe.set_clock(self.options.get('frequency')) @@ -461,7 +461,7 @@ def open(self, init_board=True): def close(self): """! @brief Close the session. - + Uninits the board and disconnects then closes the probe. """ if self._closed: @@ -475,7 +475,7 @@ def close(self): self._inited = False except exceptions.Error: LOG.error("exception during board uninit:", exc_info=self.log_tracebacks) - + if self._probe.is_open: try: self._probe.disconnect() @@ -488,14 +488,14 @@ def close(self): class UserScriptFunctionProxy(object): """! @brief Proxy for user script functions. - - This proxy makes arguments to user script functions optional. + + This proxy makes arguments to user script functions optional. """ def __init__(self, fn): self._fn = fn self._spec = getfullargspec(fn) - + def __call__(self, **kwargs): args = {} for arg in self._spec.args: @@ -509,7 +509,7 @@ class UserScriptDelegateProxy(object): def __init__(self, script_namespace): super(UserScriptDelegateProxy, self).__init__() self._script = script_namespace - + def __getattr__(self, name): if name in self._script: fn = self._script[name] diff --git a/pyocd/core/soc_target.py b/pyocd/core/soc_target.py index 783e70d7b..b61bd4d42 100644 --- a/pyocd/core/soc_target.py +++ b/pyocd/core/soc_target.py @@ -28,17 +28,17 @@ class SoCTarget(Target, GraphNode): """! @brief Represents a microcontroller system-on-chip. - + An instance of this class is the root of the chip-level object graph. It has child nodes for the DP and all cores. As a concrete subclass of Target, it provides methods to control the device, access memory, adjust breakpoints, and so on. - + For single core devices, the SoCTarget has mostly equivalent functionality to the Target object for the core. Multicore devices work differently. 
This class tracks a "selected core", to which all actions are directed. The selected core can be changed at any time. You may also directly access specific cores and perform operations on them. """ - + VENDOR = "Generic" def __init__(self, session, memory_map=None): @@ -61,7 +61,7 @@ def selected_core(self): if self._selected_core is None: return None return self.cores[self._selected_core] - + @selected_core.setter def selected_core(self, core_number): if core_number not in self.cores: @@ -84,11 +84,11 @@ def elf(self, filename): if self.session.options['cache.read_code_from_elf']: self.cores[core_number].set_target_context( ElfReaderContext(self.cores[core_number].get_target_context(), self._elf)) - + @property def supported_security_states(self): return self.selected_core.supported_security_states - + @property def core_registers(self): return self.selected_core.core_registers @@ -98,28 +98,28 @@ def add_core(self, core): core.set_target_context(CachingDebugContext(core)) self.cores[core.core_number] = core self.add_child(core) - + if self._selected_core is None: self._selected_core = core.core_number def create_init_sequence(self): # Return an empty call sequence. The subclass must override this. return CallSequence() - + def init(self): # If we don't have a delegate installed yet but there is a session delegate, use it. if (self.delegate is None) and (self.session.delegate is not None): self.delegate = self.session.delegate - + # Create and execute the init sequence. seq = self.create_init_sequence() self.call_delegate('will_init_target', target=self, init_sequence=seq) seq.invoke() self.call_delegate('did_init_target', target=self) - + def post_connect_hook(self): """! @brief Hook function called after post_connect init task. - + This hook lets the target subclass configure the target as necessary. 
""" pass @@ -218,7 +218,7 @@ def reset_and_halt(self, reset_type=None): def get_state(self): return self.selected_core.get_state() - + def get_security_state(self): return self.selected_core.get_security_state() @@ -235,11 +235,11 @@ def get_target_context(self, core=None): if core is None: core = self._selected_core return self.cores[core].get_target_context() - + def trace_start(self): self.call_delegate('trace_start', target=self, mode=0) - + def trace_stop(self): self.call_delegate('trace_stop', target=self, mode=0) - - + + diff --git a/pyocd/core/target.py b/pyocd/core/target.py index 92436213e..98710449c 100644 --- a/pyocd/core/target.py +++ b/pyocd/core/target.py @@ -34,7 +34,7 @@ class State(Enum): SLEEPING = 4 ## Core is locked up. LOCKUP = 5 - + class SecurityState(Enum): """! @brief Security states for a processor with the Security extension.""" ## PE is in the Non-secure state. @@ -81,7 +81,7 @@ class WatchpointType(Enum): class VectorCatch: """! Vector catch option masks. - + These constants can be OR'd together to form any combination of vector catch settings. """ ## Disable vector catch. @@ -145,7 +145,7 @@ class Event(Enum): class RunType(Enum): """! Run type for run notifications. - + An enum of this type is set as the data attribute on PRE_RUN and POST_RUN notifications. """ ## Target is being resumed. @@ -155,7 +155,7 @@ class RunType(Enum): class HaltReason(Enum): """! Halt type for halt notifications. - + An value of this type is returned from Target.get_halt_reason(). It is also used as the data attribute on PRE_HALT and POST_HALT notifications. 
""" @@ -186,18 +186,18 @@ def __init__(self, session, memory_map=None): @property def session(self): return self._session - + @property def delegate(self): return self._delegate - + @delegate.setter def delegate(self, the_delegate): self._delegate = the_delegate - + def delegate_implements(self, method_name): return (self._delegate is not None) and (hasattr(self._delegate, method_name)) - + def call_delegate(self, method_name, *args, **kwargs): if self.delegate_implements(method_name): return getattr(self._delegate, method_name)(*args, **kwargs) @@ -208,11 +208,11 @@ def call_delegate(self, method_name, *args, **kwargs): @property def svd_device(self): return self._svd_device - + @property def supported_security_states(self): raise NotImplementedError() - + @property def core_registers(self): raise NotImplementedError() @@ -288,7 +288,7 @@ def reset_and_halt(self, reset_type=None): def get_state(self): raise NotImplementedError() - + def get_security_state(self): raise NotImplementedError() diff --git a/pyocd/core/target_delegate.py b/pyocd/core/target_delegate.py index 04bfdafbc..64063319d 100644 --- a/pyocd/core/target_delegate.py +++ b/pyocd/core/target_delegate.py @@ -16,14 +16,14 @@ class TargetDelegateInterface(object): """! @brief Abstract class defining the delegate interface for targets. - + Note that delegates don't actually have to derive from this class due to Python's dynamic method dispatching. """ def __init__(self, session): self._session = session - + def will_connect(self, board): """! @brief Pre-init hook for the board. @param self @@ -31,7 +31,7 @@ def will_connect(self, board): @return Ignored. """ pass - + def did_connect(self, board): """! @brief Post-initialization hook for the board. @param self @@ -49,7 +49,7 @@ def will_init_target(self, target, init_sequence): @return Ignored. """ pass - + def did_init_target(self, target): """! @brief Post-initialization hook. 
@param self @@ -66,7 +66,7 @@ def will_start_debug_core(self, core): @retval "False or None" Continue with normal behaviour. """ pass - + def did_start_debug_core(self, core): """! @brief Post-initialization hook. @param self @@ -83,7 +83,7 @@ def will_stop_debug_core(self, core): @retval "False or None" Continue with normal behaviour. """ pass - + def did_stop_debug_core(self, core): """! @brief Post-cleanup hook for the core. @param self @@ -174,5 +174,5 @@ def trace_stop(self, target, mode): @return Ignored. """ pass - + diff --git a/pyocd/coresight/ap.py b/pyocd/coresight/ap.py index 1c0349962..a0d7aae4f 100644 --- a/pyocd/coresight/ap.py +++ b/pyocd/coresight/ap.py @@ -173,119 +173,119 @@ class APVersion(Enum): @total_ordering class APAddressBase(object): """! @brief Base class for AP addresses. - + An instance of this class has a "nominal address", which is an integer address in terms of how it is typically referenced. For instance, for an APv1, the nominal address is the unshifted APSEL, e.g. 0, 1, 2, and so on. This value is accessible by the _nominal_address_ property. It is also used for hashing and ordering. One intentional side effect of this is that APAddress instances match against the integer value of their nominal address, which is particularly useful when they are keys in a dictionary. - + In addition to the nominal address, there is an abstract _address_ property implemented by the version-specific subclasses. This is the value used by the DP hardware and passed to the DebugPort's read_ap() and write_ap() methods. - + The class also indicates which version of AP is targeted: either APv1 or APv2. The _ap_version_ property reports this version number, though it is also encoded by the subclass. The AP version is coupled with the address because the two are intrinsically connected; the version defines the address format. """ - + def __init__(self, address): """! 
@brief Constructor accepting the nominal address.""" self._nominal_address = address - + @property def ap_version(self): """! @brief Version of the AP, as an APVersion enum.""" raise NotImplementedError() - + @property def nominal_address(self): """! @brief Integer AP address in the form in which one speaks about it. - + This value is used for comparisons and hashing.""" return self._nominal_address - + @property def address(self): """! @brief Integer AP address used as a base for register accesses. - + This value can be passed to the DebugPort's read_ap() or write_ap() methods. Offsets of registers can be added to this value to create register addresses.""" raise NotImplementedError() - + @property def idr_address(self): """! @brief Address of the IDR register.""" raise NotImplementedError() - + def __hash__(self): return hash(self.nominal_address) - + def __eq__(self, other): return (self.nominal_address == other.nominal_address) \ if isinstance(other, APAddressBase) else (self.nominal_address == other) - + def __lt__(self, other): return (self.nominal_address < other.nominal_address) \ if isinstance(other, APAddressBase) else (self.nominal_address < other) - + def __repr__(self): return "<{}@{:#x} {}>".format(self.__class__.__name__, id(self), str(self)) class APv1Address(APAddressBase): """! @brief Represents the address for an APv1. - + The nominal address is the 8-bit APSEL value. This is written into the top byte of the DP SELECT register to select the AP to communicate with. """ - + @property def ap_version(self): """! @brief APVersion.APv1.""" return APVersion.APv1 - + @property def apsel(self): """! @brief Alias for the _nominal_address_ property.""" return self._nominal_address - + @property def address(self): return self.apsel << APSEL_SHIFT - + @property def idr_address(self): """! @brief Address of the IDR register.""" return AP_IDR - + def __str__(self): return "#%d" % self.apsel class APv2Address(APAddressBase): """! 
@brief Represents the address for an APv2. - + ADIv6 uses an APB bus to communicate with APv2 instances. The nominal address is simply the base address of the APB slave. The APB bus address width is variable from 12-52 bits in 8-bit steps. This address is written the DP SELECT and possibly SELECT1 (for greater than 32 bit addresses) registers to choose the AP to communicate with. """ - + @property def ap_version(self): """! @brief Returns APVersion.APv2.""" return APVersion.APv2 - + @property def address(self): return self._nominal_address - + @property def idr_address(self): """! @brief Address of the IDR register.""" return APv2_IDR - + def __str__(self): return "@0x%x" % self.address @@ -295,28 +295,28 @@ class AccessPort(object): @staticmethod def probe(dp, ap_num): """! @brief Determine if an AP exists with the given AP number. - + Only applicable for ADIv5. - + @param dp DebugPort instance. @param ap_num The AP number (APSEL) to probe. @return Boolean indicating if a valid AP exists with APSEL=ap_num. """ idr = dp.read_ap((ap_num << APSEL_SHIFT) | AP_IDR) return idr != 0 - + @staticmethod def create(dp, ap_address, cmpid=None): """! @brief Create a new AP object. - + Determines the type of the AP by examining the IDR value and creates a new AP object of the appropriate class. See #AP_TYPE_MAP for the mapping of IDR fields to class. - + @param dp DebugPort instance. @param ap_address An instance of either APv1Address or APv2Address. @return An AccessPort subclass instance. - + @exception TargetError Raised if there is not a valid AP for the ap_num. """ # Attempt to read the IDR for this APSEL. If we get a zero back then there is @@ -325,7 +325,7 @@ def create(dp, ap_address, cmpid=None): idr = dp.read_ap(ap_address.address + ap_address.idr_address) if idr == 0: raise exceptions.TargetError("Invalid AP address (%s)" % ap_address) - + # Extract IDR fields used for lookup. 
designer = (idr & AP_IDR_JEP106_MASK) >> AP_IDR_JEP106_SHIFT apClass = (idr & AP_IDR_CLASS_MASK) >> AP_IDR_CLASS_SHIFT @@ -345,11 +345,11 @@ def create(dp, ap_address, cmpid=None): name = None klass = AccessPort flags = 0 - + ap = klass(dp, ap_address, idr, name, flags, cmpid) ap.init() return ap - + def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): """! @brief AP constructor. @param self @@ -371,11 +371,11 @@ def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): self.core = None self._flags = flags self._cmpid = cmpid - + @property def short_description(self): return self.type_name + str(self.address) - + @property def ap_version(self): """! @brief The AP's major version determined by ADI version. @@ -389,10 +389,10 @@ def init(self): # Read IDR if it wasn't given to us in the ctor. if self.idr is None: self.idr = self.read_reg(self.address.idr_address) - + self.variant = (self.idr & AP_IDR_VARIANT_MASK) >> AP_IDR_VARIANT_SHIFT self.revision = (self.idr & AP_IDR_REVISION_MASK) >> AP_IDR_REVISION_SHIFT - + # Get the type name for this AP. self.ap_class = (self.idr & AP_IDR_CLASS_MASK) >> AP_IDR_CLASS_SHIFT self.ap_type = self.idr & AP_IDR_TYPE_MASK @@ -402,7 +402,7 @@ def init(self): desc = "proprietary" LOG.info("%s IDR = 0x%08x (%s)", self.short_description, self.idr, desc) - + def find_components(self): """! @brief Find CoreSight components attached to this AP.""" pass @@ -414,19 +414,19 @@ def read_reg(self, addr, now=True): @locked def write_reg(self, addr, data): self.dp.write_ap(self.address.address + addr, data) - + def lock(self): """! @brief Lock the AP from access by other threads.""" self.dp.probe.lock() - + def unlock(self): """! @brief Unlock the AP.""" self.dp.probe.unlock() - + @contextmanager def locked(self): """! @brief Context manager for locking the AP using a with statement. 
- + All public methods of AccessPort and its subclasses are automatically locked, so manual locking usually is not necessary unless you need to hold the lock across multiple AP accesses. @@ -434,19 +434,19 @@ def locked(self): self.lock() yield self.unlock() - + def __repr__(self): return "<{}@{:x} {} idr={:08x} rom={:08x}>".format( self.__class__.__name__, id(self), self.short_description, self.idr, self.rom_addr) class MEM_AP(AccessPort, memory_interface.MemoryInterface): """! @brief MEM-AP component. - + This class supports MEM-AP v1 and v2. - + The bits of HPROT have the following meaning. Not all bits are implemented in all MEM-APs. AHB-Lite only implements HPROT[3:0]. - + HPROT[0] = 1 data access, 0 instr fetch
HPROT[1] = 1 priviledge, 0 user
HPROT[2] = 1 bufferable, 0 non bufferable
@@ -454,7 +454,7 @@ class MEM_AP(AccessPort, memory_interface.MemoryInterface): HPROT[4] = 1 lookupincache, 0 no cache
HPROT[5] = 1 allocate in cache, 0 no allocate in cache
HPROT[6] = 1 shareable, 0 non shareable
- + Extensions not supported: - Large Data Extension - Large Physical Address Extension @@ -463,7 +463,7 @@ class MEM_AP(AccessPort, memory_interface.MemoryInterface): def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): super(MEM_AP, self).__init__(dp, ap_address, idr, name, flags, cmpid) - + # Check AP version and set the offset to the control and status registers. if self.ap_version == APVersion.APv1: self._reg_offset = 0 @@ -471,22 +471,22 @@ def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): self._reg_offset = MEM_APv2_CONTROL_REG_OFFSET else: assert False, "Unrecognized AP version %s" % self.ap_version - + self._impl_hprot = 0 self._impl_hnonsec = 0 - + ## Default HPROT value for CSW. self._hprot = HPROT_DATA | HPROT_PRIVILEGED - + ## Default HNONSEC value for CSW. self._hnonsec = SECURE - + ## Base CSW value to use. self._csw = DEFAULT_CSW_VALUE - + ## Cached current CSW value. self._cached_csw = -1 - + ## Supported transfer sizes. self._transfer_sizes = (32,) @@ -496,16 +496,16 @@ def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): # size supported by all targets. A size smaller than the supported size will decrease # performance due to the extra address writes, but will not create any read/write errors. self.auto_increment_page_size = 0x1000 if (self._flags & AP_4K_WRAP) else 0x400 - + ## Number of DAR registers. self._dar_count = 0 - + ## Mask of addresses. This indicates whether 32-bit or 64-bit addresses are supported. self._address_mask = 0xffffffff - + ## Whether the Large Data extension is supported. self._has_large_data = False - + # Ask the probe for an accelerated memory interface for this AP. If it provides one, # then bind our memory interface APIs to its methods. Otherwise use our standard # memory interface based on AP register accesses. 
@@ -521,7 +521,7 @@ def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): self.read_memory = self._read_memory self.write_memory_block32 = self._write_memory_block32 self.read_memory_block32 = self._read_memory_block32 - + # Subscribe to reset events. self.dp.session.subscribe(self._reset_did_occur, (Target.Event.PRE_RESET, Target.Event.POST_RESET)) @@ -529,29 +529,29 @@ def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): def supported_transfer_sizes(self): """! @brief Tuple of transfer sizes supported by this AP.""" return self._transfer_sizes - + @property def is_enabled(self): """! @brief Whether any memory transfers are allowed by this AP. - + Memory transfers may be disabled by an input signal to the AP. This is often done when debug security is enabled on the device, to disallow debugger access to internal memory. """ return self.is_enabled_for(Target.SecurityState.NONSECURE) - + def is_enabled_for(self, security_state): """! @brief Checks whether memory transfers are allowed by this AP for the given security state. - + Memory transfers may be disabled by an input signal to the AP. This is often done when debug security is enabled on the device, to disallow debugger access to internal memory. - + @param self The AP instance. @param security_state One of the @ref pyocd.core.target.Target.SecurityState "SecurityState" enums. @return Boolean indicating whether memory transfers can be performed in the requested security state. You may change the security state used for transfers with the hnonsec property and hnonsec_lock() method. """ assert isinstance(security_state, Target.SecurityState) - + # Call to superclass to read CSW. We want to bypass our CSW cache since the enable signal can change # asynchronously. csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW) @@ -566,10 +566,10 @@ def is_enabled_for(self, security_state): @locked def init(self): """! @brief Initialize the MEM-AP. 
- + This method interrogates the MEM-AP to determine its capabilities, and performs any initial setup that is required. - + It performs these checks: - Check for Long Address extension. - Check for Large Data extension. @@ -584,22 +584,22 @@ def init(self): - (v2 only) Configure the error mode. """ super(MEM_AP, self).init() - + # Read initial CSW. Superclass register access methods are used to avoid the CSW cache. original_csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW) - + def _init_cfg(): """! @brief Read MEM-AP CFG register.""" cfg = self.read_reg(self._reg_offset + MEM_AP_CFG) - + # Check for 64-bit address support. if cfg & MEM_AP_CFG_LA_MASK: self._address_mask = 0xffffffffffffffff - + # Check for Large Data extension. if cfg & MEM_AP_CFG_LD_MASK: self._has_large_data = True - + # Check v2 MEM-AP CFG fields. if self.ap_version == APVersion.APv2: # Set autoinc page size if TARINC is non-zero. Otherwise we've already set the @@ -607,34 +607,34 @@ def _init_cfg(): tarinc = (cfg & MEM_AP_CFG_TARINC_MASK) >> MEM_AP_CFG_TARINC_SHIFT if tarinc != 0: self.auto_increment_page_size = 1 << (9 + tarinc) - + # Determine supported err mode. err = (cfg & MEM_AP_CFG_ERR_MASK) >> MEM_AP_CFG_ERR_SHIFT if err == MEM_AP_CFG_ERR_V1: # Configure the error mode such that errors are passed upstream, but they don't # prevent future transactions. self._csw &= ~(CSW_ERRSTOP | CSW_ERRNPASS) - + # Clear TRR in case we attach to a device with a sticky error already set. self.write_reg(self._reg_offset + MEM_AP_TRR, MEM_AP_TRR_ERR_MASK) - + # Init size of DAR register window. darsize = (cfg & MEM_AP_CFG_DARSIZE_MASK) >> MEM_AP_CFG_DARSIZE_SHIFT self._dar_count = (1 << darsize) // 4 def _init_transfer_sizes(): """! @brief Determine supported transfer sizes. - + If the #AP_ALL_TX_SZ flag is set, then we know a priori that this AP implementation supports 8-, 16- and 32- transfer sizes. If the Large Data extension is implemented, then this flag is ignored. 
- + Note in ADIv6: "If a MEM-AP implementation does not support the Large Data Extension, but does support various access sizes, it must support word, halfword, and byte accesses." So, if the Large Data extension is present, then we have to individually test each transfer size (aside from the required 32-bit). - + If Large Data is not present, then only one non-32-bit transfer size needs to be tested to determine if the AP supports both 8- and 16-bit transfers in addition to the required 32-bit. """ @@ -643,10 +643,10 @@ def _init_transfer_sizes(): if (self._flags & AP_ALL_TX_SZ) and not self._has_large_data: self._transfer_sizes = (8, 16, 32) return - + def _test_transfer_size(sz): """! @brief Utility to verify whether the MEM-AP supports a given transfer size. - + From ADIv6: If the CSW.Size field is written with a value corresponding to a size that is not supported, or with a reserved value: A read of the field returns a value corresponding to a supported @@ -654,45 +654,45 @@ def _test_transfer_size(sz): """ # Write CSW_SIZE to select requested transfer size. AccessPort.write_reg(self, self._reg_offset + MEM_AP_CSW, original_csw & ~CSW_SIZE | sz) - + # Read back CSW and see if SIZE matches what we wrote. csw_cb = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW, now=False) - + return lambda: (csw_cb() & CSW_SIZE) == sz - + # Thus if LD ext is not present, we only need to test one size. if self._has_large_data: # Need to scan all sizes except 32-bit, which is required. SIZES_TO_TEST = (CSW_SIZE8, CSW_SIZE16, CSW_SIZE64, CSW_SIZE128, CSW_SIZE256) - + sz_result_cbs = ((sz, _test_transfer_size(sz)) for sz in SIZES_TO_TEST) self._transfer_sizes = ([32] + [(8 * (1 << sz)) for sz, cb in sz_result_cbs if cb()]) self._transfer_sizes.sort() - + elif _test_transfer_size(CSW_SIZE16)(): self._transfer_sizes = (8, 16, 32) def _init_hprot(): """! @brief Init HPROT HNONSEC. - + Determines the implemented bits of HPROT and HNONSEC in this MEM-AP. 
The defaults for these fields of the CSW are based on the implemented bits. """ default_hprot = (original_csw & CSW_HPROT_MASK) >> CSW_HPROT_SHIFT default_hnonsec = (original_csw & CSW_HNONSEC_MASK) >> CSW_HNONSEC_SHIFT LOG.debug("%s default HPROT=%x HNONSEC=%x", self.short_description, default_hprot, default_hnonsec) - + # Now attempt to see which HPROT and HNONSEC bits are implemented. AccessPort.write_reg(self, self._reg_offset + MEM_AP_CSW, original_csw | CSW_HNONSEC_MASK | CSW_HPROT_MASK) csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW) - + self._impl_hprot = (csw & CSW_HPROT_MASK) >> CSW_HPROT_SHIFT self._impl_hnonsec = (csw & CSW_HNONSEC_MASK) >> CSW_HNONSEC_SHIFT LOG.debug("%s implemented HPROT=%x HNONSEC=%x", self.short_description, self._impl_hprot, self._impl_hnonsec) - + # Update current HPROT and HNONSEC, and the current base CSW value. self.hprot = self._hprot & self._impl_hprot self.hnonsec = self._hnonsec & self._impl_hnonsec @@ -714,13 +714,13 @@ def _init_rom_table_base(): self.rom_addr = base & AP_BASE_BASEADDR_MASK # clear format and present bits else: raise exceptions.TargetError("invalid AP BASE value 0x%08x" % base) - + # Run the init tests. _init_cfg() _init_transfer_sizes() _init_hprot() _init_rom_table_base() - + # Restore unmodified value of CSW. AccessPort.write_reg(self, self._reg_offset + MEM_AP_CSW, original_csw) @@ -731,10 +731,10 @@ def find_components(self): if not self.is_enabled: LOG.warning("Skipping CoreSight discovery for %s because it is disabled", self.short_description) return - + # Import locally to work around circular import. from .rom_table import (CoreSightComponentID, ROMTable) - + # Read the ROM table component IDs. 
cmpid = CoreSightComponentID(None, self, self.rom_addr) cmpid.read_id_registers() @@ -750,7 +750,7 @@ def find_components(self): @property def implemented_hprot_mask(self): return self._impl_hprot - + @property def implemented_hnonsec_mask(self): return self._impl_hnonsec @@ -758,15 +758,15 @@ def implemented_hnonsec_mask(self): @property def hprot(self): return self._hprot - + @hprot.setter @locked def hprot(self, value): """! @brief Setter for current HPROT value used for memory transactions. - + The bits of HPROT have the following meaning. Not all bits are implemented in all MEM-APs. AHB-Lite only implements HPROT[3:0]. - + HPROT[0] = 1 data access, 0 instr fetch
HPROT[1] = 1 priviledge, 0 user
HPROT[2] = 1 bufferable, 0 non bufferable
@@ -776,32 +776,32 @@ def hprot(self, value): HPROT[6] = 1 shareable, 0 non shareable
""" self._hprot = value & (CSW_HPROT_MASK >> CSW_HPROT_SHIFT) - + self._csw = ((self._csw & ~CSW_HPROT_MASK) | (self._hprot << CSW_HPROT_SHIFT)) - + @property def hnonsec(self): return self._hnonsec - + @hnonsec.setter @locked def hnonsec(self, value): """! @brief Setter for current HNONSEC value used for memory transactions. - + Not all MEM-APs support control of HNONSEC. In particular, only the AHB5-AP used for v8-M Cortex-M systems does. The AXI-AP for Cortex-A systems also allows this control. - + @param value 0 is secure, 1 is non-secure. """ self._hnonsec = value & (CSW_HNONSEC_MASK >> CSW_HNONSEC_SHIFT) - + self._csw = ((self._csw & ~CSW_HNONSEC_MASK) | (self._hnonsec << CSW_HNONSEC_SHIFT)) - + class _MemAttrContext(object): """! @brief Context manager for temporarily setting HPROT and/or HNONSEC. - + The AP is locked during the lifetime of the context manager. This means that only the calling thread can perform memory transactions. """ @@ -811,7 +811,7 @@ def __init__(self, ap, hprot=None, hnonsec=None): self._saved_hprot = None self._hnonsec = hnonsec self._saved_hnonsec = None - + def __enter__(self): self._ap.lock() if self._hprot is not None: @@ -821,7 +821,7 @@ def __enter__(self): self._saved_hnonsec = self._ap.hnonsec self._ap.hnonsec = self._hnonsec return self - + def __exit__(self, type, value, traceback): if self._saved_hprot is not None: self._ap.hprot = self._saved_hprot @@ -833,18 +833,18 @@ def __exit__(self, type, value, traceback): def hprot_lock(self, hprot): """! @brief Context manager to temporarily change HPROT.""" return self._MemAttrContext(self, hprot=hprot) - + def hnonsec_lock(self, hnonsec): """! @brief Context manager to temporarily change HNONSEC. - + @see secure_lock(), nonsecure_lock() """ return self._MemAttrContext(self, hnonsec=hnonsec) - + def secure_lock(self): """! @brief Context manager to temporarily set the AP to use secure memory transfers.""" return self.hnonsec_lock(SECURE) - + def nonsecure_lock(self): """! 
@brief Context manager to temporarily set AP to use non-secure memory transfers.""" return self.hnonsec_lock(NONSECURE) @@ -877,11 +877,11 @@ def write_reg(self, addr, data): if ap_regaddr == self._reg_offset + MEM_AP_CSW: self._invalidate_cache() raise - + def _invalidate_cache(self): """! @brief Invalidate cached registers associated with this AP.""" self._cached_csw = -1 - + def _reset_did_occur(self, notification): """! @brief Handles reset notifications to invalidate CSW cache.""" # We clear the cache on all resets just to be safe. @@ -890,9 +890,9 @@ def _reset_did_occur(self, notification): @locked def _write_memory(self, addr, data, transfer_size=32): """! @brief Write a single memory location. - + By default the transfer size is a word - + @exception TransferError Raised if the requested transfer size is not supported by the AP. """ assert (addr & (transfer_size // 8 - 1)) == 0 @@ -914,7 +914,7 @@ def _write_memory(self, addr, data, transfer_size=32): try: self.write_reg(self._reg_offset + MEM_AP_TAR, addr) - + if transfer_size <= 32: self.write_reg(self._reg_offset + MEM_AP_DRW, data) else: @@ -934,9 +934,9 @@ def _write_memory(self, addr, data, transfer_size=32): @locked def _read_memory(self, addr, transfer_size=32, now=True): """! @brief Read a memory location. - + By default, a word will be read. - + @exception TransferError Raised if the requested transfer size is not supported by the AP. """ assert (addr & (transfer_size // 8 - 1)) == 0 @@ -950,7 +950,7 @@ def _read_memory(self, addr, transfer_size=32, now=True): try: self.write_reg(self._reg_offset + MEM_AP_CSW, self._csw | TRANSFER_SIZE[transfer_size]) self.write_reg(self._reg_offset + MEM_AP_TAR, addr) - + if transfer_size <= 32: result_cb = self.read_reg(self._reg_offset + MEM_AP_DRW, now=False) else: @@ -998,7 +998,7 @@ def read_mem_cb(): def _write_block32_page(self, addr, data): """! @brief Write a single transaction's worth of aligned words. 
- + The transaction must not cross the MEM-AP's auto-increment boundary. This method is not locked because it is only called by _write_memory_block32(), which is locked. @@ -1025,7 +1025,7 @@ def _write_block32_page(self, addr, data): def _read_block32_page(self, addr, size): """! @brief Read a single transaction's worth of aligned words. - + The transaction must not cross the MEM-AP's auto-increment boundary. This method is not locked because it is only called by _read_memory_block32(), which is locked. @@ -1070,7 +1070,7 @@ def _write_memory_block32(self, addr, data): @locked def _read_memory_block32(self, addr, size): """! @brief Read a block of aligned words in memory. - + @return A list of word values. """ assert (addr & 0x3) == 0 @@ -1091,10 +1091,10 @@ def _handle_error(self, error, num): class AHB_AP(MEM_AP): """! @brief AHB-AP access port subclass. - + This subclass checks for the AP_MSTRTYPE flag, and if set configures that field in the CSW register to use debugger transactions. Only the M3 and M4 AHB-AP implements MSTRTYPE. - + Another AHB-AP specific addition is that an attempt is made to set the TRCENA bit in the DEMCR register before reading the ROM table. This is required on some Cortex-M devices, otherwise certain ROM table entries will read as zeroes or other garbage. @@ -1107,10 +1107,10 @@ def init(self): # Check for and enable the Master Type bit on AHB-APs where it might be implemented. if self._flags & AP_MSTRTYPE: self._init_mstrtype() - + def _init_mstrtype(self): """! @brief Set master type control in CSW. - + Only the v1 AHB-AP from Cortex-M3 and Cortex-M4 implements the MSTRTYPE flag to control whether transactions appear as debugger or internal accesses. """ diff --git a/pyocd/coresight/component.py b/pyocd/coresight/component.py index 3391495c8..9570e6de0 100644 --- a/pyocd/coresight/component.py +++ b/pyocd/coresight/component.py @@ -19,7 +19,7 @@ class CoreSightComponent(GraphNode): """! 
@brief CoreSight component base class.""" - + @classmethod def factory(cls, ap, cmpid, address): """! @brief Common CoreSightComponent factory.""" @@ -34,30 +34,30 @@ def __init__(self, ap, cmpid=None, addr=None): self._ap = ap self._cmpid = cmpid self._address = addr if (addr is not None) else (cmpid.address if cmpid else None) - + @property def ap(self): return self._ap - + @property def cmpid(self): return self._cmpid - + @cmpid.setter def cmpid(self, newCmpid): self._cmpid = newCmpid - + @property def address(self): return self._address - + @address.setter def address(self, newAddr): self._address = newAddr class CoreSightCoreComponent(CoreSightComponent): """! @brief CoreSight component for a CPU core. - + This class serves only as a superclass for identifying core-type components. """ pass diff --git a/pyocd/coresight/component_ids.py b/pyocd/coresight/component_ids.py index 33cff8eef..fe638a2bf 100644 --- a/pyocd/coresight/component_ids.py +++ b/pyocd/coresight/component_ids.py @@ -120,19 +120,19 @@ class CmpInfo(NamedTuple): (ARM_ID, CORESIGHT_CLASS, 0x927, 0x11, 0) : CmpInfo('TPIU', 'SC300', None ), (ARM_ID, CORESIGHT_CLASS, 0x932, 0x31, 0x0a31) : CmpInfo('MTB', 'M0+', None ), (ARM_ID, CORESIGHT_CLASS, 0x950, 0x13, 0) : CmpInfo('PTM', 'A9', None ), - (ARM_ID, CORESIGHT_CLASS, 0x961, 0x32, 0) : CmpInfo('ETF', None, None ), # Trace Memory Controller ETF - (ARM_ID, CORESIGHT_CLASS, 0x962, 0x63, 0x0a63) : CmpInfo('STM', None, None ), # System Trace Macrocell - (ARM_ID, CORESIGHT_CLASS, 0x963, 0x63, 0x0a63) : CmpInfo('STM-500', None, None ), # System Trace Macrocell + (ARM_ID, CORESIGHT_CLASS, 0x961, 0x32, 0) : CmpInfo('ETF', None, None ), # Trace Memory Controller ETF + (ARM_ID, CORESIGHT_CLASS, 0x962, 0x63, 0x0a63) : CmpInfo('STM', None, None ), # System Trace Macrocell + (ARM_ID, CORESIGHT_CLASS, 0x963, 0x63, 0x0a63) : CmpInfo('STM-500', None, None ), # System Trace Macrocell (ARM_ID, CORESIGHT_CLASS, 0x975, 0x13, 0x4a13) : CmpInfo('ETM', 'M7', None ), 
(ARM_ID, CORESIGHT_CLASS, 0x9a0, 0x16, 0) : CmpInfo('PMU', 'A9', None ), (ARM_ID, CORESIGHT_CLASS, 0x9a1, 0x11, 0) : CmpInfo('TPIU', 'M4', TPIU.factory ), (ARM_ID, CORESIGHT_CLASS, 0x9a3, 0x13, 0x0) : CmpInfo('MTB', 'M0', None ), - (ARM_ID, CORESIGHT_CLASS, 0x9a4, 0x34, 0x0a34) : CmpInfo('GPR', None, GPR.factory ), # Granular Power Requestor + (ARM_ID, CORESIGHT_CLASS, 0x9a4, 0x34, 0x0a34) : CmpInfo('GPR', None, GPR.factory ), # Granular Power Requestor (ARM_ID, CORESIGHT_CLASS, 0x9a5, 0x16, 0) : CmpInfo('PMU', 'A5', None ), (ARM_ID, CORESIGHT_CLASS, 0x9a6, 0x14, 0x1a14) : CmpInfo('CTI', 'M0+', None ), (ARM_ID, CORESIGHT_CLASS, 0x9a7, 0x16, 0) : CmpInfo('PMU', 'A7', None ), (ARM_ID, CORESIGHT_CLASS, 0x9a9, 0x11, 0) : CmpInfo('TPIU', 'M7', TPIU.factory ), - (ARM_ID, CORESIGHT_CLASS, 0x9ba, 0x55, 0x0a55) : CmpInfo('PMC-100', None, None ), # Programmable MBIST Controller + (ARM_ID, CORESIGHT_CLASS, 0x9ba, 0x55, 0x0a55) : CmpInfo('PMC-100', None, None ), # Programmable MBIST Controller (ARM_ID, CORESIGHT_CLASS, 0x9db, 0x13, 0x4a13) : CmpInfo('ETM', 'A32', None ), # ETMv4 (ARM_ID, CORESIGHT_CLASS, 0x9db, 0x14, 0x1a14) : CmpInfo('CTI', 'A32', None ), # CTIv2 (ARM_ID, CORESIGHT_CLASS, 0x9db, 0x16, 0x2a16) : CmpInfo('PMU', 'A32', None ), # PMUv3 @@ -213,7 +213,7 @@ class CmpInfo(NamedTuple): (ARM_ID, GENERIC_CLASS, 0x00c, 0x00, 0) : CmpInfo('SCS', 'v7-M', CortexM.factory ), (ARM_ID, GENERIC_CLASS, 0x00d, 0x00, 0) : CmpInfo('SCS', 'SC000', CortexM.factory ), (ARM_ID, GENERIC_CLASS, 0x00e, 0x00, 0) : CmpInfo('FPB', 'v7-M', FPB.factory ), - (ARM_ID, SYSTEM_CLASS, 0x101, 0x00, 0) : CmpInfo('TSGEN', None, None ), # Timestamp Generator + (ARM_ID, SYSTEM_CLASS, 0x101, 0x00, 0) : CmpInfo('TSGEN', None, None ), # Timestamp Generator (FSL_ID, CORESIGHT_CLASS, 0x000, 0x04, 0) : CmpInfo('MTBDWT', None, None ), } diff --git a/pyocd/coresight/core_ids.py b/pyocd/coresight/core_ids.py index 601551a3a..db2bd0621 100644 --- a/pyocd/coresight/core_ids.py +++ b/pyocd/coresight/core_ids.py @@ 
-56,7 +56,7 @@ class CoreArchitecture(Enum): ARMv7M = 2 ARMv8M_BASE = 3 ARMv8M_MAIN = 4 - + class CortexMExtension(Enum): """! @brief Extensions for the Cortex-M architecture.""" FPU = "FPU" # Single-Precision floating point diff --git a/pyocd/coresight/coresight_target.py b/pyocd/coresight/coresight_target.py index 57de6622f..0bd6aca4e 100644 --- a/pyocd/coresight/coresight_target.py +++ b/pyocd/coresight/coresight_target.py @@ -32,16 +32,16 @@ class CoreSightTarget(SoCTarget): """! @brief Represents an SoC that uses CoreSight debug infrastructure. - + This class adds Arm CoreSight-specific discovery and initialization code to SoCTarget. """ - + def __init__(self, session, memory_map=None): # Supply a default memory map. if (memory_map is None) or (memory_map.region_count == 0): memory_map = self._create_default_cortex_m_memory_map() LOG.debug("Using default Cortex-M memory map (no memory map supplied)") - + super(CoreSightTarget, self).__init__(session, memory_map) self.dp = dap.DebugPort(session.probe, self) self._svd_load_thread = None @@ -97,12 +97,12 @@ def create_init_sequence(self): ('create_flash', self.create_flash), ('notify', lambda : self.session.notify(Target.Event.POST_CONNECT, self)) ) - + return seq def disconnect(self, resume=True): """! @brief Disconnect from the target. - + Same as SoCTarget.disconnect(), except that it asks the DebugPort to power down. """ self.session.notify(Target.Event.PRE_DISCONNECT, self) @@ -111,10 +111,10 @@ def disconnect(self, resume=True): core.disconnect(resume) self.dp.disconnect() self.call_delegate('did_disconnect', target=self, resume=resume) - + def create_discoverer(self): """! @brief Init task to create the discovery object. - + Instantiates the appropriate @ref pyocd.coresight.discovery.CoreSightDiscovery CoreSightDiscovery subclass for the target's ADI version. """ @@ -122,7 +122,7 @@ def create_discoverer(self): def pre_connect(self): """! @brief Handle some of the connect modes. 
- + This init task performs a connect pre-reset or asserts reset if the connect mode is under-reset. """ @@ -133,10 +133,10 @@ def pre_connect(self): elif mode == 'under-reset': LOG.info("Asserting reset prior to connect") self.dp.assert_reset(True) - + def perform_halt_on_connect(self): """! @brief Halt cores. - + This init task performs a connect pre-reset or asserts reset if the connect mode is under-reset. """ @@ -154,10 +154,10 @@ def perform_halt_on_connect(self): except exceptions.Error as err: LOG.warning("Could not halt core #%d: %s", core.core_number, err, exc_info=self.session.log_tracebacks) - + def post_connect(self): """! @brief Handle cleaning up some of the connect modes. - + This init task de-asserts reset if the connect mode is under-reset. """ mode = self.session.options.get('connect_mode') @@ -173,10 +173,10 @@ def post_connect(self): except exceptions.Error as err: LOG.warning("Could not halt core #%d: %s", core.core_number, err, exc_info=self.session.log_tracebacks) - + def create_flash(self): """! @brief Instantiates flash objects for memory regions. - + This init task iterates over flash memory regions and for each one creates the Flash instance. It uses the flash_algo and flash_class properties of the region to know how to construct the flash object. @@ -208,12 +208,12 @@ def create_flash(self): algo = pack_algo.get_pyocd_flash_algo( page_size, self.memory_map.get_default_region_of_type(MemoryType.RAM)) - + # If we got a valid algo from the FLM, set it on the region. This will then # be used below. if algo is not None: region.algo = algo - + # If the constructor of the region's flash class takes the flash_algo arg, then we # need the region to have a flash algo dict to pass to it. Otherwise we assume the # algo is built-in. @@ -227,10 +227,10 @@ def create_flash(self): continue else: obj = klass(self) - + # Set the region in the flash instance. obj.region = region - + # Store the flash object back into the memory region. 
region.flash = obj @@ -268,5 +268,5 @@ def reset(self, reset_type=None): self.dp.reset() else: super().reset(reset_type) - - + + diff --git a/pyocd/coresight/cortex_m.py b/pyocd/coresight/cortex_m.py index 8a1d934c8..96aef8357 100644 --- a/pyocd/coresight/cortex_m.py +++ b/pyocd/coresight/cortex_m.py @@ -37,7 +37,7 @@ class CortexM(Target, CoreSightCoreComponent): """! @brief CoreSight component for a v6-M or v7-M Cortex-M core. - + This class has basic functions to access a Cortex-M core: - init - read/write memory @@ -49,7 +49,7 @@ class CortexM(Target, CoreSightCoreComponent): APSR_MASK = 0xF80F0000 EPSR_MASK = 0x0700FC00 IPSR_MASK = 0x000001FF - + # Thumb bit in XPSR. XPSR_THUMB = 0x01000000 @@ -125,12 +125,12 @@ class CortexM(Target, CoreSightCoreComponent): # Coprocessor Access Control Register CPACR = 0xE000ED88 CPACR_CP10_CP11_MASK = (3 << 20) | (3 << 22) - + # Interrupt Control and State Register ICSR = 0xE000ED04 ICSR_PENDSVCLR = (1 << 27) ICSR_PENDSTCLR = (1 << 25) - + VTOR = 0xE000ED08 SCR = 0xE000ED10 SHPR1 = 0xE000ED18 @@ -149,11 +149,11 @@ class CortexM(Target, CoreSightCoreComponent): NVIC_AIRCR_SYSRESETREQ = (1 << 2) NVIC_AIRCR_PRIGROUP_MASK = 0x700 NVIC_AIRCR_PRIGROUP_SHIFT = 8 - + NVIC_ICER0 = 0xE000E180 # NVIC Clear-Enable Register 0 NVIC_ICPR0 = 0xE000E280 # NVIC Clear-Pending Register 0 NVIC_IPR0 = 0xE000E400 # NVIC Interrupt Priority Register 0 - + SYSTICK_CSR = 0xE000E010 DBGKEY = (0xA05F << 16) @@ -174,18 +174,18 @@ class CortexM(Target, CoreSightCoreComponent): def factory(cls, ap, cmpid, address): # Create a new core instance. root = ap.dp.target - core = cls(root.session, ap, root.memory_map, root._new_core_num, cmpid, address) - + core = cls(root.session, ap, root.memory_map, root._new_core_num, cmpid, address) + # Associate this core with the AP. if ap.core is not None: raise exceptions.TargetError("AP#%d has multiple cores associated with it" % ap.ap_num) ap.core = core - + # Add the new core to the root target. 
root.add_core(core) - + root._new_core_num += 1 - + return core def __init__(self, session, ap, memory_map=None, core_num=0, cmpid=None, address=None): @@ -207,10 +207,10 @@ def __init__(self, session, ap, memory_map=None, core_num=0, cmpid=None, address self._reset_catch_saved_demcr = 0 self.fpb = None self.dwt = None - + # Default to software reset using the default software reset method. self._default_reset_type = Target.ResetType.SW - + # Select default sw reset type based on whether multicore debug is enabled and which core # this is. self._default_software_reset_type = Target.ResetType.SW_SYSRESETREQ \ @@ -225,7 +225,7 @@ def __init__(self, session, ap, memory_map=None, core_num=0, cmpid=None, address def add_child(self, cmp): """! @brief Connect related CoreSight components.""" super(CortexM, self).add_child(cmp) - + if isinstance(cmp, FPB): self.fpb = cmp self.bp_manager.add_provider(cmp) @@ -256,20 +256,20 @@ def elf(self): @elf.setter def elf(self, elffile): self._elf = elffile - + @property def default_reset_type(self): return self._default_reset_type - + @default_reset_type.setter def default_reset_type(self, reset_type): assert isinstance(reset_type, Target.ResetType) self._default_reset_type = reset_type - + @property def default_software_reset_type(self): return self._default_software_reset_type - + @default_software_reset_type.setter def default_software_reset_type(self, reset_type): """! @brief Modify the default software reset method. @@ -281,11 +281,11 @@ def default_software_reset_type(self, reset_type): assert reset_type in (Target.ResetType.SW_SYSRESETREQ, Target.ResetType.SW_VECTRESET, Target.ResetType.SW_EMULATED) self._default_software_reset_type = reset_type - + @property def supported_security_states(self): """! @brief Tuple of security states supported by the processor. - + @return Tuple of @ref pyocd.core.target.Target.SecurityState "Target.SecurityState". For v6-M and v7-M cores, the return value only contains SecurityState.NONSECURE. 
""" @@ -293,7 +293,7 @@ def supported_security_states(self): def init(self): """! @brief Cortex M initialization. - + The bus must be accessible when this method is called. """ if not self.call_delegate('will_start_debug_core', core=self): @@ -323,7 +323,7 @@ def disconnect(self, resume=True): def _build_registers(self): """! @brief Build set of core registers available on this code. - + This method builds the list of core registers for this particular core. This includes all available core registers, and some variants of registers such as 'ipsr', 'iapsr', and the individual CFBP registers as well as 'cfbp' itself. This set of registers is available in @@ -348,17 +348,17 @@ def _read_core_type(self): arch = (cpuid & CortexM.CPUID_ARCHITECTURE_MASK) >> CortexM.CPUID_ARCHITECTURE_POS self.core_type = (cpuid & CortexM.CPUID_PARTNO_MASK) >> CortexM.CPUID_PARTNO_POS - + self.cpu_revision = (cpuid & CortexM.CPUID_VARIANT_MASK) >> CortexM.CPUID_VARIANT_POS self.cpu_patch = (cpuid & CortexM.CPUID_REVISION_MASK) >> CortexM.CPUID_REVISION_POS - + # Only v7-M supports VECTRESET. if arch == CortexM.ARMv7M: self._architecture = CoreArchitecture.ARMv7M self._supports_vectreset = True else: self._architecture = CoreArchitecture.ARMv6M - + if self.core_type in CORE_TYPE_NAME: LOG.info("CPU core #%d is %s r%dp%d", self.core_number, CORE_TYPE_NAME[self.core_type], self.cpu_revision, self.cpu_patch) else: @@ -366,7 +366,7 @@ def _read_core_type(self): def _check_for_fpu(self): """! @brief Determine if a core has an FPU. - + The core architecture must have been identified prior to calling this function. """ # FPU is not supported in these architectures. @@ -386,7 +386,7 @@ def _check_for_fpu(self): if self.has_fpu: self._extensions.append(CortexMExtension.FPU) - + # Now check whether double-precision is supported. # (Minimal tests to distinguish current permitted ARMv7-M and # ARMv8-M FPU types; used for printing only). 
@@ -407,13 +407,13 @@ def _check_for_fpu(self): def write_memory(self, addr, value, transfer_size=32): """! @brief Write a single memory location. - + By default the transfer size is a word.""" self.ap.write_memory(addr, value, transfer_size) def read_memory(self, addr, transfer_size=32, now=True): """! @brief Read a memory location. - + By default, a word will be read.""" result = self.ap.read_memory(addr, transfer_size, now) @@ -458,22 +458,22 @@ def halt(self): def step(self, disable_interrupts=True, start=0, end=0, hook_cb=None): """! @brief Perform an instruction level step. - + This API will execute one or more individual instructions on the core. With default parameters, it masks interrupts and only steps a single instruction. The _start_ and _stop_ parameters define an address range of [_start_, _end_). The core will be repeatedly stepped until the PC falls outside this range, a debug event occurs, or the optional callback returns True. - + The _disable_interrupts_ parameter controls whether to allow stepping into interrupts. This function preserves the previous interrupt mask state. - + If the _hook_cb_ parameter is set to a callable, it will be invoked repeatedly to give the caller a chance to check for interrupt requests or other reasons to exit. Note that stepping may take a very long time for to return in cases such as stepping over a branch into the Secure world where the debugger doesn't have secure debug access, or similar for Privileged code in the case of UDE. - + @param self The object. @param disable_interrupts Boolean specifying whether to mask interrupts during the step. @param start Integer start address for range stepping. Not included in the range. @@ -481,7 +481,7 @@ def step(self, disable_interrupts=True, start=0, end=0, hook_cb=None): @param hook_cb Optional callable taking no parameters and returning a boolean. The signature is `hook_cb() -> bool`. Invoked repeatedly while waiting for step operations to complete. 
If the callback returns True, then stepping is stopped immediately. - + @exception DebugError Raised if debug is not enabled on the core. """ # Save DHCSR and make sure the core is halted. We also check that C_DEBUGEN is set because if it's @@ -552,7 +552,7 @@ def step(self, disable_interrupts=True, start=0, end=0, hook_cb=None): # Check for stop reasons other than HALTED, which will have been set by our step action. if (self.read32(CortexM.DFSR) & ~CortexM.DFSR_HALTED) != 0: break - + # Restore interrupt mask state. if maskints_differs: self.write32(CortexM.DHCSR, @@ -570,36 +570,36 @@ def clear_debug_cause_bits(self): | CortexM.DFSR_BKPT | CortexM.DFSR_HALTED ) - + def _perform_emulated_reset(self): """! @brief Emulate a software reset by writing registers. - + All core registers are written to reset values. This includes setting the initial PC and SP to values read from the vector table, which is assumed to be located at the based of the boot memory region. - + If the memory map does not provide a boot region, then the current value of the VTOR register is reused, as it should at least point to a valid vector table. - + The current value of DEMCR.VC_CORERESET determines whether the core will be resumed or left halted. - + Note that this reset method will not set DHCSR.S_RESET_ST or DFSR.VCATCH. """ # Halt the core before making changes. self.halt() - + bootMemory = self.memory_map.get_boot_memory() if bootMemory is None: # Reuse current VTOR value if we don't know the boot memory region. vectorBase = self.read32(self.VTOR) else: vectorBase = bootMemory.start - + # Read initial SP and PC. initialSp = self.read32(vectorBase) initialPc = self.read32(vectorBase + 4) - + # Init core registers. 
regList = ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12', 'psp', 'msp', 'lr', 'pc', 'xpsr', 'cfbp'] @@ -612,13 +612,13 @@ def _perform_emulated_reset(self): 0x01000000, # XPSR 0, # CFBP ] - + if self.has_fpu: regList += [('s%d' % n) for n in range(32)] + ['fpscr'] valueList += [0] * 33 - + self.write_core_registers_raw(regList, valueList) - + # "Reset" SCS registers. data = [ (self.ICSR_PENDSVCLR | self.ICSR_PENDSTCLR), # ICSR @@ -634,7 +634,7 @@ def _perform_emulated_reset(self): ] self.write_memory_block32(self.ICSR, data) self.write32(self.CPACR, 0) - + if self.has_fpu: data = [ 0, # FPCCR @@ -642,10 +642,10 @@ def _perform_emulated_reset(self): 0, # FPDSCR ] self.write_memory_block32(self.FPCCR, data) - + # "Reset" SysTick. self.write_memory_block32(self.SYSTICK_CSR, [0] * 3) - + # "Reset" NVIC registers. numregs = (self.read32(self.ICTR) & 0xf) + 1 self.write_memory_block32(self.NVIC_ICER0, [0xffffffff] * numregs) @@ -654,7 +654,7 @@ def _perform_emulated_reset(self): def _get_actual_reset_type(self, reset_type): """! @brief Determine the reset type to use given defaults and passed in type.""" - + # Default to reset_type session option if reset_type parameter is None. If the session # option isn't set, then use the core's default reset type. if reset_type is None: @@ -665,7 +665,7 @@ def _get_actual_reset_type(self, reset_type): # Convert session option value to enum. resetOption = self.session.options.get('reset_type') reset_type = cmdline.convert_reset_type(resetOption) - + # The converted option will be None if the option value is 'default'. if reset_type is None: reset_type = self.default_reset_type @@ -673,15 +673,15 @@ def _get_actual_reset_type(self, reset_type): reset_type = self.default_reset_type else: assert isinstance(reset_type, Target.ResetType) - + # If the reset type is just SW, then use our default software reset type. 
if reset_type is Target.ResetType.SW: reset_type = self.default_software_reset_type - + # Fall back to emulated sw reset if the vectreset is specified and the core doesn't support it. if (reset_type is Target.ResetType.SW_VECTRESET) and (not self._supports_vectreset): reset_type = Target.ResetType.SW_EMULATED - + return reset_type def _perform_reset(self, reset_type): @@ -699,7 +699,7 @@ def _perform_reset(self, reset_type): mask = CortexM.NVIC_AIRCR_VECTRESET else: raise exceptions.InternalError("unhandled reset type") - + # Transfer errors are ignored on the AIRCR write for resets. On a few systems, the reset # apparently happens so quickly that we can't even finish the SWD transaction. try: @@ -708,13 +708,13 @@ def _perform_reset(self, reset_type): self.flush() except exceptions.TransferError: self.flush() - + # Post reset delay. sleep(self.session.options.get('reset.post_delay')) def _post_reset_core_accessibility_test(self): """! @brief Wait for the system to come out of reset and this core to be accessible. - + Keep reading the DHCSR until we get a good response with S_RESET_ST cleared, or we time out. There's nothing we can do if the test times out, and in fact if this is a secondary core on a multicore system then timing out is almost guaranteed. @@ -744,17 +744,17 @@ def _post_reset_core_accessibility_test(self): def reset(self, reset_type=None): """! @brief Reset the core. - + The reset method is selectable via the reset_type parameter as well as the reset_type session option. If the reset_type parameter is not specified or None, then the reset_type option will be used. If the option is not set, or if it is set to a value of 'default', the the core's default_reset_type property value is used. So, the session option overrides the core's default, while the parameter overrides everything. - + Note that only v7-M cores support the `VECTRESET` software reset method. 
If this method is chosen but the core doesn't support it, the the reset method will fall back to an emulated software reset. - + After a call to this function, the core is running. """ reset_type = self._get_actual_reset_type(reset_type) @@ -774,7 +774,7 @@ def reset(self, reset_type=None): # We only need to test accessibility after reset for system-level resets. # If a hardware reset is being used, then the DP will perform its post-reset recovery for us. Out of the # other reset types, only a system-level reset by SW_SYSRESETREQ require us to ensure the DP reset recovery - # is performed. VECTRESET + # is performed. VECTRESET if reset_type is Target.ResetType.SW_SYSRESETREQ: self.ap.dp.post_reset_recovery() if reset_type in (Target.ResetType.HW, Target.ResetType.SW_SYSRESETREQ): @@ -790,7 +790,7 @@ def set_reset_catch(self, reset_type=None): LOG.debug("set reset catch, core %d", self.core_number) self._reset_catch_delegate_result = self.call_delegate('set_reset_catch', core=self, reset_type=reset_type) - + # Default behaviour if the delegate didn't handle it. if not self._reset_catch_delegate_result: # Halt the target. @@ -802,7 +802,7 @@ def set_reset_catch(self, reset_type=None): # Enable reset vector catch if needed. if (self._reset_catch_saved_demcr & CortexM.DEMCR_VC_CORERESET) == 0: self.write_memory(CortexM.DEMCR, self._reset_catch_saved_demcr | CortexM.DEMCR_VC_CORERESET) - + def clear_reset_catch(self, reset_type=None): """! @brief Disable halt on reset.""" LOG.debug("clear reset catch, core %d", self.core_number) @@ -859,10 +859,10 @@ def get_state(self): return Target.State.HALTED else: return Target.State.RUNNING - + def get_security_state(self): """! @brief Returns the current security state of the processor. - + @return @ref pyocd.core.target.Target.SecurityState "Target.SecurityState" enumerator. For v6-M and v7-M cores, SecurityState.NONSECURE is always returned. 
""" @@ -909,14 +909,14 @@ def check_reg_list(self, reg_list): def read_core_register(self, reg): """! @brief Read one core register. - + The core must be halted or reads will fail. - + @param self The core. @param reg Either the register's name in lowercase or an integer register index. @return The current value of the register. Most core registers return an integer value, while the floating point single and double precision register return a float value. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to read the register. @@ -927,14 +927,14 @@ def read_core_register(self, reg): def read_core_register_raw(self, reg): """! @brief Read a core register without type conversion. - + The core must be halted or reads will fail. - + @param self The core. @param reg Either the register's name in lowercase or an integer register index. @return The current integer value of the register. Even float register values are returned as integers (thus the "raw"). - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to read the register. @@ -944,15 +944,15 @@ def read_core_register_raw(self, reg): def read_core_registers_raw(self, reg_list): """! @brief Read one or more core registers. - + The core must be halted or reads will fail. - + @param self The core. @param reg_list List of registers to read. Each element in the list can be either the register's name in lowercase or the integer register index. @return List of integer values of the registers requested to be read. The result list will be the same length as _reg_list_. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to read one or more registers. 
@@ -964,7 +964,7 @@ def read_core_registers_raw(self, reg_list): def _base_read_core_registers_raw(self, reg_list): """! @brief Private core register read routine. - + Items in the _reg_list_ must be pre-converted to index and only include valid registers for the core. @@ -979,16 +979,16 @@ def _base_read_core_registers_raw(self, reg_list): "s" if (len(reg_list) > 1) else "", ", ".join(CortexMCoreRegisterInfo.get(r).name for r in reg_list), self.core_number)) - + # Handle doubles. doubles = [reg for reg in reg_list if CortexMCoreRegisterInfo.get(reg).is_double_float_register] hasDoubles = len(doubles) > 0 if hasDoubles: originalRegList = reg_list - + # Strip doubles from reg_list. reg_list = [reg for reg in reg_list if not CortexMCoreRegisterInfo.get(reg).is_double_float_register] - + # Read float regs required to build doubles. singleRegList = [] for reg in doubles: @@ -1032,12 +1032,12 @@ def _base_read_core_registers_raw(self, reg_list): val &= CortexMCoreRegisterInfo.get(reg).psr_mask reg_vals.append(val) - + if fail_list: raise exceptions.CoreRegisterAccessError("failed to read register{0} {1}".format( "s" if (len(fail_list) > 1) else "", ", ".join(CortexMCoreRegisterInfo.get(r).name for r in fail_list))) - + # Merge double regs back into result list. if hasDoubles: results = [] @@ -1058,13 +1058,13 @@ def _base_read_core_registers_raw(self, reg_list): def write_core_register(self, reg, data): """! @brief Write a CPU register. - + The core must be halted or the write will fail. - + @param self The core. @param reg The name of the register to write. @param data New value of the register. Float registers accept float values. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to write the register. @@ -1074,13 +1074,13 @@ def write_core_register(self, reg, data): def write_core_register_raw(self, reg, data): """! 
@brief Write a CPU register without type conversion. - + The core must be halted or the write will fail. - + @param self The core. @param reg The name of the register to write. @param data New value of the register. Must be an integer, even for float registers. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to write the register. @@ -1089,7 +1089,7 @@ def write_core_register_raw(self, reg, data): def write_core_registers_raw(self, reg_list, data_list): """! @brief Write one or more core registers. - + The core must be halted or writes will fail. @param self The core. @@ -1097,13 +1097,13 @@ def write_core_registers_raw(self, reg_list, data_list): register's name in lowercase or the integer register index. @param data_list List of values for the registers in the corresponding positions of _reg_list_. All values must be integers, even for float registers. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to write one or more registers. """ assert len(reg_list) == len(data_list) - + # convert to index only reg_list = [CortexMCoreRegisterInfo.register_name_to_index(reg) for reg in reg_list] self.check_reg_list(reg_list) @@ -1111,7 +1111,7 @@ def write_core_registers_raw(self, reg_list, data_list): def _base_write_core_registers_raw(self, reg_list, data_list): """! @brief Private core register write routine. - + Items in the _reg_list_ must be pre-converted to index and only include valid registers for the core. Similarly, data_list items must be pre-converted to integer values. 
@@ -1126,7 +1126,7 @@ def _base_write_core_registers_raw(self, reg_list, data_list): "s" if (len(reg_list) > 1) else "", ", ".join(CortexMCoreRegisterInfo.get(r).name for r in reg_list), self.core_number)) - + # Read special register if it is present in the list and # convert doubles to single float register writes. cfbpValue = None @@ -1146,7 +1146,7 @@ def _base_write_core_registers_raw(self, reg_list, data_list): else: # Other register, just copy directly. reg_data_list.append((reg, data)) - + # Write out registers dhcsr_cb_list = [] for reg, data in reg_data_list: @@ -1182,7 +1182,7 @@ def _base_write_core_registers_raw(self, reg_list, data_list): dhcsr_val = dhcsr_cb() if (dhcsr_val & CortexM.S_REGRDY) == 0: fail_list.append(reg_and_data[0]) - + if fail_list: raise exceptions.CoreRegisterAccessError("failed to write register{0} {1}".format( "s" if (len(fail_list) > 1) else "", @@ -1190,7 +1190,7 @@ def _base_write_core_registers_raw(self, reg_list, data_list): def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): """! @brief Set a hardware or software breakpoint at a specific location in memory. - + @retval True Breakpoint was set. @retval False Breakpoint could not be set. """ @@ -1286,10 +1286,10 @@ def is_debug_trap(self): def is_vector_catch(self): return self.get_halt_reason() == Target.HaltReason.VECTOR_CATCH - + def get_halt_reason(self): """! @brief Returns the reason the core has halted. - + @return @ref pyocd.core.target.Target.HaltReason "Target.HaltReason" enumerator or None. """ dfsr = self.read32(CortexM.DFSR) diff --git a/pyocd/coresight/cortex_m_core_registers.py b/pyocd/coresight/cortex_m_core_registers.py index fabac0174..1a20816fe 100644 --- a/pyocd/coresight/cortex_m_core_registers.py +++ b/pyocd/coresight/cortex_m_core_registers.py @@ -28,7 +28,7 @@ class CortexMCoreRegisterInfo(CoreRegisterInfo): """! @brief Core register subclass for Cortex-M registers. 
- + For most registers, the index is the value written to the DCRSR register to read or write the core register. Other core registers not directly supported by DCRSR have special index values that are interpreted by the helper methods on this class and the core register read/write code in CortexM @@ -37,7 +37,7 @@ class CortexMCoreRegisterInfo(CoreRegisterInfo): ## Map of register name to info. _NAME_MAP = {} - + ## Map of register index to info. _INDEX_MAP = {} @@ -84,12 +84,12 @@ def psr_mask(self): class CoreRegisterGroups: """! @brief Namespace for lists of Cortex-M core register information.""" - + _I = CortexMCoreRegisterInfo # Reduce table width. # For most registers, the index is the DCRSR register selector value. Those registers not directly # supported by the DCRSR have special values that are interpreted by the register read/write methods. - + ## @brief Registers common to all M-profile cores. M_PROFILE_COMMON = [ # Name index bits type group gdbnum feature @@ -241,7 +241,7 @@ class CoreRegisterGroups: _I('d14', -0x5c, 64, 'ieee_double', 'double', 36, "org.gnu.gdb.arm.vfp"), _I('d15', -0x5e, 64, 'ieee_double', 'double', 37, "org.gnu.gdb.arm.vfp"), ] - + del _I # Cleanup namespace. # Build info map. diff --git a/pyocd/coresight/cortex_m_v8m.py b/pyocd/coresight/cortex_m_v8m.py index ca01e7181..0f47c7bf7 100644 --- a/pyocd/coresight/cortex_m_v8m.py +++ b/pyocd/coresight/cortex_m_v8m.py @@ -32,18 +32,18 @@ class CortexM_v8M(CortexM): ## DFSR.PMU added in v8.1-M. DFSR_PMU = (1 << 5) - + DSCSR = 0xE000EE08 DSCSR_CDSKEY = 0x00020000 DSCSR_CDS = 0x00010000 DSCSR_SBRSEL = 0x00000002 DSCSR_SBRSELEN = 0x00000001 - + # Processor Feature Register 1 PFR1 = 0xE000ED44 PFR1_SECURITY_MASK = 0x000000f0 PFR1_SECURITY_SHIFT = 4 - + PFR1_SECURITY_EXT_V8_0 = 0x1 # Base security extension. PFR1_SECURITY_EXT_V8_1 = 0x3 # v8.1-M adds several instructions. 
@@ -59,11 +59,11 @@ def __init__(self, rootTarget, ap, memory_map=None, core_num=0, cmpid=None, addr # Only v7-M supports VECTRESET. self._supports_vectreset = False - + @property def supported_security_states(self): """! @brief Tuple of security states supported by the processor. - + @return Tuple of @ref pyocd.core.target.Target.SecurityState "Target.SecurityState". The result depends on whether the Security extension is enabled. """ @@ -83,10 +83,10 @@ def _read_core_type(self): arch = (cpuid & CortexM.CPUID_ARCHITECTURE_MASK) >> CortexM.CPUID_ARCHITECTURE_POS self.core_type = (cpuid & CortexM.CPUID_PARTNO_MASK) >> CortexM.CPUID_PARTNO_POS - + self.cpu_revision = (cpuid & CortexM.CPUID_VARIANT_MASK) >> CortexM.CPUID_VARIANT_POS self.cpu_patch = (cpuid & CortexM.CPUID_REVISION_MASK) >> CortexM.CPUID_REVISION_POS - + pfr1 = self.read32(self.PFR1) pfr1_sec = ((pfr1 & self.PFR1_SECURITY_MASK) >> self.PFR1_SECURITY_SHIFT) self.has_security_extension = pfr1_sec in (self.PFR1_SECURITY_EXT_V8_0, self.PFR1_SECURITY_EXT_V8_1) @@ -94,12 +94,12 @@ def _read_core_type(self): self._extensions.append(CortexMExtension.SEC) if pfr1_sec == self.PFR1_SECURITY_EXT_V8_1: self._extensions.append(CortexMExtension.SEC_V81) - + if arch == self.ARMv8M_BASE: self._architecture = CoreArchitecture.ARMv8M_BASE else: self._architecture = CoreArchitecture.ARMv8M_MAIN - + if self.core_type in CORE_TYPE_NAME: if self.has_security_extension: LOG.info("CPU core #%d is %s r%dp%d (security ext present)", self.core_number, CORE_TYPE_NAME[self.core_type], self.cpu_revision, self.cpu_patch) @@ -110,11 +110,11 @@ def _read_core_type(self): def _check_for_fpu(self): """! @brief Determine if a core has an FPU. - + In addition to the tests performed by CortexM, this method tests for the MVE extension. """ super(CortexM_v8M, self)._check_for_fpu() - + # Check for MVE. 
mvfr1 = self.read32(self.MVFR1) mve = (mvfr1 & self.MVFR1_MVE_MASK) >> self.MVFR1_MVE_SHIFT @@ -125,7 +125,7 @@ def _check_for_fpu(self): def _build_registers(self): super(CortexM_v8M, self)._build_registers() - + # Registers available with Security extension, either Baseline or Mainline. if self.has_security_extension: self._core_registers.add_group(CoreRegisterGroups.V8M_SEC_ONLY) @@ -133,18 +133,18 @@ def _build_registers(self): # Mainline-only registers. if self.architecture == CoreArchitecture.ARMv8M_MAIN: self._core_registers.add_group(CoreRegisterGroups.V7M_v8M_ML_ONLY) - + # Registers available when both Mainline and Security extensions are implemented. if self.has_security_extension: self._core_registers.add_group(CoreRegisterGroups.V8M_ML_SEC_ONLY) - + # MVE registers. if CortexMExtension.MVE in self.extensions: self._core_registers.add_group(CoreRegisterGroups.V81M_MVE_ONLY) - + def get_security_state(self): """! @brief Returns the current security state of the processor. - + @return @ref pyocd.core.target.Target.SecurityState "Target.SecurityState" enumerator. """ dscsr = self.read32(self.DSCSR) @@ -161,12 +161,12 @@ def clear_debug_cause_bits(self): | CortexM.DFSR_DWTTRAP | CortexM.DFSR_BKPT | CortexM.DFSR_HALTED) - + def get_halt_reason(self): """! @brief Returns the reason the core has halted. - + This overridden version of this method adds support for v8.x-M halt reasons. - + @return @ref pyocd.core.target.Target.HaltReason "Target.HaltReason" enumerator or None. """ dfsr = self.read32(self.DFSR) diff --git a/pyocd/coresight/dap.py b/pyocd/coresight/dap.py index 6d07723fb..de4fbbb4e 100644 --- a/pyocd/coresight/dap.py +++ b/pyocd/coresight/dap.py @@ -112,25 +112,25 @@ class ADIVersion(Enum): class DPConnector: """! @brief Establishes a connection to the DP for a given wire protocol. - + This class will ask the probe to connect using a given wire protocol. 
Then it makes multiple attempts at sending the SWJ sequence to select the wire protocol and read the DP IDR register. """ - + def __init__(self, probe): self._probe = probe self._session = probe.session self._idr = None - + # Make sure we have a session, since we get the session from the probe and probes have their session set # after creation. assert self._session is not None, "DPConnector requires the probe to have a session" - + @property def idr(self): """! @brief DPIDR instance containing values read from the DP IDR register.""" return self._idr - + def _get_protocol(self, protocol): # Convert protocol from setting if not passed as parameter. if protocol is None: @@ -139,16 +139,16 @@ def _get_protocol(self, protocol): if protocol not in self._probe.supported_wire_protocols: raise exceptions.DebugError("requested wire protocol %s not supported by the debug probe" % protocol.name) return protocol - + def connect(self, protocol=None): """! @brief Establish a connection to the DP. - + This method causes the debug probe to connect using the wire protocol. - + @param self @param protocol One of the @ref pyocd.probe.debug_probe.DebugProbe.Protocol "DebugProbe.Protocol" enums. If not provided, will default to the `dap_protocol` setting. - + @exception DebugError @exception TransferError """ @@ -161,7 +161,7 @@ def connect(self, protocol=None): # If this is not None then the probe is already connected. current_wire_protocol = self._probe.wire_protocol already_connected = current_wire_protocol is not None - + if already_connected: self._check_protocol(current_wire_protocol, protocol) else: @@ -171,7 +171,7 @@ def connect(self, protocol=None): self._connect_dp(protocol) finally: self._probe.unlock() - + def _check_protocol(self, current_wire_protocol, protocol): # Warn about mismatched current and requested wire protocols. 
if (protocol is not current_wire_protocol) and (protocol is not DebugProbe.Protocol.DEFAULT): @@ -183,7 +183,7 @@ def _connect_probe(self, protocol): # Debug log with the selected protocol. if protocol is not DebugProbe.Protocol.DEFAULT: LOG.debug("Using %s wire protocol", protocol.name) - + # Connect using the selected protocol. self._probe.connect(protocol) @@ -191,7 +191,7 @@ def _connect_probe(self, protocol): if protocol is DebugProbe.Protocol.DEFAULT: protocol = self._probe.wire_protocol LOG.debug("Default wire protocol selected; using %s", protocol.name) - + def _connect_dp(self, protocol): # Get SWJ settings. use_dormant = self._session.options.get('dap_swj_use_dormant') @@ -200,33 +200,33 @@ def _connect_dp(self, protocol): # Create object to send SWJ sequences. swj = SWJSequenceSender(self._probe, use_dormant) - + # Multiple attempts to select protocol and read DP IDR. for attempt in range(4): try: if send_swj: swj.select_protocol(protocol) - + # Attempt to read the DP IDR register. self._idr = self.read_idr() - + # Successful connection so exit the loop. break except exceptions.TransferError: # If not sending the SWJ sequence, just reraise; there's nothing more to do. if not send_swj: raise - + # If the read of the DP IDCODE fails, retry SWJ sequence. The DP may have been # in a state where it thought the SWJ sequence was an invalid transfer. We also # try enabling use of dormant state if it wasn't already enabled. LOG.debug("DP IDCODE read failed; resending SWJ sequence (use dormant=%s)", use_dormant) - + if attempt == 1: # If already using dormant mode, just raise, we don't need to retry the same mode. if use_dormant: raise - + # After the second attempt, switch to enabling dormant mode. swj.use_dormant = True elif attempt == 3: @@ -250,7 +250,7 @@ class DebugPort: ## Number of times to try to read DP registers after hw reset before attempting reconnect. _RESET_RECOVERY_ATTEMPTS_BEFORE_RECONNECT = 1 - + def __init__(self, probe, target): """! 
@brief Constructor. @param self The DebugPort object. @@ -275,7 +275,7 @@ def __init__(self, probe, target): self._have_probe_capabilities = False self._did_check_version = False self._log_dp_info = True - + # DPv3 attributes self._is_dpv3 = False self._addr_size = None @@ -283,79 +283,79 @@ def __init__(self, probe, target): self._errmode = None self._base_addr = None self._apacc_mem_interface = None - + # Subscribe to reset events. self._session.subscribe(self._reset_did_occur, (Target.Event.PRE_RESET, Target.Event.POST_RESET)) @property def probe(self): return self._probe - + @property def session(self): return self._session - + @property def adi_version(self): return ADIVersion.ADIv6 if self._is_dpv3 else ADIVersion.ADIv5 - + @property def base_address(self): """! @brief Base address of the first component for an ADIv6 system.""" return self._base_addr - + @property def apacc_memory_interface(self): """! @brief Memory interface for performing APACC transactions.""" if self._apacc_mem_interface is None: self._apacc_mem_interface = APAccessMemoryInterface(self) return self._apacc_mem_interface - + @property def next_access_number(self): self._access_number += 1 return self._access_number - + def lock(self): """! @brief Lock the DP from access by other threads.""" self.probe.lock() - + def unlock(self): """! @brief Unlock the DP.""" self.probe.unlock() def connect(self, protocol=None): """! @brief Connect to the target. - + This method causes the debug probe to connect using the selected wire protocol. The probe must have already been opened prior to this call. - + Unlike create_connect_sequence(), this method is intended to be used when manually constructing a DebugPort instance. It simply calls create_connect_sequence() and invokes the returned call sequence. - + @param self @param protocol One of the @ref pyocd.probe.debug_probe.DebugProbe.Protocol "DebugProbe.Protocol" enums. If not provided, will default to the `protocol` setting. 
""" self._protocol = protocol self.create_connect_sequence().invoke() - + def disconnect(self): """! @brief Disconnect from target. - + DP debug is powered down. See power_down_debug(). """ self.power_down_debug() def create_connect_sequence(self): """! @brief Returns call sequence to connect to the target. - + Returns a @ref pyocd.utility.sequence.CallSequence CallSequence that will connect to the DP, power up debug and the system, check the DP version to identify whether the target uses ADI v5 or v6, then clears sticky errors. - + The probe must have already been opened prior to this method being called. - + @param self @return @ref pyocd.utility.sequence.CallSequence CallSequence """ @@ -399,22 +399,22 @@ def _connect(self): LOG.log(logging.INFO if self._log_dp_info else logging.DEBUG, "DP IDR = 0x%08x (v%d%s rev%d)", self.dpidr.idr, self.dpidr.version, " MINDP" if self.dpidr.mindp else "", self.dpidr.revision) - + def _check_version(self): self._is_dpv3 = (self.dpidr.version == 3) if self._is_dpv3: # Check that the probe will be able to access ADIv6 APs. if self._probe_managed_ap_select and not self._probe_supports_apv2_addresses: raise exceptions.ProbeError("connected to ADIv6 target with probe that does not support APv2 addresses") - + idr1 = self.read_reg(DP_IDR1) - + self._addr_size = idr1 & DPIDR1_ASIZE_MASK self._addr_mask = (1 << self._addr_size) - 1 self._errmode_supported = (idr1 & DPIDR1_ERRMODE_MASK) != 0 - + LOG.debug("DP IDR1 = 0x%08x (addr size=%d, errmode=%d)", idr1, self._addr_size, self._errmode_supported) - + # Read base system address. baseptr0 = self.read_reg(DP_BASEPTR0) valid = (baseptr0 & BASEPTR0_VALID_MASK) != 0 @@ -426,7 +426,7 @@ def _check_version(self): base &= self._addr_mask self._base_addr = base - + LOG.debug("DP BASEPTR = 0x%08x", self._base_addr) else: LOG.warning("DPv3 has no valid base address") @@ -448,10 +448,10 @@ def write_reg(self, addr, data): def power_up_debug(self): """! @brief Assert DP power requests. 
- + Request both debug and system power be enabled, and wait until the request is acked. There is a timeout for the request. - + @return Boolean indicating whether the power up request succeeded. """ # Send power up request for system and debug. @@ -464,22 +464,22 @@ def power_up_debug(self): break else: return False - + return True def power_down_debug(self): """! @brief Deassert DP power requests. - + ADIv6 says that we must not clear CSYSPWRUPREQ and CDBGPWRUPREQ at the same time. ADIv5 says CSYSPWRUPREQ must not be set to 1 while CDBGPWRUPREQ is set to 0. So we start with deasserting system power, then debug power. Each deassertion has its own timeout. - + @return Boolean indicating whether the power down request succeeded. """ # Power down system first. self.write_reg(DP_CTRL_STAT, CDBGPWRUPREQ | MASKLANE | TRNNORMAL) - + with Timeout(DP_POWER_REQUEST_TIMEOUT) as time_out: while time_out.check(): r = self.read_reg(DP_CTRL_STAT) @@ -490,7 +490,7 @@ def power_down_debug(self): # Now power down debug. self.write_reg(DP_CTRL_STAT, MASKLANE | TRNNORMAL) - + with Timeout(DP_POWER_REQUEST_TIMEOUT) as time_out: while time_out.check(): r = self.read_reg(DP_CTRL_STAT) @@ -498,21 +498,21 @@ def power_down_debug(self): break else: return False - + return True def _invalidate_cache(self): """! @brief Invalidate cached DP registers.""" self._cached_dp_select = None - + def _reset_did_occur(self, notification): """! @brief Handles reset notifications to invalidate register cache. - + The cache is cleared on all resets just to be safe. On most devices, warm resets do not reset debug logic, but it does happen on some devices. """ self._invalidate_cache() - + def post_reset_recovery(self): """! @brief Wait for the target to recover from reset, with auto-reconnect if needed.""" # Check if we can access DP registers. If this times out, then reconnect the DP and retry. @@ -549,15 +549,15 @@ def post_reset_recovery(self): def reset(self, *, send_notifications=True): """! 
@brief Hardware reset. - + Pre- and post-reset notifications are sent. - + This method can be called before the DebugPort is connected. @param self This object. @param send_notifications Optional keyword-only parameter used by higher-level reset methods so they can manage the sending of reset notifications themselves, in order to provide more context in the notification. - + @todo Should automatic recovery from a disconnected DAP be provided for these low-level hardware resets like is done for CortexM.reset()? """ @@ -572,12 +572,12 @@ def reset(self, *, send_notifications=True): def assert_reset(self, asserted, *, send_notifications=True): """! @brief Assert or deassert the hardware reset signal. - + A pre-reset notification is sent before asserting reset, whereas a post-reset notification is sent after deasserting reset. - + This method can be called before the DebugPort is connected. - + @param self This object. @param asserted True if nRESET is to be driven low; False will drive nRESET high. @param send_notifications Optional keyword-only parameter used by higher-level reset methods so they can @@ -596,7 +596,7 @@ def assert_reset(self, asserted, *, send_notifications=True): def is_reset_asserted(self): """! @brief Returns the current state of the nRESET signal. - + This method can be called before the DebugPort is initalized. @retval True Reset is asserted; nRESET is low. @@ -613,7 +613,7 @@ def set_clock(self, frequency): def _write_dp_select(self, mask, value): """! @brief Modify part of the DP SELECT register and write if cache is stale. - + The DP lock must already be acquired before calling this method. """ # Compute the new SELECT value and see if we need to write it. @@ -623,22 +623,22 @@ def _write_dp_select(self, mask, value): select = (self._cached_dp_select & ~mask) | value if select == self._cached_dp_select: return - + # Update the SELECT register and cache. 
self.write_dp(DP_SELECT, select) self._cached_dp_select = select - + def _set_dpbanksel(self, addr, is_write): """! @brief Updates the DPBANKSEL field of the SELECT register as required. - + Several DP registers (most, actually) ignore DPBANKSEL. If one of those is being accessed, any value of DPBANKSEL can be used. Otherwise SELECT is updated if necessary and a lock acquired so another thread doesn't change DPBANKSEL until thsi transaction is complete. - + This method also handles the case where the debug probe manages DPBANKSEL on its own, such as with STLink. - + @return Whether the access needs a lock on DP SELECT. @exception exceptions.ProbeError Raised when a banked register is being accessed but the probe doesn't support DPBANKSEL. @@ -650,11 +650,11 @@ def _set_dpbanksel(self, addr, is_write): registers_ignoring_dpbanksel = (DP_SELECT, DP_RDBUFF) else: registers_ignoring_dpbanksel = (DP_ABORT, DP_SELECT, DP_RDBUFF) - + if (addr & DPADDR_MASK) not in registers_ignoring_dpbanksel: # Get the DP bank. dpbanksel = (addr & DPADDR_DPBANKSEL_MASK) >> DPADDR_DPBANKSEL_SHIFT - + # Check if the probe handles this for us. if self._probe_managed_dpbanksel: # If there is a nonzero DPBANKSEL and the probe doesn't support this, @@ -663,7 +663,7 @@ def _set_dpbanksel(self, addr, is_write): raise exceptions.ProbeError("probe does not support banked DP registers") else: return False - + # Update the selected DP bank. self.lock() self._write_dp_select(SELECT_DPBANKSEL_MASK, dpbanksel) @@ -675,7 +675,7 @@ def read_dp(self, addr, now=True): if (addr & DPADDR_MASK) % 4 != 0: raise ValueError("DP address must be word aligned") num = self.next_access_number - + # Update DPBANKSEL if required. did_lock = self._set_dpbanksel(addr, False) @@ -715,7 +715,7 @@ def write_dp(self, addr, data): if (addr & DPADDR_MASK) % 4 != 0: raise ValueError("DP address must be word aligned") num = self.next_access_number - + # Update DPBANKSEL if required. 
did_lock = self._set_dpbanksel(addr, True) @@ -731,10 +731,10 @@ def write_dp(self, addr, data): self.unlock() return True - + def _select_ap(self, addr): """! @brief Write DP_SELECT to choose the given AP. - + Handles the case where the debug probe manages selecting an AP itself, in which case we never write SELECT directly. @@ -743,7 +743,7 @@ def _select_ap(self, addr): # If the probe handles selecting the AP for us, there's nothing to do here. if self._probe_managed_ap_select: return False - + # Write DP SELECT to select the probe. self.lock() if self.adi_version == ADIVersion.ADIv5: @@ -831,7 +831,7 @@ def read_ap_multiple(self, addr, count=1, now=True): assert isinstance(addr, int) num = self.next_access_number did_lock = False - + try: did_lock = self._select_ap(addr) TRACE.debug("read_ap_multiple:%06d (addr=0x%08x, count=%i)", num, addr, count) @@ -885,20 +885,20 @@ def clear_sticky_err(self): class APAccessMemoryInterface(memory_interface.MemoryInterface): """! @brief Memory interface for performing simple APACC transactions. - + This class allows the caller to generate Debug APB transactions from a DPv3. It simply adapts the MemoryInterface to APACC transactions. - + By default, it passes memory transaction addresses unmodified to the DP. But an instance can be constructed by passing an APAddress object to the constructor that offsets transaction addresses so they are relative to the APAddress base. - + Only 32-bit transfers are supported. """ - + def __init__(self, dp, ap_address=None): """! @brief Constructor. - + @param self @param dp The DebugPort object. @param ap_address Optional instance of APAddress. 
If provided, all memory transaction @@ -910,11 +910,11 @@ def __init__(self, dp, ap_address=None): self._offset = ap_address.address else: self._offset = 0 - + @property def dp(self): return self._dp - + @property def short_description(self): if self._ap_address is None: @@ -924,20 +924,20 @@ def short_description(self): def write_memory(self, addr, data, transfer_size=32): """! @brief Write a single memory location. - + By default the transfer size is a word.""" if transfer_size != 32: raise exceptions.DebugError("unsupported transfer size") - + return self._dp.write_ap(self._offset + addr, data) - + def read_memory(self, addr, transfer_size=32, now=True): """! @brief Read a memory location. - + By default, a word will be read.""" if transfer_size != 32: raise exceptions.DebugError("unsupported transfer size") - + return self._dp.read_ap(self._offset + addr, now) def write_memory_block32(self, addr, data): diff --git a/pyocd/coresight/discovery.py b/pyocd/coresight/discovery.py index ec31a02c4..2b4ab2293 100644 --- a/pyocd/coresight/discovery.py +++ b/pyocd/coresight/discovery.py @@ -31,15 +31,15 @@ class CoreSightDiscovery(object): def __init__(self, target): """! @brief Constructor.""" self._target = target - + @property def target(self): return self._target - + @property def dp(self): return self.target.dp - + @property def session(self): return self.target.session @@ -67,7 +67,7 @@ def _create_components(self): self._apply_to_all_components(self._create_component, filter=lambda c: c.factory is not None and c.factory not in (cortex_m.CortexM.factory, cortex_m_v8m.CortexM_v8M.factory)) - + def _apply_to_all_components(self, action, filter=None): # Iterate over every top-level ROM table. for ap in [x for x in self.dp.aps.values() if x.rom_table]: @@ -75,10 +75,10 @@ def _apply_to_all_components(self, action, filter=None): class ADIv5Discovery(CoreSightDiscovery): """! @brief Component discovery process for ADIv5. 
- + Component discovery for ADIv5 proceeds as follows. Each of the steps is labeled with the name of the init task for that step. - + 1. `find_aps`: Perform an AP scan. Probe each AP at APSEL=0..255. By default the scan stops on the first invalid APSEL, as determined by testing the IDR value (0 is invalid). This can be overridden by a session option. @@ -104,11 +104,11 @@ def discover(self): def _find_aps(self): """! @brief Find valid APs using the ADIv5 method. - + Scans for valid APs starting at APSEL=0. The default behaviour is to stop after reading 0 for the AP's IDR twice in succession. If the `scan_all_aps` session option is set to True, then the scan will instead probe every APSEL from 0-255. - + If there is already a list of valid APs defined for the @ref pyocd.coresight.dap.DebugPort DebugPort (the `valid_aps` attribute), then scanning is not performed. This is to allow a predetermined list of valid APSELs to be used in place of a scan. A few MCUs will lock up @@ -119,7 +119,7 @@ def _find_aps(self): # skipping the AP scan by providing a predetermined list of valid APSELs. if self.dp.valid_aps is not None: return - + ap_list = [] apsel = 0 invalid_count = 0 @@ -138,13 +138,13 @@ def _find_aps(self): exc_info=self.session.log_tracebacks) break apsel += 1 - + # Update the AP list once we know it's complete. self.dp.valid_aps = ap_list def _create_aps(self): """! @brief Init task that returns a call sequence to create APs. - + For each AP in the #valid_aps list, an AccessPort object is created. The new objects are added to the #aps dict, keyed by their AP number. """ @@ -154,7 +154,7 @@ def _create_aps(self): ('create_ap.{}'.format(apsel), lambda apsel=apsel: self._create_1_ap(apsel)) ) return seq - + def _create_1_ap(self, apsel): """! 
@brief Init task to create a single AP object.""" try: @@ -164,7 +164,7 @@ def _create_1_ap(self, apsel): except exceptions.Error as e: LOG.error("Exception reading AP#%d IDR: %s", apsel, e, exc_info=self.session.log_tracebacks) - + def _find_components(self): """! @brief Init task that generates a call sequence to ask each AP to find its components.""" seq = CallSequence() @@ -176,10 +176,10 @@ def _find_components(self): class ADIv6Discovery(CoreSightDiscovery): """! @brief Component discovery process for ADIv6. - + The process for discovering components in ADIv6 proceeds as follows. Each of the steps is labeled with the name of the init task for that step. - + 1. `find_root_components`: Examine the component pointed to by the DP BASEPTR register(s). If it's a ROM table, read it and examine components pointed to by the entries. This creates the AP instances. @@ -188,7 +188,7 @@ class ADIv6Discovery(CoreSightDiscovery): 3. `create_cores`: Create any discovered core (CPU) components. The cores are created first to ensure that other components have a core to which they may be connected. 4. `create_components`: Create remaining discovered components. - + Note that nested APs are not supported. """ @@ -210,23 +210,23 @@ def _find_root_components(self): # There's not much we can do if we don't have a base address. if self.dp.base_address is None: return - + # Create a temporary memory interface. mem_interface = self.dp.apacc_memory_interface - + # Examine the base component. cmpid = CoreSightComponentID(None, mem_interface, self.dp.base_address) cmpid.read_id_registers() LOG.debug("Base component: %s", cmpid) - + if cmpid.is_rom_table: self._top_rom_table = ROMTable.create(mem_interface, cmpid) self._top_rom_table.init() - + # Create components defined in the DP ROM table. self._top_rom_table.for_each(self._create_1_ap, filter=lambda c: c.factory == AccessPort.create) - + # Create non-AP components in the DP ROM table. 
self._top_rom_table.for_each(self._create_root_component, filter=lambda c: (c.factory is not None) and (c.factory != AccessPort.create)) @@ -234,7 +234,7 @@ def _find_root_components(self): self._create_1_ap(cmpid) else: self._create_root_component(cmpid) - + def _create_1_ap(self, cmpid): """! @brief Init task to create a single AP object.""" try: @@ -244,10 +244,10 @@ def _create_1_ap(self, cmpid): except exceptions.Error as e: LOG.error("Exception reading AP@0x%08x IDR: %s", cmpid.address, e, exc_info=self.session.log_tracebacks) - + def _create_root_component(self, cmpid): """! @brief Init task to create a component attached directly to the DP. - + The newly created component is attached directly to the target instance (i.e., CoreSightTarget or subclass) in the object graph. """ @@ -255,7 +255,7 @@ def _create_root_component(self, cmpid): # Create a memory interface for this component. ap_address = APv2Address(cmpid.address) memif = APAccessMemoryInterface(self.dp, ap_address) - + # Instantiate the component and attach to the target. component = cmpid.factory(memif, cmpid, cmpid.address) self.target.add_child(component) @@ -263,7 +263,7 @@ def _create_root_component(self, cmpid): except exceptions.Error as e: LOG.error("Exception creating root component at address 0x%08x: %s", cmpid.address, e, exc_info=self.session.log_tracebacks) - + def _find_components_on_aps(self): """! @brief Init task that generates a call sequence to ask each AP to find its components.""" seq = CallSequence() diff --git a/pyocd/coresight/dwt.py b/pyocd/coresight/dwt.py index 841f4b4d4..82f0a8ab1 100644 --- a/pyocd/coresight/dwt.py +++ b/pyocd/coresight/dwt.py @@ -36,7 +36,7 @@ def __init__(self, comp_register_addr, provider): class DWT(CoreSightComponent): """! @brief Data Watchpoint and Trace version 1.0""" - + # DWT registers # # The addresses are offsets from the base address. 
@@ -52,7 +52,7 @@ class DWT(CoreSightComponent): DWT_MASK_OFFSET = 4 DWT_FUNCTION_OFFSET = 8 DWT_COMP_BLOCK_SIZE = 0x10 - + DWT_CTRL_NUM_COMP_MASK = (0xF << 28) DWT_CTRL_NUM_COMP_SHIFT = 28 DWT_CTRL_CYCEVTENA_MASK = (1 << 22) @@ -90,14 +90,14 @@ def __init__(self, ap, cmpid=None, addr=None): self.watchpoints = [] self.watchpoint_used = 0 self.dwt_configured = False - + @property def watchpoint_count(self): return len(self.watchpoints) def init(self): """! @brief Inits the DWT. - + Reads the number of hardware watchpoints available on the core and makes sure that they are all disabled and ready for future use. """ @@ -106,7 +106,7 @@ def init(self): if (demcr & DEMCR_TRCENA) == 0: demcr |= DEMCR_TRCENA self.ap.write_memory(DEMCR, demcr) - + dwt_ctrl = self.ap.read_memory(self.address + self.DWT_CTRL) watchpoint_count = (dwt_ctrl & self.DWT_CTRL_NUM_COMP_MASK) >> self.DWT_CTRL_NUM_COMP_SHIFT LOG.info("%d hardware watchpoints", watchpoint_count) @@ -114,7 +114,7 @@ def init(self): comparatorAddress = self.address + self.DWT_COMP_BASE + self.DWT_COMP_BLOCK_SIZE * i self.watchpoints.append(Watchpoint(comparatorAddress, self)) self.ap.write_memory(comparatorAddress + self.DWT_FUNCTION_OFFSET, 0) - + # Enable cycle counter. 
self.ap.write32(self.address + self.DWT_CTRL, self.DWT_CTRL_CYCCNTENA_MASK) self.dwt_configured = True @@ -171,34 +171,34 @@ def remove_watchpoint(self, addr, size, type): watch.func = 0 self.ap.write_memory(watch.comp_register_addr + self.DWT_FUNCTION_OFFSET, 0) self.watchpoint_used -= 1 - + def remove_all_watchpoints(self): for watch in self.watchpoints: if watch.func != 0: self.remove_watchpoint(watch.addr, watch.size, self.WATCH_TYPE_TO_FUNCT[watch.func]) - + def get_watchpoints(self): return [watch for watch in self.watchpoints if watch.func != 0] - + @property def cycle_count(self): return self.ap.read32(self.address + self.DWT_CYCCNT) - + @cycle_count.setter def cycle_count(self, value): self.ap.write32(self.address + self.DWT_CYCCNT, value) class DWTv2(DWT): """! @brief Data Watchpoint and Trace version 2.x - + This version is present in v8-M platforms. - + - DWT 2.0 appears in v8.0-M - DWT 2.1 appears in v8.1-M and adds the VMASKn registers. """ - + DWT_ACTION_DEBUG_EVENT = 0x00000010 - + ## Map from watchpoint type to FUNCTIONn.MATCH field value. WATCH_TYPE_TO_FUNCT = { Target.WatchpointType.READ: 0b0110, @@ -208,14 +208,14 @@ class DWTv2(DWT): 0b0101: Target.WatchpointType.WRITE, 0b0100: Target.WatchpointType.READ_WRITE, } - + ## Map from data access size to pre-shifted DATAVSIZE field value. DATAVSIZE_MAP = { 1: (0 << 10), 2: (1 << 10), 4: (2 << 10), } - + def set_watchpoint(self, addr, size, type): """! @brief Set a hardware watchpoint.""" if self.dwt_configured is False: @@ -228,7 +228,7 @@ def set_watchpoint(self, addr, size, type): if type not in self.WATCH_TYPE_TO_FUNCT: LOG.error("Invalid watchpoint type %i", type) return False - + # Only support sizes that can be handled with a single comparator. 
if size not in (1, 2, 4): LOG.error("Invalid watchpoint size %d", size) diff --git a/pyocd/coresight/fpb.py b/pyocd/coresight/fpb.py index 4232d0784..c83a27555 100644 --- a/pyocd/coresight/fpb.py +++ b/pyocd/coresight/fpb.py @@ -29,7 +29,7 @@ def __init__(self, comp_register_addr, provider): class FPB(BreakpointProvider, CoreSightComponent): """! @brief Flash Patch and Breakpoint unit""" - + # FPB registers # # The addresses are offsets from the base address. @@ -38,7 +38,7 @@ class FPB(BreakpointProvider, CoreSightComponent): FP_CTRL_REV_MASK = 0xf0000000 FP_CTRL_REV_SHIFT = 28 FP_COMP0 = 0x00000008 - + def __init__(self, ap, cmpid=None, addr=None): CoreSightComponent.__init__(self, ap, cmpid, addr) BreakpointProvider.__init__(self) @@ -55,7 +55,7 @@ def revision(self): def init(self): """! @brief Inits the FPB. - + Reads the number of hardware breakpoints available on the core and disable the FPB (Flash Patch and Breakpoint Unit), which will be enabled when the first breakpoint is set. setup FPB (breakpoint) @@ -97,7 +97,7 @@ def available_breakpoints(self): def can_support_address(self, addr): """! @brief Test whether an address is supported by the FPB. - + For FPBv1, hardware breakpoints are only supported in the range 0x00000000 - 0x1fffffff. This was fixed for FPBv2, which supports hardware breakpoints at any address. """ diff --git a/pyocd/coresight/generic_mem_ap.py b/pyocd/coresight/generic_mem_ap.py index 86297d8ef..e12c45eb0 100644 --- a/pyocd/coresight/generic_mem_ap.py +++ b/pyocd/coresight/generic_mem_ap.py @@ -27,7 +27,7 @@ class GenericMemAPTarget(Target, CoreSightCoreComponent): """! @brief This target represents ARM debug Access Port without a CPU - + It may be used to access the address space of the target via Access Ports without real ARM CPU core behind it. 
For instance Cypress PSoC64 devices have three APs implemented in the hardware: @@ -35,11 +35,11 @@ class GenericMemAPTarget(Target, CoreSightCoreComponent): * AP #1 -> Cortex-M0+ AP * AP #2 -> Cortex-M4F AP Depending on the protection state, AP #1 and AP #2 can be permanently disabled. - This class allows to communicate with Secure FW running on the target via AP #0. - + This class allows to communicate with Secure FW running on the target via AP #0. + Most of the methods in this class (except memory access methods) are empty/dummy. """ - + def __init__(self, session, ap, memory_map=None, core_num=0, cmpid=None, address=None): Target.__init__(self, session, memory_map) CoreSightCoreComponent.__init__(self, ap, cmpid, address) diff --git a/pyocd/coresight/gpr.py b/pyocd/coresight/gpr.py index 309b955a2..2713a5d83 100644 --- a/pyocd/coresight/gpr.py +++ b/pyocd/coresight/gpr.py @@ -21,15 +21,15 @@ class GPR(CoreSightComponent): """! @brief Granular Power Requestor. - + Currently only supports enabling power domains. """ - + CPWRUPREQ = 0x0 CPWRUPACK = 0x0 - + CPWRUPM_COUNT_MASK = 0x3f - + @classmethod def factory(cls, ap, cmpid, address): # Attempt to return the same instance that was created during ROM table scanning. @@ -37,7 +37,7 @@ def factory(cls, ap, cmpid, address): rom_gpr = cmpid.parent_rom_table.gpr if rom_gpr is not None and rom_gpr.address == address: return rom_gpr - + # No luck, create a new instance. gpr = cls(ap, cmpid, address) return gpr @@ -49,7 +49,7 @@ def __init__(self, ap, cmpid=None, addr=None): def init(self): """! @brief Inits the GPR.""" self.domain_count = self.cmpid.devid[2] & self.CPWRUPM_COUNT_MASK - + def _power_up(self, mask): """! @brief Enable power to a power domaind by mask. @param self @@ -59,7 +59,7 @@ def _power_up(self, mask): """ # Enable power up request bits. self.ap.write32(self.address + self.CPWRUPREQ, mask) - + # Wait for ack bits to set. 
with Timeout(ACK_TIMEOUT) as t_o: while t_o.check(): @@ -67,7 +67,7 @@ def _power_up(self, mask): if (value & mask) == mask: return True return False - + def power_up_all(self): """! @brief Enable power to all available power domains. @param self @@ -76,7 +76,7 @@ def power_up_all(self): """ mask = (1 << self.domain_count) - 1 return self._power_up(mask) - + def power_up_one(self, domain_id): """! @brief Power up a single power domain by domain ID. @param self @@ -86,9 +86,9 @@ def power_up_one(self, domain_id): """ mask = 1 << domain_id return self._power_up(mask) - + def __repr__(self): return "" % (id(self), self.domain_count) - + diff --git a/pyocd/coresight/itm.py b/pyocd/coresight/itm.py index 759f876b3..581b13c39 100644 --- a/pyocd/coresight/itm.py +++ b/pyocd/coresight/itm.py @@ -54,7 +54,7 @@ class ITM(CoreSightComponent): TCR_TRACEBUSID_MASK = (0x7f << 16) TCR_TRACEBUSID_SHIFT = 16 TCR_BUSY_MASK = (1 << 23) - + LAR = 0x00000fb0 LAR_KEY = 0xC5ACCE55 LSR = 0x00000fb4 @@ -79,10 +79,10 @@ def init(self): val = self.ap.read32(self.address + ITM.LSR) if val & ITM.LSR_SLK_MASK: raise exceptions.DebugError("Failed to unlock ITM") - + # Disable the ITM until enabled. self.disable() - + @property def is_enabled(self): return self._is_enabled @@ -99,9 +99,9 @@ def enable(self, enabled_ports=0xffffffff): def set_enabled_ports(self, enabled_ports): self.ap.write32(self.address + ITM.TERn, enabled_ports) - + def disable(self): self.ap.write32(self.address + ITM.TERn, 0) self.ap.write32(self.address + ITM.TCR, 0) self._is_enabled = False - + diff --git a/pyocd/coresight/rom_table.py b/pyocd/coresight/rom_table.py index fd8535167..3128fa2d1 100644 --- a/pyocd/coresight/rom_table.py +++ b/pyocd/coresight/rom_table.py @@ -30,7 +30,7 @@ class CoreSightComponentID(object): """! @brief Reads and parses CoreSight architectural component ID registers. 
- + Reads the CIDR, PIDR, DEVID, and DEVARCH registers present at well known offsets in the memory map of all CoreSight components. The various fields from these registers are made available as attributes. @@ -86,7 +86,7 @@ class CoreSightComponentID(object): DEVARCH_ARCHID_MASK = 0xffff CLASS_0X9_ROM_TABLE_ARCHID = 0x0af7 - + def __init__(self, parent_rom_table, ap, top_addr, power_id=None): self.parent_rom_table = parent_rom_table self.ap = ap @@ -123,12 +123,12 @@ def read_id_registers(self): # Extract class. self.component_class = (self.cidr & self.CIDR_COMPONENT_CLASS_MASK) >> self.CIDR_COMPONENT_CLASS_SHIFT - + # Extract JEP106 designer ID. self.designer = ((self.pidr & self.PIDR_DESIGNER_MASK) >> self.PIDR_DESIGNER_SHIFT) \ | ((self.pidr & self.PIDR_DESIGNER2_MASK) >> (self.PIDR_DESIGNER2_SHIFT - 8)) self.part = self.pidr & self.PIDR_PART_MASK - + # Handle Class 0x1 and Type 0x9 components. if self.component_class == self.ROM_TABLE_CLASS: # Class 0x1 ROM table. @@ -139,13 +139,13 @@ def read_id_registers(self): self.devid = regs[1:4] self.devid.reverse() self.devtype = regs[self.DEVTYPE_OFFSET] - + if self.devarch & self.DEVARCH_PRESENT_MASK: self.archid = self.devarch & self.DEVARCH_ARCHID_MASK # Identify a Class 0x9 ROM table. self.is_rom_table = (self.archid == self.CLASS_0X9_ROM_TABLE_ARCHID) - + # Determine component name. if self.is_rom_table: self.name = 'ROM' @@ -194,16 +194,16 @@ def __repr__(self): class ROMTable(CoreSightComponent): """! @brief CoreSight ROM table base class. - + This abstract class provides common functionality for ROM tables. Most importantly it has the static create() factory method. - + After a ROMTable is created, its init() method should be called. This will read and parse the table and any child ROM tables. For every component it finds in the table(s), it creates a CoreSightComponentID instance. The full collection of component IDs is available in the _components_ property. 
The for_each() method will execute a callable for all of the receiving ROM table and its children's components. - + Power domains controlled by Granular Power Requestor components are supported. They are automatically enabled as parsing proceeds so that components can be accessed to read their ID registers. @@ -215,17 +215,17 @@ class ROMTable(CoreSightComponent): @staticmethod def create(memif, cmpid, addr=None, parent_table=None): """! @brief Factory method for creating ROM table components. - + This static method instantiates the appropriate subclass for the ROM table component described by the cmpid parameter. - + @param memif MemoryInterface used to access the ROM table. @param cmpid The CoreSightComponentID instance for this ROM table. @param addr Optional base address for this ROM table, if already known. @param parent_table Optional ROM table that pointed to this one. """ assert cmpid is not None - + # Create appropriate ROM table class. if cmpid.component_class == CoreSightComponentID.ROM_TABLE_CLASS: return Class1ROMTable(memif, cmpid, addr, parent_table) @@ -233,7 +233,7 @@ def create(memif, cmpid, addr=None, parent_table=None): return Class9ROMTable(memif, cmpid, addr, parent_table) else: raise exceptions.DebugError("unexpected ROM table device class (%s)" % cmpid) - + def __init__(self, ap, cmpid=None, addr=None, parent_table=None): """! @brief Constructor.""" assert cmpid is not None @@ -246,23 +246,23 @@ def __init__(self, ap, cmpid=None, addr=None, parent_table=None): self._components = [] self.name = 'ROM' self.gpr = None - + @property def depth(self): """! @brief Number of parent ROM tables.""" return self._depth - + @property def components(self): """! @brief List of CoreSightComponentID instances for components found in this table. - + This property contains only the components for this ROM table, not any child tables. - + Child ROM tables will be represented in the list by ROMTable instances rather than CoreSightComponentID. 
""" return self._components - + @property def depth_indent(self): """! @brief String of whitespace with a width corresponding to the table's depth.'""" @@ -270,7 +270,7 @@ def depth_indent(self): def init(self): """! @brief Read and parse the ROM table. - + As table entries for CoreSight components are read, a CoreSightComponentID instance will be created and the ID registers read. These ID objects are added to the _components_ property. If any child ROM tables are discovered, they will automatically be created and inited. @@ -281,19 +281,19 @@ def init(self): self._components = [] self._read_table() - + def _read_table(self): raise NotImplementedError() def for_each(self, action, filter=None): """! @brief Apply an action to every component defined in the ROM table and child tables. - + This method iterates over every entry in the ROM table. For each entry it calls the filter function if provided. If the filter passes (returns True or was not provided) then the action function is called. - + The ROM table must have been initialized by calling init() prior to using this method. - + @param self This object. @param action Callable that accepts a single parameter, a CoreSightComponentID instance. @param filter Optional filter callable. Must accept a CoreSightComponentID instance and @@ -304,21 +304,21 @@ def for_each(self, action, filter=None): if isinstance(component, ROMTable): component.for_each(action, filter) continue - + # Skip component if the filter returns False. if filter is not None and not filter(component): continue - + # Perform the action. action(component) class Class1ROMTable(ROMTable): """! @brief CoreSight Class 0x1 ROM table component and parser. - + An object of this class represents a CoreSight Class 0x1 ROM table. It supports reading the table and any child tables. For each entry in the table, a CoreSightComponentID object is created that further reads the component's CoreSight identification registers. 
- + Granular Power Requestor (GPR) components are supported to automatically enable power domains required to access components, as indicated by the component entry in the ROM table. """ @@ -372,7 +372,7 @@ def _power_component(self, number, powerid, entry): "component has been seen; skipping component (entry=0x%08x)", number, powerid, entry) return False - + # Power up the domain. if not self.gpr.power_up_one(powerid): LOG.error("Failed to power up power domain #%d", powerid) @@ -394,11 +394,11 @@ def _handle_table_entry(self, entry, number): if (entry & self.ROM_TABLE_ADDR_OFFSET_NEG_MASK) != 0: offset = ~bit_invert(offset) address = self.address + offset - + # Check power ID. if (entry & self.ROM_TABLE_POWERIDVALID_MASK) != 0: powerid = (entry & self.ROM_TABLE_POWERID_MASK) >> self.ROM_TABLE_POWERID_SHIFT - + # Attempt to power up this component. Skip this component if we the attempt fails. if not self._power_component(number, powerid, entry): return @@ -408,7 +408,7 @@ def _handle_table_entry(self, entry, number): # Create component instance. cmpid = CoreSightComponentID(self, self.ap, address, powerid) cmpid.read_id_registers() - + # Is this component a power requestor? if cmpid.factory == GPR.factory: # Create the GPR instance and stash it. @@ -426,12 +426,12 @@ def _handle_table_entry(self, entry, number): if cmp is not None: self.components.append(cmp) - + class Class9ROMTable(ROMTable): """! @brief CoreSight Class 0x9 ROM table component and parser. - + Handles parsing of class 0x9 ROM tables as defined in ADIv6. - + In addition to GPR (Granular Power Requestor) components for power domain management, this class supports the optional power request functionality present in class 0x9 ROM tables. 
""" @@ -460,11 +460,11 @@ class Class9ROMTable(ROMTable): ROM_TABLE_DBGPCRn_PRESENT_MASK = 0x00000001 ROM_TABLE_DBGPCRn_PR_MASK = 0x00000002 ROM_TABLE_DBGPSRn_PS_MASK = 0x00000003 - + ROM_TABLE_DBGPSRn_PS_MAYBE_NOT_POWERED = 0x0 ROM_TABLE_DBGPSRn_PS_IS_POWERED = 0x1 ROM_TABLE_DBGPSRn_PS_MUST_REMAIN_POWERED = 0x3 - + ROM_TABLE_PRIDR0_VERSION_MASK = 0x0000000f ROM_TABLE_PRIDR0_VERSION = 1 # Current version number of the power request functionality. @@ -481,14 +481,14 @@ class Class9ROMTable(ROMTable): # 2's complement offset to debug component from ROM table base address. ROM_TABLE_ADDR_OFFSET_NEG_MASK = { 32: (1 << 31), 64: (1 << 63) } ROM_TABLE_ADDR_OFFSET_MASK = { 32: 0xfffff000, 64: 0xfffffffffffff000 } - + # 5 second timeout on power domain requests. POWER_REQUEST_TIMEOUT = 5.0 - + def __init__(self, ap, cmpid=None, addr=None, parent_table=None): """! @brief Component constructor.""" super(Class9ROMTable, self).__init__(ap, cmpid, addr, parent_table) - + self._pridr_version = None # Extract flags from DEVID. @@ -498,17 +498,17 @@ def __init__(self, ap, cmpid=None, addr=None, parent_table=None): is_64bit = ((self.cmpid.devid[0] & self.ROM_TABLE_DEVID_FORMAT_MASK) != 0) self._width = 64 if is_64bit else 32 LOG.debug("cp=%d prr=%d sysmem=%d w=%d", self._has_com_port, self._has_prr, self._is_sysmem, self._width) - + @property def has_com_port(self): """! @brief Whether the ROM table includes COM Port functionality.""" return self._has_com_port - + @property def has_prr(self): """! @brief Whether the ROM table includes power and reset requesting functionality.""" return self._has_prr - + @property def is_sysmem(self): """! @brief Whether the ROM table is present in system memory.""" @@ -521,12 +521,12 @@ def _read_table(self): actualMaxEntries = self.ROM_TABLE_MAX_ENTRIES // entrySizeMultiplier # Ensure 64-bit format is read as pairs of 32-bit values. 
entryReadCount = align_down(self.ROM_TABLE_ENTRY_READ_COUNT, entrySizeMultiplier) - + entryAddress = self.address foundEnd = False entriesRead = 0 entryNumber = 0 - + while not foundEnd and entriesRead < actualMaxEntries: # Read several entries at a time for performance. readCount = min(actualMaxEntries - entriesRead, entryReadCount) @@ -536,10 +536,10 @@ def _read_table(self): # For 64-bit entries, combine pairs of 32-bit values into single 64-bit value. if self._width == 64: entries = [(lo | (hi << 32)) for lo, hi in pairwise(entries)] - + for entry in entries: present = entry & self.ROM_TABLE_ENTRY_PRESENT_MASK - + # Zero entry indicates the end of the table. if present == self.ROM_TABLE_ENTRY_NOT_PRESENT_FINAL: foundEnd = True @@ -560,14 +560,14 @@ def _power_component(self, number, powerid, entry): if not self._has_prr: # Attempt GPR method of power domain enabling. return super(Class9ROMTable, self)._power_component(number, powerid, entry) - + # Check power request functionality version here so we can provide a nice warning message. if not self.check_power_request_version(): LOG.warning("Class 0x9 ROM table #%d @ 0x%08x has unsupported version (%d) of power " "request functionality, needed for entry #%d (entry=0x%08x). Skipping " "component.", self.depth, self.address, self._pridr_version, number, entry) return False - + if not self.power_debug_domain(powerid): LOG.error("Failed to power up power domain #%d", powerid) return False @@ -582,11 +582,11 @@ def _handle_table_entry(self, entry, number): if (entry & self.ROM_TABLE_ADDR_OFFSET_NEG_MASK[self._width]) != 0: offset = ~bit_invert(offset, width=self._width) address = self.address + offset - + # Check power ID. if (entry & self.ROM_TABLE_ENTRY_POWERIDVALID_MASK) != 0: powerid = (entry & self.ROM_TABLE_ENTRY_POWERID_MASK) >> self.ROM_TABLE_ENTRY_POWERID_SHIFT - + # Attempt to power up this component. Skip this component if we the attempt fails. 
if not self._power_component(number, powerid, entry): return @@ -596,7 +596,7 @@ def _handle_table_entry(self, entry, number): # Create component instance. cmpid = CoreSightComponentID(self, self.ap, address, powerid) cmpid.read_id_registers() - + # Is this component a power requestor? if cmpid.factory == GPR.factory: # Create the GPR instance and stash it. @@ -614,7 +614,7 @@ def _handle_table_entry(self, entry, number): if cmp is not None: self._components.append(cmp) - + def check_power_request_version(self): """! @brief Verify the power request functionality version.""" # Cache the PRIDR0 VERSION field the first time. @@ -629,14 +629,14 @@ def power_debug_domain(self, domain_id, enable=True): # Compute register addresses for this power domain. dbgpcr_addr = self.address + self.ROM_TABLE_DBGPCRn + (4 * domain_id) dbgpsr_addr = self.address + self.ROM_TABLE_DBGPSRn + (4 * domain_id) - + # Check the domain request PRESENT bit. dbgpcr = self.ap.read32(dbgpcr_addr) if (dbgpcr & self.ROM_TABLE_DBGPCRn_PRESENT_MASK) == 0: LOG.warning("Power request functionality for power domain #%d is not present.", domain_id) return False - + # Check if the PR bit matches our request. pr = (dbgpcr & self.ROM_TABLE_DBGPCRn_PR_MASK) != 0 if pr == enable: @@ -656,14 +656,14 @@ def power_debug_domain(self, domain_id, enable=True): LOG.warning("Power request handshake did not complete for power domain #%d.", domain_id) return False - + # Change power enable bit. if enable: dbgpcr |= self.ROM_TABLE_DBGPCRn_PR_MASK else: dbgpcr &= ~self.ROM_TABLE_DBGPCRn_PR_MASK self.ap.write32(dbgpcr_addr, dbgpcr) - + # Wait for status bits to update. with Timeout(self.POWER_REQUEST_TIMEOUT) as time_out: while time_out.check(): diff --git a/pyocd/coresight/sdc600.py b/pyocd/coresight/sdc600.py index 495fe9df1..4522d5743 100644 --- a/pyocd/coresight/sdc600.py +++ b/pyocd/coresight/sdc600.py @@ -40,7 +40,7 @@ class LinkClosedException(ComPortError): """! 
@brief Received an unexpected or out of order flag byte.""" def __init__(self, phase): self._phase = phase - + @property def phase(self): """! @brief The link phase that was closed from the other side.""" @@ -49,17 +49,17 @@ def phase(self): class SDC600(CoreSightComponent): """! @brief SDC-600 component. """ - + ## Default timeout for an operation or packet transfer. TRANSFER_TIMEOUT = 30.0 - + class LinkPhase(Enum): """! @brief COM Port link phases.""" ## Hardware-defined link phase. PHASE1 = 1 ## Software-defined link phase. PHASE2 = 2 - + class Register: """! @brief Namespace for SDC-600 register offset constants.""" # Register offsets. @@ -87,7 +87,7 @@ class Register: FIDxXR_xXSZ32_SHIFT = (10) FIDxXR_xXFD_MASK = (0x000f0000) FIDxXR_xXFD_SHIFT = (16) - + # SR bit definitions. SR_TXS_MASK = (0x000000ff) SR_TXS_SHIFT = (0) @@ -105,7 +105,7 @@ class Register: SR_RXLE_SHIFT = (30) SR_PEN_MASK = (0x80000000) SR_PEN_SHIFT = (31) - + class Flag: """! @brief Namespace with SDC-600 flag byte constants.""" IDR = 0xA0 @@ -120,11 +120,11 @@ class Flag: END = 0xAD ESC = 0xAE NULL = 0xAF - + # All bytes with 0b101 in bits [7:5] are flag bytes. MASK = 0xE0 IDENTIFIER = 0b10100000 - + ## Map from flag value to name. NAME = { IDR : "IDR", @@ -140,11 +140,11 @@ class Flag: ESC : "ESC", NULL : "NULL", } - + ## NULL bytes must be written to the upper bytes, and will be present in the upper bytes # when read. NULL_FILL = 0xAFAFAF00 - + def __init__(self, ap, cmpid=None, addr=None): super(SDC600, self).__init__(ap, cmpid, addr) self._tx_width = 0 @@ -153,37 +153,37 @@ def __init__(self, ap, cmpid=None, addr=None): def init(self): """! @brief Inits the component. - + Reads the RX and TX widths and whether the SDC-600 is enabled. All error flags are cleared. 
""" fidtx = self.ap.read32(self.Register.FIDTXR) LOG.debug("fidtx=0x%08x", fidtx) fidrx = self.ap.read32(self.Register.FIDRXR) LOG.debug("fidrx=0x%08x", fidrx) - + self._tx_width = (fidtx & self.Register.FIDxXR_xXW_MASK) >> self.Register.FIDxXR_xXW_SHIFT - + self._rx_width = (fidrx & self.Register.FIDxXR_xXW_MASK) >> self.Register.FIDxXR_xXW_SHIFT - + status = self.ap.read32(self.Register.SR) LOG.debug("status=0x%08x", status) self._is_enabled = (status & self.Register.SR_PEN_MASK) != 0 - + # Clear any error flags. error_flags = status & (self.Register.SR_TXOE_MASK | self.Register.SR_TXLE_MASK) if error_flags: self.ap.write32(self.Register.SR, error_flags) - + @property def is_enabled(self): """! @brief Whether the SDC-600 is enabled.""" return self._is_enabled - + @property def is_reboot_request_enabled(self): """! @brief Whether the Reboot Request feature is enabled in the SDC-600.""" return (self.ap.read32(self.Register.SR) & self.Register.SR_RRDIS_MASK) == 0 - + @property def current_link_phase(self): """! @brief Currently established link phase. @@ -193,10 +193,10 @@ def current_link_phase(self): def _read1(self, to_): """! @brief Read a single byte. - + If a NULL byte is received, it is ignored and another byte is read. No other flag bytes are processed. - + @exception TimeoutError """ while True: @@ -213,9 +213,9 @@ def _read1(self, to_): # Ignore NULL flag bytes. if value == self.Flag.NULL: continue - + return value - + def _write1(self, value, to_): """! @brief Write one or more bytes. @exception TimeoutError @@ -289,10 +289,10 @@ def _stuff(self, data): if (value & self.Flag.MASK) == self.Flag.IDENTIFIER: # Insert escape flag. result.append(self.Flag.ESC) - + # Invert high bit. value ^= 0x80 - + result.append(value) return result @@ -306,17 +306,17 @@ def _destuff(self, data): i = 0 while i < len(data): value = data[i] - + # Check for escaped bytes. if value == self.Flag.ESC: # Skip over escape. 
i += 1 - + # Get escaped byte and invert high bit to destuff it. value = data[i] ^ 0x80 - + result.append(value) - + i += 1 return result @@ -329,7 +329,7 @@ def _read_packet_data_to_end(self, to_): result = [] while to_.check(): value = self._read1(to_) - + # Check for the packet end marker flag. if value == self.Flag.END: break @@ -341,25 +341,25 @@ def _read_packet_data_to_end(self, to_): result.append(value) else: raise exceptions.TimeoutError("timeout while reading from SDC-600") - + return self._destuff(result) def receive_packet(self, timeout=TRANSFER_TIMEOUT): """! @brief Read a data packet. - + Reads a packet (PDU) from the target and removes byte stuffing. The timeout for reading the entire packet can be set via the _timeout_ parameter. - + As data is read from the target, special flags for link errors or to close either phase of the link are handled and an appropriate exception is raised. - + The connection must be in link phase 2. - + @param self @param timeout Optional timeout for reading the entire packet. If reading times out, a TimeoutError exception is raised. @return List of integer byte values of the de-escaped packet contents. - + @exception UnexpectedFlagError @exception LinkClosedException @exception TimeoutError @@ -368,21 +368,21 @@ def receive_packet(self, timeout=TRANSFER_TIMEOUT): with Timeout(timeout) as to_: self._expect_flag(self.Flag.START, to_) return self._read_packet_data_to_end(to_) - + def send_packet(self, data, timeout=TRANSFER_TIMEOUT): """! @brief Send a data packet. - + Sends the provided data to the target as a single packet (PDU), escaping bytes as necessary. No data is read while the packet is sent, so if the target closes the connection it will not be detected. - + The connection must be in link phase 2. - + @param self @param data List of integer byte values to send. Must not be pre-escaped. @param timeout Optional timeout for reading the entire packet. If reading times out, a TimeoutError exception is raised. 
- + @exception UnexpectedFlagError @exception TimeoutError """ @@ -392,7 +392,7 @@ def send_packet(self, data, timeout=TRANSFER_TIMEOUT): for value in self._stuff(data): self._write1(value, to_) self._write1(self.Flag.END, to_) - + def open_link(self, phase, timeout=TRANSFER_TIMEOUT): """! @brief Send the LPH1RA or LPH2RA flag. @exception UnexpectedFlagError @@ -405,11 +405,11 @@ def open_link(self, phase, timeout=TRANSFER_TIMEOUT): # Close link phase 1 first, to put it in a known state. self.close_link(self.LinkPhase.PHASE1) - + LOG.debug("sending LPH1RA") self._write1(self.Flag.LPH1RA, to_) self._expect_flag(self.Flag.LPH1RA, to_) - + self._current_link_phase = self.LinkPhase.PHASE1 elif phase == self.LinkPhase.PHASE2: assert self._current_link_phase == self.LinkPhase.PHASE1 @@ -417,17 +417,17 @@ def open_link(self, phase, timeout=TRANSFER_TIMEOUT): LOG.debug("sending LPH2RA") self._write1(self.Flag.LPH2RA, to_) self._expect_flag(self.Flag.LPH2RA, to_) - + self._current_link_phase = self.LinkPhase.PHASE2 else: raise ValueError("unrecognized phase value") def close_link(self, phase, timeout=TRANSFER_TIMEOUT): """! @brief Send the LPH1RL or LPH2RL flag. - + Link phase 1 can be closed from any state. Link phase 2 can only be closed when the connection is already in that phase. 
- + @exception UnexpectedFlagError @exception TimeoutError """ @@ -437,7 +437,7 @@ def close_link(self, phase, timeout=TRANSFER_TIMEOUT): LOG.debug("sending LPH1RL") self._write1(self.Flag.LPH1RL, to_) self._expect_flag(self.Flag.LPH1RL, to_) - + self._current_link_phase = None elif phase == self.LinkPhase.PHASE2: assert self._current_link_phase == self.LinkPhase.PHASE2 @@ -445,7 +445,7 @@ def close_link(self, phase, timeout=TRANSFER_TIMEOUT): LOG.debug("sending LPH2RL") self._write1(self.Flag.LPH2RL, to_) self._expect_flag(self.Flag.LPH2RL, to_) - + self._current_link_phase = self.LinkPhase.PHASE1 else: raise ValueError("unrecognized phase value") @@ -464,15 +464,15 @@ def read_protocol_id(self, timeout=TRANSFER_TIMEOUT): self._write1(self.Flag.IDR, to_) self._expect_flag(self.Flag.IDA, to_) return self._read_packet_data_to_end(to_) - + def send_reboot_request(self, timeout=TRANSFER_TIMEOUT): """! @brief Send remote reboot request.""" with Timeout(timeout) as to_: self._write1(self.Flag.LPH2RR, to_) - + def __repr__(self): return "".format(id(self), self._is_enabled, self._tx_width, self._rx_width, self._current_link_phase) - + diff --git a/pyocd/coresight/tpiu.py b/pyocd/coresight/tpiu.py index b380147ed..fba0db3e7 100644 --- a/pyocd/coresight/tpiu.py +++ b/pyocd/coresight/tpiu.py @@ -39,7 +39,7 @@ def __init__(self, ap, cmpid=None, addr=None): """! @brief Standard CoreSight component constructor.""" super(TPIU, self).__init__(ap, cmpid, addr) self._has_swo_uart = False - + @property def has_swo_uart(self): """! @brief Whether SWO UART mode is supported by the TPIU.""" @@ -47,19 +47,19 @@ def has_swo_uart(self): def init(self): """! @brief Reads TPIU capabilities. - + Currently this method simply checks whether the TPIU supports SWO in asynchronous UART mode. The result of this check is available via the has_swo_uart property. 
""" devid = self.ap.read32(self.address + TPIU.DEVID) self._has_swo_uart = (devid & TPIU.DEVID_NRZ_MASK) != 0 - + def set_swo_clock(self, swo_clock, system_clock): """! @brief Prepare TPIU for transmitting SWO at a given baud rate. - + Configures the TPIU for SWO UART mode, then sets the SWO clock frequency based on the provided system clock. - + @param self @param swo_clock Desired SWO baud rate in Hertz. @param system_clock The frequency of the SWO clock source in Hertz. This is almost always @@ -70,11 +70,11 @@ def set_swo_clock(self, swo_clock, system_clock): # First check whether SWO UART is supported. if not self.has_swo_uart: return False - + # Go ahead and configure for SWO. self.ap.write32(self.address + TPIU.SPPR, TPIU.SPPR_TXMODE_NRZ) # Select SWO UART mode. self.ap.write32(self.address + TPIU.FFCR, 0) # Disable formatter. - + # Compute the divider. div = (system_clock // swo_clock) - 1 actual = system_clock // (div + 1) diff --git a/pyocd/debug/breakpoints/manager.py b/pyocd/debug/breakpoints/manager.py index 3114385ff..20cbc9b91 100644 --- a/pyocd/debug/breakpoints/manager.py +++ b/pyocd/debug/breakpoints/manager.py @@ -72,7 +72,7 @@ def find_breakpoint(self, addr): def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): """! @brief Set a hardware or software breakpoint at a specific location in memory. - + @retval True Breakpoint was set. @retval False Breakpoint could not be set. """ @@ -105,7 +105,7 @@ def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): def _check_added_breakpoint(self, bp): """! @brief Check whether a new breakpoint is likely to actually be added when we flush. - + First, software breakpoints are assumed to always be addable. For hardware breakpoints, the current free hardware breakpoint count is updated based on the current set of to-be added and removed breakpoints. 
If there are enough free hardware breakpoints to meet the @@ -115,11 +115,11 @@ def _check_added_breakpoint(self, bp): if self._fpb is None: region = self._core.memory_map.get_region_for_address(bp.addr) return region is not None and region.is_writable - + likely_bp_type = self._select_breakpoint_type(bp, False) if likely_bp_type == Target.BreakpointType.SW: return True - + # Count updated hw breakpoints. free_hw_bp_count = self._fpb.available_breakpoints added, removed = self._get_updated_breakpoints() @@ -130,7 +130,7 @@ def _check_added_breakpoint(self, bp): likely_bp_type = self._select_breakpoint_type(bp, False) if likely_bp_type == Target.BreakpointType.HW: free_hw_bp_count -= 1 - + return free_hw_bp_count > self.MIN_HW_BREAKPOINTS def remove_breakpoint(self, addr): diff --git a/pyocd/debug/context.py b/pyocd/debug/context.py index a65b34d6b..dcd19133a 100644 --- a/pyocd/debug/context.py +++ b/pyocd/debug/context.py @@ -21,30 +21,30 @@ class DebugContext(MemoryInterface): """! @brief Viewport for inspecting the system being debugged. - + A debug context is used to access target registers and memory. It enables these accesses to be redirected to different locations. For instance, if you want to read registers from a call frame that is not the topmost, then a context would redirect those reads to locations on the stack. - + A context always has both a parent context and a specific core associated with it, neither of which can be changed after the context is created. The parent context is passed into the constructor. For the top-level debug context, the parent *is* the core. For other contexts that have a context as their parent, the core is set to the topmost parent's core. - + The DebugContext class itself is meant to be used as a base class. It's primary purpose is to provide the default implementation of methods to forward calls up to the parent and eventually to the core. """ - + def __init__(self, parent): """! @brief Debug context constructor. 
- + @param self @param parent The parent of this context. Can be either a core (CoreSightCoreComponent) or another DebugContext instance. """ self._parent = parent - + if isinstance(self._parent, CoreSightCoreComponent): self._core = parent else: @@ -57,7 +57,7 @@ def parent(self): @property def core(self): return self._core - + @property def session(self): return self.core.session @@ -82,12 +82,12 @@ def read_memory_block32(self, addr, size): def read_core_register(self, reg): """! @brief Read one core register. - + @param self The debug context. @param reg Either the register's name in lowercase or an integer register index. @return The current value of the register. Most core registers return an integer value, while the floating point single and double precision register return a float value. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to read the register. @@ -98,12 +98,12 @@ def read_core_register(self, reg): def read_core_register_raw(self, reg): """! @brief Read a core register without type conversion. - + @param self The debug context. @param reg Either the register's name in lowercase or an integer register index. @return The current integer value of the register. Even float register values are returned as integers (thus the "raw"). - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to read the register. @@ -113,13 +113,13 @@ def read_core_register_raw(self, reg): def read_core_registers_raw(self, reg_list): """! @brief Read one or more core registers. - + @param self The debug context. @param reg_list List of registers to read. Each element in the list can be either the register's name in lowercase or the integer register index. @return List of integer values of the registers requested to be read. 
The result list will be the same length as _reg_list_. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to read one or more registers. @@ -128,11 +128,11 @@ def read_core_registers_raw(self, reg_list): def write_core_register(self, reg, data): """! @brief Write a CPU register. - + @param self The debug context. @param reg The name of the register to write. @param data New value of the register. Float registers accept float values. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to write the register. @@ -142,11 +142,11 @@ def write_core_register(self, reg, data): def write_core_register_raw(self, reg, data): """! @brief Write a CPU register without type conversion. - + @param self The debug context. @param reg The name of the register to write. @param data New value of the register. Must be an integer, even for float registers. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to write the register. @@ -161,7 +161,7 @@ def write_core_registers_raw(self, reg_list, data_list): register's name in lowercase or the integer register index. @param data_list List of values for the registers in the corresponding positions of _reg_list_. All values must be integers, even for float registers. - + @exception KeyError Invalid or unsupported register was requested. @exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to write one or more registers. 
diff --git a/pyocd/debug/elf/decoder.py b/pyocd/debug/elf/decoder.py index 7f06d7202..95a76a4ce 100644 --- a/pyocd/debug/elf/decoder.py +++ b/pyocd/debug/elf/decoder.py @@ -51,7 +51,7 @@ def get_symbol_for_address(self, addr): return sorted(self.symbol_tree[addr])[0].data except IndexError: return None - + def get_symbol_for_name(self, name): try: return self.symbol_dict[name] @@ -80,7 +80,7 @@ def _build_symbol_search_tree(self): # Add to symbol dict. self.symbol_dict[symbol.name] = syminfo - + # Add to symbol tree. self.symbol_tree.addi(sym_value, sym_value+sym_size, syminfo) diff --git a/pyocd/debug/elf/elf.py b/pyocd/debug/elf/elf.py index 0bd5a90c6..88ed44705 100644 --- a/pyocd/debug/elf/elf.py +++ b/pyocd/debug/elf/elf.py @@ -23,18 +23,18 @@ class ELFSection(MemoryRange): """! @brief Memory range for a section of an ELF file. - + Objects of this class represent sections of an ELF file. See the ELFBinaryFile class documentation for details of how sections are selected and how to get instances of this class. - + If a region in the target's memory map can be found that contains the section, it will be accessible via the instance's _region_ attribute. Otherwise _region_ will be `None`. A maximum of one associated memory region is supported, even if the section spans multiple regions. - + The contents of the ELF section can be read via the `data` property as a `bytearray`. The data is read from the file only once and cached. """ - + def __init__(self, elf, sect): self._elf = elf self._section = sect @@ -52,7 +52,7 @@ def __init__(self, elf, sect): @property def name(self): return self._name - + @property def type(self): return self._section['sh_type'] @@ -80,7 +80,7 @@ def flags_description(self): if flagsDesc[-1] == '|': flagsDesc = flagsDesc[:-1] return flagsDesc - + def __eq__(self, other): # Include section name in equality test. 
return super(ELFSection, self).__eq__(other) and self.name == other.name @@ -91,23 +91,23 @@ def __repr__(self): class ELFBinaryFile(object): """! @brief An ELF binary executable file. - + Examines the ELF and provides several lists of useful data: section objects, and both used and unused ranges of memory. - + An ELFSection object is created for each of the sections of the file that are loadable code or data, or otherwise occupy memory. These are normally the .text, .rodata, .data, and .bss sections. More specifically, the list of sections contains any section with a type of `SHT_PROGBITS` or `SHT_NOBITS`. Also, at least one of the `SHF_WRITE`, `SHF_ALLOC`, or `SHF_EXECINSTR` flags must be set. - + The set of sections is compared with the target's memory map to produce a lists of the used (occupied) and unused (unoccupied) ranges of memory. Note that if the executable uses ranges of memory not mapped with a section of the ELF file, those ranges will not be considered in the used/unused lists. Also, only ranges completely contained within a region of the memory map are considered. """ - + def __init__(self, elf, memory_map=None): self._owns_file = False if isinstance(elf, str): diff --git a/pyocd/debug/semihost.py b/pyocd/debug/semihost.py index d2495dfb4..a20d6985e 100644 --- a/pyocd/debug/semihost.py +++ b/pyocd/debug/semihost.py @@ -74,11 +74,11 @@ class SemihostIOHandler(object): """! @brief Interface for semihosting file I/O handlers. - + This class is also used as the default I/O handler if none is provided to SemihostAgent. In this case, all file I/O requests are rejected. """ - + def __init__(self): self.agent = None self._errno = 0 @@ -92,12 +92,12 @@ def errno(self): def _std_open(self, fnptr, fnlen, mode): """! @brief Helper for standard I/O open requests. - + In the ARM semihosting spec, standard I/O files are opened using a filename of ":tt" with the open mode specifying which standard I/O file to open. 
This method takes care of these special open requests, and is intended to be used by concrete I/O handler subclasses. - + @return A 2-tuple of the file descriptor and filename. The filename is returned so it only has to be read from target memory once if the request is not for standard I/O. The returned file descriptor may be one of 0, 1, or 2 for the standard I/O files, @@ -154,12 +154,12 @@ def rename(self, oldptr, oldlength, newptr, newlength): class InternalSemihostIOHandler(SemihostIOHandler): """! @brief Implements semihosting requests directly in the Python process. - + This class maintains its own list of pseudo-file descriptors for files opened by the debug target. By default, this class uses the system stdin, stdout, and stderr file objects for file desscriptors 1, 2, and 3. """ - + def __init__(self): super(InternalSemihostIOHandler, self).__init__() self.next_fd = STDERR_FD + 1 @@ -287,7 +287,7 @@ def flen(self, fd): class ConsoleIOHandler(SemihostIOHandler): """! @brief Simple IO handler for console.""" - + def __init__(self, stdin_file, stdout_file=None): super(ConsoleIOHandler, self).__init__() self._stdin_file = stdin_file @@ -321,31 +321,31 @@ def readc(self): class SemihostAgent(object): """! @brief Handler for ARM semihosting requests. - + Semihosting requests are made by the target by executing a 'bkpt #0xab' instruction. The requested operation is specified by R0 and any arguments by R1. Many requests use a block of word-sized arguments pointed to by R1. The return value is passed back to the target in R0. - + This class does not handle any file-related requests by itself. It uses I/O handler objects passed in to the constructor. The requests handled directly by this class are #TARGET_SYS_CLOCK and #TARGET_SYS_TIME. - + There are two types of I/O handlers used by this class. The main I/O handler, set with the constructor's @i io_handler parameter, is used for most file operations. 
You may optionally pass another I/O handler for the @i console constructor parameter. The console handler is used solely for standard I/O and debug console I/O requests. If no console handler is provided, the main handler is used instead. TARGET_SYS_OPEN requests are not passed to the console handler in any event, they are always passed to the main handler. - + If no main I/O handler is provided, the class will use SemihostIOHandler, which causes all file I/O requests to be rejected as an error. - + The SemihostAgent assumes standard I/O file descriptor numbers are #STDIN_FD, #STDOUT_FD, and #STDERR_FD. When it receives a read or write request for one of these descriptors, it passes the request to the console handler. This means the main handler must return these numbers for standard I/O open requests (those with a file name of ":tt"). - + Not all semihosting requests are supported. Those that are not implemented are: - TARGET_SYS_TMPNAM - TARGET_SYS_SYSTEM @@ -397,16 +397,16 @@ def __init__(self, context, io_handler=None, console=None): def check_and_handle_semihost_request(self): """! @brief Handle a semihosting request. - + This method should be called after the target has halted, to check if the halt was due to a semihosting request. It first checks to see if the target halted because of a breakpoint. If so, it reads the instruction at PC to make sure it is a 'bkpt #0xAB' instruction. If so, the target is making a semihosting request. If not, nothing more is done. - + After the request is handled, the PC is advanced to the next instruction after the 'bkpt'. A boolean is return indicating whether a semihosting request was handled. If True, the caller should resume the target immediately. - + @retval True A semihosting request was handled. @retval False The target halted for a reason other than semihosting, i.e. a user-installed debugging breakpoint. @@ -458,7 +458,7 @@ def check_and_handle_semihost_request(self): def cleanup(self): """! 
@brief Clean up any resources allocated by semihost requests. - + @note May be called more than once. """ self.io_handler.cleanup() diff --git a/pyocd/debug/svd/loader.py b/pyocd/debug/svd/loader.py index a929308d2..5060ab285 100644 --- a/pyocd/debug/svd/loader.py +++ b/pyocd/debug/svd/loader.py @@ -38,7 +38,7 @@ def from_builtin(cls, svd_name): from ...core.session import Session LOG.warning("unable to open builtin SVD file: %s", err, exc_info=Session.get_current().log_tracebacks) return None - + def __init__(self, filename=None): self.filename = filename self.device = None diff --git a/pyocd/flash/builder.py b/pyocd/flash/builder.py index 8905ad332..469a93929 100644 --- a/pyocd/flash/builder.py +++ b/pyocd/flash/builder.py @@ -67,7 +67,7 @@ class MemoryBuilder(abc.ABC): def __init__(self) -> None: super().__init__() self._buffered_data_size: int = 0 - + @property def buffered_data_size(self) -> int: """@brief Total amount of memory buffered by this builder.""" @@ -100,7 +100,7 @@ def __init__(self, sector_info): self.max_page_count = 0 self.page_list = [] self.erase_weight = sector_info.erase_weight - + def add_page(self, page): # The first time a page is added, compute the page count for this sector. This # obviously assumes that all the pages in the sector are the same size. @@ -111,16 +111,16 @@ def add_page(self, page): assert len(self.page_list) < self.max_page_count self.page_list.append(page) self.page_list.sort(key=lambda p:p.addr) - + def are_any_pages_not_same(self): """! @brief Returns True if any pages in this sector might need to be programmed.""" return any(page.same is not True for page in self.page_list) - + def mark_all_pages_not_same(self): """! 
@brief Sets the same flag to False for all pages in this sector.""" for page in self.page_list: page.same = False - + def __repr__(self): return "<_FlashSector@%x addr=%x size=%x wgt=%g pages=%s>" % ( id(self), self.addr, self.size, self.erase_weight, self.page_list) @@ -145,7 +145,7 @@ def get_program_weight(self): def get_verify_weight(self): """! @brief Get time to verify a page.""" return float(self.size) / float(DATA_TRANSFER_B_PER_S) - + def __repr__(self): return "<_FlashPage@%x addr=%x size=%x datalen=%x wgt=%g erased=%s same=%s>" % ( id(self), self.addr, self.size, len(self.data), self.program_weight, self.erased, self.same) @@ -162,10 +162,10 @@ class FlashBuilder(MemoryBuilder): The purpose of this class is to optimize flash programming within a single region to achieve the highest flash programming performance possible. Various methods are used to estimate the fastest programming method. - + Individual flash algorithm operations are performed by the @ref pyocd.flash.flash.Flash "Flash" instance provided to the contructor. - + Assumptions: 1. Sector erases must be on sector boundaries. 2. Page writes must be on page boundaries. @@ -212,19 +212,19 @@ def add_data(self, addr, data): """! @brief Add a block of data to be programmed. @note Programming does not start until the method program() is called. - + @param self @param addr Base address of the block of data passed to this method. The entire block of data must be contained within the flash memory region associated with this instance. @param data Data to be programmed. Should be a list of byte values. - + @exception ValueError Attempt to add overlapping data, or address range of added data is outside the address range of the flash region associated with the builder. """ # Ignore empty data. 
if len(data) == 0: return - + # Sanity check if not self.flash.region.contains_range(start=addr, length=len(data)): raise ValueError("Flash address range 0x%x-0x%x is not contained within region '%s'" % @@ -236,7 +236,7 @@ def add_data(self, addr, data): # Keep list sorted self.flash_operation_list = sorted(self.flash_operation_list, key=lambda operation: operation.addr) - + # Verify this does not overlap prev_flash_operation = None for operation in self.flash_operation_list: @@ -246,10 +246,10 @@ def add_data(self, addr, data): % (prev_flash_operation.addr, prev_flash_operation.addr + len(prev_flash_operation.data), operation.addr, operation.addr + len(operation.data))) prev_flash_operation = operation - + def _enable_read_access(self): """! @brief Ensure flash is accessible by initing the algo for verify. - + Not all flash memories are always accessible. For instance, external QSPI. Initing the flash algo for the VERIFY operation is the canonical way to ensure that the flash is memory mapped and accessible. @@ -264,7 +264,7 @@ def _enable_read_access(self): def _build_sectors_and_pages(self, keep_unwritten): """! @brief Converts the list of flash operations to flash sectors and pages. - + @param self @param keep_unwritten If true, unwritten pages in an erased sector and unwritten contents of a modified page will be read from the target and added to the data to be @@ -273,14 +273,14 @@ def _build_sectors_and_pages(self, keep_unwritten): @exception FlashFailure Could not get sector or page info for an address. 
""" assert len(self.flash_operation_list) > 0 - + self.program_byte_count = 0 - + flash_addr = self.flash_operation_list[0].addr sector_info = self.flash.get_sector_info(flash_addr) if sector_info is None: raise FlashFailure("attempt to program invalid flash address", address=flash_addr) - + page_info = self.flash.get_page_info(flash_addr) if page_info is None: raise FlashFailure("attempt to program invalid flash address", address=flash_addr) @@ -290,7 +290,7 @@ def _build_sectors_and_pages(self, keep_unwritten): current_page = _FlashPage(page_info) current_sector.add_page(current_page) self.page_list.append(current_page) - + def fill_end_of_page_gap(): # Fill the gap at the end of the soon to be previous page if there is one if len(current_page.data) != current_page.size: @@ -303,12 +303,12 @@ def fill_end_of_page_gap(): old_data = [self.flash.region.erased_byte_value] * old_data_len current_page.data.extend(old_data) self.program_byte_count += old_data_len - + for flash_operation in self.flash_operation_list: pos = 0 while pos < len(flash_operation.data): flash_addr = flash_operation.addr + pos - + # Check if operation is in a different sector. if flash_addr >= current_sector.addr + current_sector.size: sector_info = self.flash.get_sector_info(flash_addr) @@ -321,7 +321,7 @@ def fill_end_of_page_gap(): if flash_addr >= current_page.addr + current_page.size: # Fill any gap at the end of the current page before switching to a new page. fill_end_of_page_gap() - + # Create the new page. page_info = self.flash.get_page_info(flash_addr) if page_info is None: @@ -354,11 +354,11 @@ def fill_end_of_page_gap(): # Fill the page gap at the end if there is one fill_end_of_page_gap() - + # Go back through sectors and fill any missing pages with existing data. if keep_unwritten and self.flash.region.is_readable: self._fill_unwritten_sector_pages() - + def _fill_unwritten_sector_pages(self): """! 
@brief Fill in missing pages from sectors we are going to modify.""" for sector in self.sector_list: @@ -377,18 +377,18 @@ def add_page_with_existing_data(): self.page_list.append(new_page) self.program_byte_count += len(new_page.data) return new_page - + # Iterate over pages defined for the sector. If a gap is found, a new page is inserted # with the current contents of target memory. while sector_page_number < len(sector.page_list): page = sector.page_list[sector_page_number] - + if page.addr != sector_page_addr: page = add_page_with_existing_data() - + sector_page_number += 1 sector_page_addr += page.size - + # Add missing pages at the end of the sector. while sector_page_addr < sector.addr + sector.size: page = add_page_with_existing_data() @@ -398,10 +398,10 @@ def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_veri """! @brief Determine fastest method of flashing and then run flash programming. Data must have already been added with add_data(). - + If the flash region's 'are_erased_sectors_readable' attribute is false, then the smart_flash, fast_verify, and keep_unwritten options are forced disabled. - + @param self @param chip_erase A value of "chip" forces chip erase, "sector" forces sector erase, and a value of "auto" means that the estimated fastest method should be used. If not @@ -423,7 +423,7 @@ def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_veri # Send notification that we're about to program flash. self.flash.target.session.notify(Target.Event.PRE_FLASH_PROGRAM, self) - + # Disable options if attempting to read erased sectors will fault. if not self.flash.region.are_erased_sectors_readable: smart_flash = False @@ -443,7 +443,7 @@ def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_veri if len(self.flash_operation_list) == 0: LOG.warning("No pages were programmed") return - + # Convert chip_erase. 
if (chip_erase is None) or (chip_erase == "auto"): chip_erase = None @@ -458,12 +458,12 @@ def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_veri self._build_sectors_and_pages(keep_unwritten) assert len(self.sector_list) != 0 and len(self.sector_list[0].page_list) != 0 self.flash_operation_list = [] # Don't need this data in memory anymore. - + # If smart flash was set to false then mark all pages # as requiring programming if not smart_flash: self._mark_all_pages_for_programming() - + # If the flash algo doesn't support erase all, disable chip erase. if not self.flash.is_erase_all_supported: chip_erase = False @@ -535,7 +535,7 @@ def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_veri if sector.are_any_pages_not_same(): erase_byte_count += sector.size erase_sector_count += 1 - + self.perf.total_byte_count = self.program_byte_count self.perf.program_byte_count = actual_program_byte_count self.perf.program_page_count = actual_program_page_count @@ -543,7 +543,7 @@ def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_veri self.perf.erase_sector_count = erase_sector_count self.perf.skipped_byte_count = skipped_byte_count self.perf.skipped_page_count = skipped_page_count - + if self.log_performance: if chip_erase: LOG.info("Erased chip, programmed %d bytes (%s), skipped %d bytes (%s) at %.02f kB/s", @@ -551,7 +551,7 @@ def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_veri skipped_byte_count, get_page_count(skipped_page_count), ((self.program_byte_count/1024) / self.perf.program_time)) else: - LOG.info("Erased %d bytes (%s), programmed %d bytes (%s), skipped %d bytes (%s) at %.02f kB/s", + LOG.info("Erased %d bytes (%s), programmed %d bytes (%s), skipped %d bytes (%s) at %.02f kB/s", erase_byte_count, get_sector_count(erase_sector_count), actual_program_byte_count, get_page_count(actual_program_page_count), skipped_byte_count, get_page_count(skipped_page_count), @@ 
-613,7 +613,7 @@ def _analyze_pages_with_partial_read(self): else: # Save the data read for estimation so we don't need to read it again. page.cached_estimate_data = data - + def _analyze_pages_with_crc32(self, assume_estimate_correct=False): """! @brief Estimate how many pages are the same using a CRC32 analyzer. @@ -659,7 +659,7 @@ def _compute_sector_erase_pages_and_weight(self, fast_verify): This is done automatically by smart_program. """ analyze_start = time() - + # Analyze unknown pages using either CRC32 analyzer or partial reads. if any(page.same is None for page in self.page_list): if self.flash.get_flash_info().crc_supported: @@ -688,7 +688,7 @@ def _compute_sector_erase_pages_and_weight(self, fast_verify): elif page.same is True: # Page is confirmed to be the same so no programming weight pass - + if sector.are_any_pages_not_same(): sector_erase_weight += sector.erase_weight @@ -698,7 +698,7 @@ def _compute_sector_erase_pages_and_weight(self, fast_verify): analyze_finish = time() self.perf.analyze_time = analyze_finish - analyze_start LOG.debug("Analyze time: %f" % (analyze_finish - analyze_start)) - + return sector_erase_count, sector_erase_weight def _chip_erase_program(self, progress_cb=_stub_progress): @@ -710,10 +710,10 @@ def _chip_erase_program(self, progress_cb=_stub_progress): self.flash.init(self.flash.Operation.ERASE) self.flash.erase_all() self.flash.uninit() - + progress += self.flash.get_flash_info().erase_weight progress_cb(float(progress) / float(self.chip_erase_weight)) - + self.flash.init(self.flash.Operation.PROGRAM) for page in self.page_list: if not page.erased: @@ -746,7 +746,7 @@ def _chip_erase_program_double_buffer(self, progress_cb=_stub_progress): self.flash.init(self.flash.Operation.ERASE) self.flash.erase_all() self.flash.uninit() - + progress += self.flash.get_flash_info().erase_weight progress_cb(float(progress) / float(self.chip_erase_weight)) @@ -784,7 +784,7 @@ def _chip_erase_program_double_buffer(self, 
progress_cb=_stub_progress): # Update progress. progress += current_weight progress_cb(float(progress) / float(self.chip_erase_weight)) - + self.flash.uninit() progress_cb(1.0) return FlashBuilder.FLASH_CHIP_ERASE @@ -800,7 +800,7 @@ def _sector_erase_program(self, progress_cb=_stub_progress): # Fill in same flag for all pages. This is done up front so we're not trying # to read from flash while simultaneously programming it. progress = self._scan_pages_for_same(progress_cb) - + for sector in self.sector_list: if sector.are_any_pages_not_same(): # Erase the sector @@ -813,7 +813,7 @@ def _sector_erase_program(self, progress_cb=_stub_progress): # Update progress if self.sector_erase_weight > 0: progress_cb(float(progress) / float(self.sector_erase_weight)) - + # The sector was erased, so we must program all pages in the sector # regardless of whether they were the same or not. for page in sector.page_list: @@ -823,7 +823,7 @@ def _sector_erase_program(self, progress_cb=_stub_progress): self.flash.init(self.flash.Operation.PROGRAM) self.flash.program_page(page.addr, page.data) self.flash.uninit() - + actual_sector_erase_count += 1 actual_sector_erase_weight += page.get_program_weight() @@ -840,13 +840,13 @@ def _sector_erase_program(self, progress_cb=_stub_progress): def _scan_pages_for_same(self, progress_cb=_stub_progress): """! @brief Read the full page data to determine if it is unchanged. - + When this function exits, the same flag will be set to either True or False for every page. In addition, sectors that need at least one page programmed will have the same flag set to False for all pages within that sector. 
""" progress = 0 - + # Read page data if unknown - after this page.same will be True or False unknown_pages = [page for page in self.page_list if page.same is None] if unknown_pages: @@ -865,17 +865,17 @@ def _scan_pages_for_same(self, progress_cb=_stub_progress): page.same = same(page.data, data) page.cached_estimate_data = None # This data isn't needed anymore. progress += page.get_verify_weight() - + # Update progress if self.sector_erase_weight > 0: progress_cb(float(progress) / float(self.sector_erase_weight)) - + # If we have to program any pages of a sector, then mark all pages of that sector # as needing to be programmed, since the sector will be erased. for sector in self.sector_list: if sector.are_any_pages_not_same(): sector.mark_all_pages_not_same() - + return progress def _next_nonsame_page(self, i): @@ -909,7 +909,7 @@ def _sector_erase_program_double_buffer(self, progress_cb=_stub_progress): if sector.are_any_pages_not_same(): # Erase the sector self.flash.erase_sector(sector.addr) - + # Update progress progress += sector.erase_weight if self.sector_erase_weight > 0: @@ -936,7 +936,7 @@ def _sector_erase_program_double_buffer(self, progress_cb=_stub_progress): current_weight = page.get_program_weight() self.flash.start_program_page_with_buffer(current_buf, current_addr) - + actual_sector_erase_count += 1 actual_sector_erase_weight += page.get_program_weight() @@ -951,7 +951,7 @@ def _sector_erase_program_double_buffer(self, progress_cb=_stub_progress): raise FlashProgramFailure('flash program page timeout', address=current_addr, result_code=result) elif result != 0: raise FlashProgramFailure('flash program page failure', address=current_addr, result_code=result) - + # Swap buffers. current_buf, next_buf = next_buf, current_buf diff --git a/pyocd/flash/eraser.py b/pyocd/flash/eraser.py index c146b5242..19a3ffbbc 100755 --- a/pyocd/flash/eraser.py +++ b/pyocd/flash/eraser.py @@ -24,7 +24,7 @@ class FlashEraser(object): """! 
@brief Class that manages high level flash erasing. - + Can erase a target in one of three modes: - chip erase: Erase all flash on the target. - mass erase: Also erase all flash on the target. However, on some targets, a mass erase has @@ -37,34 +37,34 @@ class Mode(Enum): MASS = 1 CHIP = 2 SECTOR = 3 - + def __init__(self, session, mode): """! @brief Constructor. - + @param self @param session The session instance. @param mode One of the FlashEraser.Mode enums to select mass, chip, or sector erase. """ self._session = session self._mode = mode - + def erase(self, addresses=None): """! @brief Perform the type of erase operation selected when the object was created. - + For sector erase mode, an iterable of sector addresses specifications must be provided via the _addresses_ parameter. The address iterable elements can be either strings, tuples, or integers. Tuples must have two elements, the start and end addresses of a range to erase. Integers are simply an address within the single page to erase. - + String address specifications may be in one of three formats: "
", "-", or "+". Each field denoted by angled brackets is an integer literal in either decimal or hex notation. - + Examples: - "0x1000" - erase the one sector at 0x1000 - "0x1000-0x4fff" - erase sectors from 0x1000 up to but not including 0x5000 - "0x8000+0x800" - erase sectors starting at 0x8000 through 0x87ff - + @param self @param addresses List of addresses or address ranges of the sectors to erase. """ @@ -76,14 +76,14 @@ def erase(self, addresses=None): self._sector_erase(addresses) else: LOG.warning("No operation performed") - + def _mass_erase(self): LOG.info("Mass erasing device...") if self._session.target.mass_erase(): LOG.info("Successfully erased.") else: LOG.error("Mass erase failed.") - + def _chip_erase(self): LOG.info("Erasing chip...") # Erase all flash regions. This may be overkill if either each region's algo erases @@ -98,7 +98,7 @@ def _chip_erase(self): else: self._sector_erase([(region.start, region.end)]) LOG.info("Done") - + def _sector_erase(self, addresses): flash = None currentRegion = None @@ -106,7 +106,7 @@ def _sector_erase(self, addresses): for spec in addresses: # Convert the spec into a start and end address. sector_addr, end_addr = self._convert_spec(spec) - + while sector_addr < end_addr: # Look up the flash memory region for the current address. region = self._session.target.memory_map.get_region_for_address(sector_addr) @@ -116,34 +116,34 @@ def _sector_erase(self, addresses): if not region.is_flash: LOG.warning("address 0x%08x is not in flash", sector_addr) break - + # Handle switching regions. if region is not currentRegion: # Clean up previous flash. if flash is not None: flash.cleanup() - + currentRegion = region flash = region.flash flash.init(flash.Operation.ERASE) - + assert flash is not None - + # Get sector info for the current address. 
sector_info = flash.get_sector_info(sector_addr) assert sector_info, ("sector address 0x%08x within flash region '%s' is invalid" % (sector_addr, region.name)) - + # Align first page address. delta = sector_addr % sector_info.size if delta: LOG.warning("sector address 0x%08x is unaligned", sector_addr) sector_addr -= delta - + # Erase this page. LOG.info("Erasing sector 0x%08x (%d bytes)", sector_addr, sector_info.size) flash.erase_sector(sector_addr) - + sector_addr += sector_info.size if flash is not None: diff --git a/pyocd/flash/file_programmer.py b/pyocd/flash/file_programmer.py index d6311035d..ca5f65d1a 100755 --- a/pyocd/flash/file_programmer.py +++ b/pyocd/flash/file_programmer.py @@ -38,7 +38,7 @@ def ranges(i: List[int]) -> List[Tuple[int, int]]: """! Accepts a sorted list of byte addresses. Breaks the addresses into contiguous ranges. Yields 2-tuples of the start and end address for each contiguous range. - + For instance, the input [0, 1, 2, 3, 32, 33, 34, 35] will yield the following 2-tuples: (0, 3) and (32, 35). """ @@ -48,12 +48,12 @@ def ranges(i: List[int]) -> List[Tuple[int, int]]: class FileProgrammer(object): """! @brief Class to manage programming a file in any supported format with many options. - + Most specifically, this class implements the behaviour provided by the command-line flash programming tool. The code in this class simply extracts data from the given file, potentially respecting format-specific options such as the base address for binary files. Then the heavy lifting of flash programming is handled by FlashLoader, and beneath that, FlashBuilder. - + Support file formats are: - Binary (.bin) - Intel Hex (.hex) @@ -68,7 +68,7 @@ def __init__(self, keep_unwritten: Optional[bool] = None ): """! @brief Constructor. - + @param self @param session The session object. @param progress A progress report handler as a callable that takes a percentage completed. 
@@ -94,64 +94,64 @@ def __init__(self, self._keep_unwritten = keep_unwritten self._progress = progress self._loader = None - + self._format_handlers: Dict[str, Callable[..., None]] = { 'axf': self._program_elf, 'bin': self._program_bin, 'elf': self._program_elf, 'hex': self._program_hex, } - + def program(self, file_or_path: Union[str, IO[bytes]], file_format: Optional[str] = None, **kwargs: Any): """! @brief Program a file into flash. - + @param self @param file_or_path Either a string that is a path to a file, or a file-like object. @param file_format Optional file format name, one of "bin", "hex", "elf", "axf". If not provided, the file's extension will be used. If a file object is passed for _file_or_path_ then this parameter must be used to set the format. @param kwargs Optional keyword arguments for format-specific parameters. - + The only current format-specific keyword parameters are for the binary format: - `base_address`: Memory address at which to program the binary data. If not set, the base of the boot memory will be used. - `skip`: Number of bytes to skip at the start of the binary file. Does not affect the base address. - + @exception FileNotFoundError Provided file_or_path string does not reference a file. @exception ValueError Invalid argument value, for instance providing a file object but not setting file_format. """ is_path = isinstance(file_or_path, str) - + # Check for valid path first. if is_path and not os.path.isfile(file_or_path): # type: ignore (type checker doesn't use is_path) raise FileNotFoundError(errno.ENOENT, "No such file: '{}'".format(file_or_path)) - + # If no format provided, use the file's extension. if not file_format: if is_path: # Extract the extension from the path. file_format = os.path.splitext(file_or_path)[1][1:] # type: ignore (type checker doesn't use is_path) - + # Explicitly check for no extension. 
if file_format == '': raise ValueError("file path '{}' does not have an extension and " "no format is set".format(file_or_path)) else: raise ValueError("file object provided but no format is set") - + # Check the format is one we understand. if file_format is None or file_format not in self._format_handlers: raise ValueError("unknown file format '%s'" % file_format) - + self._loader = FlashLoader(self._session, progress=self._progress, chip_erase=self._chip_erase, smart_flash=self._smart_flash, trust_crc=self._trust_crc, keep_unwritten=self._keep_unwritten) - + # file_obj = None # Open the file if a path was provided. if is_path: @@ -185,13 +185,13 @@ def _program_bin(self, file_obj: IO[bytes], **kwargs: Any) -> None: raise exceptions.TargetSupportError("No boot memory is defined for this device") address = boot_memory.start assert isinstance(address, int) - + skip_offset = kwargs.get('skip', 0) if not isinstance(skip_offset, int): raise TypeError("skip argument must be an integer") file_obj.seek(skip_offset, os.SEEK_SET) data = list(bytearray(file_obj.read())) - + self._loader.add_data(address, data) def _program_hex(self, file_obj: IO[bytes], **kwargs: Any) -> None: @@ -208,7 +208,7 @@ def _program_hex(self, file_obj: IO[bytes], **kwargs: Any) -> None: data = list(hexfile.tobinarray(start=start, size=size)) # Ignore invalid addresses for HEX files only # Binary files (obviously) don't contain addresses - # For ELF files, any metadata that's not part of the application code + # For ELF files, any metadata that's not part of the application code # will be held in a section that doesn't have the SHF_WRITE flag set try: self._loader.add_data(start, data) @@ -223,7 +223,7 @@ def _program_elf(self, file_obj: IO[bytes], **kwargs: Any) -> None: addr = segment['p_paddr'] if segment.header.p_type == 'PT_LOAD' and segment.header.p_filesz != 0: data = bytearray(segment.data()) - LOG.debug("Writing segment LMA:0x%08x, VMA:0x%08x, size %d", addr, + LOG.debug("Writing segment 
LMA:0x%08x, VMA:0x%08x, size %d", addr, segment['p_vaddr'], segment.header.p_filesz) try: self._loader.add_data(addr, data) diff --git a/pyocd/flash/flash.py b/pyocd/flash/flash.py index c618e2012..522acc8d0 100644 --- a/pyocd/flash/flash.py +++ b/pyocd/flash/flash.py @@ -42,7 +42,7 @@ 0x40434013, 0xc6083501, 0xd1d242bd, 0xd01f2900, 0x46602301, 0x469c25ff, 0x00894e11, 0x447e1841, 0x88034667, 0x409f8844, 0x2f00409c, 0x2201d012, 0x4252193f, 0x34017823, 0x402b4053, 0x599b009b, 0x405a0a12, 0xd1f542bc, 0xc00443d2, 0xd1e74281, 0xbdf02000, 0xe7f82200, 0x000000b2, 0xedb88320, - 0x00000042, + 0x00000042, ) @dataclass @@ -69,13 +69,13 @@ class FlashInfo: class Flash: """ @brief Low-level control of flash programming algorithms. - + Instances of this class are bound to a flash memory region (@ref pyocd.core.memory_map.FlashRegion "FlashRegion") and support programming only within that region's address range. To program images that cross flash memory region boundaries, use the @ref pyocd.flash.loader.FlashLoader "FlashLoader" or @ref pyocd.flash.file_programmer.FileProgrammer "FileProgrammer" classes. - + Terminology: - sector: The size of an erasable block. - page: The size of a nominal programming block. Often flash can be programmed in much smaller @@ -84,7 +84,7 @@ class Flash: sectors. - phrase: The minimum programming granularity, often from 1-16 bytes. For some flash technologies, the is no distinction between a phrase and a page. - + The `flash_algo` parameter of the constructor is a dictionary that defines all the details of the flash algorithm. The keys of this dictionary are as follows. - `load_address`: Memory address where the flash algo instructions will be loaded. @@ -104,7 +104,7 @@ class Flash: - `analyzer_supported`: Whether the CRC32-based analyzer is supported. - `analyzer_address`: RAM base address where the analyzer code will be placed. There must be at least 0x600 free bytes after this address. 
- + All of the "pc_" entry point key values must have bit 0 set to indicate a Thumb function. """ class Operation(Enum): @@ -117,7 +117,7 @@ class Operation(Enum): VERIFY = 3 ## Error value returned from wait_for_completion() on operation timeout. - # + # # The flash algo itself can never return a negative error code because the core register r0 is read # as unsigned. TIMEOUT_ERROR = -1 @@ -160,7 +160,7 @@ def __init__(self, target, flash_algo): self.min_program_length = 0 self.page_buffers = [] self.double_buffer_supported = False - + def _is_api_valid(self, api_name): return (api_name in self.flash_algo) \ and (self.flash_algo[api_name] >= self.flash_algo['load_address']) \ @@ -174,7 +174,7 @@ def minimum_program_length(self): @property def page_buffer_count(self): return len(self.page_buffers) - + @property def is_erase_all_supported(self): return self._is_api_valid('pc_eraseAll') @@ -182,11 +182,11 @@ def is_erase_all_supported(self): @property def is_double_buffering_supported(self): return self.double_buffer_supported - + @property def region(self): return self._region - + @region.setter def region(self, flashRegion): assert flashRegion.is_flash @@ -195,27 +195,27 @@ def region(self, flashRegion): def init(self, operation, address=None, clock=0, reset=True): """! @brief Prepare the flash algorithm for performing operations. - + First, the target is prepared to execute flash algo operations, including loading the algo to target RAM. This step is skipped if the target is already prepared, i.e., init() has been called, but cleanup() not called yet. - + Next, the algo's Init() function is called with the provided parameters. If the algo does not have an Init() function, this step is skipped. Calling Init() is also skipped if the algo was previously inited for the same operation without an intervening uninit. If the algo is already inited for a different operation, uninit() is automatically called prior to intiting for the new operation. 
- + @exception FlashFailure """ if address is None: address = self.get_flash_info().rom_start - + assert isinstance(operation, self.Operation) assert (self._did_prepare_target) or (not self._did_prepare_target and self._active_operation is None) - + self.target.halt() - + # Handle the algo already being inited. if self._active_operation is not None: # Uninit if the algo was left inited for a different operation. @@ -228,7 +228,7 @@ def init(self, operation, address=None, clock=0, reset=True): # Setup target for running the flash algo. if not self._did_prepare_target: TRACE.debug("algo init and load to %#010x", self.flash_algo['load_address']) - + if reset: self.target.reset_and_halt(Target.ResetType.SW) self.prepare_target() @@ -251,12 +251,12 @@ def init(self, operation, address=None, clock=0, reset=True): raise FlashFailure('flash init timed out') elif result != 0: raise FlashFailure('flash init failure', result_code=result) - + self._active_operation = operation def cleanup(self): """! @brief Deinitialize the flash algo and restore the target. - + Before further operations are executed, the algo must be reinited. Unlike uninit(), this method marks the target and unprepared to execute flash algo functions. So on the next call to init(), the target will be prepared and the algo loaded into RAM. @@ -267,15 +267,15 @@ def cleanup(self): def uninit(self): """! @brief Uninitialize the flash algo. - + Before further operations are executed, the algo must be reinited. The target is left in a state where algo does not have to be reloaded when init() is called. 
- + @exception FlashFailure """ if self._active_operation is None: return - + if self._is_api_valid('pc_unInit'): TRACE.debug("call uninit(%d)", self._active_operation.value) @@ -283,27 +283,27 @@ def uninit(self): result = self._call_function_and_wait(self.flash_algo['pc_unInit'], r0=self._active_operation.value, timeout=self.target.session.options.get('flash.timeout.init')) - + # check the return code TRACE.debug("uninit result = %d", result) if result == self.TIMEOUT_ERROR: raise FlashFailure('flash uninit timed out') elif result != 0: raise FlashFailure('flash uninit', result_code=result) - + self._active_operation = None def prepare_target(self): """! @brief Subclasses can override this method to perform special target configuration.""" pass - + def restore_target(self): """! @brief Subclasses can override this method to undo any target configuration changes.""" pass def compute_crcs(self, sectors): assert self.use_analyzer - + data = [] # Load analyzer code into target RAM. @@ -335,7 +335,7 @@ def compute_crcs(self, sectors): def erase_all(self): """! @brief Erase all the flash. - + @exception FlashEraseFailure """ assert self._active_operation == self.Operation.ERASE @@ -356,7 +356,7 @@ def erase_all(self): def erase_sector(self, address): """! @brief Erase one sector. - + @exception FlashEraseFailure """ assert self._active_operation == self.Operation.ERASE @@ -376,7 +376,7 @@ def erase_sector(self, address): def program_page(self, address, bytes): """! @brief Flash one or more pages. - + @exception FlashProgramFailure """ assert self._active_operation == self.Operation.PROGRAM @@ -415,7 +415,7 @@ def start_program_page_with_buffer(self, buffer_number, address): def load_page_buffer(self, buffer_number, address, bytes): """! @brief Load data to a numbered page buffer. - + This method is used in conjunction with start_program_page_with_buffer() to implement double buffered programming. 
""" @@ -430,7 +430,7 @@ def load_page_buffer(self, buffer_number, address, bytes): def program_phrase(self, address, bytes): """! @brief Flash a portion of a page. - + @exception FlashFailure The address or data length is not aligned to the minimum programming length specified in the flash algorithm. @exception FlashProgramFailure @@ -519,7 +519,7 @@ def flash_block(self, addr, data, smart_flash=True, chip_erase=None, progress_cb """ assert self.region is not None assert self.region.contains_range(start=addr, length=len(data)) - + fb = FlashBuilder(self) fb.add_data(addr, data) info = fb.program(chip_erase, progress_cb, smart_flash, fast_verify) @@ -618,7 +618,7 @@ def wait_for_completion(self, timeout=None): if self.target.get_state() != Target.State.RUNNING: break else: - # Operation timed out. + # Operation timed out. self.target.halt() return self.TIMEOUT_ERROR diff --git a/pyocd/flash/loader.py b/pyocd/flash/loader.py index 975483e9a..fca32c264 100755 --- a/pyocd/flash/loader.py +++ b/pyocd/flash/loader.py @@ -114,20 +114,20 @@ def region(self) -> "MemoryRegion": class MemoryLoader: """@brief Handles high level programming of raw binary data to memory. - + If you need file programming, either binary files or other formats, please see the FileProgrammer class. - + This manager provides a simple interface to programming data that may cross memory region boundaries. To use it, create an instance and pass in the session object. Then call add_data() for each chunk of binary data you need to write. When all data is added, call the commit() method to write everything to memory. You may reuse a single MemoryLoader instance for multiple add-commit sequences. - + When programming across multiple regions, progress reports are combined so that only a one progress output is reported. Similarly, the programming performance report for each region is suppresed and a combined report is logged. 
- + Internally, MemoryBuilder instances are used to buffer data to be written to different types of memory. FlashBuilder is used to optimise programming within flash memory regions. RAM regions are programmed using the much simpler RamBuilder. @@ -155,7 +155,7 @@ def __init__(self, keep_unwritten: Optional[bool] = None ): """! @brief Constructor. - + @param self @param session The session object. @param progress A progress report handler as a callable that takes a percentage completed. @@ -195,9 +195,9 @@ def __init__(self, else self._session.options.get('fast_program') self._keep_unwritten = keep_unwritten if (keep_unwritten is not None) \ else self._session.options.get('keep_unwritten') - + self._reset_state() - + def _reset_state(self): """! @brief Clear all state variables. """ # _builders is a dict that maps memory regions to either a FlashBuilder or, for writable memories, @@ -206,19 +206,19 @@ def _reset_state(self): self._total_data_size = 0 self._progress_offset = 0.0 self._current_progress_fraction = 0.0 - + def add_data(self, address, data): """! @brief Add a chunk of data to be programmed. - + The data may cross memory region boundaries, as long as the regions are contiguous. - + @param self @param address Integer address for where the first byte of _data_ should be written. @param data A list of byte values to be programmed at the given address. - + @return The MemoryLoader instance is returned, to allow chaining further add_data() calls or a call to commit(). - + @exception ValueError Raised when the address is not within a flash memory region. @exception TargetSupportError Raised if the flash memory region does not have a valid Flash instance associated with it, which indicates that the target connect sequence did @@ -244,28 +244,28 @@ def add_data(self, address, data): region_builder = RamBuilder(self._session, region) else: raise ValueError(f"memory region at address {address:#010x} is not writable") - + # Save the new builder. 
assert region_builder is not None self._builders[region] = region_builder - + # Take as much data as is contained by this region. program_length = min(len(data), region.end - address + 1) assert program_length != 0 # Add data to this region's builder. region_builder.add_data(address, data[:program_length]) - + # Advance. data = data[program_length:] address += program_length self._total_data_size += program_length - + return self - + def commit(self): """! @brief Write all collected data to memory. - + This routine ensures that chip erase is only used once if either the auto mode or chip erase mode are used. As an example, if two regions are to be written to and True was passed to the constructor for chip_erase (or if the session option was set), then only @@ -273,17 +273,17 @@ def commit(self): sector erase. This will not result in extra erasing, as sector erase always verifies whether the sectors are already erased. This will, of course, also work correctly if the flash algorithm for the first region doesn't actually erase the entire chip (all regions). - + After calling this method, the loader instance can be reused to program more data. """ didChipErase = False perfList = [] - + # Iterate over builders we've created and program the data. for builder in sorted(self._builders.values(), key=lambda v: v.region.start): # Determine this builder's portion of total progress. self._current_progress_fraction = builder.buffered_data_size / self._total_data_size - + # Program the data. chipErase = self._chip_erase if not didChipErase else "sector" perf = builder.program(chip_erase=chipErase, @@ -293,15 +293,15 @@ def commit(self): keep_unwritten=self._keep_unwritten) perfList.append(perf) didChipErase = True - + self._progress_offset += self._current_progress_fraction # Report programming statistics. self._log_performance(perfList) - + # Clear state to allow reuse. self._reset_state() - + def _log_performance(self, perf_list): """! 
@brief Log a report of programming performance numbers.""" # Compute overall performance numbers. @@ -311,13 +311,13 @@ def _log_performance(self, perf_list): actual_program_page_count = sum(perf.program_page_count for perf in perf_list) skipped_byte_count = sum(perf.skipped_byte_count for perf in perf_list) skipped_page_count = sum(perf.skipped_page_count for perf in perf_list) - + # Compute kbps while avoiding a potential zero-div error. if totalProgramTime == 0: kbps = 0 else: kbps = (program_byte_count/1024) / totalProgramTime - + if any(perf.program_type == FlashBuilder.FLASH_CHIP_ERASE for perf in perf_list): LOG.info("Erased chip, programmed %d bytes (%s), skipped %d bytes (%s) at %.02f kB/s", actual_program_byte_count, get_page_count(actual_program_page_count), @@ -327,22 +327,22 @@ def _log_performance(self, perf_list): erase_byte_count = sum(perf.erase_byte_count for perf in perf_list) erase_sector_count = sum(perf.erase_sector_count for perf in perf_list) - LOG.info("Erased %d bytes (%s), programmed %d bytes (%s), skipped %d bytes (%s) at %.02f kB/s", + LOG.info("Erased %d bytes (%s), programmed %d bytes (%s), skipped %d bytes (%s) at %.02f kB/s", erase_byte_count, get_sector_count(erase_sector_count), actual_program_byte_count, get_page_count(actual_program_page_count), skipped_byte_count, get_page_count(skipped_page_count), kbps) - + def _progress_cb(self, amount): if self._progress is not None: self._progress((amount * self._current_progress_fraction) + self._progress_offset) - + @classmethod def program_binary_data(cls, session, address, data): """! @brief Helper routine to write a single chunk of data. - + The session options for chip_erase and trust_crc are used. - + @param cls @param session The session instance. @param address Start address of the data to program. 
diff --git a/pyocd/gdbserver/context_facade.py b/pyocd/gdbserver/context_facade.py index 625247ad6..6a089cd3a 100644 --- a/pyocd/gdbserver/context_facade.py +++ b/pyocd/gdbserver/context_facade.py @@ -59,27 +59,27 @@ class GDBDebugContextFacade(object): """! @brief Provides GDB specific transformations to a DebugContext.""" - ## The order certain target features should appear in target XML. + ## The order certain target features should appear in target XML. REQUIRED_FEATURE_ORDER = ("org.gnu.gdb.arm.m-profile", "org.gnu.gdb.arm.vfp") def __init__(self, context): self._context = context - + # Note: Use the gdb 'maint print remote-registers' command to see it's view of the g/G commands. - + ## List of CoreRegisterInfos sorted by gdb_regnum, excluding any registers not communicated to gdb. # # This list is in the order expected by the g/G commands for reading/writing full register contexts. # It contains de-duplicated core registers with a valid GDB regnum, sorted by regnum. self._register_list = sorted(set(self._context.core.core_registers.iter_matching( lambda reg: reg.gdb_regnum is not None)), key=lambda v: v.gdb_regnum) - + ## List of internal register numbers corresponding to gdb registers. self._full_reg_num_list = [reg.index for reg in self._register_list] - + ## Map of gdb regnum to register info. self._gdb_regnum_map = {reg.gdb_regnum: reg for reg in self._register_list} - + ## String of XML target description for gdb. self._target_xml = self._build_target_xml() @@ -92,7 +92,7 @@ def set_context(self, new_context): def get_register_context(self): """! @brief Return hexadecimal dump of registers as expected by GDB. 
- + @exception CoreRegisterAccessError """ LOG.debug("GDB getting register context") @@ -101,7 +101,7 @@ def get_register_context(self): vals = self._context.read_core_registers_raw(self._full_reg_num_list) except exceptions.CoreRegisterAccessError: vals = [None] * len(self._full_reg_num_list) - + for reg, reg_value in zip(self._register_list, vals): # Return x's to indicate unavailable register value. if reg_value is None: @@ -116,7 +116,7 @@ def get_register_context(self): def set_register_context(self, data): """! @brief Set registers from GDB hexadecimal string. - + @exception CoreRegisterAccessError """ LOG.debug("GDB setting register context") @@ -137,11 +137,11 @@ def set_register_context(self, data): def set_register(self, gdb_regnum, data): """! @brief Set single register from GDB hexadecimal string. - + @param self The object. @param gdb_regnum The regnum of register in target XML sent to GDB. @param data String of hex-encoded value for the register. - + @exception CoreRegisterAccessError """ reg = self._gdb_regnum_map.get(gdb_regnum, None) @@ -154,17 +154,17 @@ def set_register(self, gdb_regnum, data): def gdb_get_register(self, gdb_regnum): """! @brief Set single core register. - + @param self The object. @param gdb_regnum The regnum of register in target XML sent to GDB. @return String of hex-encoded value for the register. - + @exception CoreRegisterAccessError """ reg = self._gdb_regnum_map.get(gdb_regnum, None) if reg is None: return b'' - + try: reg_value = self._context.read_core_register_raw(reg.name) resp = six.b(conversion.uint_to_hex_le(reg_value, reg.bitsize)) @@ -177,7 +177,7 @@ def gdb_get_register(self, gdb_regnum): def get_t_response(self, force_signal=None): """! @brief Returns a GDB T response string. - + This includes: - The signal encountered. - The current value of the important registers (sp, lr, pc). @@ -212,7 +212,7 @@ def get_signal_value(self): def _get_reg_index_value_pairs(self, reg_list): """! 
@brief Return register values as pairs. - + Returns a string like NN:MMMMMMMM;NN:MMMMMMMM;... for the T response string. NN is the index of the register to follow MMMMMMMM is the value of the register. @@ -241,7 +241,7 @@ def get_memory_map_xml(self): # Look up the region type name. Regions default to ram if gdb doesn't # have a concept of the region type. gdb_type = GDB_TYPE_MAP.get(r.type, 'ram') - + start = hex(r.start).rstrip("L") length = hex(r.length).rstrip("L") mem = ElementTree.SubElement(root, 'memory', type=gdb_type, start=start, length=length) @@ -282,7 +282,7 @@ def _build_target_xml(self): regs_by_feature = {k: list(g) for k, g in groupby(regs_sorted_by_feature, key=lambda r: r.gdb_feature)} unordered_features = list(regs_by_feature.keys()) features = [] - + # Get a list of gdb features with some features having a determined order. for feature_name in self.REQUIRED_FEATURE_ORDER: if feature_name in unordered_features: @@ -290,20 +290,20 @@ def _build_target_xml(self): unordered_features.remove(feature_name) # Add any remaining features at the end of the feature list. features += unordered_features - + use_xpsr_control_fields = self._context.session.options.get('xpsr_control_fields') - + xml_root = ElementTree.Element('target') - + for feature_name in features: regs = regs_by_feature[feature_name] - + xml_feature = ElementTree.SubElement(xml_root, "feature", name=feature_name) # Special case for XPSR and CONTROL bitfield presentation. if (feature_name == "org.gnu.gdb.arm.m-profile") and use_xpsr_control_fields: self._define_xpsr_control_fields(xml_feature) - + # Add XML for the registers in this feature. for reg in regs: if use_xpsr_control_fields and (reg.name in ('xpsr', 'control')): diff --git a/pyocd/gdbserver/gdbserver.py b/pyocd/gdbserver/gdbserver.py index 35a916ee8..edb818d52 100644 --- a/pyocd/gdbserver/gdbserver.py +++ b/pyocd/gdbserver/gdbserver.py @@ -59,7 +59,7 @@ def unescape(data): """! @brief De-escapes binary data from Gdb. 
- + @param data Bytes-like object with possibly escaped values. @return List of integers in the range 0-255, with all escaped bytes de-escaped. """ @@ -81,7 +81,7 @@ def unescape(data): def escape(data): """! @brief Escape binary data to be sent to Gdb. - + @param data Bytes-like object containing raw binary. @return Bytes object with the characters in '#$}*' escaped as required by Gdb. """ @@ -99,17 +99,17 @@ class GDBError(exceptions.Error): class GDBServer(threading.Thread): """! @brief GDB remote server thread. - + This class start a GDB server listening a gdb connection on a specific port. It implements the RSP (Remote Serial Protocol). """ ## Notification event for the gdbserver beginnning to listen on its RSP port. GDBSERVER_START_LISTENING_EVENT = 'gdbserver-start-listening' - + ## Timer delay for sending the notification that the server is listening. START_LISTENING_NOTIFY_DELAY = 0.03 # 30 ms - + def __init__(self, session, core=None): super(GDBServer, self).__init__() self.session = session @@ -198,7 +198,7 @@ def __init__(self, session, core=None): self.telnet_server = None semihost_console = semihost_io_handler self.semihost = semihost.SemihostAgent(self.target_context, io_handler=semihost_io_handler, console=semihost_console) - + # # If SWV is enabled, create a SWVReader thread. Note that we only do # this if the core is 0: SWV is not a per-core construct, and can't @@ -214,11 +214,11 @@ def __init__(self, session, core=None): swo_clock = int(session.options.get("swv_clock")) self._swv_reader = SWVReader(session, self.core, self.lock) self._swv_reader.init(sys_clock, swo_clock, console_file) - + self._init_remote_commands() # pylint: disable=invalid-name - + # Command handler table. # # The dict keys are the first character of the incoming command from gdb. 
Values are a @@ -274,7 +274,7 @@ def _init_remote_commands(self): self._command_context = CommandExecutionContext() self._command_context.selected_core = self.target self._command_context.attach_session(self.session) - + # Add the gdbserver command group. self._command_context.command_set.add_command_group('gdbserver') @@ -334,7 +334,7 @@ def run(self): if self.detach_event.is_set(): continue - + # Make sure the target is halted. Otherwise gdb gets easily confused. self.target.halt() @@ -453,7 +453,7 @@ def kill(self): if not self.persist: self.board.target.set_vector_catch(Target.VectorCatch.NONE) self.board.target.resume() - + def restart(self, data): self.target.reset_and_halt() # No reply. @@ -663,7 +663,7 @@ def resume(self, data): # This exception was not a transfer error, so reading the target state should be ok. val = ('S%02x' % self.target_facade.get_signal_value()).encode() break - + # Check if we exited the above loop due to a timeout after a fault. if fault_retry_timeout.did_time_out: LOG.error("Timed out while attempting to reestablish control over target.") @@ -674,13 +674,13 @@ def resume(self, data): def step(self, data, start=0, end=0): #addr = self._get_resume_step_addr(data) LOG.debug("GDB step: %s (start=0x%x, end=0x%x)", data, start, end) - + # Use the step hook to check for an interrupt event. def step_hook(): # Note we don't clear the interrupt event here! return self.packet_io.interrupt_event.is_set() self.target.step(not self.step_into_interrupt, start, end, hook_cb=step_hook) - + # Clear and handle an interrupt. 
if self.packet_io.interrupt_event.is_set(): LOG.debug("Received Ctrl-C during step") @@ -688,7 +688,7 @@ def step_hook(): response = self.get_t_response(forceSignal=signals.SIGINT) else: response = self.get_t_response() - + return self.create_rsp_packet(response) def halt(self): @@ -772,7 +772,7 @@ def v_cont(self, cmd): end = 0 if thread_actions[currentThread][0:1] == b'r': start, end = [int(addr, base=16) for addr in thread_actions[currentThread][1:].split(b',')] - + if self.non_stop: self.target.step(not self.step_into_interrupt, start, end) self.packet_io.send(self.create_rsp_packet(b"OK")) @@ -997,9 +997,9 @@ def init_thread_providers(self): if not self.session.options.get('rtos.enable'): LOG.debug("Skipping RTOS load because it was disabled.") return self.create_rsp_packet(b"OK") - + forced_rtos_name = self.session.options.get('rtos.name') - + symbol_provider = GDBSymbolProvider(self) for rtosName, rtosClass in RTOS.items(): @@ -1053,7 +1053,7 @@ def handle_remote_command(self, cmd): # Create a new stream to collect the command output. stream = io.StringIO() self._command_context.output_stream = stream - + # TODO run this in a separate thread so we can cancel the command with ^C from gdb? try: # Run command and collect output. @@ -1070,7 +1070,7 @@ def handle_remote_command(self, cmd): stream.write("Unexpected error: %s\n" % err) LOG.error("Exception while executing remote command '%s': %s", cmd, err, exc_info=self.session.log_tracebacks) - + # Convert back to bytes, hex encode, then return the response packet. output = stream.getvalue() if not output: @@ -1079,7 +1079,7 @@ def handle_remote_command(self, cmd): # Disconnect the stream. self._command_context.output_stream = None - + return self.create_rsp_packet(response) def handle_general_set(self, msg): @@ -1240,7 +1240,7 @@ def event_handler(self, notification): def _option_did_change(self, notification): """! @brief Handle an option changing at runtime. 
- + For option notifications, the event is the name of the option and the `data` attribute is an OptionChangeInfo object with `new_value` and `old_value` attributes. """ diff --git a/pyocd/gdbserver/gdbserver_commands.py b/pyocd/gdbserver/gdbserver_commands.py index cd5afa684..636ef85e7 100644 --- a/pyocd/gdbserver/gdbserver_commands.py +++ b/pyocd/gdbserver/gdbserver_commands.py @@ -31,12 +31,12 @@ class ThreadsCommand(CommandBase): 'usage': "{flush,enable,disable,status}", 'help': "Control thread awareness.", } - + def parse(self, args): self.action = args[0] if self.action not in ('flush', 'enable', 'disable', 'status'): raise exceptions.CommandError("invalid action") - + def execute(self): # Get the gdbserver for the selected core. core_number = self.context.selected_core.core_number @@ -73,21 +73,21 @@ class ArmSemihostingCommand(CommandBase): 'extra_help': "Provided for compatibility with OpenOCD. The same functionality can be achieved " "by setting the 'enable_semihosting' session option.", } - + def parse(self, args): if args[0] != 'semihosting': raise exceptions.CommandError("invalid action") if args[1] not in ('enable', 'disable'): raise exceptions.CommandError("invalid action") self.action = args[1] - + def execute(self): enable = (self.action == 'enable') self.context.session.options['enable_semihosting'] = enable class GdbserverMonitorInitCommand(CommandBase): """@brief 'init' command for OpenOCD compatibility. - + Many default gdbserver configurations send an 'init' monitor command. """ INFO = { @@ -98,6 +98,6 @@ class GdbserverMonitorInitCommand(CommandBase): 'usage': "init", 'help': "Ignored; for OpenOCD compatibility.", } - + def execute(self): pass diff --git a/pyocd/gdbserver/packet_io.py b/pyocd/gdbserver/packet_io.py index c76b23d68..3bb782926 100644 --- a/pyocd/gdbserver/packet_io.py +++ b/pyocd/gdbserver/packet_io.py @@ -40,13 +40,13 @@ class ConnectionClosedException(Exception): class GDBServerPacketIOThread(threading.Thread): """! 
@brief Packet I/O thread. - + This class is a thread used by the GDBServer class to perform all RSP packet I/O. It handles verifying checksums, acking, and receiving Ctrl-C interrupts. There is a queue for received packets. The interface to this queue is the receive() method. The send() method writes outgoing packets to the socket immediately. """ - + def __init__(self, abstract_socket): super(GDBServerPacketIOThread, self).__init__() self.name = "gdb-packet-thread-port%d" % abstract_socket.port diff --git a/pyocd/probe/aggregator.py b/pyocd/probe/aggregator.py index 235151a6c..5b4898c27 100644 --- a/pyocd/probe/aggregator.py +++ b/pyocd/probe/aggregator.py @@ -48,31 +48,31 @@ def _get_probe_classes(unique_id): @staticmethod def get_all_connected_probes(unique_id=None): klasses, unique_id, is_explicit = DebugProbeAggregator._get_probe_classes(unique_id) - + probes = [] - + # First look for a match against the full ID, as this can be more efficient for certain probes. if unique_id is not None: for cls in klasses: probe = cls.get_probe_with_id(unique_id, is_explicit) if probe is not None: return [probe] - + # No full match, so ask probe classes for probes. for cls in klasses: probes += cls.get_all_connected_probes(unique_id, is_explicit) - + # Filter by unique ID. if unique_id is not None: unique_id = unique_id.lower() probes = [probe for probe in probes if (unique_id in probe.unique_id.lower())] - + return probes - + @classmethod def get_probe_with_id(cls, unique_id): klasses, unique_id, is_explicit = DebugProbeAggregator._get_probe_classes(unique_id) - + for cls in klasses: probe = cls.get_probe_with_id(unique_id, is_explicit) if probe is not None: diff --git a/pyocd/probe/cmsis_dap_probe.py b/pyocd/probe/cmsis_dap_probe.py index bc823e7d0..14d77191c 100644 --- a/pyocd/probe/cmsis_dap_probe.py +++ b/pyocd/probe/cmsis_dap_probe.py @@ -32,7 +32,7 @@ class CMSISDAPProbe(DebugProbe): """! @brief Wraps a pydapaccess link as a DebugProbe. 
- + Supports CMSIS-DAP v1 and v2. """ @@ -52,14 +52,14 @@ class CMSISDAPProbe(DebugProbe): DAPAccess.PORT.SWD: DebugProbe.Protocol.SWD, DAPAccess.PORT.JTAG: DebugProbe.Protocol.JTAG, } - + # APnDP constants. DP = 0 AP = 1 - + # Bitmasks for AP register address fields. A32 = 0x0000000c - + # Map from AP/DP and 2-bit register address to the enums used by pydapaccess. REG_ADDR_TO_ID_MAP: Dict[Tuple[int, int], DAPAccess.REG] = { # APnDP A32 @@ -72,17 +72,17 @@ class CMSISDAPProbe(DebugProbe): ( 1, 0x8 ) : DAPAccess.REG.AP_0x8, ( 1, 0xC ) : DAPAccess.REG.AP_0xC, } - + ## USB VID and PID pair for DAPLink firmware. DAPLINK_VIDPID = (0x0d28, 0x0204) - + @classmethod def get_all_connected_probes(cls, unique_id=None, is_explicit=False): try: return [cls(dev) for dev in DAPAccess.get_connected_devices()] except DAPAccess.Error as exc: raise cls._convert_exception(exc) from exc - + @classmethod def get_probe_with_id(cls, unique_id, is_explicit=False): try: @@ -101,15 +101,15 @@ def __init__(self, device): self._protocol = None self._is_open = False self._caps = set() - + @property def board_id(self): """! @brief Unique identifier for the board. - + Only board IDs for DAPLink firmware are supported. We can't assume other CMSIS-DAP firmware is using the same serial number format, so we cannot reliably extract the board ID. - + @return Either a 4-character board ID string, or None if the probe doesn't have a board ID. 
""" if self._link.vidpid == self.DAPLINK_VIDPID: @@ -126,11 +126,11 @@ def description(self): return self.vendor_name + " " + self.product_name else: return "{0} [{1}]".format(board_info.name, board_info.target) - + @property def vendor_name(self): return self._link.vendor_name - + @property def product_name(self): return self._link.product_name @@ -147,18 +147,18 @@ def unique_id(self): @property def wire_protocol(self): return self._protocol - + @property def is_open(self): return self._is_open - + @property def capabilities(self): return self._caps def create_associated_board(self): assert self.session is not None - + # Only support associated Mbed boards for DAPLink firmware. We can't assume other # CMSIS-DAP firmware is using the same serial number format, so we cannot reliably # extract the board ID. @@ -166,15 +166,15 @@ def create_associated_board(self): return MbedBoard(self.session, board_id=self.board_id) else: return None - + def open(self): try: TRACE.debug("trace: open") - + self._link.open() self._is_open = True self._link.set_deferred_transfer(self.session.options.get('cmsis_dap.deferred_transfers')) - + # Read CMSIS-DAP capabilities self._capabilities = self._link.identify(DAPAccess.ID.CAPABILITIES) self._supported_protocols = [DebugProbe.Protocol.DEFAULT] @@ -182,7 +182,7 @@ def open(self): self._supported_protocols.append(DebugProbe.Protocol.SWD) if self._capabilities & self.JTAG_CAPABILITY_MASK: self._supported_protocols.append(DebugProbe.Protocol.JTAG) - + self._caps = { self.Capability.SWJ_SEQUENCE, self.Capability.BANKED_DP_REGISTERS, @@ -195,11 +195,11 @@ def open(self): self._caps.add(self.Capability.SWO) except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - + def close(self): try: TRACE.debug("trace: close") - + self._link.close() self._is_open = False except DAPAccess.Error as exc: @@ -210,27 +210,27 @@ def close(self): # ------------------------------------------- # def connect(self, protocol=None): 
TRACE.debug("trace: connect(%s)", protocol.name if (protocol is not None) else "None") - + # Convert protocol to port enum. - # + # # We must get a non-default port, since some CMSIS-DAP implementations do not accept the default - # port. Note that the conversion of the default port type is contained in the PORT_MAP dict so it + # port. Note that the conversion of the default port type is contained in the PORT_MAP dict so it # is one location. port = self.PORT_MAP.get(protocol, self.PORT_MAP[DebugProbe.Protocol.DEFAULT]) assert port is not DAPAccess.PORT.DEFAULT - + try: self._link.connect(port) except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - + # Read the current mode and save it. actualMode = self._link.get_swj_mode() self._protocol = self.PORT_MAP[actualMode] def swj_sequence(self, length, bits): TRACE.debug("trace: swj_sequence(length=%i, bits=%x)", length, bits) - + try: self._link.swj_sequence(length, bits) except DAPAccess.Error as exc: @@ -238,7 +238,7 @@ def swj_sequence(self, length, bits): def swd_sequence(self, sequences): TRACE.debug("trace: swd_sequence(sequences=%r)", sequences) - + try: self._link.swd_sequence(sequences) except DAPAccess.Error as exc: @@ -246,7 +246,7 @@ def swd_sequence(self, sequences): def jtag_sequence(self, cycles, tms, read_tdo, tdi): TRACE.debug("trace: jtag_sequence(cycles=%i, tms=%x, read_tdo=%s, tdi=%x)", cycles, tms, read_tdo, tdi) - + try: self._link.jtag_sequence(cycles, tms, read_tdo, tdi) except DAPAccess.Error as exc: @@ -254,7 +254,7 @@ def jtag_sequence(self, cycles, tms, read_tdo, tdi): def disconnect(self): TRACE.debug("trace: disconnect") - + try: self._link.disconnect() self._protocol = None @@ -263,7 +263,7 @@ def disconnect(self): def set_clock(self, frequency): TRACE.debug("trace: set_clock(freq=%i)", frequency) - + try: self._link.set_clock(frequency) except DAPAccess.Error as exc: @@ -271,7 +271,7 @@ def set_clock(self, frequency): def reset(self): TRACE.debug("trace: reset") - 
+ try: self._link.assert_reset(True) sleep(self.session.options.get('reset.hold_time')) @@ -282,12 +282,12 @@ def reset(self): def assert_reset(self, asserted): TRACE.debug("trace: assert_reset(%s)", asserted) - + try: self._link.assert_reset(asserted) except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - + def is_reset_asserted(self): try: result = self._link.is_reset_asserted() @@ -298,7 +298,7 @@ def is_reset_asserted(self): def flush(self): TRACE.debug("trace: flush") - + try: self._link.flush() except DAPAccess.Error as exc: @@ -311,7 +311,7 @@ def flush(self): def read_dp(self, addr, now=True): reg_id = self.REG_ADDR_TO_ID_MAP[self.DP, addr] - + try: if not now: TRACE.debug("trace: read_dp(addr=%#010x) -> ...", addr) @@ -338,7 +338,7 @@ def read_dp_result_callback(): def write_dp(self, addr, data): reg_id = self.REG_ADDR_TO_ID_MAP[self.DP, addr] - + # Write the DP register. try: self._link.write_reg(reg_id, data) @@ -393,7 +393,7 @@ def write_ap(self, addr, data): def read_ap_multiple(self, addr, count=1, now=True): assert isinstance(addr, int) ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)] - + try: if not now: TRACE.debug("trace: read_ap_multi(addr=%#010x, count=%i) -> ...", addr, count) @@ -423,7 +423,7 @@ def read_ap_repeat_callback(): def write_ap_multiple(self, addr, values): assert isinstance(addr, int) ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)] - + try: self._link.reg_write_repeat(len(values), ap_reg, values, dap_index=0) TRACE.debug("trace: write_ap_multi(addr=%#010x, (%i)[%s])", addr, len(values), @@ -432,14 +432,14 @@ def write_ap_multiple(self, addr, values): TRACE.debug("trace: write_ap_multi(addr=%#010x, (%i)[%s]) -> error(%s)", addr, len(values), ", ".join(["%#010x" % v for v in values]), exc) raise self._convert_exception(exc) from exc - + # ------------------------------------------- # # SWO functions # ------------------------------------------- # def swo_start(self, baudrate): 
TRACE.debug("trace: swo_start(baud=%i)", baudrate) - + try: self._link.swo_configure(True, baudrate) self._link.swo_control(True) @@ -448,7 +448,7 @@ def swo_start(self, baudrate): def swo_stop(self): TRACE.debug("trace: swo_stop") - + try: self._link.swo_configure(False, 0) except DAPAccess.Error as exc: @@ -479,14 +479,14 @@ def _convert_exception(exc): class CMSISDAPProbePlugin(Plugin): """! @brief Plugin class for CMSISDAPProbe.""" - + def load(self): return CMSISDAPProbe - + @property def name(self): return "cmsisdap" - + @property def description(self): return "CMSIS-DAP debug probe" diff --git a/pyocd/probe/debug_probe.py b/pyocd/probe/debug_probe.py index e95fb96c7..8009c3470 100644 --- a/pyocd/probe/debug_probe.py +++ b/pyocd/probe/debug_probe.py @@ -20,33 +20,33 @@ class DebugProbe(object): """! @brief Abstract debug probe class. - + Subclasses of this abstract class are drivers for different debug probe interfaces, either hardware such as a USB based probe, or software such as connecting with a simulator. - + The constructor is private. To create an instance, use either of get_all_connected_probes() or get_probe_with_id(). Normally, the @ref pyocd.probe.aggregator.DebugProbeAggregator "DebugProbeAggregator" class is used instead of directly calling methods on a specific probe class. - + Use an instance as follows: - + 1. Call open(). 2. Optionally inspect the `supported_wire_protocols` property and select a protocol to use. 3. Call connect(), passing the chosen wire protocol. 4. Use by instance by calling other methods. 5. Call disconnect(). 6. Call close(). - + Most methods are required to be overridden by a subclass, with a few exceptions. - + These methods are completely optional: - + - create_associated_board() - flush() - get_memory_interface_for_ap() - + These methods must be implemented depending on the probe capabilities, as returned from the `capabilities` property. 
- + - swj_sequence(): Capability.SWJ_SEQUENCE; if not provided it is assumed the probe automatically enables SWD or JTAG on the target based on the protocol passed into connect(). - swd_sequence(): Capability.SWD_SEQUENCE @@ -59,14 +59,14 @@ class Protocol(Enum): DEFAULT = 0 SWD = 1 JTAG = 2 - + ## Map from wire protocol setting name to debug probe constant. PROTOCOL_NAME_MAP = { 'swd': Protocol.SWD, 'jtag': Protocol.JTAG, 'default': Protocol.DEFAULT, } - + class Capability(Enum): """! @brief Probe capabilities.""" ## @brief Whether the probe supports the swj_sequence() API. @@ -74,7 +74,7 @@ class Capability(Enum): # If this property is True, then the swj_sequence() method is used to move between protocols. # If False, it is assumed the probe firmware automatically manages the protocol switch. SWJ_SEQUENCE = 0 - + ## @brief Whether the probe supports receiving SWO data. SWO = 1 @@ -83,20 +83,20 @@ class Capability(Enum): # Currently only used to verify that the probe supports banked DP registers when the #MANAGED_DPBANKSEL # capability is present. BANKED_DP_REGISTERS = 2 - + ## @brief Whether the probe can access APv2 registers. # # This capability is currently only used to verify that a probe with the #MANAGED_AP_SELECTION capability # can support the wider AP addresses used in version 2 APs. For probes without #MANAGED_AP_SELECTION, # DP_SELECT is written directly by the DAP layer when selecting an AP. APv2_ADDRESSES = 3 - + ## @brief Whether the probe automatically handles AP selection in the DP. # # If this capability is not present, the DebugPort object will perform the AP selection # by DP register writes. MANAGED_AP_SELECTION = 4 - + ## @brief whether the probe automatically handles access of banked DAP registers. MANAGED_DPBANKSEL = 5 @@ -105,15 +105,15 @@ class Capability(Enum): ## @brief Whether the probe supports the jtag_sequence() API. JTAG_SEQUENCE = 7 - + @classmethod def get_all_connected_probes(cls, unique_id=None, is_explicit=False): """! 
@brief Returns a list of DebugProbe instances. - + To filter the list of returned probes, the `unique_id` parameter may be set to a string with a full or partial unique ID (canonically the serial number). Alternatively, the probe class may simply return all available probes and let the caller handle filtering. - + @param cls The class instance. @param unique_id String. Optional partial unique ID value used to filter available probes. May be used by the probe to optimize retrieving the probe list; there is no requirement to filter the results. @@ -124,13 +124,13 @@ def get_all_connected_probes(cls, unique_id=None, is_explicit=False): @return List of DebugProbe instances. """ raise NotImplementedError() - + @classmethod def get_probe_with_id(cls, unique_id, is_explicit=False): """! @brief Returns a DebugProbe instance for a probe with the given unique ID. - + If no probe is connected with a fully matching unique ID, then None will be returned. - + @param cls The class instance. @param unique_id Unique ID string to match against probes' full unique ID. No partial matches are allowed. @param is_explicit Boolean. Whether the probe type was explicitly specified in the unique ID. @@ -147,30 +147,30 @@ def __init__(self): def session(self): """! @brief Session associated with this probe.""" return self._session - + @session.setter def session(self, the_session): self._session = the_session - + @property def description(self): """! @brief Combined description of the debug probe and/or associated board.""" return self.vendor_name + " " + self.product_name - + @property def vendor_name(self): """! @brief Name of the debug probe's manufacturer.""" raise NotImplementedError() - + @property def product_name(self): """! @brief Name of the debug probe.""" raise NotImplementedError() - + @property def supported_wire_protocols(self): """! @brief List of DebugProbe.Protocol supported by the probe. - + Only one of the values returned from this property may be passed to connect(). 
""" raise NotImplementedError() @@ -178,7 +178,7 @@ def supported_wire_protocols(self): @property def unique_id(self): """! @brief The unique ID of this device. - + This property will be valid before open() is called. This value can be passed to get_probe_with_id(). """ @@ -187,37 +187,37 @@ def unique_id(self): @property def wire_protocol(self): """! @brief Currently selected wire protocol. - + If the probe is not open and connected, i.e., open() and connect() have not been called, then this property will be None. If a value other than None is returned, then the probe has been connected successfully. """ raise NotImplementedError() - + @property def is_open(self): """! @brief Whether the probe is currently open. - + To open the probe, call the open() method. """ raise NotImplementedError() - + @property def capabilities(self): """! @brief A set of DebugProbe.Capability enums indicating the probe's features. - + This value should not be trusted until after the probe is opened. """ raise NotImplementedError() def create_associated_board(self): """! @brief Create a board instance representing the board of which the probe is a component. - + If the probe is part of a board, then this method will create a Board instance that represents the associated board. Usually, for an on-board debug probe, this would be the Board that the probe physically is part of, and will also set the target type. If the probe does not have an associated board, then this method returns None. - + @param self @param session Session to pass to the board upon construction. """ @@ -226,24 +226,24 @@ def create_associated_board(self): def open(self): """! @brief Open the USB interface to the probe for sending commands.""" raise NotImplementedError() - + def close(self): """! @brief Close the probe's USB interface.""" raise NotImplementedError() - + def lock(self): """! @brief Lock the probe from access by other threads. 
- + This lock is recursive, so locking multiple times from a single thread is acceptable as long as the thread unlocks the same number of times. - + This method does not return until the calling thread has ownership of the lock. """ self._lock.acquire() - + def unlock(self): """! @brief Unlock the probe. - + Only when the thread unlocks the probe the same number of times it has called lock() will the lock actually be released and other threads allowed access. """ @@ -262,7 +262,7 @@ def disconnect(self): def swj_sequence(self, length, bits): """! @brief Transfer some number of bits on SWDIO/TMS. - + @param self @param length Number of bits to transfer. Must be less than or equal to 256. @param bits Integer of the bit values to send on SWDIO/TMS. The LSB is transmitted first. @@ -271,16 +271,16 @@ def swj_sequence(self, length, bits): def swd_sequence(self, sequences): """! @brief Send a sequences of bits on the SWDIO signal. - + Each sequence in the _sequences_ parameter is a tuple with 1 or 2 members in this order: - 0: int: number of TCK cycles from 1-64 - 1: int: the SWDIO bit values to transfer. The presence of this tuple member indicates the sequence is an output sequence; the absence means that the specified number of TCK cycles of SWDIO data will be read and returned. - + @param self @param sequences A sequence of sequence description tuples as described above. - + @return A 2-tuple of the response status, and a sequence of bytes objects, one for each input sequence. The length of the bytes object is ( + 7) / 8. Bits are in LSB first order. """ @@ -288,14 +288,14 @@ def swd_sequence(self, sequences): def jtag_sequence(self, cycles, tms, read_tdo, tdi): """! @brief Send JTAG sequence. - + @param self @param cycles Number of TCK cycles, from 1-64. @param tms Fixed TMS value. Either 0 or 1. @param read_tdo Boolean indicating whether TDO should be read. @param tdi Integer with the TDI bit values to be transferred each TCK cycle. The LSB is sent first. 
- + @return Either an integer with TDI bit values, or None, if _read_tdo_ was false. """ raise NotImplementedError() @@ -313,15 +313,15 @@ def reset(self): def assert_reset(self, asserted): """! @brief Assert or de-assert target's nRESET signal. - + Because nRESET is negative logic and usually open drain, passing True will drive it low, and passing False will stop driving so nRESET will be pulled up. """ raise NotImplementedError() - + def is_reset_asserted(self): """! @brief Returns True if nRESET is asserted or False if de-asserted. - + If the debug probe cannot actively read the reset signal, the value returned will be the last value passed to assert_reset(). """ @@ -329,7 +329,7 @@ def is_reset_asserted(self): def flush(self): """! @brief Write out all unsent commands. - + This API may be a no-op for certain debug probe types. """ pass @@ -341,7 +341,7 @@ def flush(self): def read_dp(self, addr, now=True): """! @brief Read a DP register. - + @param self @param addr Integer register address being one of (0x0, 0x4, 0x8, 0xC). @param now Boolean specifying whether the read is synchronous (True) or asynchronous. @@ -352,7 +352,7 @@ def read_dp(self, addr, now=True): def write_dp(self, addr, data): """! @brief Write a DP register. - + @param self @param addr Integer register address being one of (0x0, 0x4, 0x8, 0xC). @param data Integer register value. @@ -374,21 +374,21 @@ def read_ap_multiple(self, addr, count=1, now=True): def write_ap_multiple(self, addr, values): """! @brief Write one AP register multiple times.""" raise NotImplementedError() - + def get_memory_interface_for_ap(self, ap_address): """! @brief Returns a @ref pyocd.core.memory_interface.MemoryInterface "MemoryInterface" for the specified AP. - + Some debug probe types have accelerated memory read and write commands. This method is used to get a concrete @ref pyocd.core.memory_interface.MemoryInterface "MemoryInterface" instance that is specific to the AP identified by the _ap_address_ parameter. 
If the probe does not provide an accelerated memory interface, None will be returned. - + @param self The debug probe. @param ap_address An instance of @ref pyocd.coresight.ap.APAddress "APAddress". """ return None - + ##@} ## @name SWO @@ -396,7 +396,7 @@ def get_memory_interface_for_ap(self, ap_address): def swo_start(self, baudrate): """! @brief Start receiving SWO data at the given baudrate. - + Once SWO reception has started, the swo_read() method must be called at regular intervals to receive SWO data. If this is not done, the probe's internal SWO data buffer may overflow and data will be lost. @@ -409,15 +409,15 @@ def swo_stop(self): def swo_read(self): """! @brief Read buffered SWO data from the target. - + @eturn Bytearray of the received data. May be 0 bytes in length if no SWO data is buffered at the probe. """ raise NotImplementedError() ##@} - + def __repr__(self): return "<{}@{:x} {}>".format(self.__class__.__name__, id(self), self.description) - + diff --git a/pyocd/probe/jlink_probe.py b/pyocd/probe/jlink_probe.py index 9b43198fa..fec97bacf 100644 --- a/pyocd/probe/jlink_probe.py +++ b/pyocd/probe/jlink_probe.py @@ -42,7 +42,7 @@ class JLinkProbe(DebugProbe): APBANKSEL = 0x000000f0 APSEL = 0xff000000 APSEL_APBANKSEL = APSEL | APBANKSEL - + @classmethod def _get_jlink(cls): # TypeError is raised by pylink if the JLink DLL cannot be found. 
@@ -59,7 +59,7 @@ def _get_jlink(cls): @classmethod def _format_serial_number(cls, serial_number): return "{:d}".format(serial_number) - + @classmethod def get_all_connected_probes(cls, unique_id=None, is_explicit=False): try: @@ -69,7 +69,7 @@ def get_all_connected_probes(cls, unique_id=None, is_explicit=False): return [cls(cls._format_serial_number(info.SerialNumber)) for info in jlink.connected_emulators()] except JLinkException as exc: raise cls._convert_exception(exc) from exc - + @classmethod def get_probe_with_id(cls, unique_id, is_explicit=False): try: @@ -120,15 +120,15 @@ def __init__(self, serial_number): self._default_protocol = None self._is_open = False self._product_name = six.ensure_str(info.acProduct) - + @property def description(self): return self.vendor_name + " " + self.product_name - + @property def vendor_name(self): return "Segger" - + @property def product_name(self): return self._product_name @@ -145,11 +145,11 @@ def unique_id(self): @property def wire_protocol(self): return self._protocol - + @property def is_open(self): return self._link.opened - + @property def capabilities(self): return { @@ -157,17 +157,17 @@ def capabilities(self): self.Capability.BANKED_DP_REGISTERS, self.Capability.APv2_ADDRESSES, } - + def open(self): try: # Configure UI usage. We must do this here rather than in the ctor because the ctor # doesn't have access to the session. if self.session.options.get('jlink.non_interactive'): self._link.disable_dialog_boxes() - + self._link.open(self._serial_number_int) self._is_open = True - + # Get available wire protocols. ifaces = self._link.supported_tifs() self._supported_protocols = [DebugProbe.Protocol.DEFAULT] @@ -178,7 +178,7 @@ def open(self): if not len(self._supported_protocols) >= 2: # default + 1 raise exceptions.ProbeError("J-Link probe {} does not support any known wire protocols".format( self.unique_id)) - + # Select default protocol, preferring SWD over JTAG. 
if DebugProbe.Protocol.SWD in self._supported_protocols: self._default_protocol = DebugProbe.Protocol.SWD @@ -186,7 +186,7 @@ def open(self): self._default_protocol = DebugProbe.Protocol.JTAG except JLinkException as exc: raise self._convert_exception(exc) from exc - + def close(self): try: self._link.close() @@ -202,22 +202,22 @@ def connect(self, protocol=None): # Handle default protocol. if (protocol is None) or (protocol == DebugProbe.Protocol.DEFAULT): protocol = self._default_protocol - + # Validate selected protocol. if protocol not in self._supported_protocols: raise ValueError("unsupported wire protocol %s" % protocol) - + # Convert protocol to port enum. if protocol == DebugProbe.Protocol.SWD: iface = pylink.enums.JLinkInterfaces.SWD elif protocol == DebugProbe.Protocol.JTAG: iface = pylink.enums.JLinkInterfaces.JTAG - + try: self._link.set_tif(iface) if self.session.options.get('jlink.power'): self._link.power_on() - + # Connect if a device name was supplied. device_name = self.session.options.get('jlink.device') if device_name is not None: @@ -232,15 +232,15 @@ def swj_sequence(self, length, bits): for chunk in range((length + 31) // 32): chunk_word = bits & 0xffffffff chunk_len = min(length, 32) - + if chunk_len == 32: self._link.swd_write32(chunk_len, chunk_word) else: self._link.swd_write(0, chunk_word, chunk_len) - + bits >>= 32 length -= 32 - + self._link.swd_sync() def disconnect(self): @@ -276,7 +276,7 @@ def assert_reset(self, asserted): self._link.set_reset_pin_high() except JLinkException as exc: raise self._convert_exception(exc) from exc - + def is_reset_asserted(self): try: status = self._link.hardware_status() @@ -296,7 +296,7 @@ def read_dp(self, addr, now=True): else: def read_reg_cb(): return value - + return value if now else read_reg_cb def write_dp(self, addr, data): @@ -314,7 +314,7 @@ def read_ap(self, addr, now=True): else: def read_reg_cb(): return value - + return value if now else read_reg_cb def write_ap(self, addr, data): @@ 
-326,10 +326,10 @@ def write_ap(self, addr, data): def read_ap_multiple(self, addr, count=1, now=True): results = [self.read_ap(addr, now=True) for n in range(count)] - + def read_ap_multiple_result_callback(): return results - + return results if now else read_ap_multiple_result_callback def write_ap_multiple(self, addr, values): @@ -371,18 +371,18 @@ def _convert_exception(exc): class JLinkProbePlugin(Plugin): """! @brief Plugin class for JLinkProbe.""" - + def should_load(self): """! @brief Load the J-Link plugin if the J-Link library is available.""" return JLinkProbe._get_jlink() is not None - + def load(self): return JLinkProbe - + @property def name(self): return "jlink" - + @property def description(self): return "SEGGER J-Link debug probe" diff --git a/pyocd/probe/pydapaccess/cmsis_dap_core.py b/pyocd/probe/pydapaccess/cmsis_dap_core.py index 67eae0a25..e84cf05b8 100644 --- a/pyocd/probe/pydapaccess/cmsis_dap_core.py +++ b/pyocd/probe/pydapaccess/cmsis_dap_core.py @@ -140,7 +140,7 @@ class DAPTransferResponse: ACK_MASK = 0x07 # Bits [2:0] PROTOCOL_ERROR_MASK = 0x08 # Bit [3] VALUE_MISMATCH_MASK = 0x08 # Bit [4] - + # Values for ACK bitfield. ACK_OK = 1 ACK_WAIT = 2 @@ -166,7 +166,7 @@ def dap_info(self, id_): - A string-type info was requested, but the returned value length is greater than the response packet size minus response header and terminating null byte on the string. """ assert type(id_) is DAPAccessIntf.ID - + cmd = [] cmd.append(Command.DAP_INFO) cmd.append(id_.value) @@ -348,7 +348,7 @@ def set_swj_pins(self, output, pins, wait=0): def swd_configure(self, turnaround=1, always_send_data_phase=False): assert 1 <= turnaround <= 4 conf = (turnaround - 1) | (int(always_send_data_phase) << 2) - + cmd = [] cmd.append(Command.DAP_SWD_CONFIGURE) cmd.append(conf) @@ -367,13 +367,13 @@ def swd_configure(self, turnaround=1, always_send_data_phase=False): def swd_sequence(self, sequences): """! @brief Send the DAP_SWD_Sequence command. 
- + Each sequence in the _sequences_ parameter is a tuple with 1 or 2 members: - 0: int: number of TCK cycles from 1-64 - 1: int: the SWDIO bit values to transfer. The presence of this tuple member indicates the sequence is an output sequence; the absence means that the specified number of TCK cycles of SWDIO data will be read and returned. - + The DAP_SWD_Sequence command expects this data for each sequence: - 0: sequence info byte - bit [7]: mode, 0=output, 1=input @@ -381,10 +381,10 @@ def swd_sequence(self, sequences): - bits [5:0]: number of TCK cycles from 1-64, with 64 encoded as 0 - 1: (only present if element 0 bit 7 == 0, for output mode) bytes of data to send, one bit per TCK cycle, transmitted LSB first. - + @param self @param sequences A sequence of sequence description tuples as described above. - + @return A 2-tuple of the response status, and a sequence of bytes objects, one for each input sequence. The length of the bytes object is ( + 7) / 8. Bits are in LSB first order. """ @@ -397,7 +397,7 @@ def swd_sequence(self, sequences): is_output = len(seq) == 2 info = (0x00 if is_output else 0x80) | (0 if (tck_count == 64) else tck_count) cmd.append(info) - + # Append SWDIO output data. if is_output: bits = seq[1] @@ -456,7 +456,7 @@ def jtag_sequence(self, cycles, tms, read_tdo, tdi): info = (((0 if (cycles == 64) else cycles) & 0x3f) | ((tms & 1) << 6) | (int(read_tdo) << 7)) - + cmd = [] cmd.append(Command.DAP_JTAG_SEQUENCE) cmd.append(1) @@ -481,7 +481,7 @@ def jtag_configure(self, devices_irlen=None): # Default to a single device with an IRLEN of 4. 
if devices_irlen is None: devices_irlen = [4] - + cmd = [] cmd.append(Command.DAP_JTAG_CONFIGURE) cmd.append(len(devices_irlen)) diff --git a/pyocd/probe/pydapaccess/dap_access_api.py b/pyocd/probe/pydapaccess/dap_access_api.py index 5a8f75ac6..9e6d97a3c 100644 --- a/pyocd/probe/pydapaccess/dap_access_api.py +++ b/pyocd/probe/pydapaccess/dap_access_api.py @@ -102,7 +102,7 @@ def set_args(arg_list): @property def protocol_version(self): """! @brief CMSIS-DAP protocol version. - + The version is represented as 3-tuple with elements, in order, of major version, minor version, and patch version. @@ -117,7 +117,7 @@ def vendor_name(self): @property def product_name(self): raise NotImplementedError() - + @property def vidpid(self): """! @brief A tuple of USB VID and PID, in that order.""" @@ -126,7 +126,7 @@ def vidpid(self): @property def has_swd_sequence(self): """! @brief Boolean indicating whether the DAP_SWD_Sequence command is supported. - + This property is only valid after the probe is opened. Until then, the value will be None. """ raise NotImplementedError() @@ -162,17 +162,17 @@ def connect(self, port=None): def configure_swd(self, turnaround=1, always_send_data_phase=False): """! @brief Modify SWD configuration. - + @param self @param turnaround Number of turnaround phase clocks, from 1-4. @param always_send_data_phase Whether the data phase should always be transmitted on writes, even on a FAULT response. This is required for sticky overrun support. """ raise NotImplementedError() - + def configure_jtag(self, devices_irlen=None): """! @brief Modify JTAG configuration. - + @param self @param devices_irlen Sequence of IR lengths for each device, thus also specifying the number of devices. If not passed, this will default to a single device with IRLen=4. @@ -181,7 +181,7 @@ def configure_jtag(self, devices_irlen=None): def swj_sequence(self, length, bits): """! @brief Send sequence to activate JTAG or SWD on the target. 
- + @param self @param length Number of bits to transfer on TCK/TMS. @param bits Integer with the bit values, sent LSB first. @@ -190,32 +190,32 @@ def swj_sequence(self, length, bits): def swd_sequence(self, sequences): """! @brief Send a sequences of bits on the SWDIO signal. - + This method sends the DAP_SWD_Sequence CMSIS-DAP command. - + Each sequence in the _sequences_ parameter is a tuple with 1 or 2 members: - 0: int: number of TCK cycles from 1-64 - 1: int: the SWDIO bit values to transfer. The presence of this tuple member indicates the sequence is an output sequence; the absence means that the specified number of TCK cycles of SWDIO data will be read and returned. - + @param self @param sequences A sequence of sequence description tuples as described above. - + @return A 2-tuple of the response status, and a sequence of bytes objects, one for each input sequence. The length of the bytes object is ( + 7) / 8. Bits are in LSB first order. """ def jtag_sequence(self, cycles, tms, read_tdo, tdi): """! @brief Send JTAG sequence. - + @param self @param cycles Number of TCK cycles, from 1-64. @param tms Fixed TMS value. Either 0 or 1. @param read_tdo Boolean indicating whether TDO should be read. @param tdi Integer with the TDI bit values to be transferred each TCK cycle. The LSB is sent first. - + @return Either an integer with TDI bit values, or None, if _read_tdo_ was false. """ raise NotImplementedError() @@ -242,7 +242,7 @@ def reset(self): def assert_reset(self, asserted): """! @brief Assert or de-assert target reset line""" raise NotImplementedError() - + def is_reset_asserted(self): """! @brief Returns True if the target reset line is asserted or False if de-asserted""" raise NotImplementedError() @@ -258,7 +258,7 @@ def flush(self): def vendor(self, index, data=None): """! @brief Send a vendor specific command""" raise NotImplementedError() - + def has_swo(self): """! 
@brief Returns bool indicating whether the link supports SWO.""" raise NotImplementedError() @@ -278,11 +278,11 @@ def get_swo_status(self): def swo_read(self, count=None): """! @brief Read buffered SWO data from the target. - + The count parameter is optional. If provided, it is the number of bytes to read, which must be less than the packet size. If count is not provided, the packet size will be used instead. - + Returns a 3-tuple containing the status mask at index 0, the number of buffered SWO data bytes at index 1, and a list of the received data bytes at index 2.""" raise NotImplementedError() diff --git a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py index bdbe5e4e2..c455da928 100644 --- a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py +++ b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py @@ -65,16 +65,16 @@ def _get_interfaces(): """! @brief Get the connected USB devices""" # Get CMSIS-DAPv1 interfaces. v1_interfaces = INTERFACE[USB_BACKEND].get_all_connected_interfaces() - + # Get CMSIS-DAPv2 interfaces. v2_interfaces = INTERFACE[USB_BACKEND_V2].get_all_connected_interfaces() - + # Prefer v2 over v1 if a device provides both. devices_in_both = [v1 for v1 in v1_interfaces for v2 in v2_interfaces if _get_unique_id(v1) == _get_unique_id(v2)] for dev in devices_in_both: v1_interfaces.remove(dev) - + # Return the combined list. return v1_interfaces + v2_interfaces @@ -323,11 +323,11 @@ def _check_response(self, response): The ACK bits [2:0] and the protocol error bit are checked. If any error is indicated, the appropriate exception is raised. An exception is also raised for unrecognised ACK values. - + @param self @param response The "Transfer Response" byte from a DAP_Transfer or DAP_TransferBlock command. - + @exception DAPAccessIntf.TransferFaultError Raised for the ACK_FAULT response. @exception DAPAccessIntf.TransferTimeoutError Raised for ACK_WAIT response. 
@exception DAPAccessIntf.TransferError Raised for other, less common errors, including No @@ -346,7 +346,7 @@ def _check_response(self, response): raise DAPAccessIntf.TransferError("Unexpected ACK value (%d) returned by probe" % ack) elif (response & DAPTransferResponse.PROTOCOL_ERROR_MASK) != 0: raise DAPAccessIntf.TransferError("SWD protocol error") - + def _decode_transfer_data(self, data): """! @brief Take a byte array and extract the data from it @@ -456,7 +456,7 @@ def decode_data(self, data): class DAPAccessCMSISDAP(DAPAccessIntf): """! @brief An implementation of the DAPAccessIntf layer for DAPLink boards - + @internal All methods that use the CMSISDAPProtocol instance must be locked and must flush the command queue prior to using methods of that object. Otherwise the command responses may be processed out of order. @@ -548,7 +548,7 @@ def __init__(self, unique_id, interface=None): self._vendor_name = "" self._product_name = "" self._vidpid = (0, 0) - + self._lock = threading.RLock() self._interface = interface self._deferred_transfer = False @@ -572,11 +572,11 @@ def protocol_version(self) -> VersionTuple: by the debug probe. """ return self._cmsis_dap_version - + @property def firmware_version(self) -> Optional[str]: """! @brief A string of the product firmware version, or None. - + Only probes supporting CMSIS-DAP protocol v2.1 or later can return their firmware version. """ return self._fw_version @@ -588,7 +588,7 @@ def vendor_name(self): @property def product_name(self): return self._product_name - + @property def vidpid(self): """! @brief A tuple of USB VID and PID, in that order.""" @@ -597,11 +597,11 @@ def vidpid(self): @property def has_swd_sequence(self): return self._cmsis_dap_version >= CMSISDAPVersion.V1_2_0 - + def lock(self): """! @brief Lock the interface.""" self._lock.acquire() - + def unlock(self): """! 
@brief Unlock the interface.""" self._lock.release() @@ -644,7 +644,7 @@ def _read_protocol_version(self): # One of the protocol version fields had a non-numeric character, indicating it is not a valid # CMSIS-DAP version number. Default to the lowest version. self._cmsis_dap_version = CMSISDAPVersion.V1_0_0 - + # Validate the version against known CMSIS-DAP minor versions. This will also catch the beta release # versions of CMSIS-DAP, 0.01 and 0.02, and raise them to 1.0.0. if self._cmsis_dap_version[:2] not in CMSISDAPVersion.minor_versions(): @@ -672,7 +672,7 @@ def open(self): if (self._cmsis_dap_version >= CMSISDAPVersion.V2_1_0) or (self._cmsis_dap_version >= CMSISDAPVersion.V1_3_0 and self._cmsis_dap_version < CMSISDAPVersion.V2_0_0): self._fw_version = self._protocol.dap_info(self.ID.PRODUCT_FW_VERSION) - + # Log probe's firmware version. if self._fw_version: LOG.debug("CMSIS-DAP probe %s: firmware version %s, protocol version %i.%i.%i", @@ -709,7 +709,7 @@ def assert_reset(self, asserted): self._protocol.set_swj_pins(0, Pin.nRESET) else: self._protocol.set_swj_pins(Pin.nRESET, Pin.nRESET) - + @locked def is_reset_asserted(self): self.flush() @@ -775,7 +775,7 @@ def connect(self, port=DAPAccessIntf.PORT.DEFAULT): self._protocol.set_swj_clock(self._frequency) # configure transfer self._protocol.transfer_configure() - + # configure the selected protocol with defaults. 
if self._dap_port == DAPAccessIntf.PORT.SWD: self.configure_swd() @@ -786,7 +786,7 @@ def connect(self, port=DAPAccessIntf.PORT.DEFAULT): def configure_swd(self, turnaround=1, always_send_data_phase=False): self.flush() self._protocol.swd_configure(turnaround, always_send_data_phase) - + @locked def configure_jtag(self, devices_irlen=None): self.flush() @@ -811,10 +811,10 @@ def jtag_sequence(self, cycles, tms, read_tdo, tdi): def disconnect(self): self.flush() self._protocol.disconnect() - + def has_swo(self): return self._has_swo_uart - + @locked def swo_configure(self, enabled, rate): self.flush() @@ -822,7 +822,7 @@ def swo_configure(self, enabled, rate): # Don't send any commands if the SWO commands aren't supported. if not self._has_swo_uart: return False - + # Before we attempt any configuration, we must explicitly disable SWO # (if SWO is enabled, setting any other configuration fails). self._swo_disable() @@ -834,7 +834,7 @@ def swo_configure(self, enabled, rate): transport = DAPSWOTransport.DAP_SWO_EP else: transport = DAPSWOTransport.DAP_SWO_DATA - + if self._protocol.swo_transport(transport) != 0: self._swo_disable() return False @@ -851,7 +851,7 @@ def swo_configure(self, enabled, rate): LOG.debug("Exception while configuring SWO: %s", e) self._swo_disable() return False - + # Doesn't need @locked because it is only called from swo_configure(). def _swo_disable(self): try: @@ -861,7 +861,7 @@ def _swo_disable(self): LOG.debug("Exception while disabling SWO: %s", e) finally: self._swo_status = SWOStatus.DISABLED - + @locked def swo_control(self, start): self.flush() @@ -869,7 +869,7 @@ def swo_control(self, start): # Don't send any commands if the SWO commands aren't supported. 
if not self._has_swo_uart: return False - + if start: self._protocol.swo_control(DAPSWOControl.START) if self._interface.has_swo_ep: @@ -881,11 +881,11 @@ def swo_control(self, start): self._interface.stop_swo() self._swo_status = SWOStatus.CONFIGURED return True - + @locked def get_swo_status(self): return self._protocol.swo_status() - + def swo_read(self, count=None): # The separate SWO EP can be read without locking. if self._interface.has_swo_ep: diff --git a/pyocd/probe/pydapaccess/dap_settings.py b/pyocd/probe/pydapaccess/dap_settings.py index 2d93da331..93aa8eb8c 100644 --- a/pyocd/probe/pydapaccess/dap_settings.py +++ b/pyocd/probe/pydapaccess/dap_settings.py @@ -15,5 +15,5 @@ # limitations under the License. class DAPSettings(): - + limit_packets = False diff --git a/pyocd/probe/pydapaccess/interface/common.py b/pyocd/probe/pydapaccess/interface/common.py index 0bdaf4aad..df801b717 100644 --- a/pyocd/probe/pydapaccess/interface/common.py +++ b/pyocd/probe/pydapaccess/interface/common.py @@ -76,10 +76,10 @@ def is_known_cmsis_dap_vid_pid(vid, pid): def filter_device_by_class(vid, pid, device_class): """! @brief Test whether the device should be ignored by comparing bDeviceClass. - + This function checks the device's bDeviceClass to determine whether it is likely to be a CMSIS-DAP device. It uses the vid and pid for device-specific quirks. - + @retval True Skip the device. @retval False The device is valid. """ @@ -94,12 +94,12 @@ def filter_device_by_class(vid, pid, device_class): def filter_device_by_usage_page(vid, pid, usage_page): """! @brief Test whether the device should be ignored by comparing the HID usage page. - + This function performs device-specific tests to determine whether the device is a CMSIS-DAP interface. The only current test is for the NXP LPC-Link2, which has extra HID interfaces with usage pages other than 0xff00. 
No generic tests are done regardless of VID/PID, because it is not clear whether all CMSIS-DAP devices have the usage page set to the same value. - + @retval True Skip the device. @retval False The device is valid. """ @@ -114,12 +114,12 @@ def check_ep(interface, ep_index, ep_dir, ep_type): def generate_device_unique_id(vid: int, pid: int, *locations: List[Union[int, str]]) -> str: """! @brief Generate a semi-stable unique ID from USB device properties. - + This function is intended to be used in cases where a device does not provide a serial number string. pyocd still needs a valid unique ID so the device can be selected from amongst multiple connected devices. The algorithm used here generates an ID that is stable for a given device as long as it is connected to the same USB port. - + @param vid Vendor ID. @param pid Product ID. @param locations Additional parameters are expected to be int or string values that represent diff --git a/pyocd/probe/pydapaccess/interface/hidapi_backend.py b/pyocd/probe/pydapaccess/interface/hidapi_backend.py index 0b2bdae90..b5c249ceb 100644 --- a/pyocd/probe/pydapaccess/interface/hidapi_backend.py +++ b/pyocd/probe/pydapaccess/interface/hidapi_backend.py @@ -56,7 +56,7 @@ def open(self): @staticmethod def get_all_connected_interfaces(): """! @brief Returns all the connected devices with CMSIS-DAP in the name. - + returns an array of HidApiUSB (Interface) objects """ @@ -73,10 +73,10 @@ def get_all_connected_interfaces(): if "CMSIS-DAP" not in device_path: # Skip non cmsis-dap devices continue - + vid = deviceInfo['vendor_id'] pid = deviceInfo['product_id'] - + # Perform device-specific filtering. 
if filter_device_by_usage_page(vid, pid, deviceInfo['usage_page']): continue diff --git a/pyocd/probe/pydapaccess/interface/interface.py b/pyocd/probe/pydapaccess/interface/interface.py index 6f2ec9d1f..e67f80d9e 100644 --- a/pyocd/probe/pydapaccess/interface/interface.py +++ b/pyocd/probe/pydapaccess/interface/interface.py @@ -25,7 +25,7 @@ def __init__(self): self.serial_number = "" self.packet_count = 1 self.packet_size = 64 - + @property def has_swo_ep(self): return False diff --git a/pyocd/probe/pydapaccess/interface/pyusb_backend.py b/pyocd/probe/pydapaccess/interface/pyusb_backend.py index 812780787..dea50add0 100644 --- a/pyocd/probe/pydapaccess/interface/pyusb_backend.py +++ b/pyocd/probe/pydapaccess/interface/pyusb_backend.py @@ -48,7 +48,7 @@ class PyUSB(Interface): """ isAvailable = IS_AVAILABLE - + did_show_no_libusb_warning = False def __init__(self): @@ -237,28 +237,28 @@ def close(self): class MatchCmsisDapv1Interface(object): """! @brief Match class for finding CMSIS-DAPv1 interface. - + This match class performs several tests on the provided USB interface descriptor, to determine whether it is a CMSIS-DAPv1 interface. These requirements must be met by the interface: - + 1. If there is more than one HID interface on the device, the interface must have an interface name string containing "CMSIS-DAP". 2. bInterfaceClass must be 0x03 (HID). 3. bInterfaceSubClass must be 0. 4. Must have interrupt in endpoint, with an optional interrupt out endpoint, in that order. """ - + def __init__(self, hid_interface_count): """! @brief Constructor.""" self._hid_count = hid_interface_count - + def __call__(self, interface): """! @brief Return True if this is a CMSIS-DAPv1 interface.""" try: if self._hid_count > 1: interface_name = usb.util.get_string(interface.device, interface.iInterface) - + # This tells us whether the interface is CMSIS-DAP, but not whether it's v1 or v2. 
if (interface_name is None) or ("CMSIS-DAP" not in interface_name): return False @@ -291,7 +291,7 @@ def __call__(self, interface): ] if endpoint_attrs not in ENDPOINT_ATTRS_ALLOWED: return False - + # All checks passed, this is a CMSIS-DAPv2 interface! return True @@ -315,20 +315,20 @@ def __call__(self, dev): # Check if the device class is a valid one for CMSIS-DAP. if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass): return False - + try: # First attempt to get the active config. This produces a more direct error # when you don't have device permissions on Linux config = dev.get_active_configuration() - + # Now read the product name string. device_string = dev.product if (device_string is None) or ("CMSIS-DAP" not in device_string): return False - + # Get count of HID interfaces. hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True, bInterfaceClass=USB_CLASS_HID))) - + # Find the CMSIS-DAPv1 interface. matcher = MatchCmsisDapv1Interface(hid_interface_count) cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=matcher) diff --git a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py index ec60a9ea0..8167787a6 100644 --- a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py +++ b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py @@ -70,7 +70,7 @@ def __init__(self): self.read_sem = threading.Semaphore(0) self.packet_size = 512 self.is_swo_running = False - + @property def has_swo_ep(self): return self.ep_swo is not None @@ -135,7 +135,7 @@ def start_rx(self): self.thread = threading.Thread(target=self.rx_task, name=thread_name) self.thread.daemon = True self.thread.start() - + def start_swo(self): self.swo_stop_event = threading.Event() thread_name = "SWO receive (%s)" % self.serial_number @@ -143,7 +143,7 @@ def start_swo(self): self.swo_thread.daemon = True self.swo_thread.start() self.is_swo_running = True - + def stop_swo(self): 
self.swo_stop_event.set() self.swo_thread.join() @@ -224,7 +224,7 @@ def read_swo(self): if self.swo_data[0] is None: raise DAPAccessIntf.DeviceError("Device %s SWO thread exited unexpectedly" % self.serial_number) data += self.swo_data.pop(0) - + return data def close(self): @@ -251,11 +251,11 @@ def close(self): def _match_cmsis_dap_v2_interface(interface): """! @brief Returns true for a CMSIS-DAP v2 interface. - + This match function performs several tests on the provided USB interface descriptor, to determine whether it is a CMSIS-DAPv2 interface. These requirements must be met by the interface: - + 1. Have an interface name string containing "CMSIS-DAP". 2. bInterfaceClass must be 0xff. 3. bInterfaceSubClass must be 0. @@ -264,7 +264,7 @@ def _match_cmsis_dap_v2_interface(interface): """ try: interface_name = usb.util.get_string(interface.device, interface.iInterface) - + # This tells us whether the interface is CMSIS-DAP, but not whether it's v1 or v2. if (interface_name is None) or ("CMSIS-DAP" not in interface_name): return False @@ -277,20 +277,20 @@ def _match_cmsis_dap_v2_interface(interface): # Must have either 2 or 3 endpoints. if interface.bNumEndpoints not in (2, 3): return False - + # Endpoint 0 must be bulk out. if not check_ep(interface, 0, usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_BULK): return False - + # Endpoint 1 must be bulk in. if not check_ep(interface, 1, usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_BULK): return False - + # Endpoint 2 is optional. If present it must be bulk in. if (interface.bNumEndpoints == 3) \ and not check_ep(interface, 2, usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_BULK): return False - + # All checks passed, this is a CMSIS-DAPv2 interface! return True @@ -314,7 +314,7 @@ def __call__(self, dev): # Check if the device class is a valid one for CMSIS-DAP. 
if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass): return False - + try: config = dev.get_active_configuration() cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=_match_cmsis_dap_v2_interface) diff --git a/pyocd/probe/shared_probe_proxy.py b/pyocd/probe/shared_probe_proxy.py index b0f78a951..bdbc46378 100644 --- a/pyocd/probe/shared_probe_proxy.py +++ b/pyocd/probe/shared_probe_proxy.py @@ -24,12 +24,12 @@ class SharedDebugProbeProxy(object): """! @brief Proxy for a DebugProbe that allows it to be shared by multiple clients. - + The main purpose of this class is to keep track of the number of times the probe has been opened and connected, and to perform checks to ensure that probes don't interfere with each other. Most probe APIs are simply passed to the underlying probe object. """ - + def __init__(self, probe): self._session = None self._probe = probe @@ -40,21 +40,21 @@ def __init__(self, probe): def session(self): """! @brief Session associated with this probe.""" return self._session - + @session.setter def session(self, the_session): self._session = the_session self._probe.session = the_session - + @property def probe(self): return self._probe - + def open(self): if self._open_count == 0: self._probe.open() self._open_count += 1 - + def close(self): if self._open_count == 1: self._probe.close() @@ -75,7 +75,7 @@ def disconnect(self): def swj_sequence(self, length, bits): self._probe.swj_sequence(length, bits) - + def __getattr__(self, name): """! @brief Redirect to underlying probe object methods.""" if hasattr(self._probe, name): diff --git a/pyocd/probe/stlink/constants.py b/pyocd/probe/stlink/constants.py index 61c538795..9d75d48b7 100644 --- a/pyocd/probe/stlink/constants.py +++ b/pyocd/probe/stlink/constants.py @@ -15,11 +15,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -class Commands: +class Commands: """! @brief STLink V2 and V3 commands. 
""" - + # Common commands. GET_VERSION = 0xf1 JTAG_COMMAND = 0xf2 @@ -71,7 +71,7 @@ class Commands: SET_COM_FREQ = 0x61 # V3 only, replaces SWD/JTAG_SET_FREQ GET_COM_FREQ = 0x62 # V3 only SWITCH_STLINK_FREQ = 0x63 # V3 only - + # Parameters for JTAG_ENTER2. JTAG_ENTER_SWD = 0xa3 JTAG_ENTER_JTAG_NO_CORE_RESET = 0xa3 @@ -80,15 +80,15 @@ class Commands: JTAG_DRIVE_NRST_LOW = 0x00 JTAG_DRIVE_NRST_HIGH = 0x01 JTAG_DRIVE_NRST_PULSE = 0x02 - + # Parameters for JTAG_INIT_AP and JTAG_CLOSE_AP_DBG. JTAG_AP_NO_CORE = 0x00 JTAG_AP_CORTEXM_CORE = 0x01 - + # Parameters for SET_COM_FREQ and GET_COM_FREQ. JTAG_STLINK_SWD_COM = 0x00 JTAG_STLINK_JTAG_COM = 0x01 - + class Status: """! @brief STLink status codes and messages. @@ -123,7 +123,7 @@ class Status: SWV_NOT_AVAILABLE = 0x20 JTAG_FREQ_NOT_SUPPORTED = 0x41 JTAG_UNKNOWN_CMD = 0x42 - + ## Map from status code to error message. MESSAGES = { JTAG_UNKNOWN_ERROR : "Unknown error", @@ -155,7 +155,7 @@ class Status: JTAG_FREQ_NOT_SUPPORTED : "Frequency not supported", JTAG_UNKNOWN_CMD : "Unknown command", } - + @staticmethod def get_error_message(status): return "STLink error ({}): {}".format(status, Status.MESSAGES.get(status, "Unknown error")) diff --git a/pyocd/probe/stlink/stlink.py b/pyocd/probe/stlink/stlink.py index 18a1c5f39..36e8dec82 100644 --- a/pyocd/probe/stlink/stlink.py +++ b/pyocd/probe/stlink/stlink.py @@ -41,21 +41,21 @@ class Protocol(Enum): """ SWD = 1 JTAG = 2 - + ## Maximum number of bytes to send or receive for 32- and 16- bit transfers. # # 8-bit transfers have a maximum size of the maximum USB packet size (64 bytes for full speed). MAXIMUM_TRANSFER_SIZE = 1024 - + ## Minimum required STLink firmware version (hw version 2). MIN_JTAG_VERSION = 24 - + ## Firmware version that adds 16-bit transfers (hw version 2). MIN_JTAG_VERSION_16BIT_XFER = 26 - + ## Firmware version that adds multiple AP support (hw version 2). MIN_JTAG_VERSION_MULTI_AP = 28 - + ## Firmware version that adds DP bank support. 
# # Keys are the hardware version, value is the minimum JTAG version. @@ -65,7 +65,7 @@ class Protocol(Enum): # # Keys are the hardware version, value is the minimum JTAG version. MIN_JTAG_VERSION_GET_BOARD_IDS = {2: 36, 3: 6} - + ## Port number to use to indicate DP registers. DP_PORT = 0xffff @@ -76,19 +76,19 @@ class Protocol(Enum): Status.SWD_AP_FAULT: exceptions.TransferFaultError, Status.SWD_AP_ERROR: exceptions.TransferError, Status.SWD_AP_PARITY_ERROR: exceptions.TransferError, - + # DP protocol errors Status.SWD_DP_WAIT: exceptions.TransferTimeoutError, Status.SWD_DP_FAULT: exceptions.TransferFaultError, Status.SWD_DP_ERROR: exceptions.TransferError, Status.SWD_DP_PARITY_ERROR: exceptions.TransferError, - + # High level transaction errors Status.SWD_AP_WDATA_ERROR: exceptions.TransferFaultError, Status.SWD_AP_STICKY_ERROR: exceptions.TransferError, Status.SWD_AP_STICKYORUN_ERROR: exceptions.TransferError, } - + ## These errors indicate a memory fault. _MEM_FAULT_ERRORS = ( Status.JTAG_UNKNOWN_ERROR, # Returned in some cases by older STLink firmware. @@ -106,7 +106,7 @@ def __init__(self, device): self._target_voltage = 0 self._protocol = None self._lock = threading.RLock() - + def open(self): with self._lock: self._device.open() @@ -165,7 +165,7 @@ def get_version(self): self._hw_version = bfx(ver, 15, 12) self._jtag_version = bfx(ver, 11, 6) self._msc_version = bfx(ver, 5, 0) - + # For STLinkV3 we must use the extended get version command. if self._hw_version >= 3: # GET_VERSION_EXT response structure (byte offsets): @@ -194,7 +194,7 @@ def get_version(self): def _check_version(self, min_version): return (self._hw_version >= 3) or (self._jtag_version >= min_version) - + @property def vendor_name(self): return self._device.vendor_name @@ -222,11 +222,11 @@ def version_str(self): @property def target_voltage(self): return self._target_voltage - + @property def supports_banked_dp(self): """! 
@brief Whether the firmware version supports accessing banked DP registers. - + This property is not valid until the connection is opened. """ return self._jtag_version >= self.MIN_JTAG_VERSION_DPBANKSEL[self._hw_version] @@ -270,36 +270,36 @@ def set_jtag_frequency(self, freq=1120000): self._check_status(response) return raise exceptions.ProbeError("Selected JTAG frequency is too low") - + def get_com_frequencies(self, protocol): assert self._hw_version >= 3 - + with self._lock: cmd = [Commands.JTAG_COMMAND, Commands.GET_COM_FREQ, protocol.value - 1] response = self._device.transfer(cmd, readSize=52) self._check_status(response[0:2]) - + freqs = conversion.byte_list_to_u32le_list(response[4:52]) currentFreq = freqs.pop(0) freqCount = freqs.pop(0) return currentFreq, freqs[:freqCount] - + def set_com_frequency(self, protocol, freq): assert self._hw_version >= 3 - + with self._lock: cmd = [Commands.JTAG_COMMAND, Commands.SET_COM_FREQ, protocol.value - 1, 0] cmd.extend(conversion.u32le_list_to_byte_list([freq // 1000])) response = self._device.transfer(cmd, readSize=8) self._check_status(response[0:2]) - + freqs = conversion.byte_list_to_u32le_list(response[4:8]) return freqs[0] def enter_debug(self, protocol): with self._lock: self.enter_idle() - + if protocol == self.Protocol.SWD: protocolParam = Commands.JTAG_ENTER_SWD elif protocol == self.Protocol.JTAG: @@ -309,7 +309,7 @@ def enter_debug(self, protocol): response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_ENTER2, protocolParam, 0], readSize=2) self._check_status(response) self._protocol = protocol - + def open_ap(self, apsel): with self._lock: if not self._check_version(self.MIN_JTAG_VERSION_MULTI_AP): @@ -317,7 +317,7 @@ def open_ap(self, apsel): cmd = [Commands.JTAG_COMMAND, Commands.JTAG_INIT_AP, apsel, Commands.JTAG_AP_NO_CORE] response = self._device.transfer(cmd, readSize=2) self._check_status(response) - + def close_ap(self, apsel): with self._lock: if not 
self._check_version(self.MIN_JTAG_VERSION_MULTI_AP): @@ -330,16 +330,16 @@ def target_reset(self): with self._lock: response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_DRIVE_NRST, Commands.JTAG_DRIVE_NRST_PULSE], readSize=2) self._check_status(response) - + def drive_nreset(self, isAsserted): with self._lock: value = Commands.JTAG_DRIVE_NRST_LOW if isAsserted else Commands.JTAG_DRIVE_NRST_HIGH response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_DRIVE_NRST, value], readSize=2) self._check_status(response) - + def _check_status(self, response): status, = struct.unpack('> 16) == 0, "register address must be 16-bit" - + self._check_dp_bank(port, addr) - + with self._lock: cmd = [Commands.JTAG_COMMAND, Commands.JTAG_READ_DAP_REG] cmd.extend(six.iterbytes(struct.pack('> 16) == 0, "register address must be 16-bit" @@ -501,7 +501,7 @@ def swo_stop(self): cmd = [Commands.JTAG_COMMAND, Commands.SWV_STOP_TRACE_RECEPTION] response = self._device.transfer(cmd, readSize=2) self._check_status(response) - + def swo_read(self): with self._lock: response = None diff --git a/pyocd/probe/stlink/usb.py b/pyocd/probe/stlink/usb.py index 5f98a60dc..f16a420d6 100644 --- a/pyocd/probe/stlink/usb.py +++ b/pyocd/probe/stlink/usb.py @@ -44,7 +44,7 @@ class STLinkUSBInterface: ## Command packet size. CMD_SIZE = 16 - + ## ST's USB vendor ID USB_VID = 0x0483 @@ -62,23 +62,23 @@ class STLinkUSBInterface: 0x3755: STLinkInfo('V3', 0x01, 0x81, 0x82), 0x3757: STLinkInfo('V3', 0x01, 0x81, 0x82), } - + ## STLink devices only have one USB interface. DEBUG_INTERFACE_NUMBER = 0 - + @classmethod def _usb_match(cls, dev): try: # Check VID/PID. isSTLink = (dev.idVendor == cls.USB_VID) and (dev.idProduct in cls.USB_PID_EP_MAP) - + # Try accessing the current config, which will cause a permission error on Linux. Better # to error out here than later when building the device description. 
For Windows we # don't need to worry about device permissions, but reading descriptors requires special # handling due to the libusb bug described in __init__(). if isSTLink and platform.system() != "Windows": dev.get_active_configuration() - + return isSTLink except usb.core.USBError as error: if error.errno == errno.EACCES and platform.system() == "Linux" \ @@ -101,7 +101,7 @@ def get_all_connected_devices(cls): except usb.core.NoBackendError: common.show_no_libusb_warning() return [] - + assert devices is not None intfList = [] for dev in devices: @@ -111,7 +111,7 @@ def get_all_connected_devices(cls): except (ValueError, usb.core.USBError, IndexError, NotImplementedError) as error: # Ignore errors that can be raised by libusb, just don't add the device to the list. pass - + return intfList def __init__(self, dev): @@ -142,14 +142,14 @@ def __init__(self, dev): self._product_name = self._dev.product finally: usb.util.dispose_resources(self._dev) - + def open(self): assert self._closed - + # Debug interface is always interface 0, alt setting 0. config = self._dev.get_active_configuration() interface = config[(self.DEBUG_INTERFACE_NUMBER, 0)] - + # Look up endpoint objects. for endpoint in interface: if endpoint.bEndpointAddress == self._info.out_ep: @@ -158,20 +158,20 @@ def open(self): self._ep_in = endpoint elif endpoint.bEndpointAddress == self._info.swv_ep: self._ep_swv = endpoint - + if not self._ep_out: raise exceptions.ProbeError("Unable to find OUT endpoint") if not self._ep_in: raise exceptions.ProbeError("Unable to find IN endpoint") self._max_packet_size = self._ep_in.wMaxPacketSize - + # Claim this interface to prevent other processes from accessing it. 
usb.util.claim_interface(self._dev, self.DEBUG_INTERFACE_NUMBER) - + self._flush_rx() self._closed = False - + def close(self): assert not self._closed self._closed = True @@ -206,7 +206,7 @@ def max_packet_size(self): def _flush_rx(self): assert self._ep_in - + # Flush the RX buffers by reading until timeout exception try: while True: @@ -230,19 +230,19 @@ def transfer(self, cmd, writeData=None, readSize=None, timeout=1000): assert len(cmd) <= self.CMD_SIZE paddedCmd = bytearray(self.CMD_SIZE) paddedCmd[0:len(cmd)] = cmd - + try: # Command phase. TRACE.debug(" USB CMD> %s" % ' '.join(['%02x' % i for i in paddedCmd])) count = self._ep_out.write(paddedCmd, timeout) assert count == len(paddedCmd) - + # Optional data out phase. if writeData is not None: TRACE.debug(" USB OUT> %s" % ' '.join(['%02x' % i for i in writeData])) count = self._ep_out.write(writeData, timeout) assert count == len(writeData) - + # Optional data in phase. if readSize is not None: TRACE.debug(" USB IN < (%d bytes)" % readSize) @@ -256,7 +256,7 @@ def transfer(self, cmd, writeData=None, readSize=None, timeout=1000): def read_swv(self, size, timeout=1000): assert self._ep_swv return bytearray(self._ep_swv.read(size, timeout)) - + def __repr__(self): return "<{} @ {:#x} vid={:#06x} pid={:#06x} sn={} version={}>".format( self.__class__.__name__, id(self), diff --git a/pyocd/probe/stlink_probe.py b/pyocd/probe/stlink_probe.py index b75dd9b98..3cf4a4a5a 100644 --- a/pyocd/probe/stlink_probe.py +++ b/pyocd/probe/stlink_probe.py @@ -31,12 +31,12 @@ class StlinkProbe(DebugProbe): """! 
@brief Wraps an STLink as a DebugProbe.""" - + @classmethod def get_all_connected_probes(cls, unique_id: Optional[str] = None, is_explicit: bool = False) -> List["StlinkProbe"]: return [cls(dev) for dev in STLinkUSBInterface.get_all_connected_devices()] - + @classmethod def get_probe_with_id(cls, unique_id: str, is_explicit: bool = False) -> Optional["StlinkProbe"]: for dev in STLinkUSBInterface.get_all_connected_devices(): @@ -54,7 +54,7 @@ def __init__(self, device: STLinkUSBInterface) -> None: self._mbed_info = None self._board_id = self._get_board_id() self._caps = set() - + def _get_board_id(self) -> Optional[str]: # Try to get the board ID first by sending a command, since it is much faster. This requires # opening the USB device, however, and requires a recent STLink firmware version. @@ -66,7 +66,7 @@ def _get_board_id(self) -> Optional[str]: for info in detector.list_mbeds(): if info['target_id_usb_id'] == self._link.serial_number: self._mbed_info = info - + # Some STLink probes provide an MSD volume, but not the mbed.htm file. # We can live without the board ID, so just ignore any error. try: @@ -87,11 +87,11 @@ def description(self) -> str: return self.product_name else: return "{0} [{1}]".format(board_info.name, board_info.target) - + @property def vendor_name(self): return self._link.vendor_name - + @property def product_name(self): return self._link.product_name @@ -107,11 +107,11 @@ def unique_id(self): @property def wire_protocol(self): return DebugProbe.Protocol.SWD if self._is_connected else None - + @property def is_open(self): return self._is_open - + @property def capabilities(self): return self._caps @@ -122,11 +122,11 @@ def create_associated_board(self): return MbedBoard(self.session, board_id=self._board_id) else: return None - + def open(self): self._link.open() self._is_open = True - + # Update capabilities. 
self._caps = { self.Capability.SWO, @@ -135,7 +135,7 @@ def open(self): } if self._link.supports_banked_dp: self._caps.add(self.Capability.BANKED_DP_REGISTERS) - + def close(self): self._link.close() self._is_open = False @@ -151,7 +151,7 @@ def disconnect(self): # TODO Close the APs. When this is attempted, we get an undocumented 0x1d error. Doesn't # seem to be necessary, anyway. self._memory_interfaces = {} - + self._link.enter_idle() self._is_connected = False @@ -168,7 +168,7 @@ def reset(self): def assert_reset(self, asserted): self._link.drive_nreset(asserted) self._nreset_state = asserted - + def is_reset_asserted(self): return self._nreset_state @@ -178,13 +178,13 @@ def flush(self): # ------------------------------------------- # # DAP Access functions # ------------------------------------------- # - + def read_dp(self, addr, now=True): result = self._link.read_dap_register(STLink.DP_PORT, addr) - + def read_dp_result_callback(): return result - + return result if now else read_dp_result_callback def write_dp(self, addr, data): @@ -193,10 +193,10 @@ def write_dp(self, addr, data): def read_ap(self, addr, now=True): apsel = (addr & APSEL) >> APSEL_SHIFT result = self._link.read_dap_register(apsel, addr & 0xffff) - + def read_ap_result_callback(): return result - + return result if now else read_ap_result_callback def write_ap(self, addr, data): @@ -205,10 +205,10 @@ def write_ap(self, addr, data): def read_ap_multiple(self, addr, count=1, now=True): results = [self.read_ap(addr, now=True) for n in range(count)] - + def read_ap_multiple_result_callback(): return results - + return results if now else read_ap_multiple_result_callback def write_ap_multiple(self, addr, values): @@ -237,14 +237,14 @@ def swo_read(self): class STLinkMemoryInterface(MemoryInterface): """! @brief Concrete memory interface for a single AP.""" - + def __init__(self, link, apsel): self._link = link self._apsel = apsel def write_memory(self, addr, data, transfer_size=32): """! 
@brief Write a single memory location. - + By default the transfer size is a word. """ assert transfer_size in (8, 16, 32) @@ -255,10 +255,10 @@ def write_memory(self, addr, data, transfer_size=32): self._link.write_mem16(addr, conversion.u16le_list_to_byte_list([data]), self._apsel) elif transfer_size == 8: self._link.write_mem8(addr, [data], self._apsel) - + def read_memory(self, addr, transfer_size=32, now=True): """! @brief Read a memory location. - + By default, a word will be read. """ assert transfer_size in (8, 16, 32) @@ -269,7 +269,7 @@ def read_memory(self, addr, transfer_size=32, now=True): result = conversion.byte_list_to_u16le_list(self._link.read_mem16(addr, 2, self._apsel))[0] elif transfer_size == 8: result = self._link.read_mem8(addr, 1, self._apsel)[0] - + def read_callback(): return result return result if now else read_callback @@ -284,18 +284,18 @@ def read_memory_block32(self, addr, size): class StlinkProbePlugin(Plugin): """! @brief Plugin class for StlLinkProbe.""" - + def should_load(self): # TODO only load the plugin when libusb is available return True - + def load(self): return StlinkProbe - + @property def name(self): return "stlink" - + @property def description(self): return "STMicro STLinkV2 and STLinkV3 debug probe" diff --git a/pyocd/probe/swj.py b/pyocd/probe/swj.py index 0e6f22548..6876d6060 100644 --- a/pyocd/probe/swj.py +++ b/pyocd/probe/swj.py @@ -23,10 +23,10 @@ class SWJSequenceSender(object): """! @brief Class to send canned SWJ sequences. - + The primary usage of this class is for sending the SWJ sequences to switch between JTAG and SWD protocols in the Arm ADI SWJ-DP. The select_protocol() method is used for this purpose. - + In addition, there are methods available to send fragments of the various selection sequences. These can be used to put a target is whatever state is required. 
""" @@ -34,25 +34,25 @@ class SWJSequenceSender(object): def __init__(self, probe, use_dormant): self._probe = probe self._use_dormant = use_dormant - + @property def use_dormant(self): return self._use_dormant - + @use_dormant.setter def use_dormant(self, flag): self._use_dormant = flag def select_protocol(self, protocol): """! @brief Send SWJ sequence to select chosen wire protocol. - + The `use_dormant` property determines whether dormant mode will be used for the protocol selection, or if the deprecated ADIv5.0 SWJ sequences will be used. - + @param self This object. @param protocol One of the @ref pyocd.probe.debug_probe.DebugProbe.Protocol DebugProbe.Protocol enums, except that `DEFAULT` is not acceptable and will cause a ValueError exception to be raised. - + @exception ValueError Request to select the `DEFAULT` protocol. """ # Not all probes support sending SWJ sequences. @@ -65,129 +65,129 @@ def select_protocol(self, protocol): raise ValueError("cannot send SWJ sequence for default protocol") else: assert False, "unhandled protocol %s in SWJSequenceSender" % protocol - + def jtag_enter_test_logic_reset(self): """! @brief Execute at least >5 TCK cycles with TMS high to enter the Test-Logic-Reset state. - + The line_reset() method can be used instead of this method, but takes a little longer to send. """ self._probe.swj_sequence(8, 0xff) - + def line_reset(self): """! @brief Execute a line reset for both SWD and JTAG. - + For JTAG, >=5 TCK cycles with TMS high enters the Test-Logic-Reset state.
For SWD, >=50 cycles with SWDIO high performs a line reset. """ self._probe.swj_sequence(51, 0xffffffffffffff) - + def selection_alert(self): """! @brief Send the dormant selection alert sequence. - + The 128-bit selection alert is prefixed with 8 cycles of SWDIOTMS high. """ self._probe.swj_sequence(136, 0x19bc0ea2e3ddafe986852d956209f392ff) - + def jtag_activation_code(self): """! @brief 4-bit SWDIOTMS cycles low + 8-bit JTAG activation code.""" self._probe.swj_sequence(12, 0x00a0) - + def swd_activation_code(self): """! @brief 4-bit SWDIOTMS cycles low + 8-bit SWD activation code.""" self._probe.swj_sequence(12, 0x01a0) - + def idle_cycles(self, cycles): """! @brief Send SWD idle cycles with SWDIOTMS low.""" self._probe.swj_sequence(cycles, 0) - + def jtag_to_dormant(self): """! @brief Send the JTAG to DS select sequence. - + Sends the recommended 31-bit JTAG-to-DS select sequence of 0x33bbbbba (LSB-first) on SWDIOTMS. See ADIv6 section B5.3.2. - + @note This should be prefixed with at least 5 cycles to put the JTAG TAP in Test-Logic-Reset; see jtag_enter_test_logic_reset(). """ self._probe.swj_sequence(39, 0x33bbbbba) - + def swd_to_dormant(self): """! @brief Send the SWD to DS sequence. - + Sends the 16-bit SWD-to-DS select sequence of 0xe3bc (LSB-first) on SWDIOTMS. See ADIv6 section B5.3.3. - + @note An SWD line reset should prefix this sequence. See line_reset(). """ self._probe.swj_sequence(16, 0xe3bc) - + def dormant_to_swd(self): """! @brief Perform the dormant mode to SWD transition sequence.""" - + # 8 SWDIOTMS cycles high + 128-bit selection alert sequence. self.selection_alert() - + # 4-bit SWDIOTMS cycles low + 8-bit SWD activation code. self.swd_activation_code() - + # SWD line reset (>50 SWDIOTMS cycles high). self.line_reset() - + # >=2 SWDIOTMS cycles low. self.idle_cycles(2) - + def dormant_to_jtag(self): """! @brief Perform the dormant mode to JTAG transition sequence.""" - + # 8 SWDIOTMS cycles high + 128-bit selection alert sequence. 
self.selection_alert() - + self.jtag_activation_code() - + self.jtag_enter_test_logic_reset() def switch_to_swd(self): """! @brief Send SWJ sequence to select SWD.""" - + # Ensure current debug interface is in reset state. A full line reset is used here instead # of the shorter JTAG TLR to support the case where the device is already in SWD mode. self.line_reset() - + if self._use_dormant: LOG.debug("Sending SWJ sequence to select SWD; using dormant state") - + # Switch from JTAG to dormant, then dormant to SWD. self.jtag_to_dormant() self.dormant_to_swd() else: LOG.debug("Sending deprecated SWJ sequence to select SWD") - + # Execute SWJ-DP Switch Sequence JTAG to SWD (0xE79E) # Change if SWJ-DP uses deprecated switch code (0xEDB6) self._probe.swj_sequence(16, 0xe79e) - + # Enter SWD Line Reset State self.line_reset() # > 50 cycles SWDIO/TMS High self._probe.swj_sequence(8, 0x00) # At least 2 idle cycles (SWDIO/TMS Low) - + def switch_to_jtag(self): """! @brief Send SWJ sequence to select JTAG.""" - + # Ensure current debug interface is in reset state, for either SWD or JTAG. self.line_reset() - + if self._use_dormant: LOG.debug("Sending SWJ sequence to select JTAG ; using dormant state") - + # Switch from SWD to dormant, then dormant to JTAG. self.swd_to_dormant() self.dormant_to_jtag() else: LOG.debug("Sending deprecated SWJ sequence to select JTAG") - + # Execute SWJ-DP Switch Sequence SWD to JTAG (0xE73C) # Change if SWJ-DP uses deprecated switch code (0xAEAE) self._probe.swj_sequence(16, 0xe73c) - + # Ensure JTAG interface is reset self.jtag_enter_test_logic_reset() - + diff --git a/pyocd/probe/tcp_client_probe.py b/pyocd/probe/tcp_client_probe.py index b6cde5497..c0d0f37b0 100644 --- a/pyocd/probe/tcp_client_probe.py +++ b/pyocd/probe/tcp_client_probe.py @@ -32,9 +32,9 @@ class TCPClientProbe(DebugProbe): """! 
@brief Probe class that connects to a debug probe server.""" - + DEFAULT_PORT = 5555 - + PROTOCOL_VERSION = 1 class StatusCode: @@ -45,7 +45,7 @@ class StatusCode: TRANSFER_ERROR = 10 TRANSFER_TIMEOUT = 11 TRANSFER_FAULT = 12 - + ## Map from status code to exception class. STATUS_CODE_CLASS_MAP = { StatusCode.GENERAL_ERROR: exceptions.Error, @@ -55,7 +55,7 @@ class StatusCode: StatusCode.TRANSFER_TIMEOUT: exceptions.TransferTimeoutError, StatusCode.TRANSFER_FAULT: exceptions.TransferFaultError, } - + @classmethod def _extract_address(cls, unique_id): parts = unique_id.split(':', 1) @@ -64,14 +64,14 @@ def _extract_address(cls, unique_id): else: port = int(parts[1]) return parts[0], port - + @classmethod def get_all_connected_probes(cls, unique_id=None, is_explicit=False): if is_explicit and unique_id is not None: return [cls(unique_id)] else: return [] - + @classmethod def get_probe_with_id(cls, unique_id, is_explicit=False): return cls(unique_id) if is_explicit else None @@ -86,15 +86,15 @@ def __init__(self, unique_id): self._request_id = 0 self._lock_count = 0 self._lock_count_lock = threading.RLock() - + @property def vendor_name(self): return self._read_property('vendor_name', "vendor") - + @property def product_name(self): return self._read_property('product_name', "product") - + @property def supported_wire_protocols(self): return self._read_property('supported_wire_protocols') @@ -106,27 +106,27 @@ def unique_id(self): @property def wire_protocol(self): return self._read_property('wire_protocol') - + @property def is_open(self): return self._is_open - + @property def capabilities(self): return self._read_property('capabilities') - + @property def request_id(self): """! @brief Generate a new request ID.""" rid = self._request_id self._request_id += 1 return rid - + def _perform_request(self, request, *args): """! Execute a request-reply transaction with the server. 
Request: - + ```` { "id": , @@ -136,7 +136,7 @@ def _perform_request(self, request, *args): ```` Response: - + ```` { "id": , @@ -156,19 +156,19 @@ def _perform_request(self, request, *args): rq["arguments"] = args formatted_request = json.dumps(rq) TRACE.debug("Request: %s", formatted_request) - + # Send request to server. self._socket.write(formatted_request.encode('utf-8') + b"\n") - + # Read response. response_data = self._socket.readline().decode('utf-8').strip() decoded_response = json.loads(response_data) TRACE.debug("decoded_response = %s", decoded_response) - + # Check for required keys. if ('id' not in decoded_response) or ('status' not in decoded_response): raise exceptions.ProbeError("malformed response from server; missing required field") - + # Check response status. status = decoded_response['status'] if status != 0: @@ -176,18 +176,18 @@ def _perform_request(self, request, *args): error = decoded_response.get('error', "(missing error message key)") LOG.debug("error received from server for command %s (status code %i): %s", request, status, error) - + # Create an appropriate local exception based on the status code. exc = self._create_exception_from_status_code(status, "error received from server for command %s (status code %i): %s" % (request, status, error)) raise exc - + # Get response value. If not present then there was no return value from the command result = decoded_response.get('result', None) - + return result - + def _create_exception_from_status_code(self, status, message): """! @brief Convert a status code into an exception instance.""" # Other status codes can use the map. @@ -212,25 +212,25 @@ def open(self): self._socket.connect() self._is_open = True self._socket.set_timeout(0.1) - + # Send hello message. 
self._perform_request('hello', self.PROTOCOL_VERSION) - + self._perform_request('open') - + def close(self): if self._is_open: self._perform_request('close') self._socket.close() self._is_open = False - + def lock(self): # The lock count is then used to only send the remote lock request once. with self._lock_count_lock: if self._lock_count == 0: self._perform_request('lock') self._lock_count += 1 - + def unlock(self): # The remote unlock request is only sent when the outermost nested locking is unlocked. with self._lock_count_lock: @@ -265,7 +265,7 @@ def reset(self): def assert_reset(self, asserted): self._perform_request('assert_reset', asserted) - + def is_reset_asserted(self): return self._perform_request('is_reset_asserted') @@ -279,11 +279,11 @@ def flush(self): def read_dp(self, addr, now=True): result = self._perform_request('read_dp', addr) - + def read_dp_cb(): # TODO need to raise any exception from here return result - + return result if now else read_dp_cb def write_dp(self, addr, data): @@ -291,11 +291,11 @@ def write_dp(self, addr, data): def read_ap(self, addr, now=True): result = self._perform_request('read_ap', addr) - + def read_ap_cb(): # TODO need to raise any exception from here return result - + return result if now else read_ap_cb def write_ap(self, addr, data): @@ -303,23 +303,23 @@ def write_ap(self, addr, data): def read_ap_multiple(self, addr, count=1, now=True): results = self._perform_request('read_ap_multiple', addr, count) - + def read_ap_multiple_cb(): # TODO need to raise any exception from here return results - + return results if now else read_ap_multiple_cb def write_ap_multiple(self, addr, values): self._perform_request('write_ap_multiple', addr, values) - + def get_memory_interface_for_ap(self, ap_address): handle = self._perform_request('get_memory_interface_for_ap', ap_address.ap_version.value, ap_address.nominal_address) if handle is None: return None return RemoteMemoryInterface(self, handle) - + ##@} ## @name SWO @@ 
-338,10 +338,10 @@ def swo_read(self): return self._perform_request('swo_read') ##@} - + class RemoteMemoryInterface(MemoryInterface): """! @brief Local proxy for a remote memory interface.""" - + def __init__(self, remote_probe, handle): self._remote_probe = remote_probe self._handle = handle @@ -349,11 +349,11 @@ def __init__(self, remote_probe, handle): def write_memory(self, addr, data, transfer_size=32): assert transfer_size in (8, 16, 32) self._remote_probe._perform_request('write_mem', self._handle, addr, data, transfer_size) - + def read_memory(self, addr, transfer_size=32, now=True): assert transfer_size in (8, 16, 32) result = self._remote_probe._perform_request('read_mem', self._handle, addr, transfer_size) - + def read_callback(): return result return result if now else read_callback @@ -372,14 +372,14 @@ def read_memory_block8(self, addr, size): class TCPClientProbePlugin(Plugin): """! @brief Plugin class for TCPClientProbePlugin.""" - + def load(self): return TCPClientProbe - + @property def name(self): return "remote" - + @property def description(self): return "Client for the pyOCD debug probe server" diff --git a/pyocd/probe/tcp_probe_server.py b/pyocd/probe/tcp_probe_server.py index f6de80f5f..62f84f774 100644 --- a/pyocd/probe/tcp_probe_server.py +++ b/pyocd/probe/tcp_probe_server.py @@ -33,14 +33,14 @@ class DebugProbeServer(threading.Thread): """! @brief Shares a debug probe over a TCP server. - + When the start() method is called, a new daemon thread is created to run the server. The server can be terminated by calling the stop() method, which will also kill the server thread. """ - + def __init__(self, session, probe, port=None, serve_local_only=None): """! @brief Constructor. - + @param self The object. @param session A @ref pyocd.core.session.Session "Session" object. Does not need to have a probe assigned to it. @@ -55,75 +55,75 @@ def __init__(self, session, probe, port=None, serve_local_only=None): localhost. 
If not specified (set to None), then the 'serve_local_only' session option is used. """ super(DebugProbeServer, self).__init__() - + # Configure the server thread. self.name = "debug probe %s server" % probe.unique_id self.daemon = True - + # Init instance variables. self._session = session self._probe = probe self._is_running = False - + # Make sure we have a shared proxy for the probe. if isinstance(probe, SharedDebugProbeProxy): self._proxy = probe else: self._proxy = SharedDebugProbeProxy(probe) - + # Get the port from options if not specified. if port is None: self._port = session.options.get('probeserver.port') else: self._port = port - + # Default to the serve_local_only session option. if serve_local_only is None: serve_local_only = session.options.get('serve_local_only') - + host = 'localhost' if serve_local_only else '' address = (host, self._port) - + # Create the server and bind to the address, but don't start running yet. self._server = TCPProbeServer(address, session, self._proxy) self._server.server_bind() - + def start(self): """! @brief Start the server thread and begin listening.""" self._server.server_activate() super(DebugProbeServer, self).start() - + def stop(self): """! @brief Shut down the server. - + Any open connections will be forcibly closed. This function does not return until the server thread has exited. """ self._server.shutdown() self.join() - + @property def is_running(self): """! @brief Whether the server thread is running.""" return self._is_running - + @property def port(self): """! @brief The server's port. - + If port 0 was specified in the constructor, then, after start() is called, this will reflect the actual port on which the server is listening. """ return self._port - + def run(self): """! @brief The server thread implementation.""" self._is_running = True - + # Read back the actual port if 0 was specified. 
if self._port == 0: self._port = self._server.socket.getsockname()[1] - + LOG.info("Serving debug probe %s (%s) on port %i", self._probe.description, self._probe.unique_id, self._port) self._server.serve_forever() @@ -131,24 +131,24 @@ def run(self): class TCPProbeServer(ThreadingTCPServer): """! @brief TCP server subclass that carries the session and probe being served.""" - + # Change the default SO_REUSEADDR setting. allow_reuse_address = True - + def __init__(self, server_address, session, probe): self._session = session self._probe = probe ThreadingTCPServer.__init__(self, server_address, DebugProbeRequestHandler, bind_and_activate=False) - + @property def session(self): return self._session - + @property def probe(self): return self._probe - + def handle_error(self, request, client_address): LOG.error("Error while handling client request (client address %s):", client_address, exc_info=self._session.log_tracebacks) @@ -156,7 +156,7 @@ def handle_error(self, request, client_address): class DebugProbeRequestHandler(StreamRequestHandler): """! @brief Probe server request handler. - + This class implements the server side for the remote probe protocol. request: @@ -178,10 +178,10 @@ class DebugProbeRequestHandler(StreamRequestHandler): } ```` """ - + ## Current version of the remote probe protocol. PROTOCOL_VERSION = 1 - + class StatusCode: """! @brief Constants for errors reported from the server.""" GENERAL_ERROR = 1 @@ -198,22 +198,22 @@ def setup(self): self._client_domain = info[0] except socket.herror: self._client_domain = self.client_address[0] - + LOG.info("Remote probe client connected (%s from port %i)", self._client_domain, self.client_address[1]) - + # Get the session and probe we're serving from the server. self._session = self.server.session self._probe = self.server.probe - + # Give the probe a session if it doesn't have one, in case it needs to access settings. 
# TODO: create a session proxy so client-side options can be accessed if self._probe.session is None: self._probe.session = self._session - + # Dict to store handles for AP memory interfaces. self._next_ap_memif_handle = 0 self._ap_memif_handles = {} - + # Create the request handlers dict here so we can reference bound probe methods. self._REQUEST_HANDLERS = { # Command Handler Arg count @@ -250,17 +250,17 @@ def setup(self): 'read_block8': (self._request__read_block8, 3 ), # 'read_block8', handle:int, addr:int, word_count:int -> List[int] 'write_block8': (self._request__write_block8, 3 ), # 'write_block8', handle:int, addr:int, data:List[int] } - + # Let superclass do its thing. (Can't use super() here because the superclass isn't derived # from object in Py2.) StreamRequestHandler.setup(self) - + def finish(self): LOG.info("Remote probe client disconnected (%s from port %i)", self._client_domain, self.client_address[1]) - + self._session = None StreamRequestHandler.finish(self) - + def _send_error_response(self, status=1, message=""): response_dict = { "id": self._current_request_id, @@ -271,7 +271,7 @@ def _send_error_response(self, status=1, message=""): TRACE.debug("response: %s", response) response_encoded = response.encode('utf-8') self.wfile.write(response_encoded + b"\n") - + def _send_response(self, result): response_dict = { "id": self._current_request_id, @@ -283,7 +283,7 @@ def _send_response(self, result): TRACE.debug("response: %s", response) response_encoded = response.encode('utf-8') self.wfile.write(response_encoded + b"\n") - + def handle(self): # Process requests until the connection is closed. while True: @@ -291,48 +291,48 @@ def handle(self): request = None request_dict = None self._current_request_id = -1 - + # Read request line. 
request = self.rfile.readline() TRACE.debug("request: %s", request) if len(request) == 0: LOG.debug("empty request, closing connection") return - + try: request_dict = json.loads(request) except json.JSONDecodeError: self._send_error_response(message="invalid request format") continue - + if not isinstance(request_dict, dict): self._send_error_response(message="invalid request format") continue - + if 'id' not in request_dict: self._send_error_response(message="missing request ID") continue self._current_request_id = request_dict['id'] - + if 'request' not in request_dict: self._send_error_response(message="missing request field") continue request_type = request_dict['request'] - + # Get arguments. If the key isn't present then there are no arguments. request_args = request_dict.get('arguments', []) - + if not isinstance(request_args, list): self._send_error_response(message="invalid request arguments format") continue - + if request_type not in self._REQUEST_HANDLERS: self._send_error_response(message="unknown request type") continue handler, arg_count = self._REQUEST_HANDLERS[request_type] self._check_args(request_args, arg_count) result = handler(*request_args) - + # Send a success response. self._send_response(result) # Catch all exceptions so that an error response can be returned, to not leave the client hanging. @@ -349,7 +349,7 @@ def handle(self): # Reraise non-pyocd errors. if not isinstance(err, exceptions.Error): raise - + def _get_exception_status_code(self, err): """! @brief Convert an exception class into a status code.""" # Must test the exception class in order of specific to general. 
@@ -365,17 +365,17 @@ def _get_exception_status_code(self, err): return self.StatusCode.TRANSFER_ERROR else: return self.StatusCode.GENERAL_ERROR - + def _check_args(self, args, count): if len(args) != count: raise exceptions.Error("malformed request; invalid number of arguments") - + def _request__hello(self, version): # 'hello', protocol-version:int if version != self.PROTOCOL_VERSION: raise exceptions.Error("client requested unsupported protocol version %i (expected %i)" % (version, self.PROTOCOL_VERSION)) - + def _request__read_property(self, name): # 'readprop', name:str if not hasattr(self._probe, name): @@ -385,7 +385,7 @@ def _request__read_property(self, name): if name in self._PROPERTY_CONVERTERS: value = self._PROPERTY_CONVERTERS[name](value) return value - + def _request__connect(self, protocol_name): # 'connect', protocol:str try: @@ -393,7 +393,7 @@ def _request__connect(self, protocol_name): except KeyError: raise exceptions.Error("invalid protocol name %s" % protocol_name) self._probe.connect(protocol) - + def _request__get_memory_interface_for_ap(self, ap_address_version, ap_nominal_address): # 'get_memory_interface_for_ap', ap_address_version:int, ap_nominal_address:int -> handle:int|null ap_version = APVersion(ap_address_version) @@ -413,7 +413,7 @@ def _request__get_memory_interface_for_ap(self, ap_address_version, ap_nominal_a else: handle = None return handle - + def _request__swo_read(self): return list(self._probe.swo_read()) @@ -422,34 +422,34 @@ def _request__read_mem(self, handle, addr, xfer_size): if handle not in self._ap_memif_handles: raise exceptions.Error("invalid handle received from remote memory access") return self._ap_memif_handles[handle].read_memory(addr, xfer_size, now=True) - + def _request__write_mem(self, handle, addr, value, xfer_size): # 'write_mem', handle:int, addr:int, value:int, xfer_size:int if handle not in self._ap_memif_handles: raise exceptions.Error("invalid handle received from remote memory access") 
self._ap_memif_handles[handle].write_memory(addr, value, xfer_size) - + def _request__read_block32(self, handle, addr, word_count): # 'read_block32', handle:int, addr:int, word_count:int -> List[int] # TODO use base64 data if handle not in self._ap_memif_handles: raise exceptions.Error("invalid handle received from remote memory access") return self._ap_memif_handles[handle].read_memory_block32(addr, word_count) - + def _request__write_block32(self, handle, addr, data): # 'write_block32', handle:int, addr:int, data:List[int] # TODO use base64 data if handle not in self._ap_memif_handles: raise exceptions.Error("invalid handle received from remote memory access") self._ap_memif_handles[handle].write_memory_block32(addr, data) - + def _request__read_block8(self, handle, addr, word_count): # 'read_block8', handle:int, addr:int, word_count:int -> List[int] # TODO use base64 data if handle not in self._ap_memif_handles: raise exceptions.Error("invalid handle received from remote memory access") return self._ap_memif_handles[handle].read_memory_block8(addr, word_count) - + def _request__write_block8(self, handle, addr, data): # 'write_block8', handle:int, addr:int, data:List[int] # TODO use base64 data diff --git a/pyocd/rtos/argon.py b/pyocd/rtos/argon.py index 742e79791..263c9fa1e 100644 --- a/pyocd/rtos/argon.py +++ b/pyocd/rtos/argon.py @@ -69,7 +69,7 @@ def __iter__(self): class ArgonThreadContext(DebugContext): """! @brief Thread context for Argon.""" - + # SP is handled specially, so it is not in these dicts. CORE_REGISTER_OFFSETS = { @@ -190,7 +190,7 @@ def read_core_registers_raw(self, reg_list): # vector catch, so retrieve LR stored by OS on last # thread switch. 
hasExtendedFrame = self._thread.has_extended_frame - + if hasExtendedFrame: table = self.FPU_EXTENDED_REGISTER_OFFSETS hwStacked = 0x68 @@ -449,30 +449,30 @@ class ArgonTraceEvent(events.TraceEvent): kArTraceThreadSwitch = 1 # 2 value: 0=previous thread's new state, 1=new thread id kArTraceThreadCreated = 2 # 1 value kArTraceThreadDeleted = 3 # 1 value - + def __init__(self, eventID, threadID, name, state, ts=0): super(ArgonTraceEvent, self).__init__("argon", ts) self._event_id = eventID self._thread_id = threadID self._thread_name = name self._prev_thread_state = state - + @property def event_id(self): return self._event_id - + @property def thread_id(self): return self._thread_id - + @property def thread_name(self): return self._thread_name - + @property def prev_thread_state(self): return self._prev_thread_state - + def __str__(self): if self.event_id == ArgonTraceEvent.kArTraceThreadSwitch: stateName = ArgonThread.STATE_NAMES.get(self.prev_thread_state, "") @@ -487,7 +487,7 @@ def __str__(self): class ArgonTraceEventFilter(TraceEventFilter): """! @brief Trace event filter to identify Argon kernel trace events sent via ITM. - + As Argon kernel trace events are identified, the ITM trace events are replaced with instances of ArgonTraceEvent. """ @@ -496,7 +496,7 @@ def __init__(self, threads): self._threads = threads self._is_thread_event_pending = False self._pending_event = None - + def filter(self, event): if isinstance(event, events.TraceITMEvent): if event.port == 31: @@ -511,25 +511,25 @@ def filter(self, event): threadID = event.data name = self._threads.get(threadID, "") state = self._pending_event.data & 0x00ffffff - + # Create the Argon event. event = ArgonTraceEvent(eventID, threadID, name, state, self._pending_event.timestamp) self._is_thread_event_pending = False self._pending_event = None - return event + return event class ArgonPlugin(Plugin): """! 
@brief Plugin class for the Argon RTOS.""" - + def load(self): return ArgonThreadProvider - + @property def name(self): return "argon" - + @property def description(self): return "Argon RTOS" diff --git a/pyocd/rtos/common.py b/pyocd/rtos/common.py index 998489c2a..31c6d4297 100644 --- a/pyocd/rtos/common.py +++ b/pyocd/rtos/common.py @@ -65,7 +65,7 @@ class HandlerModeThread(TargetThread): """! @brief Class representing the handler mode.""" UNIQUE_ID = 2 - + def __init__(self, targetContext, provider): super(HandlerModeThread, self).__init__() self._target_context = targetContext diff --git a/pyocd/rtos/freertos.py b/pyocd/rtos/freertos.py index d9a70d7c5..621a68ac7 100644 --- a/pyocd/rtos/freertos.py +++ b/pyocd/rtos/freertos.py @@ -67,7 +67,7 @@ def __iter__(self): class FreeRTOSThreadContext(DebugContext): """! @brief Thread context for FreeRTOS.""" - + # SP/PSP are handled specially, so it is not in these dicts. COMMON_REGISTER_OFFSETS = { @@ -535,14 +535,14 @@ def _get_elf_symbol_size(self, name, addr, calculated_size): class FreeRTOSPlugin(Plugin): """! @brief Plugin class for FreeRTOS.""" - + def load(self): return FreeRTOSThreadProvider - + @property def name(self): return "freertos" - + @property def description(self): return "FreeRTOS" diff --git a/pyocd/rtos/rtx5.py b/pyocd/rtos/rtx5.py index 7b6fe3907..5e7482c0b 100644 --- a/pyocd/rtos/rtx5.py +++ b/pyocd/rtos/rtx5.py @@ -48,7 +48,7 @@ def __iter__(self): class RTXThreadContext(DebugContext): """! @brief Thread context for RTX5.""" - + # SP/PSP are handled specially, so it is not in these dicts. 
# Offsets are relative to stored SP in a task switch block, for the @@ -234,7 +234,7 @@ class RTXTargetThread(TargetThread): 0x83: "Waiting[MsgGet]", 0x93: "Waiting[MsgPut]", } - + def __init__(self, targetContext, provider, base): super(RTXTargetThread, self).__init__() self._target_context = targetContext @@ -247,13 +247,13 @@ def __init__(self, targetContext, provider, base): try: name_ptr = self._target_context.read32(self._base + RTXTargetThread.NAME_OFFSET) self._name = read_c_string(self._target_context, name_ptr) - + self.update_state() except exceptions.TransferError as exc: LOG.debug("Transfer error while reading thread %x name: %s", self._base, exc) self._name = "?" LOG.debug('RTXTargetThread 0x%x' % base) - + def update_state(self): try: state = self._target_context.read8(self._base + RTXTargetThread.STATE_OFFSET) @@ -353,7 +353,7 @@ def event_handler(self, notification): def _build_thread_list(self): newThreads = {} - + def create_or_update(thread): # Check for and reuse existing thread. if thread in self._threads: @@ -390,7 +390,7 @@ def create_or_update(thread): # Create fake handler mode thread. if self._target_context.read_core_register('ipsr') > 0: newThreads[HandlerModeThread.UNIQUE_ID] = HandlerModeThread(self._target_context, self) - + self._threads = newThreads def get_thread(self, threadId): @@ -448,14 +448,14 @@ def get_kernel_state(self): class RTX5Plugin(Plugin): """! @brief Plugin class for the RTX5 RTOS.""" - + def load(self): return RTX5ThreadProvider - + @property def name(self): return "rtx5" - + @property def description(self): return "RTX5" diff --git a/pyocd/rtos/zephyr.py b/pyocd/rtos/zephyr.py index 009e0afb2..a1f02f699 100644 --- a/pyocd/rtos/zephyr.py +++ b/pyocd/rtos/zephyr.py @@ -47,7 +47,7 @@ def __iter__(self): class ZephyrThreadContext(DebugContext): """! @brief Thread context for Zephyr.""" - + STACK_FRAME_OFFSETS = { 0: 0, # r0 1: 4, # r1 @@ -417,14 +417,14 @@ def version(self): class ZephyrPlugin(Plugin): """! 
@brief Plugin class for the Zephyr RTOS.""" - + def load(self): return ZephyrThreadProvider - + @property def name(self): return "zephyr" - + @property def description(self): return "Zephyr" diff --git a/pyocd/subcommands/base.py b/pyocd/subcommands/base.py index 6959f8761..f0c55e6c2 100644 --- a/pyocd/subcommands/base.py +++ b/pyocd/subcommands/base.py @@ -36,7 +36,7 @@ class SubcommandBase: class CommonOptions: """! @brief Namespace with parsers for repeated option groups.""" - + # Define logging related options. LOGGING = argparse.ArgumentParser(description='logging', add_help=False) LOGGING_GROUP = LOGGING.add_argument_group("logging") @@ -48,7 +48,7 @@ class CommonOptions: help="Set log level of loggers whose name matches any of the comma-separated list of glob-style " "patterns. Log level must be one of (critical, error, warning, info, debug). Can be " "specified multiple times. Example: -L*.trace,pyocd.core.*=debug") - + # Define config related options for all subcommands. CONFIG = argparse.ArgumentParser(description='common', add_help=False) CONFIG_GROUP = CONFIG.add_argument_group("configuration") @@ -66,11 +66,11 @@ class CommonOptions: help="(Deprecated) Send setting to DAPAccess layer.") CONFIG_GROUP.add_argument("--pack", metavar="PATH", action="append", help="Path to the .pack file for a CMSIS Device Family Pack.") - + # Define common options for all subcommands, including logging options. COMMON = argparse.ArgumentParser(description='common', parents=[LOGGING, CONFIG], add_help=False) - + # Common connection related options. 
CONNECT = argparse.ArgumentParser(description='common', add_help=False) CONNECT_GROUP = CONNECT.add_argument_group("connection") @@ -89,7 +89,7 @@ class CommonOptions: help="Do not wait for a probe to be connected if none are available.") CONNECT_GROUP.add_argument("-M", "--connect", dest="connect_mode", metavar="MODE", help="Select connect mode from one of (halt, pre-reset, under-reset, attach).") - + @classmethod def add_subcommands(cls, parser: argparse.ArgumentParser) -> None: """! @brief Add declared subcommands to the given parser.""" @@ -98,7 +98,7 @@ def add_subcommands(cls, parser: argparse.ArgumentParser) -> None: for subcmd_class in cls.SUBCOMMANDS: parsers = subcmd_class.get_args() subcmd_class.parser = parsers[-1] - + subparser = subparsers.add_parser( subcmd_class.NAMES[0], aliases=subcmd_class.NAMES[1:], @@ -120,15 +120,15 @@ class itself, as it is saved by the caller in cls.parser. def customize_subparser(cls, subparser: argparse.ArgumentParser) -> None: """! @brief Optionally modify a subparser after it is created.""" pass - + def __init__(self, args: argparse.Namespace): """! @brief Constructor. - + @param self This object. @param args Namespace of parsed argument values. """ self._args = args - + def invoke(self) -> int: """! @brief Run the subcommand. @return Process status code for the command. @@ -166,5 +166,5 @@ def _get_pretty_table(self, fields: List[str], header: bool = None) -> prettytab pt.hrules = prettytable.HEADER pt.vrules = prettytable.NONE return pt - + diff --git a/pyocd/subcommands/commander_cmd.py b/pyocd/subcommands/commander_cmd.py index 7e2ed6799..44c68ce16 100644 --- a/pyocd/subcommands/commander_cmd.py +++ b/pyocd/subcommands/commander_cmd.py @@ -27,7 +27,7 @@ class CommanderSubcommand(SubcommandBase): """! @brief `pyocd commander` subcommand.""" - + NAMES = ['commander', 'cmd'] HELP = "Interactive command console." 
DEFAULT_LOG_LEVEL = logging.WARNING @@ -46,9 +46,9 @@ def get_args(cls) -> List[argparse.ArgumentParser]: help="Optionally specify ELF file being debugged.") commander_options.add_argument("-c", "--command", dest="commands", metavar="CMD", action='append', nargs='+', help="Run commands.") - + return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, commander_parser] - + def invoke(self) -> int: """! @brief Handle 'commander' subcommand.""" # Flatten commands list then extract primary command and its arguments. diff --git a/pyocd/subcommands/erase_cmd.py b/pyocd/subcommands/erase_cmd.py index 1c90d53e1..26603bcfb 100644 --- a/pyocd/subcommands/erase_cmd.py +++ b/pyocd/subcommands/erase_cmd.py @@ -30,7 +30,7 @@ class EraseSubcommand(SubcommandBase): """! @brief `pyocd erase` subcommand.""" - + NAMES = ['erase'] HELP = "Erase entire device flash or specified sectors." EPILOG = ("If no position arguments are listed, then no action will be taken unless the --chip or " @@ -43,7 +43,7 @@ class EraseSubcommand(SubcommandBase): "0x800-0x2000 (erase sectors starting at 0x800 up to but not including 0x2000) " "0+8192 (erase 8 kB starting at address 0)") DEFAULT_LOG_LEVEL = logging.WARNING - + @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: """! @brief Add this subcommand to the subparsers object.""" @@ -59,13 +59,13 @@ def get_args(cls) -> List[argparse.ArgumentParser]: erase_parser.add_argument("addresses", metavar="", action='append', nargs='*', help="List of sector addresses or ranges to erase.") - + return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, erase_parser] - + def invoke(self) -> int: """! @brief Handle 'erase' subcommand.""" self._increase_logging(["pyocd.flash.eraser"]) - + # Display a nice, helpful error describing why nothing was done and how to correct it. if (self._args.erase_mode is None) or not self._args.addresses: LOG.error("No erase operation specified. 
Please specify one of '--chip', '--sector', " @@ -73,7 +73,7 @@ def invoke(self) -> int: "of sector addresses to erase must be provided. " "See 'pyocd erase --help' for more.") return 1 - + session = ConnectHelper.session_with_chosen_probe( project_dir=self._args.project_dir, config_file=self._args.config, @@ -92,7 +92,7 @@ def invoke(self) -> int: with session: mode = self._args.erase_mode or FlashEraser.Mode.SECTOR eraser = FlashEraser(session, mode) - + addresses = flatten_args(self._args.addresses) eraser.erase(addresses) diff --git a/pyocd/subcommands/gdbserver_cmd.py b/pyocd/subcommands/gdbserver_cmd.py index 19a4002b7..3a481d723 100644 --- a/pyocd/subcommands/gdbserver_cmd.py +++ b/pyocd/subcommands/gdbserver_cmd.py @@ -38,10 +38,10 @@ class GdbserverSubcommand(SubcommandBase): """! @brief `pyocd gdbserver` subcommand.""" - + NAMES = ['gdbserver', 'gdb'] HELP = "Run the gdb remote server(s)." - + ## @brief Valid erase mode options. ERASE_OPTIONS = [ 'auto', @@ -89,14 +89,14 @@ def get_args(cls) -> List[argparse.ArgumentParser]: help="Allow single stepping to step into interrupts.") gdbserver_options.add_argument("-c", "--command", dest="commands", metavar="CMD", action='append', nargs='+', help="Run command (OpenOCD compatibility).") - + return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, gdbserver_parser] - + def __init__(self, args: argparse.Namespace): """! @brief Constructor.""" super().__init__(args) self._echo_msg = None - + def _process_commands(self, commands: Optional[List[str]]): """! @brief Handle OpenOCD commands for compatibility.""" if commands is None: @@ -127,7 +127,7 @@ def _gdbserver_listening_cb(self, note: Notification): if self._echo_msg is not None: print(self._echo_msg, file=sys.stderr) sys.stderr.flush() - + def invoke(self) -> int: """! 
@brief Handle 'gdbserver' subcommand.""" self._process_commands(self._args.commands) @@ -148,7 +148,7 @@ def invoke(self) -> int: 'serve_local_only' : self._args.serve_local_only, 'vector_catch' : self._args.vector_catch, }) - + # Split list of cores to serve. if self._args.core is not None: try: @@ -158,7 +158,7 @@ def invoke(self) -> int: return 1 else: core_list = None - + # Get the probe. probe = ConnectHelper.choose_probe( blocking=(not self._args.no_wait), @@ -168,10 +168,10 @@ def invoke(self) -> int: if probe is None: LOG.error("No probe selected.") return 1 - + # Create a proxy so the probe can be shared between the session and probe server. probe_proxy = SharedDebugProbeProxy(probe) - + # Create the session. session = Session(probe_proxy, project_dir=self._args.project_dir, @@ -198,21 +198,21 @@ def invoke(self) -> int: "s" if len(bad_cores) > 1 else "", ", ".join(str(x) for x in bad_cores)) return 1 - + # Set ELF if provided. if self._args.elf: session.board.target.elf = os.path.expanduser(self._args.elf) - + # Run the probe server is requested. if self._args.enable_probe_server: probe_server = DebugProbeServer(session, session.probe, self._args.probe_server_port, self._args.serve_local_only) session.probeserver = probe_server probe_server.start() - + # Start up the gdbservers. for core_number, core in session.board.target.cores.items(): - # Don't create a server for CPU-less memory Access Port. + # Don't create a server for CPU-less memory Access Port. if isinstance(session.board.target.cores[core_number], GenericMemAPTarget): continue # Don't create a server if this core is not listed by the user. diff --git a/pyocd/subcommands/json_cmd.py b/pyocd/subcommands/json_cmd.py index d2697af50..cefb9a17d 100644 --- a/pyocd/subcommands/json_cmd.py +++ b/pyocd/subcommands/json_cmd.py @@ -30,7 +30,7 @@ class JsonSubcommand(SubcommandBase): """! @brief `pyocd json` subcommand.""" - + NAMES = ['json'] HELP = "Output information as JSON." 
DEFAULT_LOG_LEVEL = logging.FATAL + 1 @@ -51,26 +51,26 @@ def get_args(cls) -> List[argparse.ArgumentParser]: help="List available features and options.") return [cls.CommonOptions.CONFIG, json_parser] - + @classmethod def customize_subparser(cls, subparser: argparse.ArgumentParser) -> None: """! @brief Optionally modify a subparser after it is created.""" subparser.set_defaults(verbose=0, quiet=0) - + def __init__(self, args: argparse.Namespace): super().__init__(args) - + # Disable all logging. logging.disable(logging.CRITICAL) - + def invoke(self) -> int: """! @brief Handle 'json' subcommand.""" all_outputs = (self._args.probes, self._args.targets, self._args.boards, self._args.features) - + # Default to listing probes. if not any(all_outputs): self._args.probes = True - + # Check for more than one output option being selected. if sum(int(x) for x in all_outputs) > 1: # Because we're outputting JSON we can't just log the error, but must report the error @@ -84,7 +84,7 @@ def invoke(self) -> int: print(json.dumps(obj, indent=4)) return 0 - + # Create a session with no device so we load any config. session = Session(None, project_dir=self._args.project_dir, @@ -93,7 +93,7 @@ def invoke(self) -> int: pack=self._args.pack, **convert_session_options(self._args.options) ) - + if self._args.targets or self._args.boards: # Create targets from provided CMSIS pack. if session.options['pack'] is not None: diff --git a/pyocd/subcommands/list_cmd.py b/pyocd/subcommands/list_cmd.py index aa183e6b2..e5e97e934 100644 --- a/pyocd/subcommands/list_cmd.py +++ b/pyocd/subcommands/list_cmd.py @@ -29,10 +29,10 @@ class ListSubcommand(SubcommandBase): """! @brief `pyocd list` subcommand.""" - + NAMES = ['list'] HELP = "List information about probes, targets, or boards." - + ## @brief Map to convert plugin groups to user friendly names. 
PLUGIN_GROUP_NAMES = { 'pyocd.probe': "Debug Probe", @@ -43,7 +43,7 @@ class ListSubcommand(SubcommandBase): def get_args(cls) -> List[argparse.ArgumentParser]: """! @brief Add this subcommand to the subparsers object.""" list_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) - + list_output = list_parser.add_argument_group("list output") list_output.add_argument('-p', '--probes', action='store_true', help="List available probes.") @@ -53,7 +53,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: help="List all known boards.") list_output.add_argument('--plugins', action='store_true', help="List available plugins.") - + list_options = list_parser.add_argument_group('list options') list_options.add_argument('-n', '--name', help="Restrict listing to items matching the given name substring. Applies to targets and boards.") @@ -63,23 +63,23 @@ def get_args(cls) -> List[argparse.ArgumentParser]: help="Restrict listing to targets from the specified source. Applies to targets.") list_options.add_argument('-H', '--no-header', action='store_true', help="Don't print a table header.") - + return [cls.CommonOptions.COMMON, list_parser] - + def invoke(self) -> int: """! @brief Handle 'list' subcommand.""" all_outputs = (self._args.probes, self._args.targets, self._args.boards, self._args.plugins) - + # Default to listing probes. if not any(all_outputs): self._args.probes = True - + # Check for more than one output option being selected. if sum(int(x) for x in all_outputs) > 1: LOG.error("Only one of the output options '--probes', '--targets', '--boards', " "or '--plugins' may be selected at a time.") return 1 - + # Create a session with no device so we load any config. 
session = Session(None, project_dir=self._args.project_dir, @@ -88,7 +88,7 @@ def invoke(self) -> int: pack=self._args.pack, **convert_session_options(self._args.options) ) - + if self._args.probes: ConnectHelper.list_connected_probes() elif self._args.targets: diff --git a/pyocd/subcommands/load_cmd.py b/pyocd/subcommands/load_cmd.py index 3d06390ed..1f60c572d 100644 --- a/pyocd/subcommands/load_cmd.py +++ b/pyocd/subcommands/load_cmd.py @@ -31,12 +31,12 @@ class LoadSubcommand(SubcommandBase): """! @brief `pyocd load` and `flash` subcommand.""" - + NAMES = ['load', 'flash'] HELP = "Load one or more images into target device memory." EPILOG = "Supported file formats are: binary, Intel hex, and ELF32." DEFAULT_LOG_LEVEL = logging.WARNING - + ## @brief Valid erase mode options. ERASE_OPTIONS = [ 'auto', @@ -66,18 +66,18 @@ def get_args(cls) -> List[argparse.ArgumentParser]: parser.add_argument("file", metavar="", nargs="+", help="File to write to memory. Binary files can have an optional base address appended to the file " "name as '@
', for instance 'app.bin@0x20000'.") - + return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, parser] - + def invoke(self) -> int: """! @brief Handle 'load' subcommand.""" self._increase_logging(["pyocd.flash.loader", __name__]) - + # Validate arguments. if (self._args.base_address is not None) and (len(self._args.file) > 1): raise ValueError("--base-address cannot be set when loading more than one file; " "use a base address suffix instead") - + session = ConnectHelper.session_with_chosen_probe( project_dir=self._args.project_dir, config_file=self._args.config, @@ -104,11 +104,11 @@ def invoke(self) -> int: base_address = int_base_0(suffix) else: base_address = self._args.base_address - + # Resolve our path. file_path = Path(filename).expanduser().resolve() filename = str(file_path) - + if base_address is None: LOG.info("Loading %s", filename) else: diff --git a/pyocd/subcommands/pack_cmd.py b/pyocd/subcommands/pack_cmd.py index 650c45357..fabd6738e 100644 --- a/pyocd/subcommands/pack_cmd.py +++ b/pyocd/subcommands/pack_cmd.py @@ -34,14 +34,14 @@ class PackSubcommandBase(SubcommandBase): """! @brief Base class for `pyocd pack` subcommands.""" - + # cmsis_pack_manager.Cache is used in quotes in the return type annotation because it may have # not been imported successfully. def _get_cache(self) -> "cmsis_pack_manager.Cache": """! @brief Handle 'clean' subcommand.""" if not CPM_AVAILABLE: raise exceptions.CommandError("'pack' subcommand is not available because cmsis-pack-manager is not installed") - + verbosity = self._args.verbose - self._args.quiet return cmsis_pack_manager.Cache(verbosity < 0, False) @@ -49,7 +49,7 @@ def _get_matches(self, cache: "cmsis_pack_manager.Cache") -> Set[str]: if not cache.index: LOG.info("No pack index present, downloading now...") cache.cache_descriptors() - + # Find matching part numbers. 
matches = set() for pattern in self._args.patterns: @@ -57,15 +57,15 @@ def _get_matches(self, cache: "cmsis_pack_manager.Cache") -> Set[str]: pat = re.compile(fnmatch.translate(pattern).rsplit('\\Z')[0], re.IGNORECASE) results = {name for name in cache.index.keys() if pat.search(name)} matches.update(results) - + if not matches: LOG.warning("No matching devices. Please make sure the pack index is up to date."), - + return matches class PackCleanSubcommand(PackSubcommandBase): """! @brief `pyocd pack clean` subcommand.""" - + NAMES = ['clean'] HELP = "Delete the pack index and all installed packs." @@ -74,11 +74,11 @@ def get_args(cls) -> List[argparse.ArgumentParser]: """! @brief Add this subcommand to the subparsers object.""" parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) return [cls.CommonOptions.LOGGING, parser] - + def invoke(self) -> int: """! @brief Handle 'clean' subcommand.""" cache = self._get_cache() - + LOG.info("Removing all pack data...") cache.cache_clean() print() @@ -86,7 +86,7 @@ def invoke(self) -> int: class PackUpdateSubcommand(PackSubcommandBase): """! @brief `pyocd pack update` subcommand.""" - + NAMES = ['update'] HELP = "Update the pack index." @@ -97,17 +97,17 @@ def get_args(cls) -> List[argparse.ArgumentParser]: parser.add_argument("-c", "--clean", action='store_true', help="Erase existing pack information before updating.") - + return [cls.CommonOptions.LOGGING, parser] - + def invoke(self) -> int: """! @brief Handle 'update' subcommand.""" cache = self._get_cache() - + if self._args.clean: LOG.info("Removing all pack data...") cache.cache_clean() - + LOG.info("Updating pack index...") cache.cache_descriptors() print() @@ -115,7 +115,7 @@ def invoke(self) -> int: class PackShowSubcommand(PackSubcommandBase): """! @brief `pyocd pack show` subcommand.""" - + NAMES = ['show'] HELP = "Show the list of installed packs." 
@@ -123,17 +123,17 @@ class PackShowSubcommand(PackSubcommandBase): def get_args(cls) -> List[argparse.ArgumentParser]: """! @brief Add this subcommand to the subparsers object.""" parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) - + display_options = parser.add_argument_group('display options') display_options.add_argument('-H', '--no-header', action='store_true', help="Don't print a table header.") - + return [cls.CommonOptions.LOGGING, parser] - + def invoke(self) -> int: """! @brief Handle 'show' subcommand.""" cache = self._get_cache() - + packs = pack_target.ManagedPacks.get_installed_packs(cache) pt = self._get_pretty_table(["Pack", "Version"]) for ref in packs: @@ -146,7 +146,7 @@ def invoke(self) -> int: class PackFindSubcommand(PackSubcommandBase): """! @brief `pyocd pack find` subcommand.""" - + NAMES = ['find'] HELP = "Report pack(s) in the index containing matching device part numbers." @@ -160,16 +160,16 @@ def get_args(cls) -> List[argparse.ArgumentParser]: help="Erase existing pack information before updating. Ignored if --update is not specified.") index_options.add_argument("-u", "--update", action='store_true', help="Update the pack index before searching.") - + display_options = parser.add_argument_group('display options') display_options.add_argument('-H', '--no-header', action='store_true', help="Don't print a table header.") - + parser.add_argument("patterns", metavar="", nargs='+', help="Glob-style pattern for matching a target part number.") - + return [cls.CommonOptions.LOGGING, parser] - + def invoke(self) -> int: """! @brief Handle 'find' subcommand.""" cache = self._get_cache() @@ -178,19 +178,19 @@ def invoke(self) -> int: if self._args.clean: LOG.info("Removing all pack data...") cache.cache_clean() - + LOG.info("Updating pack index...") cache.cache_descriptors() print() # Look for matching part numbers. matches = self._get_matches(cache) - + if matches: # Get the list of installed pack targets. 
installed_targets = pack_target.ManagedPacks.get_installed_targets(cache=cache) installed_target_names = [target.part_number.lower() for target in installed_targets] - + pt = self._get_pretty_table(["Part", "Vendor", "Pack", "Version", "Installed"]) for name in sorted(matches): info = cache.index[name] @@ -203,12 +203,12 @@ def invoke(self) -> int: info['name'].lower() in installed_target_names, ]) print(pt) - + return 0 class PackInstallSubcommand(PackSubcommandBase): """! @brief `pyocd pack install` subcommand.""" - + NAMES = ['install'] HELP = "Download and install pack(s) containing matching device part numbers." @@ -226,12 +226,12 @@ def get_args(cls) -> List[argparse.ArgumentParser]: download_options = parser.add_argument_group('download options') download_options.add_argument("-n", "--no-download", action='store_true', help="Just list the pack(s) that would be downloaded, don't actually download anything.") - + parser.add_argument("patterns", metavar="", nargs="+", help="Glob-style pattern for matching a target part number.") - + return [cls.CommonOptions.LOGGING, parser] - + def invoke(self) -> int: """! @brief Handle 'find' subcommand.""" cache = self._get_cache() @@ -240,14 +240,14 @@ def invoke(self) -> int: if self._args.clean: LOG.info("Removing all pack data...") cache.cache_clean() - + LOG.info("Updating pack index...") cache.cache_descriptors() print() # Look for matching part numbers. matches = self._get_matches(cache) - + if matches: devices = [cache.index[dev] for dev in matches] packs = cache.packs_for_devices(devices) @@ -265,7 +265,7 @@ def invoke(self) -> int: class PackSubcommand(PackSubcommandBase): """! @brief `pyocd pack` subcommand.""" - + NAMES = ['pack'] HELP = "Manage CMSIS-Packs for target support." SUBCOMMANDS = [ @@ -275,7 +275,7 @@ class PackSubcommand(PackSubcommandBase): PackShowSubcommand, PackUpdateSubcommand, ] - + @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: """! 
@brief Add this subcommand to the subparsers object.""" @@ -299,9 +299,9 @@ def get_args(cls) -> List[argparse.ArgumentParser]: help="Just list the pack(s) that would be downloaded, don't actually download anything.") pack_options.add_argument('-H', '--no-header', action='store_true', help="Don't print a table header.") - + return [cls.CommonOptions.LOGGING, pack_parser] - + def invoke(self) -> int: """! @brief Handle 'pack' subcommand.""" @@ -310,16 +310,16 @@ def invoke(self) -> int: return 0 cache = self._get_cache() - + if self._args.clean: LOG.info("Removing all pack data...") cache.cache_clean() - + if self._args.update: LOG.info("Updating pack index...") cache.cache_descriptors() print() - + if self._args.show: packs = pack_target.ManagedPacks.get_installed_packs(cache) pt = self._get_pretty_table(["Vendor", "Pack", "Version"]) @@ -333,14 +333,14 @@ def invoke(self) -> int: if self._args.find_devices or self._args.install_devices: self._args.patterns = self._args.find_devices or self._args.install_devices - + matches = self._get_matches(cache) - + if self._args.find_devices: # Get the list of installed pack targets. installed_targets = pack_target.ManagedPacks.get_installed_targets(cache=cache) installed_target_names = [target.part_number.lower() for target in installed_targets] - + pt = self._get_pretty_table(["Part", "Vendor", "Pack", "Version", "Installed"]) for name in sorted(matches): info = cache.index[name] diff --git a/pyocd/subcommands/reset_cmd.py b/pyocd/subcommands/reset_cmd.py index 07f54cb15..4008fcc7c 100644 --- a/pyocd/subcommands/reset_cmd.py +++ b/pyocd/subcommands/reset_cmd.py @@ -32,11 +32,11 @@ class ResetSubcommand(SubcommandBase): """! @brief `pyocd reset` subcommand.""" - + NAMES = ['reset'] HELP = "Reset a target device." DEFAULT_LOG_LEVEL = logging.WARNING - + @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: """! 
@brief Add this subcommand to the subparsers object.""" @@ -51,9 +51,9 @@ def get_args(cls) -> List[argparse.ArgumentParser]: "Default is core 0.") reset_options.add_argument("-l", "--halt", action="store_true", help="Halt the core on the first instruction after reset. Defaults to disabled.") - + return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, reset_parser] - + def invoke(self) -> None: """! @brief Handle 'reset' subcommand.""" # Verify selected reset type. @@ -62,7 +62,7 @@ def invoke(self) -> None: except ValueError: LOG.error("Invalid reset method: %s", self._args.reset_type) return - + session = ConnectHelper.session_with_chosen_probe( project_dir=self._args.project_dir, config_file=self._args.config, @@ -82,10 +82,10 @@ def invoke(self) -> None: # Handle hw reset specially using the probe, so we don't need a valid connection # and can skip discovery. If we're halting we need a connection even if performing a hardware reset. is_hw_reset = (the_reset_type == Target.ResetType.HW) and not self._args.halt - + # Only init the board if performing a sw reset. session.open(init_board=(not is_hw_reset)) - + LOG.info("Performing '%s' reset...", self._args.reset_type) if is_hw_reset: session.probe.reset() diff --git a/pyocd/subcommands/server_cmd.py b/pyocd/subcommands/server_cmd.py index 36a1331a0..c23b34e90 100644 --- a/pyocd/subcommands/server_cmd.py +++ b/pyocd/subcommands/server_cmd.py @@ -29,10 +29,10 @@ class ServerSubcommand(SubcommandBase): """! @brief `pyocd server` subcommand.""" - + NAMES = ['server'] HELP = "Run debug probe server." - + @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: """! 
@brief Add this subcommand to the subparsers object.""" @@ -63,9 +63,9 @@ def get_args(cls) -> List[argparse.ArgumentParser]: "':' where is the name of a probe plugin.") server_options.add_argument("-W", "--no-wait", action="store_true", help="Do not wait for a probe to be connected if none are available.") - + return [cls.CommonOptions.LOGGING, server_parser] - + def invoke(self) -> None: """! @brief Handle 'server' subcommand.""" # Create a session to load config, particularly logging config. Even though we do have a @@ -75,22 +75,22 @@ def invoke(self) -> None: session = Session(probe=None, serve_local_only=self._args.serve_local_only, options=session_options) - + # The ultimate intent is to serve all available probes by default. For now we just serve # a single probe. probe = ConnectHelper.choose_probe(unique_id=self._args.unique_id) if probe is None: return - + # Assign the session to the probe. probe.session = session - + # Create the server instance. server = DebugProbeServer(session, probe, self._args.port_number, self._args.serve_local_only) session.probeserver = server LOG.debug("Starting debug probe server") server.start() - + # Loop as long as the probe is running. The server thread is a daemon, so the main thread # must continue to exist. 
try: diff --git a/pyocd/target/builtin/cypress/target_CY8C64xx.py b/pyocd/target/builtin/cypress/target_CY8C64xx.py index 7423410ab..6652e4089 100644 --- a/pyocd/target/builtin/cypress/target_CY8C64xx.py +++ b/pyocd/target/builtin/cypress/target_CY8C64xx.py @@ -107,8 +107,8 @@ class cy8c64xx_s25hx512t(PSoC64): def __init__(self, session, ap_num): super(cy8c64xx_s25hx512t, self).__init__(session, CortexM_PSoC64_BLE2, self.MEMORY_MAP, ap_num) - - + + class cy8c64xx_nosmif(PSoC64): from .flash_algos.flash_algo_CY8C64xx import flash_algo as flash_algo_main from .flash_algos.flash_algo_CY8C6xxx_WFLASH import flash_algo as flash_algo_work @@ -158,8 +158,8 @@ def __init__(self, session): class cy8c64xx_cm4_s25hx512t(cy8c64xx_s25hx512t): def __init__(self, session): super(cy8c64xx_cm4_s25hx512t, self).__init__(session, 2) - - + + class cy8c64xx_cm0_nosmif(cy8c64xx_nosmif): def __init__(self, session): super(cy8c64xx_cm0_nosmif, self).__init__(session, 1) diff --git a/pyocd/target/builtin/cypress/target_CY8C6xx7.py b/pyocd/target/builtin/cypress/target_CY8C6xx7.py index 4971ebd68..b6d307d80 100644 --- a/pyocd/target/builtin/cypress/target_CY8C6xx7.py +++ b/pyocd/target/builtin/cypress/target_CY8C6xx7.py @@ -120,7 +120,7 @@ class CY8C6xx7_S25FS512S(PSoC6): def __init__(self, session): super(CY8C6xx7_S25FS512S, self).__init__(session, CortexM_PSoC6_BLE2, self.MEMORY_MAP) - + class CY8C6xx7_nosmif(PSoC6): from .flash_algos.flash_algo_CY8C6xx7 import flash_algo as flash_algo_main from .flash_algos.flash_algo_CY8C6xxx_WFLASH import flash_algo as flash_algo_work diff --git a/pyocd/target/builtin/target_CC3220SF.py b/pyocd/target/builtin/target_CC3220SF.py index ffb074224..e48af2861 100644 --- a/pyocd/target/builtin/target_CC3220SF.py +++ b/pyocd/target/builtin/target_CC3220SF.py @@ -113,7 +113,7 @@ def init(self): class CC3220SF(CoreSightTarget): VENDOR = "Texas Instruments" - + MEMORY_MAP = MemoryMap( RomRegion(start=0x00000000, length=0x00080000), FlashRegion(start=0x01000000, 
length=0x00100000, blocksize=0x800, is_boot_memory=True, flash_class=Flash_cc3220sf), @@ -125,4 +125,4 @@ def __init__(self, session): def post_connect_hook(self): self.cores[0].default_reset_type = self.ResetType.SW_VECTRESET - + diff --git a/pyocd/target/builtin/target_HC32L07x.py b/pyocd/target/builtin/target_HC32L07x.py index ba9b0bc11..81d3ec2f6 100644 --- a/pyocd/target/builtin/target_HC32L07x.py +++ b/pyocd/target/builtin/target_HC32L07x.py @@ -24,7 +24,7 @@ DEBUG_ACTIVE = 0x40002038 DEBUG_ACTIVE_VAL = 0x00000FFF -FLASH_ALGO = { +FLASH_ALGO = { 'load_address' : 0x20000000, # Flash algorithm as a hex string 'instructions': [ diff --git a/pyocd/target/builtin/target_HC32L110.py b/pyocd/target/builtin/target_HC32L110.py index 132057b79..7d7834277 100644 --- a/pyocd/target/builtin/target_HC32L110.py +++ b/pyocd/target/builtin/target_HC32L110.py @@ -24,7 +24,7 @@ DEBUG_ACTIVE = 0x40002038 DEBUG_ACTIVE_VAL = 0x00000FFF -FLASH_ALGO = { +FLASH_ALGO = { 'load_address' : 0x20000000, # Flash algorithm as a hex string 'instructions': [ @@ -58,7 +58,7 @@ 'page_size' : 0x200, 'analyzer_supported' : False, 'analyzer_address' : 0x00000000, - 'page_buffers' : [0x20000600], + 'page_buffers' : [0x20000600], 'min_program_length' : 0x200, } diff --git a/pyocd/target/builtin/target_HC32L13x.py b/pyocd/target/builtin/target_HC32L13x.py index a8b2b62a5..51db897e6 100644 --- a/pyocd/target/builtin/target_HC32L13x.py +++ b/pyocd/target/builtin/target_HC32L13x.py @@ -24,7 +24,7 @@ DEBUG_ACTIVE = 0x40002038 DEBUG_ACTIVE_VAL = 0x00000FFF -FLASH_ALGO = { +FLASH_ALGO = { 'load_address' : 0x20000000, # Flash algorithm as a hex string 'instructions': [ diff --git a/pyocd/target/builtin/target_HC32L19x.py b/pyocd/target/builtin/target_HC32L19x.py index 222fdb5d0..e27690d48 100644 --- a/pyocd/target/builtin/target_HC32L19x.py +++ b/pyocd/target/builtin/target_HC32L19x.py @@ -24,7 +24,7 @@ DEBUG_ACTIVE = 0x40002038 DEBUG_ACTIVE_VAL = 0x00000FFF -FLASH_ALGO = { +FLASH_ALGO = { 'load_address' : 
0x20000000, # Flash algorithm as a hex string 'instructions': [ diff --git a/pyocd/target/builtin/target_LPC1114FN28_102.py b/pyocd/target/builtin/target_LPC1114FN28_102.py index d2bc6009f..e1d76fab6 100644 --- a/pyocd/target/builtin/target_LPC1114FN28_102.py +++ b/pyocd/target/builtin/target_LPC1114FN28_102.py @@ -52,7 +52,7 @@ class LPC11XX_32(CoreSightTarget): VENDOR = "NXP" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x8000, is_boot_memory=True, blocksize=4096, diff --git a/pyocd/target/builtin/target_LPC11U24FBD64_401.py b/pyocd/target/builtin/target_LPC11U24FBD64_401.py index 803a0f308..8ca6807cb 100644 --- a/pyocd/target/builtin/target_LPC11U24FBD64_401.py +++ b/pyocd/target/builtin/target_LPC11U24FBD64_401.py @@ -50,7 +50,7 @@ class LPC11U24(CoreSightTarget): VENDOR = "NXP" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x8000, is_boot_memory=True, blocksize=0x1000, diff --git a/pyocd/target/builtin/target_LPC1768.py b/pyocd/target/builtin/target_LPC1768.py index f9ea79f95..5d49905ff 100644 --- a/pyocd/target/builtin/target_LPC1768.py +++ b/pyocd/target/builtin/target_LPC1768.py @@ -67,7 +67,7 @@ class LPC1768(CoreSightTarget): VENDOR = "NXP" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x10000, is_boot_memory=True, blocksize=0x1000, @@ -94,7 +94,7 @@ def __init__(self, session): def add_core(self, core): super(LPC1768, self).add_core(core) core.delegate = self - + def map_flash(self): self.write32(0x400FC040, 1) @@ -104,10 +104,10 @@ def set_reset_catch(self, core, reset_type=None): # Clear reset vector catch and remember whether it was set. self._saved_vc = self.get_vector_catch() self.set_vector_catch(self._saved_vc & ~Target.VectorCatch.CORE_RESET) - + # Map flash to 0. self.map_flash() - + # Set breakpoint on user reset handler. self._reset_handler = self.read32(0x4) if self._reset_handler < 0x80000: @@ -120,6 +120,6 @@ def clear_reset_catch(self, core, reset_type=None): # Clear breakpoint if it wasn't previously set. 
if not self._had_reset_handler_bp: self.remove_breakpoint(self._reset_handler) - + # Restore vector catch. self.set_vector_catch(self._saved_vc) diff --git a/pyocd/target/builtin/target_LPC4088FBD144.py b/pyocd/target/builtin/target_LPC4088FBD144.py index d04d18312..bae993296 100644 --- a/pyocd/target/builtin/target_LPC4088FBD144.py +++ b/pyocd/target/builtin/target_LPC4088FBD144.py @@ -64,7 +64,7 @@ class LPC4088(CoreSightTarget): VENDOR = "NXP" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x10000, is_boot_memory=True, blocksize=0x1000, diff --git a/pyocd/target/builtin/target_LPC4330.py b/pyocd/target/builtin/target_LPC4330.py index 625710baa..25eeb2bbd 100644 --- a/pyocd/target/builtin/target_LPC4330.py +++ b/pyocd/target/builtin/target_LPC4330.py @@ -318,7 +318,7 @@ class LPC4330(CoreSightTarget): VENDOR = "NXP" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0x14000000, length=0x4000000, blocksize=0x400, is_boot_memory=True, algo=FLASH_ALGO), diff --git a/pyocd/target/builtin/target_LPC54114J256BD64.py b/pyocd/target/builtin/target_LPC54114J256BD64.py index 9af52492f..c800055bd 100644 --- a/pyocd/target/builtin/target_LPC54114J256BD64.py +++ b/pyocd/target/builtin/target_LPC54114J256BD64.py @@ -59,7 +59,7 @@ class LPC54114(CoreSightTarget): VENDOR = "NXP" - + MEMORY_MAP = MemoryMap( FlashRegion(name='flash', start=0, length=0x40000, is_boot_memory=True, blocksize=0x8000, diff --git a/pyocd/target/builtin/target_LPC54608J512ET180.py b/pyocd/target/builtin/target_LPC54608J512ET180.py index d0a2c779f..c1b5de1fa 100644 --- a/pyocd/target/builtin/target_LPC54608J512ET180.py +++ b/pyocd/target/builtin/target_LPC54608J512ET180.py @@ -55,7 +55,7 @@ class LPC54608(CoreSightTarget): VENDOR = "NXP" - + MEMORY_MAP = MemoryMap( FlashRegion(name='flash', start=0, length=0x80000, is_boot_memory=True, blocksize=0x8000, diff --git a/pyocd/target/builtin/target_LPC824M201JHI33.py b/pyocd/target/builtin/target_LPC824M201JHI33.py index 99a09dffb..19c7ffeb2 100644 --- 
a/pyocd/target/builtin/target_LPC824M201JHI33.py +++ b/pyocd/target/builtin/target_LPC824M201JHI33.py @@ -51,7 +51,7 @@ class LPC824(CoreSightTarget): VENDOR = "NXP" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x8000, is_boot_memory=True, blocksize=1024, diff --git a/pyocd/target/builtin/target_MAX32600.py b/pyocd/target/builtin/target_MAX32600.py index 3c967dd7b..371ab392c 100644 --- a/pyocd/target/builtin/target_MAX32600.py +++ b/pyocd/target/builtin/target_MAX32600.py @@ -57,7 +57,7 @@ class MAX32600(CoreSightTarget): VENDOR = "Maxim" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x40000, blocksize=0x800, is_boot_memory=True, algo=FLASH_ALGO), RamRegion( start=0x20000000, length=0x8000), diff --git a/pyocd/target/builtin/target_MAX32625.py b/pyocd/target/builtin/target_MAX32625.py index 0f43336f9..226c2d5c7 100644 --- a/pyocd/target/builtin/target_MAX32625.py +++ b/pyocd/target/builtin/target_MAX32625.py @@ -60,7 +60,7 @@ class MAX32625(CoreSightTarget): VENDOR = "Maxim" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x80000, blocksize=0x2000, is_boot_memory=True, algo=FLASH_ALGO), RamRegion( start=0x20000000, length=0x28000), diff --git a/pyocd/target/builtin/target_MIMXRT1015xxxxx.py b/pyocd/target/builtin/target_MIMXRT1015xxxxx.py index 23a9068ab..dfbb24341 100644 --- a/pyocd/target/builtin/target_MIMXRT1015xxxxx.py +++ b/pyocd/target/builtin/target_MIMXRT1015xxxxx.py @@ -727,7 +727,7 @@ class MIMXRT1015xxxxx(IMXRT): VENDOR = "NXP" # Note: by default there are 64 KB ITCM, 64 KB DTCM and 128 KB OCRAM available for MIMXRT1015. - # And it also has 256 KB FlexRAM that can be enabled and configured by GPR17, customers can + # And it also has 256 KB FlexRAM that can be enabled and configured by GPR17, customers can # allocate this 256 KB FlexRAM to ITCM/DTCM/OCRAM, but the FlexRAM is not available by default. 
MEMORY_MAP = MemoryMap( RamRegion(name="itcm", start=0x00000000, length=0x10000), # 64 KB diff --git a/pyocd/target/builtin/target_MIMXRT1176xxxxx.py b/pyocd/target/builtin/target_MIMXRT1176xxxxx.py index 68066829b..ec3775f66 100644 --- a/pyocd/target/builtin/target_MIMXRT1176xxxxx.py +++ b/pyocd/target/builtin/target_MIMXRT1176xxxxx.py @@ -21,7 +21,7 @@ from ...core.memory_map import (FlashRegion, RomRegion, RamRegion, MemoryMap) from ...debug.svd.loader import SVDFile from ...coresight.ap import AccessPort, APv1Address -from ...coresight.cortex_m import CortexM +from ...coresight.cortex_m import CortexM LOG = logging.getLogger(__name__) FCFB = 0x42464346 @@ -777,7 +777,7 @@ class MIMXRT1176xxxxx_CM7(CoreSightTarget): VENDOR = "NXP" - + # Note: itcm, dtcm, and ocram share a single 512 KB block of RAM that can be configurably # divided between those regions (this is called FlexRAM). Thus, the memory map regions for # each of these RAMs allocate the maximum possible of 512 KB, but that is the maximum and @@ -857,7 +857,7 @@ def create_cores(self): class MIMXRT1176xxxxx_CM4(CoreSightTarget): VENDOR = "NXP" - + # Note: itcm, dtcm, and ocram share a single 512 KB block of RAM that can be configurably # divided between those regions (this is called FlexRAM). 
Thus, the memory map regions for # each of these RAMs allocate the maximum possible of 512 KB, but that is the maximum and diff --git a/pyocd/target/builtin/target_MPS3_AN522.py b/pyocd/target/builtin/target_MPS3_AN522.py index c8d9dd226..26d1f8a00 100644 --- a/pyocd/target/builtin/target_MPS3_AN522.py +++ b/pyocd/target/builtin/target_MPS3_AN522.py @@ -20,7 +20,7 @@ class AN522(CoreSightTarget): VENDOR = "Arm" - + MEMORY_MAP = MemoryMap( RamRegion( name='itcm', start=0x00000000, length=0x00080000, access='rwx'), RamRegion( name='sram', start=0x30000000, length=0x00100000, access='rwx'), diff --git a/pyocd/target/builtin/target_MPS3_AN540.py b/pyocd/target/builtin/target_MPS3_AN540.py index 3dd7e8c81..bca8eda4d 100644 --- a/pyocd/target/builtin/target_MPS3_AN540.py +++ b/pyocd/target/builtin/target_MPS3_AN540.py @@ -20,7 +20,7 @@ class AN540(CoreSightTarget): VENDOR = "Arm" - + MEMORY_MAP = MemoryMap( RamRegion( name='itcm_ns', start=0x00000000, length=0x00100000, access='rwx'), RamRegion( name='itcm_s', start=0x10000000, length=0x00100000, access='rwxs'), diff --git a/pyocd/target/builtin/target_RP2040.py b/pyocd/target/builtin/target_RP2040.py index 9a2b86802..a27971a9e 100644 --- a/pyocd/target/builtin/target_RP2040.py +++ b/pyocd/target/builtin/target_RP2040.py @@ -85,12 +85,12 @@ def _parity32(value): class RP2040Base(CoreSightTarget): """! @brief Raspberry Pi RP2040. - + This device is very strange in that it as three DPs. The first two DPs each have a single AHB-AP for the two Cortex-M0+ cores. The third DP is a "Rescue DP" that has no APs, but the CDBGPWRUPREQ signal is repurposed as a rescue signal. """ - + class Targetsel: """! 
@brief DP TARGETEL values for each DP.""" CORE_0 = 0x01002927 @@ -98,10 +98,10 @@ class Targetsel: RESCUE_DP = 0xf1002927 VENDOR = "Raspberry Pi" - + MEMORY_MAP = MemoryMap( RomRegion( start=0, length=0x4000, name="bootrom", ), - FlashRegion(start=0x10000000, length=0x1000000, name="xip", + FlashRegion(start=0x10000000, length=0x1000000, name="xip", sector_size=4096, page_size=256, algo=FLASH_ALGO, @@ -127,12 +127,12 @@ def __init__(self, session): def create_init_sequence(self): seq = super().create_init_sequence() - + seq.insert_before('load_svd', ('check_probe', self._check_probe)) \ .insert_before('dp_init', ('select_core0', self._select_core)) return seq - + def _check_probe(self): # Have to import here to avoid a circular import from ...probe.debug_probe import DebugProbe @@ -145,7 +145,7 @@ def _select_core(self): def select_dp(self, targetsel): """! @brief Select the DP with the matching TARGETSEL.""" probe = self.session.probe - + # Have to connect the probe first, or SWCLK will not be enabled. probe.connect(DebugProbe.Protocol.SWD) @@ -157,7 +157,7 @@ def select_dp(self, targetsel): # SWD line reset to activate all DPs. swj.line_reset() swj.idle_cycles(2) - + # Send multi-drop SWD target selection sequence to select the requested DP. 
probe.swd_sequence([ # DP TARGETSEL write @@ -171,10 +171,10 @@ def select_dp(self, targetsel): # - Park = 1 # -> LSB first, that's 0b10011001 or 0x99 (8, 0x99), - + # 5 cycles with SWDIO as input (5,), - + # DP TARGETSEL value # output 32 + 1 cycles (33, targetsel | mask.parity32_high(targetsel)), @@ -186,15 +186,15 @@ def select_dp(self, targetsel): DP_IDR = 0x00 dpidr = probe.read_dp(DP_IDR) LOG.debug("DP IDR after writing TARGETSEL: 0x%08x", dpidr) - + probe.write_dp(0x8, 0x2) # DPBANKSEL=2 to select TARGETID targetid = probe.read_dp(0x4) LOG.debug("DP TARGETID: 0x%08x", targetid) - + probe.write_dp(0x8, 0x3) # DPBANKSEL=3 to select DLPIDR dlpidr = probe.read_dp(0x4) LOG.debug("DP DLPIDR: 0x%08x", dlpidr) - + probe.write_dp(0x8, 0x0) # restore DPBANKSEL=0 class RP2040Core0(RP2040Base): @@ -206,7 +206,7 @@ def __init__(self, session): class RP2040Core1(RP2040Base): """! @brief RP2040 target for core 1.""" - + def __init__(self, session): super().__init__(session) self._core_targetsel = self.Targetsel.CORE_1 diff --git a/pyocd/target/builtin/target_RTL8195AM.py b/pyocd/target/builtin/target_RTL8195AM.py index aefab94f9..012356c74 100644 --- a/pyocd/target/builtin/target_RTL8195AM.py +++ b/pyocd/target/builtin/target_RTL8195AM.py @@ -21,7 +21,7 @@ class RTL8195AM(CoreSightTarget): VENDOR = "Realtek Semiconductor" - + MEMORY_MAP = MemoryMap( RamRegion( start=0x00000000, length=0x400000), RamRegion( start=0x10000000, length=0x80000), diff --git a/pyocd/target/builtin/target_STM32F051T8.py b/pyocd/target/builtin/target_STM32F051T8.py index 24748cd36..a6445a1a0 100644 --- a/pyocd/target/builtin/target_STM32F051T8.py +++ b/pyocd/target/builtin/target_STM32F051T8.py @@ -69,7 +69,7 @@ class STM32F051(CoreSightTarget): VENDOR = "STMicroelectronics" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0x08000000, length=0x10000, blocksize=0x400, is_boot_memory=True, algo=FLASH_ALGO), diff --git a/pyocd/target/builtin/target_STM32F103RC.py 
b/pyocd/target/builtin/target_STM32F103RC.py index fa00adb01..868155c2f 100644 --- a/pyocd/target/builtin/target_STM32F103RC.py +++ b/pyocd/target/builtin/target_STM32F103RC.py @@ -52,7 +52,7 @@ class STM32F103RC(CoreSightTarget): VENDOR = "STMicroelectronics" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0x08000000, length=0x80000, blocksize=0x800, is_boot_memory=True, algo=FLASH_ALGO), diff --git a/pyocd/target/builtin/target_STM32F439xx.py b/pyocd/target/builtin/target_STM32F439xx.py index e245253c4..7af604a0e 100644 --- a/pyocd/target/builtin/target_STM32F439xx.py +++ b/pyocd/target/builtin/target_STM32F439xx.py @@ -67,10 +67,10 @@ class DBGMCU: class STM32F439xG(CoreSightTarget): VENDOR = "STMicroelectronics" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0x08000000, length=0x10000, sector_size=0x4000, - page_size=0x1000, + page_size=0x1000, is_boot_memory=True, erase_all_weight=CHIP_ERASE_WEIGHT, algo=FLASH_ALGO), @@ -97,7 +97,7 @@ def post_connect_hook(self): class STM32F439xI(CoreSightTarget): VENDOR = "STMicroelectronics" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0x08000000, length=0x10000, sector_size=0x4000, page_size=0x1000, diff --git a/pyocd/target/builtin/target_STM32L031x6.py b/pyocd/target/builtin/target_STM32L031x6.py index fed49eff8..955256486 100644 --- a/pyocd/target/builtin/target_STM32L031x6.py +++ b/pyocd/target/builtin/target_STM32L031x6.py @@ -80,7 +80,7 @@ class DBGMCU: class STM32L031x6(CoreSightTarget): VENDOR = "STMicroelectronics" - + MEMORY_MAP = MemoryMap( FlashRegion(name='Flash', start=0x08000000, length=0x8000, blocksize=0x80, is_boot_memory=True, algo=FLASH_ALGO), RamRegion(name='RAM', start=0x20000000, length=0x2000), diff --git a/pyocd/target/builtin/target_STM32L432xx.py b/pyocd/target/builtin/target_STM32L432xx.py index a39770009..57f3d9d80 100644 --- a/pyocd/target/builtin/target_STM32L432xx.py +++ b/pyocd/target/builtin/target_STM32L432xx.py @@ -78,7 +78,7 @@ class DBGMCU: } class STM32L432xC(CoreSightTarget): - + 
VENDOR = "STMicroelectronics" MEMORY_MAP = MemoryMap( diff --git a/pyocd/target/builtin/target_STM32L475xx.py b/pyocd/target/builtin/target_STM32L475xx.py index 7279572ad..99d54aa49 100644 --- a/pyocd/target/builtin/target_STM32L475xx.py +++ b/pyocd/target/builtin/target_STM32L475xx.py @@ -66,7 +66,7 @@ class DBGMCU: } class STM32L475xx(CoreSightTarget): - + VENDOR = "STMicroelectronics" def post_connect_hook(self): diff --git a/pyocd/target/builtin/target_lpc800.py b/pyocd/target/builtin/target_lpc800.py index bf45462df..48e2f38aa 100644 --- a/pyocd/target/builtin/target_lpc800.py +++ b/pyocd/target/builtin/target_lpc800.py @@ -51,7 +51,7 @@ class LPC800(CoreSightTarget): VENDOR = "NXP" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x4000, blocksize=0x400, is_boot_memory=True, algo=FLASH_ALGO), RamRegion( start=0x10000000, length=0x1000) diff --git a/pyocd/target/builtin/target_musca_a1.py b/pyocd/target/builtin/target_musca_a1.py index 07d5fc6f7..dac8df906 100644 --- a/pyocd/target/builtin/target_musca_a1.py +++ b/pyocd/target/builtin/target_musca_a1.py @@ -68,7 +68,7 @@ 0xfe31f7ff, 0x0018f89d, 0xf89d7020, 0x70600019, 0x001af89d, 0xf89d70a0, 0x70e0001b, 0xb0082000, 0x0000bd70, 0x00000000, 0x00000000 ], - + # Function addresses 'pc_init': 0x20000021, 'pc_unInit': 0x20000043, @@ -96,7 +96,7 @@ class MuscaA1(CoreSightTarget): VENDOR = "Arm" - + MEMORY_MAP = MemoryMap( # Due to an errata, only the first 256 kB of QSPI is memory mapped. The remainder # of the 8 MB region can be read and written via register accesses only. 
diff --git a/pyocd/target/builtin/target_musca_b1.py b/pyocd/target/builtin/target_musca_b1.py index af0767e68..947a344cf 100644 --- a/pyocd/target/builtin/target_musca_b1.py +++ b/pyocd/target/builtin/target_musca_b1.py @@ -143,7 +143,7 @@ 0xf811e003, 0xf8003b01, 0x1e523b01, 0x4770d2f9, 0x52800000, 0x0003ffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00800000, 0x00000000, 0x00000000 ], - + # Function addresses 'pc_init': 0x20000021, 'pc_unInit': 0x20000071, @@ -252,7 +252,7 @@ class MuscaB1(CoreSightTarget): VENDOR = "Arm" - + MEMORY_MAP = MemoryMap( FlashRegion(name='neflash', start=0x0A000000, length=0x00200000, access='rx', blocksize=0x4000, @@ -295,17 +295,17 @@ def __init__(self, session): def create_init_sequence(self): seq = super(MuscaB1, self).create_init_sequence() - - seq.insert_before('halt_on_connect', + + seq.insert_before('halt_on_connect', ('enable_sysresetreq', self._enable_sysresetreq), ) - + return seq - + def _enable_sysresetreq(self): LOG.info("Enabling SYSRSTREQ0_EN and SYSRSTREQ1_EN") reset_mask = self.read32(RESET_MASK) reset_mask |= RESET_MASK_SYSRSTREQ0_EN | RESET_MASK_SYSRSTREQ1_EN self.write32(RESET_MASK, reset_mask) - - + + diff --git a/pyocd/target/builtin/target_nRF51822_xxAA.py b/pyocd/target/builtin/target_nRF51822_xxAA.py index 21134601f..d45243cec 100644 --- a/pyocd/target/builtin/target_nRF51822_xxAA.py +++ b/pyocd/target/builtin/target_nRF51822_xxAA.py @@ -53,7 +53,7 @@ class NRF51(CoreSightTarget): VENDOR = "Nordic Semiconductor" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x40000, blocksize=0x400, is_boot_memory=True, algo=FLASH_ALGO), diff --git a/pyocd/target/builtin/target_ncs36510.py b/pyocd/target/builtin/target_ncs36510.py index 41f2e5623..3c2997b12 100644 --- a/pyocd/target/builtin/target_ncs36510.py +++ b/pyocd/target/builtin/target_ncs36510.py @@ -82,7 +82,7 @@ class NCS36510(CoreSightTarget): VENDOR = "ONSemiconductor" - + MEMORY_MAP = MemoryMap( FlashRegion( 
start=0x2000, length=0x50000, blocksize=0x800, is_boot_memory=True, algo=FLASH_ALGO), diff --git a/pyocd/target/builtin/target_w7500.py b/pyocd/target/builtin/target_w7500.py index 06cd59ce3..7cd99e639 100644 --- a/pyocd/target/builtin/target_w7500.py +++ b/pyocd/target/builtin/target_w7500.py @@ -41,7 +41,7 @@ class W7500(CoreSightTarget): VENDOR = "WIZnet" - + MEMORY_MAP = MemoryMap( FlashRegion( start=0x00000000, length=0x20000, blocksize=0x100, is_boot_memory=True, algo=FLASH_ALGO), diff --git a/pyocd/target/family/flash_kinetis.py b/pyocd/target/family/flash_kinetis.py index 103df151d..51b9ca986 100644 --- a/pyocd/target/family/flash_kinetis.py +++ b/pyocd/target/family/flash_kinetis.py @@ -39,10 +39,10 @@ class Flash_Kinetis(Flash): def override_security_bits(self, address, data): """! @brief Check security bytes. - + Override Flash Configuration Field bytes at address 0x400-0x40f to ensure that flash security won't be enabled. If flash security is enabled, then the chip is inaccessible via SWD. - + FCF bytes: [0x0-0x7]=backdoor key [0x8-0xb]=flash protection bytes @@ -54,14 +54,14 @@ def override_security_bits(self, address, data): [0xd]=FOPT [0xe]=EEPROM protection bytes (FlexNVM devices only) [0xf]=data flash protection bytes (FlexNVM devices only) - + This function enforces that: - 0x8-0xb==0xff - 0xe-0xf==0xff - FSEC=0xfe - + FOPT can be set to any value except 0x00. 
- + @retval Data with modified security bits """ # Check if the data passed in contains the security bits diff --git a/pyocd/target/family/target_lpc5500.py b/pyocd/target/family/target_lpc5500.py index 91511bc40..e2edeae82 100644 --- a/pyocd/target/family/target_lpc5500.py +++ b/pyocd/target/family/target_lpc5500.py @@ -80,7 +80,7 @@ class LPC5500Family(CoreSightTarget): def create_init_sequence(self): seq = super(LPC5500Family, self).create_init_sequence() - + seq.wrap_task('discovery', self.modify_discovery) return seq @@ -96,25 +96,25 @@ def modify_discovery(self, seq): ) \ .append(('restore_max_invalid_aps', self.restore_max_invalid_aps)) return seq - + def set_max_invalid_aps(self): # Save current option and make sure it is set to at least 3. self._saved_max_invalid_aps = self.session.options.get('adi.v5.max_invalid_ap_count') if self._saved_max_invalid_aps < self._MIN_INVALID_APS: self.session.options.set('adi.v5.max_invalid_ap_count', self._MIN_INVALID_APS) - + def restore_max_invalid_aps(self): # Only restore if we changed it. if self._saved_max_invalid_aps < self._MIN_INVALID_APS: self.session.options.set('adi.v5.max_invalid_ap_count', self._saved_max_invalid_aps) - + def _modify_ap1(self, seq): # If AP#1 exists we need to adjust it before we can read the ROM. if seq.has_task('init_ap.1'): seq.insert_before('init_ap.1', ('set_ap1_nonsec', self._set_ap1_nonsec), ) - + return seq def check_locked_state(self, seq): @@ -122,12 +122,12 @@ def check_locked_state(self, seq): # The device is not locked if AP#0 was found and is enabled. if (0 in self.aps) and self.aps[0].is_enabled: return - + # The debugger mailbox should always be present. if not DM_AP in self.aps: LOG.error("cannot request debug unlock; no debugger mailbox AP was found") return - + # Perform the unlock procedure using the debugger mailbox. 
self.unlock(self.aps[DM_AP]) @@ -160,7 +160,7 @@ def create_lpc55xx_cores(self): self.add_core(core0) except exceptions.Error as err: LOG.error("Error creating core 0: %s", err, exc_info=self.session.log_tracebacks) - + # Create core 1 if the AP is present. It uses the standard Cortex-M core class for v8-M. if (1 in self.aps) and (self.aps[0].is_enabled): try: @@ -171,22 +171,22 @@ def create_lpc55xx_cores(self): self.add_core(core1) except exceptions.Error as err: LOG.error("Error creating core 1: %s", err, exc_info=self.session.log_tracebacks) - + def _enable_traceclk(self): # Don't make it worse if no APs were found. if (0 not in self.aps) or (not self.aps[0].is_enabled): return - + SYSCON_NS_Base_Addr = 0x40000000 IOCON_NS_Base_Addr = 0x40001000 TRACECLKSEL_Addr = SYSCON_NS_Base_Addr + 0x268 TRACECLKDIV_Addr = SYSCON_NS_Base_Addr + 0x308 AHBCLKCTRLSET0_Addr = IOCON_NS_Base_Addr + 0x220 - + clksel = self.read32(TRACECLKSEL_Addr) # Read current TRACECLKSEL value if clksel > 2: self.write32(TRACECLKSEL_Addr, 0x0) # Select Trace divided clock - + clkdiv = self.read32(TRACECLKDIV_Addr) & 0xFF # Read current TRACECLKDIV value, preserve divider but clear rest to enable self.write32(TRACECLKDIV_Addr, clkdiv) @@ -195,7 +195,7 @@ def _enable_traceclk(self): def trace_start(self): # Configure PIO0_10: FUNC - 6, MODE - 0, SLEW - 1, INVERT - 0, DIGMODE - 0, OD - 0 self.write32(0x40001028, 0x00000046) - + self.call_delegate('trace_start', target=self, mode=0) # On a reset when ITM is enabled, TRACECLKDIV/TRACECLKSEL will be reset @@ -213,20 +213,20 @@ def unlock(self, dm_ap): # Set RESYNCH_REQ (0x1) and CHIP_RESET_REQ (0x20) in DM.CSW. dm_ap.write_reg(addr=DM_CSW, data=(DM_CSW_RESYNCH_REQ_MASK | DM_CSW_CHIP_RESET_REQ_MASK)) dm_ap.dp.flush() - + # Wait for reset to complete. sleep(0.1) - + # Read CSW to verify the reset happened and the register is cleared. 
retval = dm_ap.read_reg(addr=DM_CSW) if retval != 0: LOG.error("debugger mailbox failed to reset the device") return - + # Write debug unlock request. dm_ap.write_reg(addr=DM_REQUEST, data=DM_START_DBG_SESSION) dm_ap.dp.flush() - + # Read reply from boot ROM. The return status is the low half-word. retval = dm_ap.read_reg(addr=DM_RETURN) & 0xffff if retval != 0: @@ -242,7 +242,7 @@ def reset_and_halt(self, reset_type=None): catch_mode = 0 delegateResult = self.call_delegate('set_reset_catch', core=self, reset_type=reset_type) - + # Save CortexM.DEMCR demcr = self.read_memory(CortexM.DEMCR) @@ -250,10 +250,10 @@ def reset_and_halt(self, reset_type=None): if not delegateResult: # This sequence is copied from the NXP LPC55S69_DFP debug sequence. reset_vector = 0xFFFFFFFF - + # Clear reset vector catch. self.write32(CortexM.DEMCR, demcr & ~CortexM.DEMCR_VC_CORERESET) - + # If the processor is in Secure state, we have to access the flash controller # through the secure alias. if self.get_security_state() == Target.SecurityState.SECURE: @@ -297,7 +297,7 @@ def reset_and_halt(self, reset_type=None): if (self.read32(base + FLASH_INT_STATUS) & 0x00000004) != 0: break sleep(0.01) - + # Check for error reading flash word. if (self.read32(base + FLASH_INT_STATUS) & 0xB) == 0: # Read the reset vector address. diff --git a/pyocd/target/pack/cmsis_pack.py b/pyocd/target/pack/cmsis_pack.py index 734a9e12d..e5ddd6f50 100644 --- a/pyocd/target/pack/cmsis_pack.py +++ b/pyocd/target/pack/cmsis_pack.py @@ -56,16 +56,16 @@ def _get_part_number_from_element(element: Element) -> str: class CmsisPack(object): """! @brief Wraps a CMSIS Device Family Pack. - + This class provides a top-level interface for extracting device information from CMSIS-Packs. After an instance is constructed, a list of the devices described within the pack is available from the `devices` property. Each item in the list is a CmsisPackDevice object. - + The XML element hierarchy that defines devices is as follows. 
``` family [-> subFamily] -> device [-> variant] ``` - + Internally, this class is responsible for collecting the device-related XML elements from each of the levels of the hierarchy described above. It determines which elements belong to each defined device and passes those to CmsisPackDevice. It is then CmsisPackDevice that performs @@ -73,14 +73,14 @@ class CmsisPack(object): """ def __init__(self, file_or_path): """! @brief Constructor. - + Opens the CMSIS-Pack and builds instances of CmsisPackDevice for all the devices and variants defined within the pack. - + @param self @param file_or_path The .pack file to open. May be a string that is the path to the pack, or may be a ZipFile, or a file-like object that is already opened. - + @exception MalformedCmsisPackError The pack is not a zip file, or the .pdsc file is missing from within the pack. """ @@ -91,7 +91,7 @@ def __init__(self, file_or_path): self._pack_file = zipfile.ZipFile(file_or_path, 'r') except zipfile.BadZipFile as err: raise MalformedCmsisPackError(f"Failed to open CMSIS-Pack '{file_or_path}': {err}") from err - + # Find the .pdsc file. for name in self._pack_file.namelist(): if name.endswith('.pdsc'): @@ -99,28 +99,28 @@ def __init__(self, file_or_path): break else: raise MalformedCmsisPackError(f"CMSIS-Pack '{file_or_path}' is missing a .pdsc file") - + with self._pack_file.open(self._pdscName) as pdscFile: self._pdsc = CmsisPackDescription(self, pdscFile) - + @property def filename(self): """! @brief Accessor for the filename or path of the .pack file.""" return self._pack_file.filename - + @property def pdsc(self): """! @brief Accessor for the CmsisPackDescription instance for the pack's PDSC file.""" return self._pdsc - + @property def devices(self): """! @brief A list of CmsisPackDevice objects for every part number defined in the pack.""" return self._pdsc.devices - + def get_file(self, filename): """! @brief Return file-like object for a file within the pack. 
- + @param self @param filename Relative path within the pack. May use forward or back slashes. @return A BytesIO object is returned that contains all of the data from the file @@ -133,37 +133,37 @@ def get_file(self, filename): class CmsisPackDescription(object): def __init__(self, pack, pdsc_file): """! @brief Constructor. - + @param self This object. @param pack Reference to the CmsisPack instance. @param pdsc_file A file-like object for the .pdsc contained in _pack_. """ self._pack = pack - + # Convert PDSC into an ElementTree. self._pdsc = ElementTree(file=pdsc_file) self._state_stack = [] self._devices = [] - + # Remember if we have already warned about overlapping memory regions # so we can limit these to one warning per DFP self._warned_overlapping_memory_regions = False - + # Extract devices. for family in self._pdsc.iter('family'): self._parse_devices(family) - + @property def pack(self): """! @brief Reference to the containing CmsisPack object.""" return self._pack - + @property def devices(self): """! @brief A list of CmsisPackDevice objects for every part number defined in the pack.""" return self._devices - + def _parse_devices(self, parent): # Extract device description elements we care about. newState = _DeviceInfo(element=parent) @@ -181,7 +181,7 @@ def _parse_devices(self, parent): # Push the new device description state onto the stack. self._state_stack.append(newState) - + # Create a device object if this element defines one. if parent.tag in ('device', 'variant'): # Build device info from elements applying to this device. @@ -191,14 +191,14 @@ def _parse_devices(self, parent): algos=self._extract_algos(), debugs=self._extract_debugs() ) - + dev = CmsisPackDevice(self.pack, deviceInfo) self._devices.append(dev) # Recursively process subelements. 
for elem in children: self._parse_devices(elem) - + self._state_stack.pop() def _extract_families(self): @@ -248,7 +248,7 @@ def filter(map, elem): pname = elem.attrib.get('Pname', None) info = (name, pname) - + if info in map: del map[info] for k in list(map.keys()): @@ -273,7 +273,7 @@ def filter(map, elem): del map[k] map[info] = elem - + return self._extract_items('memories', filter) def _extract_algos(self): @@ -282,24 +282,24 @@ def filter(map, elem): if ('style' in elem.attrib) and (elem.attrib['style'] != 'Keil'): LOG.debug("skipping non-Keil flash algorithm") return None, None - + # Both start and size are required. start = int(elem.attrib['start'], base=0) size = int(elem.attrib['size'], base=0) memrange = (start, size) - + # An algo with the same range as an existing algo will override the previous. map[memrange] = elem - + return self._extract_items('algos', filter) - + def _extract_debugs(self): def filter(map, elem): if 'Pname' in elem.attrib: name = elem.attrib['Pname'] unit = elem.attrib.get('Punit', 0) name += str(unit) - + if '*' in map: map.clear() map[name] = elem @@ -308,15 +308,15 @@ def filter(map, elem): # all processors. map.clear() map['*'] = elem - + return self._extract_items('debugs', filter) def _get_bool_attribute(elem, name, default=False): """! @brief Extract an XML attribute with a boolean value. - + Supports "true"/"false" or "1"/"0" as the attribute values. Leading and trailing whitespace is stripped, and the comparison is case-insensitive. - + @param elem ElementTree.Element object. @param name String for the attribute name. @param default An optional default value if the attribute is missing. If not provided, @@ -335,10 +335,10 @@ def _get_bool_attribute(elem, name, default=False): class CmsisPackDevice(object): """! @brief Wraps a device defined in a CMSIS Device Family Pack. - + Responsible for converting the XML elements that describe the device into objects usable by pyOCD. This includes the memory map and flash algorithms. 
- + An instance of this class can represent either a `` or `` XML element from the PDSC. """ @@ -356,10 +356,10 @@ def __init__(self, pack, device_info): self._saw_startup = False self._default_ram = None self._memory_map = None - + def _build_memory_regions(self): """! @brief Creates memory region instances for the device. - + For each `` element in the device info, a memory region object is created and added to the `_regions` attribute. IROM or non-writable memories are created as RomRegions by this method. They will be converted to FlashRegions by _build_flash_regions(). @@ -370,7 +370,7 @@ def _build_memory_regions(self): if 'name' in elem.attrib: name = elem.attrib['name'] access = elem.attrib['access'] - + if ('p' in access): type = MemoryType.DEVICE elif ('w' in access): @@ -379,7 +379,7 @@ def _build_memory_regions(self): type = MemoryType.ROM elif 'id' in elem.attrib: name = elem.attrib['id'] - + if 'RAM' in name: access = 'rwx' type = MemoryType.RAM @@ -388,11 +388,11 @@ def _build_memory_regions(self): type = MemoryType.ROM else: continue - + # Both start and size are required attributes. start = int(elem.attrib['start'], base=0) size = int(elem.attrib['size'], base=0) - + isDefault = _get_bool_attribute(elem, 'default') isStartup = _get_bool_attribute(elem, 'startup') if isStartup: @@ -408,11 +408,11 @@ def _build_memory_regions(self): 'is_testable': isDefault, 'alias': elem.attrib.get('alias', None), } - + # Create the memory region and add to map. region = MEMORY_TYPE_CLASS_MAP[type](**attrs) self._regions.append(region) - + # Record the first default ram for use in flash algos. if self._default_ram is None and type == MemoryType.RAM and isDefault: self._default_ram = region @@ -420,7 +420,7 @@ def _build_memory_regions(self): # Ignore errors. 
LOG.debug("ignoring error parsing memories for CMSIS-Pack devices %s: %s", self.part_number, str(err)) - + def _get_containing_region(self, addr: int) -> Optional[MemoryRegion]: """@brief Return the memory region containing the given address.""" for region in self._regions: @@ -430,7 +430,7 @@ def _get_containing_region(self, addr: int) -> Optional[MemoryRegion]: def _build_flash_regions(self): """! @brief Converts ROM memory regions to flash regions. - + Each ROM region in the `_regions` attribute is converted to a flash region if a matching flash algo can be found. If the flash has multiple sector sizes, then separate flash regions will be created for each sector size range. The flash algo is converted to a @@ -452,7 +452,7 @@ def _build_flash_regions(self): # We're only interested in ROM regions here. if region.type != MemoryType.ROM: continue - + # Look for matching flash algo. algo_element = self._find_matching_algo(region) if algo_element is None: @@ -465,7 +465,7 @@ def _build_flash_regions(self): LOG.warning("Failed to convert ROM region to flash region because flash algorithm '%s' could not be " " found (%s)", algo_element.attrib['name'], self.part_number) continue - + # The ROM region will be replaced with one or more flash regions. regions_to_delete.append(region) @@ -473,14 +473,14 @@ def _build_flash_regions(self): current_session = Session.get_current() if current_session and current_session.options.get("debug.log_flm_info"): LOG.debug("Flash algo info: %s", packAlgo.flash_info) - + # Choose the page size. The check for <=32 is to handle some flash algos with incorrect # page sizes that are too small and probably represent the phrase size. page_size = packAlgo.page_size if page_size <= 32: page_size = min(s[1] for s in packAlgo.sector_sizes) - - # Select the RAM to use for the algo. + + # Select the RAM to use for the algo. try: # See if an explicit RAM range was specified for the algo. 
ram_start = int(algo_element.attrib['RAMstart'], base=0) @@ -499,7 +499,7 @@ def _build_flash_regions(self): # start of the provided region, this won't be a problem unless the DFP is # actually erroneous. ram_size = 128 * 1024 - + ram_for_algo = RamRegion(start=ram_start, length=ram_size) except KeyError: # No RAM addresses were given, so go with the RAM marked default. @@ -512,13 +512,13 @@ def _build_flash_regions(self): # Create a separate flash region for each sector size range. regions_to_add += list(self._split_flash_region_by_sector_size( region, page_size, algo, packAlgo)) # type: ignore - + # Now update the regions list. for region in regions_to_delete: self._regions.remove(region) for region in regions_to_add: self._regions.append(region) - + def _split_flash_region_by_sector_size(self, region: MemoryRegion, page_size: int, @@ -528,7 +528,7 @@ def _split_flash_region_by_sector_size(self, # The sector_sizes attribute is a list of bi-tuples of (start-address, sector-size), sorted by start address. for j, (offset, sector_size) in enumerate(pack_algo.sector_sizes): start = region.start + offset - + # Determine the end address of the this sector range. For the last range, the end # is just the end of the entire region. Otherwise it's the start of the next # range - 1. @@ -536,11 +536,11 @@ def _split_flash_region_by_sector_size(self, end = region.end else: end = region.start + pack_algo.sector_sizes[j + 1][0] - 1 - + # Skip wrong start and end addresses if end < start: continue - + # Limit page size. if page_size > sector_size: region_page_size = sector_size @@ -549,19 +549,19 @@ def _split_flash_region_by_sector_size(self, region_page_size) else: region_page_size = page_size - + # If we don't have a boot memory yet, pick the first flash. if not self._saw_startup: is_boot = True self._saw_startup = True else: is_boot = region.is_boot_memory - + # Construct region name. If there is more than one sector size, we need to make the region's name unique. 
region_name = region.name if len(pack_algo.sector_sizes) > 1: region_name += f"_{sector_size:#x}" - + # Construct the flash region. yield FlashRegion(name=region_name, access=region.access, @@ -584,12 +584,12 @@ def _find_matching_algo(self, region): algoStart = int(algo.attrib['start'], base=0) algoSize = int(algo.attrib['size'], base=0) algoEnd = algoStart + algoSize - 1 - + # Check if the region indicated by start..size fits within the algo. if (algoStart <= region.start <= algoEnd) and (algoStart <= region.end <= algoEnd): return algo return None - + def _load_flash_algo(self, filename: str) -> Optional[PackFlashAlgo]: """! @brief Return the PackFlashAlgo instance for the given flash algo filename.""" if self.pack is not None: @@ -605,26 +605,26 @@ def _load_flash_algo(self, filename: str) -> Optional[PackFlashAlgo]: def pack(self): """! @brief The CmsisPack object that defines this device.""" return self._pack - + @property def part_number(self): """! @brief Part number for this device. - + This value comes from either the `Dname` or `Dvariant` attribute, depending on whether the device was created from a `` or `` element. """ return self._part - + @property def vendor(self): """! @brief Vendor or manufacturer name.""" return self._info.families[0].split(':')[0] - + @property def families(self): """! @brief List of families the device belongs to, ordered most generic to least.""" return [f for f in self._info.families[1:]] - + @property def memory_map(self): """! @brief MemoryMap object.""" @@ -632,15 +632,15 @@ def memory_map(self): if self._memory_map is None: self._build_memory_regions() self._build_flash_regions() - + # Warn if there was no boot memory. if not self._saw_startup: LOG.warning("CMSIS-Pack device %s has no identifiable boot memory", self.part_number) - + self._memory_map = MemoryMap(self._regions) - + return self._memory_map - + @property def svd(self): """! @brief File-like object for the device's SVD file. 
@@ -651,7 +651,7 @@ def svd(self): return self._pack.get_file(svdPath) except (KeyError, IndexError): return None - + @property def default_reset_type(self): """! @brief One of the Target.ResetType enums. @@ -669,10 +669,10 @@ def default_reset_type(self): return Target.ResetType.SW except (KeyError, IndexError): return Target.ResetType.SW - + def __repr__(self): return "<%s@%x %s>" % (self.__class__.__name__, id(self), self.part_number) - - - + + + diff --git a/pyocd/target/pack/flash_algo.py b/pyocd/target/pack/flash_algo.py index 84d1eb7ca..656a0b752 100644 --- a/pyocd/target/pack/flash_algo.py +++ b/pyocd/target/pack/flash_algo.py @@ -41,7 +41,7 @@ class PackFlashAlgo(object): This class is intended to provide easy access to the information provided by a flash algorithm, such as symbols and the flash algorithm itself. - + @sa PackFlashInfo """ @@ -57,7 +57,7 @@ class PackFlashAlgo(object): "EraseChip", "Verify", } - + SECTIONS_TO_FIND = ( ("PrgCode", "SHT_PROGBITS"), ("PrgData", "SHT_PROGBITS"), @@ -106,18 +106,18 @@ def __init__(self, data): def get_pyocd_flash_algo(self, blocksize, ram_region): """! @brief Return a dictionary representing a pyOCD flash algorithm, or None. - + The most interesting operation this method performs is dynamically allocating memory for the flash algo from a given RAM region. Note that the .data and .bss sections are concatenated with .text. That's why there isn't a specific allocation for those sections. - + Double buffering is supported as long as there is enough RAM. - + Memory layout: ``` [stack] [code] [buf1] [buf2] ``` - + @param self @param blocksize The size to use for page buffers, normally the erase block size. @param ram_region A RamRegion object where the flash algo will be allocated. 
diff --git a/pyocd/target/pack/pack_target.py b/pyocd/target/pack/pack_target.py index 6e299f6f1..c1ac9a4f0 100644 --- a/pyocd/target/pack/pack_target.py +++ b/pyocd/target/pack/pack_target.py @@ -34,7 +34,7 @@ class ManagedPacks(object): """! @brief Namespace for managed CMSIS-Pack utilities. - + By managed, we mean managed by the cmsis-pack-manager package. All the methods on this class apply only to those packs managed by cmsis-pack-manager, not any targets from packs specified by the user. @@ -52,7 +52,7 @@ def get_installed_packs(cache=None): for pack in cache.packs_for_devices(cache.index.values()): # Generate full path to the .pack file. pack_path = os.path.join(cache.data_path, pack.get_pack_name()) - + # If the .pack file exists, the pack is installed. if os.path.isfile(pack_path): results.append(pack) diff --git a/pyocd/tools/gdb_server.py b/pyocd/tools/gdb_server.py index 497f8f2dd..f29c71905 100644 --- a/pyocd/tools/gdb_server.py +++ b/pyocd/tools/gdb_server.py @@ -257,7 +257,7 @@ def run(self, args=None): if not self.args.no_deprecation_warning: LOG.warning("pyocd-gdbserver is deprecated; please use the new combined pyocd tool.") - + self.process_commands(self.args.commands) gdb = None @@ -271,7 +271,7 @@ def run(self, args=None): # Build dict of session options. sessionOptions = convert_session_options(self.args.option) sessionOptions.update(self.gdb_server_settings) - + session = ConnectHelper.session_with_chosen_probe( config_file=self.args.config, no_config=self.args.no_config, @@ -290,7 +290,7 @@ def run(self, args=None): for core_number, core in session.board.target.cores.items(): if isinstance(session.board.target.cores[core_number], GenericMemAPTarget): continue - + gdb = GDBServer(session, core=core_number) # Only subscribe to the server for the first core, so echo messages aren't printed # multiple times. 
diff --git a/pyocd/tools/lists.py b/pyocd/tools/lists.py index 6c82028fd..9e851f729 100644 --- a/pyocd/tools/lists.py +++ b/pyocd/tools/lists.py @@ -31,7 +31,7 @@ class ListGenerator(object): @staticmethod def list_probes(): """! @brief Generate dictionary with info about the connected debug probes. - + Output version history: - 1.0, initial version """ @@ -71,7 +71,7 @@ def list_probes(): @staticmethod def list_boards(name_filter=None): """! @brief Generate dictionary with info about supported boards. - + Output version history: - 1.0, initial version - 1.1, added is_target_builtin and is_target_supported keys @@ -112,7 +112,7 @@ def list_boards(name_filter=None): @staticmethod def list_targets(name_filter=None, vendor_filter=None, source_filter=None): """! @brief Generate dictionary with info about all supported targets. - + Output version history: - 1.0, initial version - 1.1, added part_families @@ -136,19 +136,19 @@ def list_targets(name_filter=None, vendor_filter=None, source_filter=None): # Filter by name. if name_filter and name_filter not in name.lower(): continue - + s = Session(None) # Create empty session t = TARGET[name](s) - + # Filter by vendor. if vendor_filter and vendor_filter not in t.vendor.lower(): continue - + # Filter by source. source = 'pack' if hasattr(t, '_pack_device') else 'builtin' if source_filter and source_filter != source: continue - + d = { 'name' : name, 'vendor' : t.vendor, @@ -161,7 +161,7 @@ def list_targets(name_filter=None, vendor_filter=None, source_filter=None): if isinstance(svdPath, str) and os.path.exists(svdPath): d['svd_path'] = svdPath targets.append(d) - + if not source_filter or source_filter == 'pack': # Add targets from cmsis-pack-manager cache. for dev in pack_target.ManagedPacks.get_installed_targets(): @@ -183,11 +183,11 @@ def list_targets(name_filter=None, vendor_filter=None, source_filter=None): pass return obj - + @staticmethod def list_plugins(): """! 
@brief Generate dictionary with lists of available plugins. - + Output version history: - 1.0, initial version with debug probe and RTOS plugins """ @@ -204,7 +204,7 @@ def list_plugins(): 'status': 0, 'plugins': plugin_groups_list, } - + # Add plugins info for group_name in plugin_groups: plugin_list = [] @@ -212,7 +212,7 @@ def list_plugins(): 'plugin_type': group_name, 'plugins': plugin_list, } - + for entry_point in pkg_resources.iter_entry_points(group_name): klass = entry_point.load() plugin = klass() @@ -224,13 +224,13 @@ def list_plugins(): } plugin_list.append(info) plugin_groups_list.append(group_info) - + return obj - + @staticmethod def list_features(): """! @brief Generate dictionary with info about supported features and options. - + Output version history: - 1.1, added 'plugins' feature - 1.0, initial version @@ -249,11 +249,11 @@ def list_features(): ], 'options' : options_list, } - + # Add plugins plugins = ListGenerator.list_plugins() plugins_list.extend(plugins['plugins']) - + # Add options for option_name in options.OPTIONS_INFO.keys(): info = options.OPTIONS_INFO[option_name] @@ -270,5 +270,5 @@ def list_features(): types_list = [info.type.__name__] option_dict['type'] = types_list options_list.append(option_dict) - + return obj diff --git a/pyocd/trace/events.py b/pyocd/trace/events.py index 7c009fe2c..419047624 100644 --- a/pyocd/trace/events.py +++ b/pyocd/trace/events.py @@ -19,15 +19,15 @@ class TraceEvent(object): def __init__(self, desc="", ts=0): self._desc = desc self._timestamp = ts - + @property def timestamp(self): return self._timestamp - + @timestamp.setter def timestamp(self, ts): self._timestamp = ts - + def __str__(self): return "[{}] {}".format(self._timestamp, self._desc) @@ -44,11 +44,11 @@ class TraceTimestamp(TraceEvent): def __init__(self, tc, ts=0): super(TraceTimestamp, self).__init__("timestamp", ts) self._tc = 0 - + @property def tc(self): return self._tc - + def __str__(self): return "[{}] local timestamp TC={:#x} 
{}".format(self._timestamp, self.tc, self.timestamp) @@ -59,19 +59,19 @@ def __init__(self, port, data, width, ts=0): self._port = port self._data = data self._width = width - + @property def port(self): return self._port - + @property def data(self): return self._data - + @property def width(self): return self._width - + def __str__(self): width = self.width if width == 1: @@ -94,11 +94,11 @@ class TraceEventCounter(TraceEvent): def __init__(self, counterMask, ts=0): super(TraceEventCounter, self).__init__("exception", ts) self._mask = counterMask - + @property def counter_mask(self): return self._mask - + def _get_event_desc(self, evt): msg = "" if evt & TraceEventCounter.CYC_MASK: @@ -114,7 +114,7 @@ def _get_event_desc(self, evt): if evt & TraceEventCounter.CPI_MASK: msg += " CPI" return msg - + def __str__(self): return "[{}] DWT: Event:{}".format(self.timestamp, self._get_event_desc(self.counter_mask)) @@ -129,25 +129,25 @@ class TraceExceptionEvent(TraceEvent): EXITED : "Exited", RETURNED : "Returned" } - + def __init__(self, exceptionNumber, exceptionName, action, ts=0): super(TraceExceptionEvent, self).__init__("exception", ts) self._number = exceptionNumber self._name = exceptionName self._action = action - + @property def exception_number(self): return self._number - + @property def exception_name(self): return self._name - + @property def action(self): return self._action - + def __str__(self): action = TraceExceptionEvent.ACTION_DESC.get(self.action, "") return "[{}] DWT: Exception #{:d} {} {}".format(self.timestamp, self.exception_number, action, self.exception_name) @@ -161,13 +161,13 @@ def __init__(self, pc, ts=0): @property def pc(self): return self._pc - + def __str__(self): return "[{}] DWT: PC={:#010x}".format(self.timestamp, self.pc) class TraceDataTraceEvent(TraceEvent): """! @brief DWT data trace event. - + Valid combinations: - PC value. - Bits[15:0] of a data address. 
@@ -191,23 +191,23 @@ def comparator(self): @property def pc(self): return self._pc - + @property def address(self): return self._addr - + @property def value(self): return self._value - + @property def is_read(self): return self._rnw - + @property def transfer_size(self): return self._sz - + def __str__(self): hasPC = self.pc is not None hasAddress = self.address is not None diff --git a/pyocd/trace/sink.py b/pyocd/trace/sink.py index f236fa94c..c909259b6 100644 --- a/pyocd/trace/sink.py +++ b/pyocd/trace/sink.py @@ -27,20 +27,20 @@ def receive(self, event): class TraceEventFilter(TraceEventSink): """! @brief Abstract interface for a trace event filter.""" - + def __init__(self, sink=None): self._sink = sink def connect(self, sink): """! @brief Connect the downstream trace sink or filter.""" self._sink = sink - + def receive(self, event): """! @brief Handle a single trace event. - + Passes the event through the filter() method. If one or more objects are returned, they are then passed to the trace sink connected to this filter (which may be another filter). - + @param self @param event An instance of TraceEvent or one of its subclasses. """ @@ -51,10 +51,10 @@ def receive(self, event): self._sink.receive(event_item) else: self._sink.receive(event) - + def filter(self, event): """! @brief Filter a single trace event. - + @param self @param event An instance of TraceEvent or one of its subclasses. @return Either None, a single TraceEvent, or a sequence of TraceEvents. @@ -63,13 +63,13 @@ def filter(self, event): class TraceEventTee(TraceEventSink): """! @brief Trace event sink that replicates events to multiple sinks.""" - + def __init__(self): self._sinks = [] def connect(self, sinks): """! @brief Connect one or more downstream trace sinks. - + @param self @param sinks If this parameter is a single object, it will be added to the list of downstream trace event sinks. 
If it is an iterable (list, tuple, etc.), then it will @@ -82,7 +82,7 @@ def connect(self, sinks): def receive(self, event): """! @brief Replicate a single trace event to all connected downstream trace event sinks. - + @param self @param event An instance of TraceEvent or one of its subclasses. """ diff --git a/pyocd/trace/swo.py b/pyocd/trace/swo.py index 62cbc27c8..5939ab020 100644 --- a/pyocd/trace/swo.py +++ b/pyocd/trace/swo.py @@ -19,12 +19,12 @@ class SWOParser(object): """! @brief SWO data stream parser. - + Processes a stream of SWO data and generates TraceEvent objects. SWO data is passed to the parse() method. It processes the data and creates TraceEvent objects which are passed to an event sink object that is a subclass of TraceEventSink. The event sink must either be provided when the SWOParser is constructed, or can be set using the connect() method. - + A SWOParser instance can be reused for multiple SWO sessions. If a break in SWO data streaming occurs, the reset() method should be called before passing further data to parse(). """ @@ -32,18 +32,18 @@ def __init__(self, core, sink=None): self.reset() self._core = core self._sink = sink - + def reset(self): self._bytes_parsed = 0 self._itm_page = 0 self._timestamp = 0 self._pending_events = [] self._pending_data_trace = None - + # Get generator instance and prime it. self._parser = self._parse() next(self._parser) - + def connect(self, sink): """! @brief Connect the downstream trace sink or filter.""" self._sink = sink @@ -55,26 +55,26 @@ def bytes_parsed(self): def parse(self, data): """! @brief Process SWO data. - + This method will return once the provided data is consumed, and can be called again when more data is available. There is no minimum or maximum limit on the size of the provided data. As trace events are identified during parsing, they will be passed to the event sink object passed into the constructor or connect(). 
- + @param self @param data A sequence of integer byte values, usually a bytearray. """ for value in data: self._parser.send(value) self._bytes_parsed += 1 - + def _flush_events(self): """! @brief Send all pending events to event sink.""" if self._sink is not None: for event in self._pending_events: self._sink.receive(event) self._pending_events = [] - + def _merge_data_trace_events(self, event): """! @brief Look for pairs of data trace events and merge.""" if isinstance(event, events.TraceDataTraceEvent): @@ -105,21 +105,21 @@ def _merge_data_trace_events(self, event): self._pending_events.append(self._pending_data_trace) self._pending_data_trace = None return False - + def _send_event(self, event): """! @brief Process event objects and decide when to send to event sink. - + This method handles the logic to associate a timestamp event with the prior other event. A list of pending events is built up until either a timestamp or overflow event is generated, at which point all pending events are flushed to the event sink. If a timestamp is seen, the timestamp of all pending events is set prior to flushing. """ flush = False - + # Handle merging data trace events. if self._merge_data_trace_events(event): return - + if isinstance(event, events.TraceTimestamp): for ev in self._pending_events: ev.timestamp = event.timestamp @@ -128,13 +128,13 @@ def _send_event(self, event): self._pending_events.append(event) if isinstance(event, events.TraceOverflow): flush = True - + if flush: self._flush_events() - + def _parse(self): """! @brief SWO parser as generator function coroutine. - + The generator yields every time it needs a byte of SWO data. The caller must use the generator's send() method to provide the next byte. """ @@ -143,7 +143,7 @@ def _parse(self): while True: byte = yield hdr = byte - + # Sync packet. 
if hdr == 0: packets = 0 @@ -218,18 +218,18 @@ def _parse(self): elif l == 2: byte1 = yield byte2 = yield - payload = (byte1 | + payload = (byte1 | (byte2 << 8)) else: byte1 = yield byte2 = yield byte3 = yield byte4 = yield - payload = (byte1 | + payload = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24)) - + # Instrumentation packet. if (hdr & 0x4) == 0: port = (self._itm_page * 32) + a @@ -248,7 +248,7 @@ def _parse(self): else: invalid = True # Periodic PC - elif a == 2: + elif a == 2: # A payload of 0 indicates a period PC sleep event. self._send_event(events.TracePeriodicPC(payload, timestamp)) # Data trace @@ -270,6 +270,6 @@ def _parse(self): # Invalid DWT 'a' value. else: invalid = True - + diff --git a/pyocd/trace/swv.py b/pyocd/trace/swv.py index df5963655..0b3e89093 100644 --- a/pyocd/trace/swv.py +++ b/pyocd/trace/swv.py @@ -34,14 +34,14 @@ class SWVEventSink(TraceEventSink): """! @brief Trace event sink that converts ITM packets to a text stream.""" - + def __init__(self, console): """! @brief Constructor. @param self @param console File-like object to which SWV data will be written. """ self._console = console - + def receive(self, event): """! @brief Handle an SWV trace event. @param self @@ -82,32 +82,32 @@ def __init__(self, session, core_number=0, lock=None): self._shutdown_event = threading.Event() self._swo_clock = 0 self._lock = lock - + self._session.subscribe(self._reset_handler, Target.Event.POST_RESET, self._session.target.cores[core_number]) - + def init(self, sys_clock, swo_clock, console): """! @brief Configures trace graph and starts thread. - + This method performs all steps required to start up SWV. It first calls the target's trace_start() method, which allows for target-specific trace initialization. Then it configures the TPIU and ITM modules. A simple trace data processing graph is created that connects an SWVEventSink with a SWOParser. Finally, the reader thread is started. 
- + If the debug probe does not support SWO, a warning is printed but nothing else is done. - + @param self @param sys_clock @param swo_clock @param console """ self._swo_clock = swo_clock - + if DebugProbe.Capability.SWO not in self._session.probe.capabilities: LOG.warning("Probe %s does not support SWO", self._session.probe.unique_id) return - + self._session.target.trace_start() - + itm = self._session.target.get_first_child_of_type(ITM) tpiu = self._session.target.get_first_child_of_type(TPIU) @@ -124,15 +124,15 @@ def init(self, sys_clock, swo_clock, console): self._parser = SWOParser(self._session.target.cores[self._core_number]) self._sink = SWVEventSink(console) self._parser.connect(self._sink) - + self.start() - + def stop(self): """! @brief Stops processing SWV data. - + The reader thread is terminated first, then the ITM is disabled. The last step is to call the target's trace_stop() method. - + Does nothing if the init() method did not complete successfully. """ if not self.is_alive(): @@ -143,12 +143,12 @@ def stop(self): itm = self._session.target.get_first_child_of_type(ITM) itm.disable() - + self._session.target.trace_stop() - + def run(self): """! @brief SWV reader thread routine. - + Starts the probe receiving SWO data by calling DebugProbe.swo_start(). For as long as the thread runs, it reads SWO data from the probe and passes it to the SWO parser created in init(). When the thread is signaled to stop, it calls DebugProbe.swo_stop() before exiting. 
@@ -170,14 +170,14 @@ def run(self): except exceptions.ProbeError: pass self._session.probe.swo_start(self._swo_clock) - + while not self._shutdown_event.is_set(): data = self._session.probe.swo_read() if data: if swv_raw_server: swv_raw_server.write(data) self._parser.parse(data) - + if self._lock: self._lock.release() @@ -185,18 +185,18 @@ def run(self): if self._lock: self._lock.acquire() - + self._session.probe.swo_stop() if swv_raw_server: swv_raw_server.stop() - + if self._lock: self._lock.release() - + def _reset_handler(self, notification): """! @brief Reset notification handler. - + If the target is reset while the SWV reader is running, then the Target::trace_start() method is called to reinit trace output. """ diff --git a/pyocd/utility/autoflush.py b/pyocd/utility/autoflush.py index 899bf05b5..2330eda01 100644 --- a/pyocd/utility/autoflush.py +++ b/pyocd/utility/autoflush.py @@ -18,24 +18,24 @@ class Autoflush(object): """! @brief Context manager for performing flushes. - + Pass a Target instance to the constructor, and when the context exits, the target will be automatically flushed. If a TransferError or subclass, such as TransferFaultError, is raised within the context, then the flush will be skipped. - + The parameter passed to the constructor can actually be any object with a `flush()` method, due to Python's dynamic dispatch. """ - + def __init__(self, target): """! @brief Constructor. - + @param self The object. @param target Object on which the flush will be performed. Normally this is a Target instance. """ self._target = target - + def __enter__(self): return self diff --git a/pyocd/utility/cmdline.py b/pyocd/utility/cmdline.py index 3e56bd5c9..62ec13da4 100644 --- a/pyocd/utility/cmdline.py +++ b/pyocd/utility/cmdline.py @@ -53,7 +53,7 @@ def split_command_line(cmd_line: Union[str, List[str]]) -> List[str]: def convert_vector_catch(vcvalue: Union[str, bytes]) -> int: """! @brief Convert a vector catch string to a mask. 
- + @exception ValueError Raised if an invalid vector catch character is encountered. """ # Make case insensitive. @@ -84,14 +84,14 @@ def convert_session_options(option_list: Iterable[str]) -> Dict[str, Any]: else: name = o.strip().lower() value = None - + # Check for and strip "no-" prefix before we validate the option name. if (value is None) and (name.startswith('no-')): name = name[3:] had_no_prefix = True else: had_no_prefix = False - + # Look for this option. try: info = OPTIONS_INFO[name] @@ -121,7 +121,7 @@ def convert_session_options(option_list: Iterable[str]) -> Dict[str, Any]: except ValueError: LOG.warning("invalid value for option '%s'", name) continue - + options[name] = value return options diff --git a/pyocd/utility/columns.py b/pyocd/utility/columns.py index a6e9d8908..833d4c52e 100644 --- a/pyocd/utility/columns.py +++ b/pyocd/utility/columns.py @@ -22,13 +22,13 @@ class ColumnFormatter(object): """! @brief Formats a set of values in multiple columns. - + The value_list must be a list of bi-tuples (name, value) sorted in the desired display order. - + The number of columns will be determined by the terminal width and maximum value width. The values will be printed in column major order. """ - + def __init__(self, maxwidth=None, inset=2): """! @brief Constructor. @param self The object. @@ -42,19 +42,19 @@ def __init__(self, maxwidth=None, inset=2): self._items = [] self._max_name_width = 0 self._max_value_width = 0 - + def add_items(self, item_list): """! @brief Add items to the output. @param self The object. @param item_list Must be a list of bi-tuples (name, value) sorted in the desired display order. """ self._items.extend(item_list) - + # Update max widths. for name, value in item_list: self._max_name_width = max(self._max_name_width, len(name)) self._max_value_width = max(self._max_value_width, len(value)) - + def format(self): """! @brief Return the formatted columns as a string. @param self The object. 
@@ -63,7 +63,7 @@ def format(self): item_width = self._max_name_width + self._max_value_width + self._inset * 2 + 2 column_count = self._term_width // item_width row_count = (len(self._items) + column_count - 1) // column_count - + rows = [[i for i in self._items[r::row_count]] for r in range(row_count)] @@ -77,7 +77,7 @@ def format(self): inset=(" " * self._inset)) txt += "\n" return txt - + def write(self, output_file=None): """! @brief Write the formatted columns to stdout or the specified file. @param self The object. @@ -87,5 +87,5 @@ def write(self, output_file=None): if output_file is None: output_file = sys.stdout output_file.write(self.format()) - + diff --git a/pyocd/utility/concurrency.py b/pyocd/utility/concurrency.py index 9ba5448dc..d92e657f0 100644 --- a/pyocd/utility/concurrency.py +++ b/pyocd/utility/concurrency.py @@ -18,7 +18,7 @@ def locked(func): """! @brief Decorator to automatically lock a method of a class. - + The class is required to have `lock()` and `unlock()` methods. """ @wraps(func) diff --git a/pyocd/utility/conversion.py b/pyocd/utility/conversion.py index be3bd8edf..bcdfd6314 100644 --- a/pyocd/utility/conversion.py +++ b/pyocd/utility/conversion.py @@ -23,10 +23,10 @@ def byte_list_to_nbit_le_list(data, bitwidth, pad=0x00): """! @brief Convert a list of bytes to a list of n-bit integers (little endian) - + If the length of the data list is not a multiple of `bitwidth` // 8, then the pad value is used for the additional required bytes. - + @param data List of bytes. @param bitwidth Width in bits of the resulting values. @param pad Optional value used to pad input data if not aligned to the bitwidth. @@ -46,7 +46,7 @@ def byte_list_to_nbit_le_list(data, bitwidth, pad=0x00): def nbit_le_list_to_byte_list(data, bitwidth): """! @brief Convert a list of n-bit values into a byte list. - + @param data List of n-bit values. @param bitwidth Width in bits of the input vales. @result List of integer bytes. 
@@ -55,7 +55,7 @@ def nbit_le_list_to_byte_list(data, bitwidth): def byte_list_to_u32le_list(data, pad=0x00): """! @brief Convert a list of bytes to a list of 32-bit integers (little endian) - + If the length of the data list is not a multiple of 4, then the pad value is used for the additional required bytes. """ @@ -118,7 +118,7 @@ def float64_to_u64(data): def uint_to_hex_le(value, width): """! @brief Create an n-digit hexadecimal string from an integer value. @param value Integer value to format. - @param width The width in bits. + @param width The width in bits. @return A string with the number of hex bytes required to fit `width` bits, rounded up to the next whole byte. The bytes represent `value` in little-endian order. That is, the first hex byte contains the LSB of `value`, while the last hex byte the MSB. diff --git a/pyocd/utility/graph.py b/pyocd/utility/graph.py index a9a0a9645..9677e58c8 100644 --- a/pyocd/utility/graph.py +++ b/pyocd/utility/graph.py @@ -17,9 +17,9 @@ class GraphNode(object): """! @brief Simple graph node. - + All nodes have a parent, which is None for a root node, and zero or more children. - + Supports indexing and iteration over children. """ @@ -28,34 +28,34 @@ def __init__(self): super(GraphNode, self).__init__() self._parent = None self._children = [] - + @property def parent(self): """! @brief This node's parent in the object graph.""" return self._parent - + @property def children(self): """! @brief Child nodes in the object graph.""" return self._children - + @property def is_leaf(self): """! @brief Returns true if the node has no children.""" return len(self.children) == 0 - + def add_child(self, node): """! @brief Link a child node onto this object.""" node._parent = self self._children.append(node) - + def find_root(self): """! @brief Returns the root node of the object graph.""" root = self while root.parent is not None: root = root.parent return root - + def find_children(self, predicate, breadth_first=True): """! 
@brief Recursively search for children that match a given predicate. @param self @@ -76,14 +76,14 @@ def _search(node, klass): results.extend(_search(child, klass)) elif breadth_first: childrenToExamine.append(child) - + if breadth_first: for child in childrenToExamine: results.extend(_search(child, klass)) return results - + return _search(self, predicate) - + def get_first_child_of_type(self, klass): """! @brief Breadth-first search for a child of the given class. @param self @@ -97,33 +97,33 @@ def get_first_child_of_type(self, klass): return matches[0] else: return None - + def __getitem__(self, key): """! @brief Returns the indexed child. - + Slicing is supported. """ return self._children[key] - + def __iter__(self): """! @brief Iterate over the node's children.""" return iter(self.children) - + def _dump_desc(self): """! @brief Similar to __repr__ by used for dump_to_str().""" return str(self) - + def dump_to_str(self): """! @brief Returns a string describing the object graph.""" - + def _dump(node, level): result = (" " * level) + "- " + node._dump_desc() + "\n" for child in node.children: result += _dump(child, level + 1) return result - + return _dump(self, 0) - + def dump(self): """! @brief Pretty print the object graph to stdout.""" print(self.dump_to_str()) diff --git a/pyocd/utility/hex.py b/pyocd/utility/hex.py index 3ceb858b9..4fe7e73a4 100644 --- a/pyocd/utility/hex.py +++ b/pyocd/utility/hex.py @@ -25,7 +25,7 @@ def format_hex_width(value, width): """! @brief Formats the value as hex of the specified bit width. - + @param value Integer value to be formatted. @param width Bit width, must be one of 8, 16, 32, 64. @return String with (width / 8) hex digits. Does not have a "0x" prefix. @@ -43,27 +43,27 @@ def format_hex_width(value, width): def dump_hex_data(data, start_address=0, width=8, output=None, print_ascii=True): """! @brief Prints a canonical hex dump of the given data. 
- + Each line of the output consists of an address column, the data as hex, and a printable ASCII representation of the data. - + The @a width parameter controls grouping of the hex bytes in the output. The bytes of the provided data are progressively read as little endian values of the specified bit width, then printed at that width. For example, for input data of [0x61 0x62 0x63 0x64], if @width is set to 8 the output will be "61 62 63 64", for 16 it will be printed as "6261 6463", and for 32 bit width it will be shown as "64636261". A space is inserted after each bit-width value, with an extra space every 4 bytes for 8 bit width. - + The output looks similar to this (width of 8): ``` 00000000: 85 89 70 0f 20 b1 ff bc a9 0c c8 3c bc a6 47 dd ..p. ......<..G. 00000010: c8 c9 66 ab 59 c8 35 6c 57 94 00 c8 17 35 85 b2 ..f.Y.5lW....5.. ``` - + The output is always terminated with a newline. - + If you want a string instead of output to a file, use the dump_hex_data_to_str() function. - + @param data The data to print as hex. Can be a `bytes`, `bytearray`, or list of integers. @param start_address Address of the first byte of the data. Defaults to 0. If set to None, then the address column is not printed. @@ -126,7 +126,7 @@ def line_width_in_chars(elements: int) -> int: d.reverse() s += "".join((chr(b) if (chr(b) in _PRINTABLE) else '.') for b in d) output.write(" " * (max_line_width - actual_line_width) + " " + s + "|") - + output.write("\n") def dump_hex_data_to_str(data, **kwargs): diff --git a/pyocd/utility/mask.py b/pyocd/utility/mask.py index b6334e671..cb004b347 100644 --- a/pyocd/utility/mask.py +++ b/pyocd/utility/mask.py @@ -19,20 +19,20 @@ def bitmask(*args): """! @brief Returns a mask with specified bit ranges set. - + An integer mask is generated based on the bits and bit ranges specified by the arguments. Any number of arguments can be provided. Each argument may be either a 2-tuple of integers, a list of integers, or an individual integer. 
The result is the combination of masks produced by the arguments. - + - 2-tuple: The tuple is a bit range with the first element being the MSB and the second element the LSB. All bits from LSB up to and included MSB are set. - list: Each bit position specified by the list elements is set. - int: The specified bit position is set. - + @return An integer mask value computed from the logical OR'ing of masks generated by each argument. - + Example: @code >>> hex(bitmask((23,17),1)) @@ -56,7 +56,7 @@ def bitmask(*args): def bit_invert(value, width=32): """! @brief Return the bitwise inverted value of the argument given a specified width. - + @param value Integer value to be inverted. @param width Bit width of both the input and output. If not supplied, this defaults to 32. @return Integer of the bitwise inversion of @a value. @@ -91,11 +91,11 @@ def __init__(self, msb, lsb=None, name=None): self._lsb = lsb if (lsb is not None) else msb self._name = name assert self._msb >= self._lsb - + @property def width(self): return self._msb - self._lsb + 1 - + def get(self, value): """! @brief Extract the bitfield value from a register value. @param self The Bitfield object. @@ -103,7 +103,7 @@ def get(self, value): @return Integer value of the bitfield extracted from `value`. """ return bfx(value, self._msb, self._lsb) - + def set(self, register_value, field_value): """! @brief Modified the bitfield in a register value. @param self The Bitfield object. @@ -112,7 +112,7 @@ def set(self, register_value, field_value): @return Integer register value with the bitfield updated to `field_value`. """ return bfi(register_value, self._msb, self._lsb, field_value) - + def __repr__(self): return "<{}@{:x} name={} {}:{}>".format(self.__class__.__name__, id(self), self._name, self._msb, self._lsb) @@ -126,7 +126,7 @@ def msb(n): def same(d1, d2): """! @brief Test whether two sequences contain the same values. 
- + Unlike a simple equality comparison, this function works as expected when the two sequences are of different types, such as a list and bytearray. The sequences must return compatible types from indexing. diff --git a/pyocd/utility/notification.py b/pyocd/utility/notification.py index 129113455..7c0a2c349 100644 --- a/pyocd/utility/notification.py +++ b/pyocd/utility/notification.py @@ -46,17 +46,17 @@ def __repr__(self): class Notifier(object): """!@brief Mix-in class that provides notification broadcast capabilities. - + In this notification model, subscribers register callbacks for one or more events. The events are simply a Python object of any kind, as long as it is hashable. Typically integers or Enums are used. Subscriptions can be registered for any sender of an event, or be filtered by the sender (called the source). - + When a notification is sent to the callback, it is wrapped up as a Notification object. Along with the notification, an optional, arbitrary data value can be sent. This allows for further specifying the event, or passing related values (or anything else you can think of). """ - + def __init__(self): ## Dict of subscribers for particular events and sources. # @@ -75,7 +75,7 @@ def __init__(self): def subscribe(self, cb, events, source=None): """!@brief Subscribe to selection of events from an optional source. - + @param self @param cb The callable that will be invoked when a matching notification is sent. Must accept a single parameter, a Notification instance. @@ -87,12 +87,12 @@ def subscribe(self, cb, events, source=None): """ if not isinstance(events, (tuple, list, set)): events = [events] - + for event in events: if event not in self._subscribers: self._subscribers[event] = ([], {}) event_info = self._subscribers[event] - + if source is None: event_info[0].append(cb) else: @@ -102,7 +102,7 @@ def subscribe(self, cb, events, source=None): def unsubscribe(self, cb, events=None): """!@brief Remove a callback from the subscribers list. 
- + @param self @param cb The callback to remove from all subscriptions. @param events Optional. May be a single event or an iterable of events. If specified, the @@ -110,16 +110,16 @@ def unsubscribe(self, cb, events=None): """ if (events is not None) and (not isinstance(events, (tuple, list, set))): events = [events] - + for event, event_info in self._subscribers.items(): # Skip this event if it's not one on the removal list. if (events is not None) and (event not in events): continue - + # Remove callback from all-sources list. if cb in event_info[0]: event_info[0].remove(cb) - + # Scan source-specific subscribers. for source_info in event_info[1].values(): if cb in source_info: @@ -127,7 +127,7 @@ def unsubscribe(self, cb, events=None): def notify(self, event, source=None, data=None): """!@brief Notify subscribers of an event. - + @param self @param event Event to send. Must be a hashable object. It is acceptable to notify for an event for which there are no subscribers. @@ -142,26 +142,26 @@ def notify(self, event, source=None, data=None): # Nobody has subscribed to this event, so nothing to do. TRACE.debug("Not sending notification because no subscribers: event=%s", event) return - + # Look up subscribers for this event + source combo. try: source_subscribers = event_info[1][source] except KeyError: # No source-specific subscribers. source_subscribers = [] - + # Create combined subscribers list. Exit if no subscribers matched. subscribers = event_info[0] + source_subscribers if not subscribers: TRACE.debug("Not sending notification because no matching subscribers: event=%s", event) return - + # Create the notification object now that we know there are some subscribers. if source is None: source = self note = Notification(event, source, data) TRACE.debug("Sending notification to %d subscribers: %s", len(subscribers), note) - + # Tell everyone! 
for cb in subscribers: cb(note) diff --git a/pyocd/utility/progress.py b/pyocd/utility/progress.py index 1d95c34c6..4144f3519 100644 --- a/pyocd/utility/progress.py +++ b/pyocd/utility/progress.py @@ -23,7 +23,7 @@ class ProgressReport(object): """! @brief Base progress report class. - + This base class implements the logic but no output. """ def __init__(self, file=None): @@ -32,11 +32,11 @@ def __init__(self, file=None): self.backwards_progress = False self.done = False self.last = 0 - + def __call__(self, progress): assert progress >= 0.0 - # Cap progress at 1.0. + # Cap progress at 1.0. if progress > 1.0: progress = 1.0 LOG.debug("progress out of bounds: %.3f", progress) @@ -60,7 +60,7 @@ def __call__(self, progress): self._finish() if self.backwards_progress: LOG.debug("Progress went backwards!") - + def _start(self): self.prev_progress = 0 self.backwards_progress = False @@ -76,14 +76,14 @@ def _finish(self): class ProgressReportTTY(ProgressReport): """! @brief Progress report subclass for TTYs. - + The progress bar is fully redrawn onscreen as progress is updated to give the impression of animation. """ ## These width constants can't be changed yet without changing the code below to match. WIDTH = 50 - + def _update(self, progress): self._file.write('\r') i = int(progress * self.WIDTH) @@ -96,7 +96,7 @@ def _finish(self): class ProgressReportNoTTY(ProgressReport): """! @brief Progress report subclass for non-TTY output. - + A simpler progress bar is used than for the TTY version. Only the difference between the previous and current progress is drawn for each update, making the output suitable for piping to a file or similar output. @@ -104,7 +104,7 @@ class ProgressReportNoTTY(ProgressReport): ## These width constants can't be changed yet without changing the code below to match. WIDTH = 40 - + def _start(self): super(ProgressReportNoTTY, self)._start() @@ -125,14 +125,14 @@ def _finish(self): def print_progress(file=None): """! 
@brief Progress printer factory. - + This factory function checks whether the output file is a TTY, and instantiates the appropriate subclass of ProgressReport. - + @param file The output file. Optional. If not provided, or if set to None, then sys.stdout will be used automatically. """ - + if file is None: file = sys.stdout try: @@ -141,7 +141,7 @@ def print_progress(file=None): # Either the file doesn't have a fileno method, or calling it returned an # error. In either case, just assume we're not connected to a TTY. istty = False - + klass = ProgressReportTTY if istty else ProgressReportNoTTY return klass(file) diff --git a/pyocd/utility/sequencer.py b/pyocd/utility/sequencer.py index c5a69008d..499983a1d 100644 --- a/pyocd/utility/sequencer.py +++ b/pyocd/utility/sequencer.py @@ -22,21 +22,21 @@ class CallSequence(object): """! @brief Call sequence manager. - + Contains an ordered sequence of tasks. Each task has a name and associated callable. The CallSequence class itself is callable, so instances can be nested as tasks within other CallSequences. - + When tasks within a sequence are called, they may optionally return a new CallSequence instance. If this happens, the new sequence is executed right away, before continuing with the next task in the original sequence. - + A CallSequence can be iterated over. It will return tuples of (task-name, callable). """ def __init__(self, *args): """! @brief Constructor. - + The constructor accepts an arbitrary number of parameters describing an ordered set of tasks. Each parameter must be a 2-tuple with the first element being the task's name and the second element a callable that implements the task. If you @@ -44,64 +44,64 @@ def __init__(self, *args): """ self._validate_tasks(args) self._calls = OrderedDict(args) - + def _validate_tasks(self, tasks): for i in tasks: assert len(i) == 2 assert type(i[0]) is str assert isinstance(i[1], Callable) - + @property def sequence(self): """! 
@brief Returns an OrderedDict of the call sequence. - + Task names are keys. """ return self._calls - + @sequence.setter def sequence(self, seq): """! @brief Replace the entire call sequence. - + Accepts either an OrderedDict or a list of 2-tuples like the constructor. """ if isinstance(seq, OrderedDict): self._calls = seq elif type(seq) is list and len(seq) and type(seq[0]) is tuple: self._calls = OrderedDict(seq) - + @property def count(self): """! @brief Returns the number of tasks in the sequence.""" return len(self._calls) - + def clear(self): """! @brief Remove all tasks from the sequence.""" self._calls = OrderedDict() - + def copy(self): """! @brief Duplicate the sequence.""" new_seq = CallSequence() new_seq._calls = self._calls.copy() return new_seq - + def remove_task(self, name): """! @brief Remove a task with the given name. @exception KeyError Raised if no task with the specified name exists. """ del self._calls[name] return self - + def has_task(self, name): """! @brief Returns a boolean indicating presence of the named task in the sequence.""" return name in self._calls - + def get_task(self, name): """! @brief Return the callable for the named task. @exception KeyError Raised if no task with the specified name exists. """ return self._calls[name] - + def replace_task(self, name, replacement): """! @brief Change the callable associated with a task.""" assert isinstance(replacement, Callable) @@ -112,10 +112,10 @@ def replace_task(self, name, replacement): # that is already in the dict. self._calls[name] = replacement return self - + def wrap_task(self, name, wrapper): """! @brief Wrap an existing task with a new callable. - + The wrapper is expected to take a single parameter, the return value from the original task. This allows for easy filtering of a new call sequence returned by the original task. @@ -125,15 +125,15 @@ def wrap_task(self, name, wrapper): # Get original callable. 
orig = self._calls[name] - + # OrderedDict preserves the order when changing the value of a key # that is already in the dict. self._calls[name] = lambda : wrapper(orig()) return self - + def append(self, *args): """! @brief Append a new task or tasks to the sequence. - + Like the constructor, this method takes any number of arguments. Each must be a 2-tuple task description. """ @@ -145,17 +145,17 @@ def append(self, *args): def insert_before(self, beforeTaskName, *args): """! @brief Insert a task or tasks before a named task. - + @param beforeTaskName The name of an existing task. The new tasks will be inserted prior to this task. - + After the task name parameter, any number of task description 2-tuples may be passed. - + @exception KeyError Raised if the named task does not exist in the sequence. """ self._validate_tasks(args) - + if not self.has_task(beforeTaskName): raise KeyError(beforeTaskName) @@ -172,17 +172,17 @@ def insert_before(self, beforeTaskName, *args): def insert_after(self, afterTaskName, *args): """! @brief Insert a task or tasks after a named task. - + @param afterTaskName The name of an existing task. The new tasks will be inserted after this task. - + After the task name parameter, any number of task description 2-tuples may be passed. - + @exception KeyError Raised if the named task does not exist in the sequence. """ self._validate_tasks(args) - + if not self.has_task(afterTaskName): raise KeyError(afterTaskName) @@ -199,30 +199,30 @@ def insert_after(self, afterTaskName, *args): def invoke(self): """! @brief Execute each task in order. - + A task may return a CallSequence, in which case the new sequence is immediately executed. """ for name, call in self._calls.items(): LOG.debug("Running task %s", name) resultSequence = call() - + # Invoke returned call sequence. 
if resultSequence is not None and isinstance(resultSequence, CallSequence): # LOG.debug("Invoking returned call sequence: %s", resultSequence) resultSequence.invoke() - + def __call__(self, *args, **kwargs): """! @brief Another way to execute the tasks. - + Supports nested CallSequences. """ self.invoke() - + def __iter__(self): """! @brief Iterate over the sequence.""" return iter(self._calls.items()) - + def __repr__(self): s = "<%s@%x: " % (self.__class__.__name__, id(self)) for name, task in self._calls.items(): diff --git a/pyocd/utility/server.py b/pyocd/utility/server.py index b4b8c7605..695c90947 100644 --- a/pyocd/utility/server.py +++ b/pyocd/utility/server.py @@ -26,18 +26,18 @@ class StreamServer(threading.Thread): """! @brief File-like object that serves data over a TCP socket. - + The user can connect to the socket with telnet or netcat. - + The server thread will automatically be started by the constructor. To shut down the server and its thread, call the stop() method. """ - + def __init__(self, port, serve_local_only=True, name=None, is_read_only=True, extra_info=None): """! @brief Constructor. - + Starts the server immediately. - + @param self @param port The TCP/IP port number on which the server should listen. If 0 is passed, then an arbitrary unused port is selected by the OS. In this case, the `port` property @@ -68,7 +68,7 @@ def __init__(self, port, serve_local_only=True, name=None, is_read_only=True, ex self._shutdown_event = threading.Event() self.daemon = True self.start() - + @property def port(self): return self._port diff --git a/pyocd/utility/sockets.py b/pyocd/utility/sockets.py index ccdcb26df..79f79f9c4 100644 --- a/pyocd/utility/sockets.py +++ b/pyocd/utility/sockets.py @@ -79,22 +79,22 @@ def set_timeout(self, timeout): class ClientSocket(object): """! @brief Simple client-side TCP socket. - + Provides a file-like interface to a TCP socket. Blocking and timeout are configurable. 
""" - + DEFAULT_TIMEOUT = 10.0 - + def __init__(self, host, port, packet_size=4096, timeout=None): self._address = (host, port) self._packet_size = packet_size self._timeout = timeout or self.DEFAULT_TIMEOUT self._socket = None self._buffer = bytearray() - + def connect(self): self._socket = socket.create_connection(self._address, self._timeout) - + def close(self): if self._socket is not None: # Close both ends of the connection, then close the socket itself. @@ -122,7 +122,7 @@ def read(self, packet_size=None): def write(self, data): return self._socket.sendall(data) - + def readline(self): while True: # Try to extract a line from the buffer. diff --git a/pyocd/utility/strings.py b/pyocd/utility/strings.py index c65bf0f42..e37508bab 100644 --- a/pyocd/utility/strings.py +++ b/pyocd/utility/strings.py @@ -19,21 +19,21 @@ class UniquePrefixMatcher: """! @brief Manages detection of shortest unique prefix match of a set of strings.""" - + def __init__(self, items: Optional[Iterable[str]] = None): """! @brief Constructor. @param self This object. @param items Optional sequence of strings. """ self._items = set(items) if (items is not None) else set() - + def add_items(self, items: Iterable[str]) -> None: """! @brief Add some items to be matched. @param self This object. @param items Sequence of strings. """ self._items.update(items) - + def find_all(self, prefix: str) -> Tuple[str, ...]: """! @brief Return all items matching the given prefix. @param self This object. @@ -48,7 +48,7 @@ def find_all(self, prefix: str) -> Tuple[str, ...]: if prefix in self._items: return (prefix,) return tuple(i for i in self._items if i.startswith(prefix)) - + def find_one(self, prefix: str) -> Optional[str]: """! @brief Return the item matching the given prefix, or None. @param self This object. 
@@ -66,10 +66,10 @@ def find_one(self, prefix: str) -> Optional[str]: def uniquify_name(name: str, others: Sequence[str]) -> str: """@brief Ensure the given name is unique amongst the other provided names. - + If the `name` parameter is not unique, an integer will be appended to it. If the name already ends in an integer, that value will be incremented by 1. - + @param name The name to uniqify. @param others Sequence of other names to compare against. @return A string guaranteed to not be the same as any string contained in `others`. @@ -87,5 +87,5 @@ def uniquify_name(name: str, others: Sequence[str]) -> str: # Update the name with the trailing int incremented. name += str(u_value + 1) - + return name diff --git a/pyocd/utility/timeout.py b/pyocd/utility/timeout.py index e1e5abc20..9577f6721 100644 --- a/pyocd/utility/timeout.py +++ b/pyocd/utility/timeout.py @@ -20,11 +20,11 @@ class Timeout: """! @brief Timeout helper context manager. - + The recommended way to use this class is demonstrated here. It uses an else block on a while loop to handle the timeout. The code in the while loop must use a break statement to exit in the successful case. - + @code with Timeout(5, sleeptime=0.1) as t_o: while t_o.check(): # or "while not t_o.did_time_out" @@ -34,10 +34,10 @@ class Timeout: else: print("Timed out!") @endcode - + Another method of using the class is to check the `did_time_out` property from within the while loop, as shown below. - + @code with Timeout(5) as t_o: while perform_some_test(): @@ -47,14 +47,14 @@ class Timeout: break sleep(0.1) @endcode - + You may also combine the call to check() in the while loop with other boolean expressions related to the operation being performed. - + If you pass a non-zero value for _sleeptime_ to the constructor, the check() method will automatically sleep by default starting with the second call. You can disable auto-sleep by passing `autosleep=False` to check(). - + Passing a timeout of None to the constructor is allowed. 
In this case, check() will always return True and the loop must be exited via some other means. """ @@ -92,10 +92,10 @@ def start(self) -> None: self._start = time() self._timed_out = False self._is_first_check = True - + def clear(self): """! @brief Reset the timeout back to initial, non-running state. - + The timeout can be made to run again by calling start(). """ self._is_running = False @@ -104,19 +104,19 @@ def clear(self): def check(self, autosleep: bool = True) -> bool: """! @brief Check for timeout and possibly sleep. - + Starting with the second call to this method, it will automatically sleep before returning if: - The timeout has not yet occurred. - A non-zero _sleeptime_ was passed to the constructor. - The _autosleep_ parameter is True. - + This method is intended to be used as the predicate of a while loop. If this method is called prior to the timeout being started (by the start() method or entering it as a context manager) this the return value will always be True (not timeed out). Only after the timeout is running will the elapsed time be tested. - + @param self @param autosleep Whether to sleep if not timed out yet. The sleeptime passed to the constructor must have been non-zero. @@ -131,7 +131,7 @@ def check(self, autosleep: bool = True) -> bool: sleep(self._sleeptime) self._is_first_check = False return not self._timed_out - + @property def is_running(self) -> bool: """! @brief Whether the timeout object has started timing.""" diff --git a/src/gdb_test_program/linker_script.ld b/src/gdb_test_program/linker_script.ld index 3daa5bd6c..49efa80cd 100644 --- a/src/gdb_test_program/linker_script.ld +++ b/src/gdb_test_program/linker_script.ld @@ -39,12 +39,12 @@ SECTIONS . = ALIGN(4); *(.data) /* .data sections */ *(.data*) /* .data* sections */ - + . = ALIGN(4); *(.bss) *(.bss*) *(COMMON) - + . = ALIGN(4); *(.rodata) /* .rodata sections (constants, strings, etc.) */ *(.rodata*) /* .rodata* sections (constants, strings, etc.) 
*/ diff --git a/src/gdb_test_program/main.c b/src/gdb_test_program/main.c index 0935be720..d09cce34c 100644 --- a/src/gdb_test_program/main.c +++ b/src/gdb_test_program/main.c @@ -28,22 +28,22 @@ volatile uint32_t watchpoint_write_buffer[3]; void function_1() { - + } void function_2() { - + } void function_3() { - + } void breakpoint_test() { - + } void watchpoint_test() @@ -75,7 +75,7 @@ void watchpoint_test() int main() { int i; - + // Initialize variables run_breakpoint_test = 0; watchpoint_write = 0; diff --git a/test/automated_test.py b/test/automated_test.py index bcc57ab7d..e18c19f53 100755 --- a/test/automated_test.py +++ b/test/automated_test.py @@ -106,12 +106,12 @@ def split_results_by_board(result_list): def generate_xml_results(result_list): board_results = split_results_by_board(result_list) - + suite_id = 0 total_failures = 0 total_tests = 0 total_time = 0 - + root = ElementTree.Element('testsuites', name="pyocd" ) @@ -127,7 +127,7 @@ def generate_xml_results(result_list): suite.text = "\n" suite.tail = "\n" suite_id += 1 - + for result in results: total += 1 @@ -143,11 +143,11 @@ def generate_xml_results(result_list): total_tests += total total_failures += failures total_time += suite_time - + root.set('tests', str(total_tests)) root.set('failures', str(total_failures)) root.set('time', "%.3f" % total_time) - + xml_results = os.path.join(TEST_OUTPUT_DIR, XML_RESULTS_TEMPLATE.format(get_env_file_name())) ElementTree.ElementTree(root).write(xml_results, encoding="UTF-8", xml_declaration=True) @@ -173,15 +173,15 @@ def print_test_header(output_file, board, test): def test_board(board_id, n, loglevel, logToConsole, commonLogFile): """! @brief Run all tests on a given board. - + When multiple test jobs are being used, this function is the entry point executed in child processes. - + Always writes both stdout and log messages of tests to a board-specific log file, and saves the output for each test to a string that is stored in the TestResult object. 
Depending on the logToConsole and commonLogFile parameters, output may also be copied to the console (sys.stdout) and/or a common log file for all boards. - + @param board_id Unique ID of the board to test. @param n Unique index of the test run. @param loglevel Log level passed to logger instance. Usually INFO or DEBUG. @@ -202,7 +202,7 @@ def test_board(board_id, n, loglevel, logToConsole, commonLogFile): log_filename = os.path.join(TEST_OUTPUT_DIR, LOG_FILE_TEMPLATE.format(name_info)) if os.path.exists(log_filename): os.remove(log_filename) - + # Skip board if specified in the config. if session.options['skip_test']: print("Skipping board %s due as specified in config" % board.unique_id) @@ -215,7 +215,7 @@ def test_board(board_id, n, loglevel, logToConsole, commonLogFile): # Open board-specific output file. This is done after skipping so a skipped board doesn't have a # log file created for it (but a previous log file will be removed, above). log_file = open(log_filename, "w", buffering=1) # 1=Line buffered - + # Setup logging. log_handler = RecordingLogHandler(None) log_handler.setFormatter(logging.Formatter(LOG_FORMAT)) @@ -234,7 +234,7 @@ def test_board(board_id, n, loglevel, logToConsole, commonLogFile): # Run all tests on this board. for test in test_list: print("{} #{}: starting {}...".format(board.name, n, test.name), file=originalStdout) - + # Set the test number on the test object. Used to get a unique port for the GdbTest. test.n = n @@ -242,7 +242,7 @@ def test_board(board_id, n, loglevel, logToConsole, commonLogFile): print_test_header(log_file, board, test) if commonLogFile is not None: print_test_header(commonLogFile, board, test) - + # Create a StringIO object to record the test's output, an IOTee to copy # output to both the log file and StringIO, then set the log handler and # stdio to write to the tee. 
@@ -255,7 +255,7 @@ def test_board(board_id, n, loglevel, logToConsole, commonLogFile): log_handler.stream = tee sys.stdout = tee sys.stderr = tee - + test_start = time() result = test.run(board) test_stop = time() @@ -263,7 +263,7 @@ def test_board(board_id, n, loglevel, logToConsole, commonLogFile): tee.flush() result.output = testOutput.getvalue() result_list.append(result) - + passFail = "PASSED" if result.passed else "FAILED" print("{} #{}: finished {}... {} ({:.3f} s)".format( board.name, n, test.name, passFail, result.time), @@ -285,7 +285,7 @@ def filter_tests(args): sys.exit(1) excludes = [t.strip().lower() for t in args.exclude_tests.split(',')] if args.exclude_tests else [] includes = [t.strip().lower() for t in args.include_tests.split(',')] if args.include_tests else [] - + for test in all_tests: if excludes: include_it = (test.name.lower() not in excludes) @@ -293,7 +293,7 @@ def filter_tests(args): include_it = (test.name.lower() in includes) else: include_it = True - + if include_it: test_list.append(test) @@ -308,18 +308,18 @@ def main(): parser.add_argument('-x', '--exclude-tests', metavar="TESTS", default="", help="Comma-separated list of tests to exclude.") parser.add_argument('-i', '--include-tests', metavar="TESTS", default="", help="Comma-separated list of tests to include.") args = parser.parse_args() - + # Allow CI to override the number of concurrent jobs. if 'CI_JOBS' in os.environ: args.jobs = int(os.environ['CI_JOBS']) - + filter_tests(args) - + if args.list_tests: for test in test_list: print(test.name) return - + # Disable multiple jobs on macOS prior to Python 3.4. By default, multiprocessing uses # fork() on Unix, which doesn't work on the Mac because CoreFoundation requires exec() # to be used in order to init correctly (CoreFoundation is used in hidapi). Only on Python @@ -330,7 +330,7 @@ def main(): args.jobs = 1 ensure_output_dir() - + # Setup logging based on concurrency and quiet option. 
level = logging.DEBUG if args.debug else logging.INFO if args.jobs == 1 and not args.quiet: @@ -350,7 +350,7 @@ def main(): # Put together list of boards to test board_list = ConnectHelper.get_all_connected_probes(blocking=False) board_id_list = sorted(b.unique_id for b in board_list) - + # Filter boards. if args.board: board_id_list = [b for b in board_id_list if any(c for c in args.board if c.lower() in b.lower())] @@ -364,11 +364,11 @@ def main(): # Create a pool of processes to run tests. try: pool = mp.Pool(args.jobs) - + # Issue board test job to process pool. async_results = [pool.apply_async(test_board, (board_id, n, level, logToConsole, commonLogFile)) for n, board_id in enumerate(board_id_list)] - + # Gather results. for r in async_results: result_list += r.get(timeout=JOB_TIMEOUT) @@ -383,7 +383,7 @@ def main(): with open(summary_file, "w") as output_file: print_summary(test_list, result_list, test_time, output_file) generate_xml_results(result_list) - + exit_val = 0 if Test.all_tests_pass(result_list) else -1 exit(exit_val) diff --git a/test/basic_test.py b/test/basic_test.py index 550aaf05d..dff819d21 100644 --- a/test/basic_test.py +++ b/test/basic_test.py @@ -53,7 +53,7 @@ class BasicTest(Test): def __init__(self): super(BasicTest, self).__init__("Basic Test", run_basic_test) - + def run_basic_test(board_id): return basic_test(board_id, None) @@ -171,7 +171,7 @@ def basic_test(board_id, file): print("Failed to write range step test code to RAM") else: print("wrote range test step code to RAM successfully") - + target.write_core_register('pc', test_addr) currentPC = target.read_core_register('pc') print("start PC: 0x%X" % currentPC) @@ -288,7 +288,7 @@ def basic_test(board_id, file): data = target.read_memory_block8(address, sector_size) if data != [flash.region.erased_byte_value] * sector_size: print("FAILED to erase sector @ 0x%x (%d bytes)" % (address, sector_size)) - + # Re-verify the 1st and 3rd page were not erased, and that the 2nd page is 
fully erased did_pass = False for i in range(0, sectors_to_test): diff --git a/test/commander_test.py b/test/commander_test.py index 6dc8e0998..0281ce022 100644 --- a/test/commander_test.py +++ b/test/commander_test.py @@ -66,21 +66,21 @@ def commander_test(board_id): test_count = 0 failed_commands = [] result = CommanderTestResult() - + COMMANDS_TO_TEST = [ # general commands ["continue"], ["status"], ["halt"], ["status"], - + # commander command group - these are not tested by commands_test.py. ["list"], ["exit"], # Must be last command! ] print("\n------ Testing commander ------\n") - + # Set up commander args. args = UserDict() args.no_init = False @@ -96,7 +96,7 @@ def commander_test(board_id): args.unique_id = board_id args.target_override = None args.elf = GDB_TEST_ELF - + test_count += 1 try: cmdr = PyOCDCommander(args, COMMANDS_TO_TEST) @@ -106,7 +106,7 @@ def commander_test(board_id): except Exception: print("TEST FAILED") traceback.print_exc() - + test_count += 1 print("Testing exit code") print("Exit code:", cmdr.exit_code) diff --git a/test/commands_test.py b/test/commands_test.py index bb1d7f899..c6e94baad 100644 --- a/test/commands_test.py +++ b/test/commands_test.py @@ -81,7 +81,7 @@ def commands_test(board_id): temp_test_hex_name = binary_to_hex_file(binary_file, boot_region.start) temp_bin_file = tempfile.mktemp('.bin') - + with open(binary_file, "rb") as f: test_data = list(bytearray(f.read())) test_data_length = len(test_data) @@ -93,10 +93,10 @@ def commands_test(board_id): test_count = 0 failed_commands = [] result = CommandsTestResult() - + context = CommandExecutionContext() context.attach_session(session) - + COMMANDS_TO_TEST = [ "status", "reset", @@ -183,10 +183,10 @@ def commands_test(board_id): "set step-into-interrupts 1", "set log info", "set frequency %d" % test_params['test_clock'], - + # Semicolon-separated commands. 'rw 0x%08x ; rw 0x%08x' % (ram_base, ram_base + 4), - + # Python and system commands. 
'$2+ 2', '!echo hello', @@ -203,10 +203,10 @@ def commands_test(board_id): # "where", # "symbol", ] - + # For now we just verify that the commands run without raising an exception. print("\n------ Testing commands ------") - + def test_command(cmd): try: print("\nTEST: %s" % cmd) diff --git a/test/concurrency_test.py b/test/concurrency_test.py index 32e0b9e99..6c273303b 100644 --- a/test/concurrency_test.py +++ b/test/concurrency_test.py @@ -43,7 +43,7 @@ run_in_parallel, ) -# Test configuration values. +# Test configuration values. TEST_MAX_LENGTH = 1 * 1024 * 1024 TEST_THREAD_COUNT = 8 TEST_SUBCHUNK_COUNT = 2 # Number of reads/writes per thread. @@ -87,18 +87,18 @@ def concurrency_test(board_id): test_pass_count = 0 test_count = 0 result = ConcurrencyTestResult() - + target.reset_and_halt() - + # Prepare TEST_THREAD_COUNT regions of RAM with patterns data_len = min(TEST_MAX_LENGTH, ram_region.length) chunk_len = data_len // TEST_THREAD_COUNT subchunk_len = chunk_len // TEST_SUBCHUNK_COUNT - + chunk_data = [] for i in range(TEST_THREAD_COUNT): chunk_data.append([(i + j) % 256 for j in range(chunk_len)]) - + def write_chunk_data(core, i): start = ram_region.start + chunk_len * i for j in range(TEST_SUBCHUNK_COUNT): @@ -119,22 +119,22 @@ def read_chunk_data(core, i): data = core.read_memory_block8(addr, subchunk_len) chunk_read_data[i].extend(data) print("Finished reading region %i:%i" % (i, j)) - + # Test with a single core/AP. print("\n------ Test 1: Concurrent memory accesses, single core ------") - + core = target.cores[0] # Write chunk patterns concurrently. 
print("Writing %i regions to RAM" % TEST_THREAD_COUNT) run_in_parallel(write_chunk_data, [[core, i] for i in range(TEST_THREAD_COUNT)]) - + print("Reading %i regions to RAM" % TEST_THREAD_COUNT) chunk_read_data = [list() for i in range(TEST_THREAD_COUNT)] run_in_parallel(read_chunk_data, [[core, i] for i in range(TEST_THREAD_COUNT)]) - + print("Comparing data") - + for i in range(TEST_THREAD_COUNT): test_count += 1 if same(chunk_read_data[i], chunk_data[i]): @@ -142,13 +142,13 @@ def read_chunk_data(core, i): print("Region %i PASSED" % i) else: print("Region %i FAILED" % i) - + # Test with a multiple cores/APs. # Disabled until cores each have their own memory map, the regions accessible to each # core can be identified. if False: # len(target.cores) > 1: print("\n------ Test 2: Concurrent memory accesses, multiple cores ------") - + cycle_count = ((len(target.cores) + TEST_THREAD_COUNT - 1) // TEST_THREAD_COUNT * TEST_THREAD_COUNT) repeat_cores = ncycles(iter(target.cores), cycle_count) thread_args = [] @@ -158,13 +158,13 @@ def read_chunk_data(core, i): # Write chunk patterns concurrently. 
print("Writing %i regions to RAM" % TEST_THREAD_COUNT) run_in_parallel(write_chunk_data, thread_args) - + print("Reading %i regions to RAM" % TEST_THREAD_COUNT) chunk_read_data = [list() for i in range(TEST_THREAD_COUNT)] run_in_parallel(read_chunk_data, thread_args) - + print("Comparing data") - + for i in range(TEST_THREAD_COUNT): test_count += 1 if same(chunk_read_data[i], chunk_data[i]): diff --git a/test/cortex_test.py b/test/cortex_test.py index d76b23a23..c9c9365ed 100644 --- a/test/cortex_test.py +++ b/test/cortex_test.py @@ -56,7 +56,7 @@ class CortexTestResult(TestResult): "run_halt", "gdb_step", ] - + def __init__(self): super(CortexTestResult, self).__init__(None, None, None) self.name = "cortex" @@ -218,7 +218,7 @@ def reset_methods(fnc): print("Software reset (default=emulated)") target.selected_core.default_reset_type = Target.ResetType.SW_EMULATED fnc(reset_type=None) - + print("(Default) Software reset (SYSRESETREQ)") target.selected_core.default_software_reset_type = Target.ResetType.SW_SYSRESETREQ fnc(reset_type=Target.ResetType.SW) @@ -228,7 +228,7 @@ def reset_methods(fnc): print("(Default) Software reset (emulated)") target.selected_core.default_software_reset_type = Target.ResetType.SW_EMULATED fnc(reset_type=Target.ResetType.SW) - + print("Software reset (option=default)") target.selected_core.default_reset_type = Target.ResetType.SW target.selected_core.default_software_reset_type = Target.ResetType.SW_SYSRESETREQ @@ -251,7 +251,7 @@ def reset_methods(fnc): fnc(reset_type=None) reset_methods(target.reset) - + # Test passes if there are no exceptions test_pass_count += 1 test_count += 1 @@ -312,11 +312,11 @@ def reset_methods(fnc): print("TEST PASSED") else: print("TEST FAILED") - + # Restore regs origRegs[0] = origR0 target.write_core_registers_raw(['r0', 'r1', 'r2', 'r3'], origRegs) - + print("Verify exception is raised while core is running") target.resume() try: @@ -331,7 +331,7 @@ def reset_methods(fnc): print("TEST PASSED") else: 
print("TEST FAILED") - + print("Verify failure to write core register while running raises exception") try: target.write_core_register('r0', 0x1234) @@ -345,7 +345,7 @@ def reset_methods(fnc): print("TEST PASSED") else: print("TEST FAILED") - + # Resume execution. target.halt() @@ -404,11 +404,11 @@ def reset_methods(fnc): else: print("TEST FAILED (0x%08x==0x%08x, %f==%f, 0x%08x==0x%08x, %f==%f)" \ % (vals[0], _1p1, s0, 1.1, vals[1], _2p2, s1, 2.2)) - + # Restore s0 origRegs[0] = origRawS0 target.write_core_registers_raw(['s0', 's1'], origRegs) - + print("Verify that all listed core registers can be accessed") reg_count = 0 passed_reg_count = 0 diff --git a/test/debug_context_test.py b/test/debug_context_test.py index 2a3f087b4..80bbce99c 100644 --- a/test/debug_context_test.py +++ b/test/debug_context_test.py @@ -88,24 +88,24 @@ def debug_context_test(board_id): with open(binary_file, "rb") as f: test_binary_data = bytearray(f.read()) test_binary_data_length = len(test_binary_data) - + # Generate ELF file from the binary test file. temp_test_elf_name = binary_to_elf_file(binary_file, boot_region.start) test_pass_count = 0 test_count = 0 result = DebugContextTestResult() - + target.reset_and_halt() - + # Reproduce a gdbserver failure. print("\n------ Test 1: Mem cache ------") - + ctx = target.get_target_context() print("Writing gdb test binary") ctx.write_memory_block8(ram_base, gdb_test_binary_data) - + print("Reading first chunk") data = ctx.read_memory_block8(ram_base, 64) if data == gdb_test_binary_data[:64]: @@ -114,7 +114,7 @@ def debug_context_test(board_id): else: print("TEST FAILED") test_count += 1 - + print("Reading N chunks") did_pass = True for n in range(8): @@ -129,20 +129,20 @@ def debug_context_test(board_id): print("TEST PASSED") else: print("TEST FAILED") - + # Force a memory cache clear. target.step() - + # ELF reader test goals: # 1. Verify correct data is read without accessing the target memory. # 2. Test null interval failure. 
# print("\n------ Test 2: ELF reader ------") - + # Set the elf on the target, which will add a context to read from the elf. target.elf = temp_test_elf_name ctx = target.get_target_context() - + print("Check that ElfReaderContext was created") if isinstance(ctx, ElfReaderContext): test_pass_count += 1 @@ -150,7 +150,7 @@ def debug_context_test(board_id): else: print("TEST FAILED") test_count += 1 - + # Program the test binary. print("Programming test binary to boot memory") FileProgrammer(session).program(binary_file, base_address=boot_region.start) diff --git a/test/flash_loader_test.py b/test/flash_loader_test.py index c1d0356fe..efd5c0d62 100644 --- a/test/flash_loader_test.py +++ b/test/flash_loader_test.py @@ -86,18 +86,18 @@ def flash_loader_test(board_id): # Generate an Intel hex file from the binary test file. temp_test_hex_name = binary_to_hex_file(binary_file, boot_region.start) - + # Generate ELF file from the binary test file. temp_test_elf_name = binary_to_elf_file(binary_file, boot_region.start) test_pass_count = 0 test_count = 0 result = FlashLoaderTestResult() - + with open(binary_file, "rb") as f: data = list(bytearray(f.read())) data_length = len(data) - + print("\n------ Test Basic Load ------") loader = FlashLoader(session, chip_erase="sector") loader.add_data(boot_start_addr, data) @@ -109,7 +109,7 @@ def flash_loader_test(board_id): else: print("TEST FAILED") test_count += 1 - + print("\n------ Test Load Sector Erase ------") test_data = [0x55] * boot_blocksize addr = (boot_end_addr + 1) - (boot_blocksize * num_test_sectors) @@ -117,12 +117,12 @@ def flash_loader_test(board_id): orig_data_length = addr - boot_start_addr else: orig_data_length = data_length - + loader = FlashLoader(session, chip_erase="sector") loader.add_data(addr, test_data) loader.add_data(addr + boot_blocksize, test_data) loader.commit() - + verify_data = target.read_memory_block8(addr, boot_blocksize * num_test_sectors) verify_data2 = 
target.read_memory_block8(boot_start_addr, orig_data_length) if same(verify_data, test_data * num_test_sectors) and same(verify_data2, data[:orig_data_length]): @@ -131,7 +131,7 @@ def flash_loader_test(board_id): else: print("TEST FAILED") test_count += 1 - + print("\n------ Test Basic Sector Erase ------") addr = (boot_end_addr + 1) - (boot_blocksize * num_test_sectors) eraser = FlashEraser(session, FlashEraser.Mode.SECTOR) @@ -143,7 +143,7 @@ def flash_loader_test(board_id): else: print("TEST FAILED") test_count += 1 - + print("\n------ Test Load Chip Erase ------") loader = FlashLoader(session, chip_erase="chip") loader.add_data(boot_start_addr, data) @@ -155,7 +155,7 @@ def flash_loader_test(board_id): else: print("TEST FAILED") test_count += 1 - + print("\n------ Test Binary File Load ------") programmer = FileProgrammer(session) programmer.program(binary_file, file_format='bin', base_address=boot_start_addr) @@ -166,7 +166,7 @@ def flash_loader_test(board_id): else: print("TEST FAILED") test_count += 1 - + print("\n------ Test Intel Hex File Load ------") programmer = FileProgrammer(session) programmer.program(temp_test_hex_name, file_format='hex') @@ -177,7 +177,7 @@ def flash_loader_test(board_id): else: print("TEST FAILED") test_count += 1 - + print("\n------ Test ELF File Load ------") programmer = FileProgrammer(session) programmer.program(temp_test_elf_name, file_format='elf') diff --git a/test/flash_test.py b/test/flash_test.py index 17f9393a4..d49de584b 100644 --- a/test/flash_test.py +++ b/test/flash_test.py @@ -142,7 +142,7 @@ def flash_test(board_id): test_pass_count = 0 test_count = 0 result = FlashTestResult() - + # Test each flash region separately. for rom_region in memory_map.iter_matching_regions(type=MemoryType.FLASH, is_testable=True): rom_start = rom_region.start @@ -150,7 +150,7 @@ def flash_test(board_id): flash = rom_region.flash flash_info = flash.get_flash_info() - + # This can be any value, as long as it's not the erased byte value. 
We take the # inverse of the erased value so that for most flash, the unerased value is 0x00. unerasedValue = invert32(flash.region.erased_byte_value) & 0xff @@ -162,7 +162,7 @@ def flash_test(board_id): data = f.read() data = struct.unpack("%iB" % len(data), data) unused = rom_size - len(data) - + # Make sure data doesn't overflow this region. if unused < 0: data = data[:rom_size] @@ -173,7 +173,7 @@ def flash_test(board_id): # Turn on extra checks for the next 4 tests flash.set_flash_algo_debug(True) - + print("\n------ Test Erased Value Check ------") d = [flash.region.erased_byte_value] * 128 if flash.region.is_data_erased(d): diff --git a/test/gdb_test.py b/test/gdb_test.py index e00c5d4be..154e1c7a1 100644 --- a/test/gdb_test.py +++ b/test/gdb_test.py @@ -114,7 +114,7 @@ def test_gdb(board_id=None, n=0): target_test_params = get_target_test_params(session) test_port = 3333 + n telnet_port = 4444 + n - + # Hardware breakpoints are not supported above 0x20000000 on # Cortex-M devices with FPB revision 1. fpb = session.target.selected_core.fpb diff --git a/test/gdb_test_script.py b/test/gdb_test_script.py index 77084d3f4..8f081c88e 100644 --- a/test/gdb_test_script.py +++ b/test/gdb_test_script.py @@ -220,7 +220,7 @@ def run_test(): # Connect to server gdb_execute("target remote localhost:%d" % test_port) - + # Show memory regions, useful for debug and verification. gdb_execute("info mem") diff --git a/test/import_all.py b/test/import_all.py index b32f4b12e..07ea56483 100644 --- a/test/import_all.py +++ b/test/import_all.py @@ -29,15 +29,15 @@ def process_dir(dotted_path: str, dir_path: Path) -> None: for entry in sorted(dir_path.iterdir(), key=lambda v: v.name): is_subpackage = (entry.is_dir() and (entry / "__init__.py").exists()) is_module = entry.suffix == ".py" - + if not (is_subpackage or is_module): continue - + module_path = dotted_path + '.' 
+ entry.stem print(f"Importing: {module_path}") import_module(module_path) import_count += 1 - + # Recursive into valid sub-packages. if is_subpackage: process_dir(module_path, entry) diff --git a/test/probeserver_test.py b/test/probeserver_test.py index 78d58ea99..0efebf19d 100644 --- a/test/probeserver_test.py +++ b/test/probeserver_test.py @@ -100,7 +100,7 @@ def test_probeserver(board_id=None, n=0): board_id = board.unique_id target_type = board.target_type - # Run the test. We can't kill the server thread, so + # Run the test. We can't kill the server thread, so LOG.info('Starting server on port %d', test_port) server_args = ['pyocd', 'server', '-v', @@ -108,7 +108,7 @@ def test_probeserver(board_id=None, n=0): "--uid=%s" % board_id, ] server_program = Popen(server_args, stdout=PIPE, stderr=STDOUT) - + try: # Read server output waiting for it to report that the server is running. with Timeout(TEST_TIMEOUT_SECONDS) as time_out: @@ -121,7 +121,7 @@ def test_probeserver(board_id=None, n=0): raise TestError("no more output from server") else: raise TestError("server failed to start") - + server_thread = threading.Thread(target=wait_with_deadline, args=[server_program, TEST_TIMEOUT_SECONDS]) server_thread.daemon = True server_thread.start() diff --git a/test/speed_test.py b/test/speed_test.py index 3947e9115..cf227597c 100644 --- a/test/speed_test.py +++ b/test/speed_test.py @@ -108,7 +108,7 @@ def speed_test(board_id): test_params = get_target_test_params(session) session.probe.set_clock(test_params['test_clock']) - + test_config = "uncached 8-bit" def test_ram(record_speed=False, width=8): @@ -185,44 +185,44 @@ def test_rom(record_speed=False, width=8): print("Reading %i byte took %.3f seconds: %.3f B/s" % (test_size, diff, read_speed)) print("TEST PASSED") return True - + # 8-bit without memcache passed = test_ram(True, 8) test_count += 1 test_pass_count += int(passed) - + passed = test_rom(True, 8) test_count += 1 test_pass_count += int(passed) - + # 32-bit 
without memcache test_config = "uncached 32-bit" passed = test_ram(False, 32) test_count += 1 test_pass_count += int(passed) - + passed = test_rom(False, 32) test_count += 1 test_pass_count += int(passed) - + # With memcache target = target.get_target_context() test_config = "cached 8-bit, pass 1" - + passed = test_ram() test_count += 1 test_pass_count += int(passed) - + passed = test_rom() test_count += 1 test_pass_count += int(passed) - + # Again with memcache test_config = "cached 8-bit, pass 2" passed = test_ram() test_count += 1 test_pass_count += int(passed) - + passed = test_rom() test_count += 1 test_pass_count += int(passed) diff --git a/test/test_util.py b/test/test_util.py index b856a2e96..9494e861a 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -155,7 +155,7 @@ def wait_with_deadline(process, timeout): class IOTee(object): def __init__(self, *args): self.outputs = list(args) - + def add(self, output): self.outputs.append(output) @@ -171,7 +171,7 @@ class RecordingLogHandler(logging.Handler): def __init__(self, iostream, level=logging.NOTSET): super(RecordingLogHandler, self).__init__(level) self.stream = iostream - + def emit(self, record): try: message = self.format(record) @@ -189,11 +189,11 @@ def __init__(self, test_board, test, result): self.name = "test" self.time = 0 self.output = "" - + @property def board(self): return self._board - + @board.setter def board(self, newBoard): self._board = newBoard.target_type if newBoard else 'unknown' @@ -220,7 +220,7 @@ def get_test_case(self): system_out = ElementTree.SubElement(case, 'system-out') system_out.text = self.filter_output(self.output) return case - + def filter_output(self, output): """! 
@brief Hex-encode null byte and control characters.""" result = six.text_type() @@ -285,4 +285,4 @@ def all_tests_pass(result_list): if len(result_list) <= 0: passed = False return passed - + diff --git a/test/unit/mockcore.py b/test/unit/mockcore.py index 0aa227d6c..cde0fa994 100644 --- a/test/unit/mockcore.py +++ b/test/unit/mockcore.py @@ -59,7 +59,7 @@ def __init__(self, has_fpu=True): if has_fpu: self.core_registers.add_group(CoreRegisterGroups.VFP_V5) self.clear_all_regs() - + def clear_all_regs(self): self.regs = {i:0 for i in self.core_registers.by_index.keys()} # r0-15, xpsr, msp, psp self.regs[CFBP_INDEX] = 0 diff --git a/test/unit/test_cmdline.py b/test/unit/test_cmdline.py index 9117697e9..9b66bedd4 100644 --- a/test/unit/test_cmdline.py +++ b/test/unit/test_cmdline.py @@ -45,7 +45,7 @@ def test_split_whitespace(self): assert split_command_line('a\rb') == ['a', 'b'] assert split_command_line('a\nb') == ['a', 'b'] assert split_command_line('a \tb') == ['a', 'b'] - + @pytest.mark.parametrize(("input", "result"), [ (r'\h\e\l\l\o', ['hello']), (r'"\"hello\""', ['"hello"']), @@ -79,14 +79,14 @@ def test_vc_str(self, vc, msk): [(six.b(x), y) for x,y in VECTOR_CATCH_CHAR_MAP.items()]) def test_vc_b(self, vc, msk): assert convert_vector_catch(vc) == msk - + class TestConvertSessionOptions(object): def test_empty(self): assert convert_session_options([]) == {} - + def test_unknown_option(self): assert convert_session_options(['dumkopf']) == {} - + def test_bool(self): assert convert_session_options(['auto_unlock']) == {'auto_unlock': True} assert convert_session_options(['no-auto_unlock']) == {'auto_unlock': False} @@ -97,12 +97,12 @@ def test_bool(self): assert convert_session_options(['auto_unlock=0']) == {'auto_unlock': False} assert convert_session_options(['auto_unlock=false']) == {'auto_unlock': False} assert convert_session_options(['auto_unlock=anything-goes-here']) == {'auto_unlock': False} - + def test_noncasesense(self): # Test separate paths for with 
and without a value. assert convert_session_options(['AUTO_Unlock']) == {'auto_unlock': True} assert convert_session_options(['AUTO_Unlock=0']) == {'auto_unlock': False} - + def test_int(self): # Non-bool with no value is ignored (and logged). assert convert_session_options(['frequency']) == {} @@ -114,7 +114,7 @@ def test_int(self): assert convert_session_options(['frequency=1000']) == {'frequency': 1000} # Valid hex int assert convert_session_options(['frequency=0x40']) == {'frequency': 64} - + def test_str(self): # Ignore with no value assert convert_session_options(['test_binary']) == {} @@ -122,5 +122,5 @@ def test_str(self): assert convert_session_options(['no-test_binary']) == {} # Valid assert convert_session_options(['test_binary=abc']) == {'test_binary': 'abc'} - + diff --git a/test/unit/test_compatibility.py b/test/unit/test_compatibility.py index 2aab071e1..9622aa9a4 100644 --- a/test/unit/test_compatibility.py +++ b/test/unit/test_compatibility.py @@ -30,11 +30,11 @@ def test_iter_single_bytes_bytes(self): assert next(i) == b'2' assert next(i) == b'3' assert next(i) == b'4' - + def test_to_bytes_safe(self): assert to_bytes_safe(b"hello") == b"hello" assert to_bytes_safe("string") == b"string" - + def test_to_str_safe(self): assert to_str_safe(b"bytes") == "bytes" assert to_str_safe("string") == "string" diff --git a/test/unit/test_conversion.py b/test/unit/test_conversion.py index 83db32b73..c683b5fd9 100644 --- a/test/unit/test_conversion.py +++ b/test/unit/test_conversion.py @@ -114,7 +114,7 @@ def test_nbit_le_list_to_byte_list_lg(self, w, i, r): def test_byte_list_to_u32le_list_empty(self): assert byte_list_to_u32le_list([]) == [] - + def test_byteListToU32leList(self): data = range(32) assert byte_list_to_u32le_list(data) == [ @@ -127,7 +127,7 @@ def test_byteListToU32leList(self): 0x1B1A1918, 0x1F1E1D1C, ] - + def test_byte_list_to_u32le_list_unaligned(self): assert byte_list_to_u32le_list([1]) == [0x00000001] assert byte_list_to_u32le_list([1, 
2, 3]) == [0x00030201] @@ -193,10 +193,10 @@ def test_hex8ToU32le(self): def test_hex16ToU64le(self): assert hex16_to_u64le("0102ABCD171819EF") == 0x0102ABCD171819EF - + def test_uint_to_hex_le_odd_width(self): assert uint_to_hex_le(0xd0102ABCD, 36) == "cdab02010d" - + def test_hex_le_to_uint_odd_width(self): assert hex_le_to_uint("0102ABCD0d", 36) == 0x0dCDAB0201 @@ -248,7 +248,7 @@ class TestGdbEscape(object): [six.int2byte(x) for x in range(256) if (x not in ESCAPEES)]) def test_escape_passthrough(self, data): assert escape(data) == data - + @pytest.mark.parametrize(("data", "expected"), [ (b'#', b'}\x03'), (b'$', b'}\x04'), @@ -257,16 +257,16 @@ def test_escape_passthrough(self, data): ]) def test_escape_1(self, data, expected): assert escape(data) == expected - + def test_escape_2(self): assert escape(b'1234#09*xyz') == b'1234}\x0309}\x0axyz' - + # Verify all chars that shouldn't be escaped pass through unmodified. @pytest.mark.parametrize("data", [six.int2byte(x) for x in range(256) if (x not in ESCAPEES)]) def test_unescape_passthrough(self, data): assert unescape(data) == [six.byte2int(data)] - + @pytest.mark.parametrize(("expected", "data"), [ (0x23, b'}\x03'), (0x24, b'}\x04'), @@ -275,7 +275,7 @@ def test_unescape_passthrough(self, data): ]) def test_unescape_1(self, data, expected): assert unescape(data) == [expected] - + def test_unescape_2(self): assert unescape(b'1234}\x0309}\x0axyz') == \ [0x31, 0x32, 0x33, 0x34, 0x23, 0x30, 0x39, 0x2a, 0x78, 0x79, 0x7a] @@ -283,9 +283,9 @@ def test_unescape_2(self): class TestPairwise(object): def test_empty(self): assert list(pairwise([])) == [] - + def test_str(self): assert list(pairwise('abcdef')) == [('a','b'), ('c','d'), ('e','f')] - + def test_int(self): assert list(pairwise([1, 2, 3, 4, 5, 6])) == [(1, 2), (3, 4), (5, 6)] diff --git a/test/unit/test_exceptions.py b/test/unit/test_exceptions.py index cb9c38e42..14e3ef05a 100644 --- a/test/unit/test_exceptions.py +++ b/test/unit/test_exceptions.py @@ -23,12 
+23,12 @@ class TestFaultError: def test_no_args(self): e = TransferFaultError() assert str(e) == 'Memory transfer fault' - + def test_no_args_set_addr(self): e = TransferFaultError() e.fault_address = 0x1000 assert str(e) == 'Memory transfer fault @ 0x00001000' - + def test_no_args_set_addr_len(self): e = TransferFaultError() e.fault_address = 0x1000 @@ -42,12 +42,12 @@ def test_msg(self): def test_arg_tuple(self): e = TransferFaultError(-1, 1234) assert str(e) == 'Memory transfer fault (-1, 1234)' - + def test_msg_ctor_addr(self): e = TransferFaultError("my bad", fault_address=0x20008400) assert e.fault_address == 0x20008400 assert str(e) == 'Memory transfer fault (my bad) @ 0x20008400' - + def test_msg_ctor_addr_len(self): e = TransferFaultError("my bad", fault_address=0x20008400, length=32) assert e.fault_address == 0x20008400 diff --git a/test/unit/test_graph.py b/test/unit/test_graph.py index 619c19ec7..bd9f06c24 100644 --- a/test/unit/test_graph.py +++ b/test/unit/test_graph.py @@ -22,7 +22,7 @@ class BaseNode(GraphNode): def __init__(self, value): super(BaseNode, self).__init__() self.value = value - + def __repr__(self): return "<{}@{:#010x} {}".format(self.__class__.__name__, id(self), self.value) @@ -83,7 +83,7 @@ def test_multiple_child(self): assert a.children == [] assert b.children == [] assert c.children == [] - + def test_multilevel(self, graph, a, b, c): assert len(graph.children) == 2 assert graph.children == [a, c] @@ -95,17 +95,17 @@ def test_multilevel(self, graph, a, b, c): assert c.parent is graph assert b.children == [] assert c.children == [] - + def test_find_breadth(self, graph, a, b, c): assert graph.find_children(lambda n: n.value == 1) == [b] assert graph.find_children(lambda n: n.value == 1 or n.value == 2) == [c, b] - + def test_find_depth(self, graph, a, b, c): assert graph.find_children(lambda n: n.value == 1, breadth_first=False) == [b] assert graph.find_children(lambda n: n.value == 1 or n.value == 2, breadth_first=False) == [b, 
c] - + def test_first(self, graph, a, b, c): assert graph.get_first_child_of_type(NodeA) == a assert graph.get_first_child_of_type(NodeB) == c assert a.get_first_child_of_type(NodeB) == b - + diff --git a/test/unit/test_memcache.py b/test/unit/test_memcache.py index 4d0abaf99..698c1efa4 100644 --- a/test/unit/test_memcache.py +++ b/test/unit/test_memcache.py @@ -135,7 +135,7 @@ def test_20_empty_read(self, memcache): def test_21_empty_write(self, memcache): memcache.write_memory_block8(128, []) - + # This test reproduces a bug where writes followed by reads will start # accumulating and returning extra data. def test_22_multi_write_read_size(self, memcache): @@ -173,14 +173,14 @@ def test_25_multi_write_subrange_1_read_size(self, memcache): memcache.write_memory_block8(64, data[64:96]) block = memcache.read_memory_block8(0, test_size) assert data == block - + def test_26_read_subrange(self, memcache): data = list((n % 256) for n in range(320)) memcache.write_memory_block8(0x20000000, data) block = memcache.read_memory_block8(0x2000007e, 4) assert block == data[0x7e:0x82] - + # TODO test read32/16/8 with and without callbacks diff --git a/test/unit/test_memory_map.py b/test/unit/test_memory_map.py index bef35d307..503c4b00b 100644 --- a/test/unit/test_memory_map.py +++ b/test/unit/test_memory_map.py @@ -94,27 +94,27 @@ def test_empty_range_1(self): assert range.start == 0x1000 assert range.end == 0xfff assert range.length == 0 - + def test_empty_range_2(self): range = MemoryRange(start=0x1000, end=0xfff) assert range.start == 0x1000 assert range.end == 0xfff assert range.length == 0 - + def test_eq(self): assert MemoryRange(0, length=1000) == MemoryRange(0, length=1000) - + def test_lt(self): assert MemoryRange(0, length=1000) < MemoryRange(1000, length=1000) - + def test_gt(self): assert MemoryRange(1000, length=1000) > MemoryRange(0, length=1000) - + def test_sort(self, ram1, ram2, flash, rom): regionList = [ram2, rom, flash, ram1] sortedRegionList = 
sorted(regionList) assert sortedRegionList == [flash, rom, ram1, ram2] - + def test_inplace_sort(self, ram1, ram2, flash, rom): regionList = [ram2, rom, flash, ram1] regionList.sort() @@ -135,7 +135,7 @@ def test_range_w_region_neq(self, ram1, rom): a = MemoryRange(0, 0x1000, region=ram1) b = MemoryRange(0x5000, length=200, region=rom) assert hash(a) != hash(b) - + a = MemoryRange(0, 0x1000, region=ram1) b = MemoryRange(0, 0x1000, region=rom) assert hash(a) != hash(b) @@ -150,11 +150,11 @@ class TestMemoryRegion: def test_empty_region_1(self): with pytest.raises(AssertionError): rgn = MemoryRegion(start=0x1000, length=0) - + def test_empty_region_2(self): with pytest.raises(AssertionError): rgn = MemoryRegion(start=0x1000, end=0xfff) - + def test_default_name(self): rgn = RamRegion(start=0x1000, end=0x1fff) assert rgn.name == 'ram' @@ -167,7 +167,7 @@ def test_default_name(self): rgn = DeviceRegion(start=0x1000, end=0x1fff) assert rgn.name == 'device' - + def test_block_sector_size(self): rgn = FlashRegion(start=0x1000, end=0x1fff, blocksize=256) assert rgn.blocksize == 256 @@ -176,7 +176,7 @@ def test_block_sector_size(self): rgn = FlashRegion(start=0x1000, end=0x1fff, sector_size=256) assert rgn.blocksize == 256 assert rgn.sector_size == 256 - + def test_flash_attrs(self, flash): assert flash.type == MemoryType.FLASH assert flash.start == 0 @@ -299,7 +299,7 @@ def test_intersects(self, ram1): assert ram1.intersects_range(0x20000020, length=0x10) assert ram1.intersects_range(0x1fff0000, end=0x20001000) assert ram1.intersects_range(0x1ffff000, length=0x40000) - + def test_copy_ram(self, ram1): ramcpy = copy.copy(ram1) assert ramcpy.type == ram1.type @@ -308,20 +308,20 @@ def test_copy_ram(self, ram1): assert ramcpy.length == ram1.length assert ramcpy.name == ram1.name assert ramcpy == ram1 - + def test_copy_flash(self, flash): flashcpy = copy.copy(flash) assert flashcpy == flash - + def test_copy_flash_with_flm(self, flash_with_flm): flashcpy = 
copy.copy(flash_with_flm) assert flashcpy == flash_with_flm - + def test_copy_flash_with_assigned_flm(self, flash): flash.flm = FLM_PATH flashcpy = copy.copy(flash) assert flashcpy == flash - + def test_clone_with_changes(self, flash, ram1): flashcpy = flash.clone_with_changes(name='another flash') assert flashcpy.name == 'another flash' @@ -333,10 +333,10 @@ def test_clone_with_changes(self, flash, ram1): assert acopy.start == 0x30000000 assert acopy.length == ram1.length assert acopy.end == 0x30000000 + ram1.length - 1 - + def test_eq(self, flash, ram1): assert flash != ram1 - + a = RamRegion(name='a', start=0x1000, length=0x2000) b = RamRegion(name='a', start=0x1000, length=0x2000) assert a == b @@ -420,23 +420,23 @@ def test_x(self): ) rgns = dualMap.get_intersecting_regions(0x1fffc9f8, end=0x1fffc9fc) assert len(rgns) > 0 - + def test_get_type_iter(self, memmap, flash, rom, ram1, ram2): assert list(memmap.iter_matching_regions(type=MemoryType.FLASH)) == [flash] assert list(memmap.iter_matching_regions(type=MemoryType.ROM)) == [rom] assert list(memmap.iter_matching_regions(type=MemoryType.RAM)) == [ram1, ram2] - + def test_match_iter(self, memmap, flash, ram1, ram2, ram_alias): assert list(memmap.iter_matching_regions(blocksize=0x100)) == [flash] assert list(memmap.iter_matching_regions(start=0x20000000)) == [ram1] - + def test_first_match(self, memmap, flash, ram2): assert memmap.get_first_matching_region(blocksize=0x100) == flash assert memmap.get_first_matching_region(length=1024, is_cacheable=False) == ram2 - + def test_alias(self, memmap2, ram2, ram_alias): assert ram_alias.alias is ram2 - + def test_get_default(self, memmap, flash, ram2): assert memmap.get_default_region_of_type(MemoryType.FLASH) == flash assert memmap.get_default_region_of_type(MemoryType.RAM) == ram2 @@ -448,14 +448,14 @@ def test_index_by_num(self, memmap, flash, ram2): def test_index_by_name(self, memmap, rom, ram2): assert memmap['rom'] == rom assert memmap['ram2'] == ram2 - + def 
test_clone(self, memmap): mapcpy = memmap.clone() assert id(mapcpy) != id(memmap) assert id(mapcpy.get_first_matching_region(type=MemoryType.RAM)) != \ id(memmap.get_first_matching_region(type=MemoryType.RAM)) assert mapcpy == memmap - + def test_contains_int(self, memmap): assert 0x200 in memmap assert 0x20000100 in memmap @@ -463,32 +463,32 @@ def test_contains_int(self, memmap): assert 0x1c000040 in memmap assert 0x80000000 not in memmap assert 0xffffffff not in memmap - + def test_contains_name(self, memmap): assert 'rom' in memmap assert 'flash' in memmap assert 'funky' not in memmap assert '' not in memmap - + def test_contains_region(self, memmap, flash): assert flash in memmap - + def test_len(self, memmap): assert len(memmap) == 4 - + def test_iter(self, memmap): assert [r.name for r in memmap] == ['flash', 'rom', 'ram', 'ram2'] - + def test_reversed(self, memmap): assert [r.name for r in reversed(memmap)] == ['ram2', 'ram', 'rom', 'flash'] - + def test_abc(self): assert isinstance(MemoryMap(), collections.abc.Sequence) - + def test_name_uniquing(self, memmap): memmap.add_region(RomRegion(0x01000000, length=0x8000, name="rom")) assert memmap.get_first_matching_region(name="rom_1").start == 0x01000000 - + def test_multiple_unnamed_regions(self): map = MemoryMap(RomRegion(0x01000000, length=0x8000), RamRegion(0x20000000, length=0x8000)) diff --git a/test/unit/test_mockcore.py b/test/unit/test_mockcore.py index 498f938b4..9c2549da0 100644 --- a/test/unit/test_mockcore.py +++ b/test/unit/test_mockcore.py @@ -65,7 +65,7 @@ def test_rw_r0_r15(self, mockcore): mockcore.write_core_registers_raw([r], [1+r]) for r in range(0, 16): assert mockcore.read_core_registers_raw([r]) == [1+r] - + def test_rw_cfbp(self, mockcore): mockcore.write_core_registers_raw([index_for_reg('cfbp')], [0x01020304]) assert mockcore.read_core_registers_raw([ diff --git a/test/unit/test_notification.py b/test/unit/test_notification.py index 4d60d35b6..b6d8877c4 100644 --- 
a/test/unit/test_notification.py +++ b/test/unit/test_notification.py @@ -28,7 +28,7 @@ class Subscriber(object): def __init__(self): self.was_called = False self.last_note = None - + def cb(self, note): self.was_called = True self.last_note = note @@ -111,4 +111,4 @@ def test_unsub_src(self, notifier, subscriber): notifier.notify(EVENT_A, self) assert not subscriber.was_called - + diff --git a/test/unit/test_options_manager.py b/test/unit/test_options_manager.py index beef8d8a4..518d2a96d 100644 --- a/test/unit/test_options_manager.py +++ b/test/unit/test_options_manager.py @@ -73,14 +73,14 @@ def test_convert_double_underscore(self, mgr): mgr.add_back({'debug__traceback': False}) assert 'debug.traceback' in mgr assert mgr.get('debug.traceback') == False - + def test_set(self, mgr, layer1): mgr.add_front(layer1) mgr.set('buzz', 1234) assert mgr['buzz'] == 1234 mgr.add_front({'buzz': 4321}) assert mgr.get('buzz') == 4321 - + def test_update(self, mgr, layer1, layer2): mgr.add_front(layer1) mgr.add_front(layer2) @@ -121,4 +121,4 @@ def cb(note): mgr.add_back(layer2) assert flag[0] == False - + diff --git a/test/unit/test_pack.py b/test/unit/test_pack.py index 2ac55468a..1608331b5 100644 --- a/test/unit/test_pack.py +++ b/test/unit/test_pack.py @@ -109,14 +109,14 @@ class Disabled_TestPack(object): def test_get_installed(self, pack_ref): p = pack_target.ManagedPacks.get_installed_packs() assert p == [pack_ref] - + def test_get_targets(self, k64dev): assert k64dev.part_number == K64F - + def test_pop_managed_k64(self): pack_target.ManagedPacks.populate_target(K64F) assert K64F.lower() in TARGET - + def test_k64_mem_map(self, k64dev): map = k64dev.memory_map raml = map.get_region_for_address(0x1fff0000) @@ -126,14 +126,14 @@ def test_k64_mem_map(self, k64dev): assert ramu.start == 0x20000000 and ramu.length == 0x30000 assert flash.start == 0 and flash.length == 0x100000 assert flash.sector_size == 0x1000 - + class TestPack(object): def test_devices(self, k64pack): devs 
= k64pack.devices pns = [x.part_number for x in devs] assert "MK64FN1M0xxx12" in pns assert "MK64FX512xxx12" in pns - + # Make sure CmsisPack can open a zip file too. def test_zipfile(self): z = zipfile.ZipFile(K64F_PACK_PATH, 'r') @@ -145,19 +145,19 @@ def test_parse_device_info(self, k64f1m0): assert k64f1m0.vendor == "NXP" assert k64f1m0.families == ["MK64F12"] assert k64f1m0.default_reset_type == target.Target.ResetType.SW - + def test_get_svd(self, k64f1m0): svd = k64f1m0.svd x = ElementTree.parse(svd) assert x.getroot().tag == 'device' - + def test_mem_map(self, k64f1m0): map = k64f1m0.memory_map bm = map.get_boot_memory() assert bm.start == 0 and bm.length == 1 * 1024 * 1024 ram = map.get_default_region_of_type(memory_map.MemoryType.RAM) assert ram.start == 0x20000000 and ram.length == 0x30000 - + # Verify the flash region was converted correctly. def test_flash(self, k64f1m0): map = k64f1m0.memory_map @@ -165,7 +165,7 @@ def test_flash(self, k64f1m0): assert isinstance(flash, memory_map.FlashRegion) assert flash.start == 0 and flash.length == 1 * 1024 * 1024 assert flash.sector_size == 4096 - + class TestFLM(object): def test_algo(self, k64algo): i = k64algo.flash_info @@ -175,7 +175,7 @@ def test_algo(self, k64algo): assert i.size == 1 * 1024 * 1024 assert i.page_size == 512 assert i.sector_info_list == [(0, 4 * 1024)] - + def test_algo_dict(self, k64algo, k64f1m0): map = k64f1m0.memory_map ram = map.get_default_region_of_type(memory_map.MemoryType.RAM) @@ -190,7 +190,7 @@ def test_algo_dict(self, k64algo, k64f1m0): assert d['pc_eraseAll'] == ram.start + STACK_SIZE + 0x95 assert d['pc_erase_sector'] == ram.start + STACK_SIZE + 0xcb assert d['pc_program_page'] == ram.start + STACK_SIZE + 0xdf - + def has_overlapping_regions(memmap): return any((len(memmap.get_intersecting_regions(r.start, r.end)) > 1) for r in memmap.regions) @@ -198,7 +198,7 @@ class TestNRF(): def test_regions(self, nrf5340): memmap = nrf5340.memory_map assert not 
has_overlapping_regions(memmap) - + class TestSTM32L4(): def test_regions(self, stm32l4r5): memmap = stm32l4r5.memory_map diff --git a/test/unit/test_regcache.py b/test/unit/test_regcache.py index 0fc152385..7ffa1be79 100644 --- a/test/unit/test_regcache.py +++ b/test/unit/test_regcache.py @@ -82,7 +82,7 @@ def set_core_regs(self, mockcore, modify=False): modifier = 0 mockcore.write_core_registers_raw([r], [get_expected_reg_value(r) + modifier]) assert mockcore.read_core_registers_raw([r]) == [get_expected_reg_value(r) + modifier] - + def test_r_1(self, mockcore, regcache): assert regcache.read_core_registers_raw(['r0']) == [0] # cache initial value of 0 mockcore.write_core_registers_raw(['r0'], [1234]) # modify reg behind the cache's back @@ -91,7 +91,7 @@ def test_r_1(self, mockcore, regcache): regcache.invalidate() # explicitly invalidate cache assert mockcore.read_core_registers_raw(['r0']) == [1234] # verify modified reg assert regcache.read_core_registers_raw(['r0']) == [1234] # now should return updated 1234 value - + def test_run_token(self, mockcore, regcache): assert regcache.read_core_registers_raw(['r0']) == [0] # cache initial value of 0 mockcore.write_core_registers_raw(['r0'], [1234]) # modify reg behind the cache's back @@ -136,7 +136,7 @@ def test_read_cached_cfbp(self, mockcore, regcache): mockcore.write_core_registers_raw(['control', 'primask'], [0x55, 0xaa]) # cache should return original value assert regcache.read_core_registers_raw(['cfbp']) == [get_expected_cfbp()] - + def test_read_cached_xpsr(self, mockcore, regcache): self.set_core_regs(mockcore) # cache it @@ -153,14 +153,14 @@ def test_write_1(self, mockcore, regcache): regcache.write_core_registers_raw(['r0'], [1234]) assert mockcore.read_core_registers_raw(['r0']) == [1234] assert regcache.read_core_registers_raw(['r0']) == [1234] - + def test_write_regs(self, mockcore, regcache): self.set_core_regs(mockcore) for r in core_regs_composite_regs(mockcore): 
regcache.write_core_registers_raw([r], [get_expected_reg_value(r) + get_modifier(r)]) for r in core_regs_composite_regs(mockcore): assert mockcore.read_core_registers_raw([r]) == [get_expected_reg_value(r) + get_modifier(r)] - + def test_write_cfbp(self, mockcore, regcache): self.set_core_regs(mockcore) assert mockcore.read_core_registers_raw(['cfbp']) == [get_expected_cfbp()] @@ -170,7 +170,7 @@ def test_write_cfbp(self, mockcore, regcache): ((3 << 24) | (get_expected_reg_value('faultmask') << 16) | (get_expected_reg_value('basepri') << 8) | 19) ] - + def test_write_xpsr(self, mockcore, regcache): self.set_core_regs(mockcore) assert mockcore.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()] @@ -196,16 +196,16 @@ def test_invalid_reg_r(self, regcache): def test_invalid_reg_w(self, regcache): with pytest.raises(KeyError): regcache.write_core_registers_raw([132423], [1234]) - + def test_invalid_fpu_reg_r(self, regcache_no_fpu): with pytest.raises(KeyError): regcache_no_fpu.read_core_registers_raw(['s1']) - + def test_invalid_fpu_reg_w(self, regcache_no_fpu): with pytest.raises(KeyError): regcache_no_fpu.write_core_registers_raw(['s1'], [1.234]) - + diff --git a/test/unit/test_rom_table.py b/test/unit/test_rom_table.py index 5f2c6654d..4902f86c1 100644 --- a/test/unit/test_rom_table.py +++ b/test/unit/test_rom_table.py @@ -32,7 +32,7 @@ class MockCoreForMemCache(CoreSightCoreComponent): """! @brief Just enough of a core to satisfy MemoryCache. - + Most importantly, it defines a memory map with a single RAM region covering almost the full 4 GB address space. """ @@ -49,18 +49,18 @@ def is_running(self): class RomMemory(MemoryCache, MemoryInterface): """! @brief Memory interface for reading constant values. - + Uses the memory cache as readily-available component to store data at fixed addresses. We just have to make sure the cache is never invalidated. """ def __init__(self, ranges): """! @brief Constructor. 
- + @param self @param ranges Dict of start address -> list of word values. """ super(RomMemory, self).__init__(MockDebugContext(), MockCoreForMemCache()) - + # Fill in cache with data from ranges. for addr, data in ranges.items(): self.write_memory_block32(addr, data) @@ -73,25 +73,25 @@ class MockCoreSight(RomMemory): """! @brief RomMemory based on a list of MockCoreSightComponent objects.""" def __init__(self, components): """! @brief Constructor. - + @param self @param components List of component dicts, where each component dict consists of start address -> list of word values. """ ranges = {base: data for c in components for base, data in c.data.items()} super(MockCoreSight, self).__init__(ranges) - + @property def short_description(self): return "MockCoreSight" class MockCoreSightComponent(object): """! @brief Generates a data dict from CoreSight component ID register values.""" - + # Start offset within the 4 kB CoreSight component memory window of the ID registers # we care about, particularly those read by CoreSightComponentID. CMPID_REGS_OFFSET = 0xfbc - + def __init__(self, base, cidr, pidr, **kwargs): """! @brief Constructor. @param self @@ -111,7 +111,7 @@ def __init__(self, base, cidr, pidr, **kwargs): self._devid = kwargs.get('devid', [0, 0, 0]) self._devtype = kwargs.get('devtype', 0) self._extra = kwargs.get('extra', {}) - + @property def data(self): d = self._extra.copy() @@ -140,7 +140,7 @@ def data(self): class MockM4Components: """! @ brief Namespace for mock Cortex-M4 Class 0x1 ROM table and core complex components.""" - + # ROM table #0 @ 0xe00ff000 (designer=244 part=00d) M4_ROM_TABLE_BASE = 0xe00ff000 M4_ROM_TABLE = MockCoreSightComponent(M4_ROM_TABLE_BASE, cidr=0xb105100d, pidr=0x4000bb4c4, @@ -179,10 +179,10 @@ class MockM4Components: # [5] ETM_BASE = 0xe0041000 ETM = MockCoreSightComponent(ETM_BASE, cidr=0xb105900d, pidr=0x4000bb925, devtype=0x13) - + class MockCSSOC600Components: """! 
@ brief Namespace for mock Class 0x9 ROM table and CoreSight SoC-600 components.""" - + C9_ROM_TABLE_BASE = 0x00000000 C9_ROM_TABLE = MockCoreSightComponent(C9_ROM_TABLE_BASE, cidr=0xb105900d, pidr=0x4000bb7d5, devarch=0x47700af7, devid=[0x20, 0, 0], @@ -194,13 +194,13 @@ class MockCSSOC600Components: 0x00000000, 0x00000000, 0x00000000, 0x00000000, # (extra) ], }) - + SDC600_BASE = 0x00001000 SDC600 = MockCoreSightComponent(SDC600_BASE, cidr=0xb105900d, pidr=0x4000bb9ef, devarch=0x47700a57) - + C9_AHB_AP_BASE = 0x00002000 C9_AHB_AP = MockCoreSightComponent(C9_AHB_AP_BASE, cidr=0xb105900d, pidr=0x4002bb9e3, devarch=0x47700a17) - + # Complete set of components for a Cortex-M4 subsystem. @pytest.fixture(scope='function') @@ -266,7 +266,7 @@ def test_rb32(self, testrom): def test_rb8(self, testrom): assert testrom.read_memory_block8(0x1008, 6) == [3, 0, 0, 0, 4, 0] assert testrom.read_memory_block8(0x4001, 6) == [0, 0, 0, 0xef, 0xbe, 00] - + class TestMockCoreSight: def test_1(self, testcoresight): assert testcoresight.read32(0xe00ff000) == 0xfff0f003 @@ -284,7 +284,7 @@ def test_scs(self): assert cmp.part == 0xc assert cmp.devarch == 0 assert cmp.devid == [0, 0, 0] - + # Test parsing a CoreSight (class 9) component in isolation. def test_etm(self): cmp = CoreSightComponentID(None, MockCoreSight([MockM4Components.ETM]), @@ -297,7 +297,7 @@ def test_etm(self): assert cmp.archid == 0 assert cmp.devarch == 0 assert cmp.devid == [0, 0, 0] - + # Test parsing a CoreSight (class 9) component with a DEVID. def test_tpiu(self): cmp = CoreSightComponentID(None, MockCoreSight([MockM4Components.TPIU]), @@ -310,7 +310,7 @@ def test_tpiu(self): assert cmp.archid == 0 assert cmp.devarch == 0 assert cmp.devid == [0xca1, 0, 0] - + # Test parsing a Class 0x9 ROM table. def test_c9_rom(self): cmp = CoreSightComponentID(None, MockCoreSight([MockCSSOC600Components.C9_ROM_TABLE]), @@ -325,32 +325,32 @@ def test_m4_rom(self, m4_rom): # Read ROM table component ID. 
cmpid = CoreSightComponentID(None, m4_rom, MockM4Components.M4_ROM_TABLE_BASE) cmpid.read_id_registers() - + # Create the ROM table. rom_table = ROMTable.create(m4_rom, cmpid) rom_table.init() - + # Verify all components were parsed. assert len(rom_table.components) == 6 - + # Check SCS-M4. scs = rom_table.components[0] assert scs.component_class == 14 assert scs.designer == 0x43b assert scs.part == 0xc - + # Check TPIU. tpiu = rom_table.components[4] assert tpiu.component_class == 9 assert tpiu.part == 0x9a1 assert tpiu.devid == [0xca1, 0, 0] - + # Test a Class 0x9 ROM table and CS-600 components. def test_c9_rom(self, c9_top_rom): # Read ROM table component ID. cmpid = CoreSightComponentID(None, c9_top_rom, MockCSSOC600Components.C9_ROM_TABLE_BASE) cmpid.read_id_registers() - + # Create the ROM table. rom_table = ROMTable.create(c9_top_rom, cmpid) rom_table.init() @@ -363,14 +363,14 @@ def test_c9_rom(self, c9_top_rom): # Validate components. assert len(rom_table.components) == 2 - + # Validate SDC-600. sdc = rom_table.components[0] assert sdc.component_class == 9 assert sdc.designer == 0x43b assert sdc.part == 0x9ef assert sdc.archid == 0xa57 - + # Validate AHB-AP. ahb = rom_table.components[1] assert ahb.component_class == 9 diff --git a/test/unit/test_sdc600.py b/test/unit/test_sdc600.py index 66ceb06e2..91ebb294e 100644 --- a/test/unit/test_sdc600.py +++ b/test/unit/test_sdc600.py @@ -74,7 +74,7 @@ def test_destuff_nonflag(self, sdc): if i in FLAGS: continue assert sdc._destuff([i]) == [i] - + # Test stuffing a single byte. def test_stuff_flag(self, sdc): for i in FLAGS: diff --git a/test/unit/test_semihosting.py b/test/unit/test_semihosting.py index 6d799e968..d99658fcd 100644 --- a/test/unit/test_semihosting.py +++ b/test/unit/test_semihosting.py @@ -98,7 +98,7 @@ def run_til_halt(tgt, semihostagent): class RecordingSemihostIOHandler(semihost.SemihostIOHandler): """! @brief Semihost IO handler that records output. 
- + This handler is only meant to be used for console I/O since it doesn't implement open() or close(). """ @@ -479,7 +479,7 @@ def test_istty_non_stdio(self, semihost_builder, delete_testfile): # telnet.stop() # request.addfinalizer(stopit) # return telnet -# +# # @pytest.fixture(scope='function') # def semihost_telnet_agent(ctx, telnet, request): # agent = semihost.SemihostAgent(ctx, console=telnet) @@ -487,11 +487,11 @@ def test_istty_non_stdio(self, semihost_builder, delete_testfile): # agent.cleanup() # request.addfinalizer(cleanup) # return agent -# +# # @pytest.fixture(scope='function') # def semihost_telnet_builder(tgt, semihost_telnet_agent, ramrgn): # return SemihostRequestBuilder(tgt, semihost_telnet_agent, ramrgn) -# +# # @pytest.fixture(scope='function') # def telnet_conn(request): # from time import sleep @@ -502,47 +502,47 @@ def test_istty_non_stdio(self, semihost_builder, delete_testfile): # telnet.close() # request.addfinalizer(cleanup) # return telnet -# +# # class TestSemihostingTelnet: # def test_connect(self, semihost_telnet_builder, telnet_conn): # result = semihost_telnet_builder.do_no_args_call(semihost.TARGET_SYS_ERRNO) # assert result == 0 -# +# # def test_write(self, semihost_telnet_builder, telnet_conn): # result = semihost_telnet_builder.do_write(semihost.STDOUT_FD, 'hello world') # assert result == 0 -# +# # index, _, text = telnet_conn.expect(['hello world']) # assert index != -1 # assert text == 'hello world' -# +# # def test_writec(self, semihost_telnet_builder, telnet_conn): # for c in 'xyzzy': # result = semihost_telnet_builder.do_writec(c) # assert result == 0 -# +# # index, _, text = telnet_conn.expect([c]) # assert index != -1 # assert text == c -# +# # def test_write0(self, semihost_telnet_builder, telnet_conn): # result = semihost_telnet_builder.do_write0('hello world') # assert result == 0 -# +# # index, _, text = telnet_conn.expect(['hello world']) # assert index != -1 # assert text == 'hello world' -# +# # def 
test_read(self, semihost_telnet_builder, telnet_conn): # telnet_conn.write('hello world') -# +# # result, data = semihost_telnet_builder.do_read(semihost.STDIN_FD, 11) # assert result == 0 # assert data == 'hello world' -# +# # def test_readc(self, semihost_telnet_builder, telnet_conn): # telnet_conn.write('xyz') -# +# # for c in 'xyz': # rc = semihost_telnet_builder.do_no_args_call(semihost.TARGET_SYS_READC) # assert chr(rc) == c diff --git a/test/unit/test_sequencer.py b/test/unit/test_sequencer.py index df1308738..e612561e4 100644 --- a/test/unit/test_sequencer.py +++ b/test/unit/test_sequencer.py @@ -24,7 +24,7 @@ class TestCallSequence: def test_empty(self): cs = CallSequence() assert cs.count == 0 - + def test_a(self): results = [] cs = CallSequence( @@ -42,11 +42,11 @@ def test_append_1(self): ) assert cs.count == 1 - cs.append( + cs.append( ('b', lambda : results.append('b ran')), ) assert cs.count == 2 - + cs.invoke() assert results == ['a ran', 'b ran'] @@ -57,12 +57,12 @@ def test_append_2(self): ) assert cs.count == 1 - cs.append( + cs.append( ('b', lambda : results.append('b ran')), ('c', lambda : results.append('c ran')), ) assert cs.count == 3 - + cs.invoke() assert results == ['a ran', 'b ran', 'c ran'] @@ -73,10 +73,10 @@ def test_remove_1(self): ('b', lambda : results.append('b ran')), ) assert cs.count == 2 - + cs.remove_task('b') assert cs.count == 1 - + cs.invoke() assert results == ['a ran'] diff --git a/test/unit/test_strings_utility.py b/test/unit/test_strings_utility.py index 88b6f22e1..f6ad7590a 100644 --- a/test/unit/test_strings_utility.py +++ b/test/unit/test_strings_utility.py @@ -21,31 +21,31 @@ class TestUniquifyName: def test_empty_with_no_others(self): assert uniquify_name('', []) == '' - + def test_empty_with_others(self): assert uniquify_name('', ['bar', 'buz']) == '' - + def test_empty_with_another_empty(self): assert uniquify_name('', ['bar', 'buz', '']) == '_1' - + def test_no_others(self): assert uniquify_name('foo', []) == 
'foo' - + def test_already_unique(self): assert uniquify_name('foo', ['bar']) == 'foo' - + def test_no_trailing_int(self): assert uniquify_name('foo', ['foo']) == 'foo_1' - + def test_1_trailing_int(self): assert uniquify_name('foo1', ['foo1']) == 'foo2' - + def test_multiple_trailing_ints(self): assert uniquify_name('foo1', ['foo1', 'foo2']) == 'foo3' - + def test_name_has_int(self): assert uniquify_name('foo2', ['foo2', 'bar', 'foo3']) == 'foo4' - + def test_multiple_ints_in_name(self): assert uniquify_name('baz 2 monkey-3', ['fun', 'baz 2 monkey-3']) == 'baz 2 monkey-4' diff --git a/test/unit/test_timeout.py b/test/unit/test_timeout.py index 216f3bfff..f889ef42b 100644 --- a/test/unit/test_timeout.py +++ b/test/unit/test_timeout.py @@ -39,7 +39,7 @@ def test_timeout_a(self): sleep(0.01) assert to.did_time_out assert (time() - s) >= 0.05 - + def test_timeout_b(self): timedout = False s = time() @@ -53,7 +53,7 @@ def test_timeout_b(self): assert timedout assert to.did_time_out assert (time() - s) >= 0.05 - + def test_timeout_c(self): timedout = False with Timeout(0.05) as to: @@ -64,7 +64,7 @@ def test_timeout_c(self): cnt += 1 assert not timedout assert not to.did_time_out - + def test_timeout_reset(self): cnt = 0 cnta = 0 diff --git a/test/user_script_test.py b/test/user_script_test.py index 5d4f962aa..dc2e42097 100644 --- a/test/user_script_test.py +++ b/test/user_script_test.py @@ -70,7 +70,7 @@ def user_script_test(board_id): boot_region = memory_map.get_boot_memory() ram_region = memory_map.get_default_region_of_type(MemoryType.RAM) binary_file = get_test_binary_path(board.test_binary) - + test_pass_count = 0 test_count = 0 result = UserScriptTestResult() @@ -79,10 +79,10 @@ def user_script_test(board_id): target.resume() target.halt() target.step() - + test_count += 1 test_pass_count += 1 - + print("\nTest Summary:") print("Pass count %i of %i tests" % (test_pass_count, test_count)) if test_pass_count == test_count: From 
8f994d42fb49d188a6a10f2a46b3771829ec61ef Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 29 Nov 2021 13:09:42 -0600 Subject: [PATCH 031/123] docs: readme, installing, dev guide updates. - Replace install by setup.py with pip. - Show only built-in venv module usage in dev guide. - Clarify branch policy. - Note about setuptools_scm version override. - Link to contributing guide. - Fix some links that still referenced master. --- README.md | 2 +- docs/developers_guide.md | 59 ++++++++++++++++++++++++++++++---------- docs/installing.md | 23 ++++++++++------ 3 files changed, 61 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index dd15d2ce5..1c7d04b49 100644 --- a/README.md +++ b/README.md @@ -128,7 +128,7 @@ $ python3 -mpip install --pre -U git+https://github.com/pyocd/pyOCD.git@develop You can also install directly from the source by cloning the git repository and running: ``` -$ python3 setup.py install +$ python3 pip install . ``` Note that, depending on your operating system, you may run into permissions issues running these commands. diff --git a/docs/developers_guide.md b/docs/developers_guide.md index 142911e46..f00b655d3 100644 --- a/docs/developers_guide.md +++ b/docs/developers_guide.md @@ -4,8 +4,13 @@ title: Developers' guide ## Setup +
+Please familiarise yourself with the [contributing guide](https://github.com/pyocd/pyOCD/blob/main/CONTRIBUTING.md) +before beginning any development on pyOCD or related projects. +
+ PyOCD developers are strongly recommended to setup a working environment using either -[virtualenv](https://virtualenv.pypa.io/en/latest/) or the built-in `venv` module (only use of virtualenv is shown +[virtualenv](https://virtualenv.pypa.io/en/latest/) or the built-in `venv` module (only use of `venv` is shown below, but the two are equivalent). After cloning the code, you can setup a virtualenv and install the pyOCD dependencies for the current platform by following the detailed steps below. @@ -15,7 +20,6 @@ Install the necessary tools listed below. Skip any step where a compatible tool * Note that on Windows, the 32-bit Python 2.7 must be installed for the Python-enabled `arm-none-eabi-gdb-py` to work properly and for the `test/gdb_test.py` functional test to pass. * [Install Git](https://git-scm.com/downloads). Add to PATH. -* [Install virtualenv](https://virtualenv.pypa.io/en/latest/) in your global Python installation, eg: `pip install virtualenv`. Not needed if using the built-in `venv` module. * [Install GNU Arm Embedded toolchain](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm). This provides `arm-none-eabi-gdb` used for testing the gdbserver. Add to PATH. @@ -26,7 +30,7 @@ Install the necessary tools listed below. Skip any step where a compatible tool ``` $ git clone https://github.com/pyocd/pyOCD $ cd pyOCD -$ virtualenv venv +$ python3 -m venv venv ``` **Step 2.** Activate virtual environment @@ -37,24 +41,39 @@ the following. Linux or Mac: ``` $ source venv/bin/activate -$ pip install -e .[test] ``` Windows: ``` $ venv\Scripts\activate +``` + +**Step 3.** Install editable pyOCD + +``` $ pip install -e .[test] ``` -**Step 3.** Develop +If you switch branches, you may need to reinstall. + +Because the `develop` branch doesn't have version tags except older tags from the `develop` branch point, +the version number of pyOCD might be significantly out of date. 
If this is an issue, you can override the +version by setting the `SETUPTOOLS_SCM_PRETEND_VERSION` environmment variable to the desired version number +(without a "v" prefix). + +**Step 4.** Develop See the [porting guide]({% link _docs/adding_new_targets.md %}) for how to add new devices. Of course, we welcome all improvements and changes. See the [contributor statement](https://github.com/pyocd/pyOCD/blob/main/CONTRIBUTING.md) for some guidelines. -Normally you should work from the `develop` branch. See the [branch policy](#branch-configuration-policy) below for -more information about branches. +See the [branch policy](#branch-configuration-policy) below for details about branches and which branch you should +work from. + +If you'd like suggestions for something to work on, from small to large, the +[Slack](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg) workspace is a great +way to engage with the community and maintainers. -**Step 4.** Test +**Step 5.** Test To run the unit tests, you can execute the following. @@ -76,21 +95,33 @@ $ cd test $ python ./automated_test.py ``` -**Step 5.** Pull request +**Step 6.** Pull request Once you are satisfied with your changes and all automated tests pass, please create a -[new pull request](https://github.com/pyocd/pyOCD/pull/new/develop) against the `develop` branch on GitHub to share your work. +[new pull request](https://github.com/pyocd/pyOCD/pull/new) on GitHub to share your work. Please see below for +which branch to target. -Pull requests should be made after a changeset is [rebased onto `develop`](https://www.atlassian.com/git/tutorials/merging-vs-rebasing/workflow-walkthrough). +Pull requests should be made after a changeset is +[rebased](https://www.atlassian.com/git/tutorials/merging-vs-rebasing/workflow-walkthrough). ## Branch configuration policy There are two primary branches: -- `main`: Stable branch reflecting the most recent release. 
-- `develop`: Active development branch for the next version. Merged into `main` at release time. +- `main`: Stable branch reflecting the most recent release. May contain bug fixes not yet released, but no new + feature commits are allowed. +- `develop`: Active development branch for the next minor version. Merged into `main` at release time. There may be other development branches present to host long term development of major new features and backwards incompatible changes, such as API changes. -Changes should generally be made against the `develop` branch. +The branch that your changes should be made against depends on the type and complexity of the changes: + +- Only a bug fix: please target `main`. +- Any other changes, or a mix of changes: target the `develop` branch. This is also a good choice if you aren't sure. + +Maintainers will cherry-pick commits between `main` and `develop` as necessary to keep fixes in sync. + +If you have any questions about how best to submit changes or the branch policy, please ask in the +[Slack](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg) workspace or +[GitHub Discussions](https://github.com/pyocd/pyOCD/discussions). We'll be happy to help. diff --git a/docs/installing.md b/docs/installing.md index ccc487587..43081ed56 100644 --- a/docs/installing.md +++ b/docs/installing.md @@ -5,7 +5,7 @@ title: Installing PyOCD requires [Python](https://python.org/) 3.6 or later, and a recent version of [libusb](https://libusb.info/). It runs on macOS, Linux, FreeBSD, and Windows platforms. 
-The latest stable version of pyOCD may be installed via [pip](https://pip.pypa.io/en/stable/index.html) +The latest stable version of pyOCD may be installed or upgraded via [pip](https://pip.pypa.io/en/stable/index.html) as follows: ``` @@ -14,22 +14,25 @@ $ python3 -mpip install -U pyocd _Note: depending on your system, you may need to use `python` instead of `python3`._ -The latest pyOCD package is available [on PyPI](https://pypi.python.org/pypi/pyOCD/) as well as -[on GitHub](https://github.com/pyocd/pyOCD/releases). +The latest pyOCD package is available [on PyPI](https://pypi.python.org/pypi/pyOCD/). The +[GitHub releases](https://github.com/pyocd/pyOCD/releases) page details changes between versions. -To install the latest prerelease version from the HEAD of the master branch, you can do +To install the latest prerelease version from the HEAD of the `develop` branch, you can do the following: ``` -$ python3 -mpip install --pre -U git+https://github.com/pyocd/pyOCD.git +$ python3 -mpip install --pre -U git+https://github.com/pyocd/pyOCD.git@develop ``` You can also install directly from the source by cloning the git repository and running: ``` -$ python3 setup.py install +$ python3 -mpip install . ``` +See the [developer's guide]({% link _docs/developers_guide.md %}) for more about setting up a development +environment for pyOCD. + Note that, depending on your operating system, you may run into permissions issues running these commands. You have a few options here: @@ -43,6 +46,10 @@ You have a few options here: For notes about installing and using on non-x86 systems such as Raspberry Pi, see the [relevant documentation]({% link _docs/installing_on_non_x86.md %}). +(Note: Installing by running `setup.py` directly is deprecated since pyOCD migrated to PEP 517 based packaging. +In many cases it will not work at all. Installing with pip or another standards-compliant tool is the only +supported method.) 
+ udev rules on Linux ------------------- @@ -52,8 +59,8 @@ probes from user space. Otherwise you will need to run pyOCD as root, using sudo highly discouraged. (You should _never_ run pyOCD as root on any OS.) To help with this, example udev rules files are included with pyOCD in the -[udev](https://github.com/pyocd/pyOCD/tree/master/udev) folder. The -[readme](https://github.com/pyocd/pyOCD/tree/master/udev/README.md) in this folder has detailed +[udev](https://github.com/pyocd/pyOCD/tree/main/udev) folder. The +[readme](https://github.com/pyocd/pyOCD/tree/main/udev/README.md) in this folder has detailed instructions. From c90d7bcb7274d5ebecc50e45dc36e6f7e0922c2d Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 29 Nov 2021 13:14:45 -0600 Subject: [PATCH 032/123] target family: imxrt: fix log exception if vectable_addr is None. (#1251) --- pyocd/target/family/target_imxrt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyocd/target/family/target_imxrt.py b/pyocd/target/family/target_imxrt.py index 2dc10f5c6..fa6144de8 100644 --- a/pyocd/target/family/target_imxrt.py +++ b/pyocd/target/family/target_imxrt.py @@ -107,7 +107,7 @@ def set_reset_catch(self, reset_type=None): value = self.read_memory(CortexM.DEMCR) self.write_memory(CortexM.DEMCR, (value & (~0x00000001))) vectable_addr = self._get_flash_vector_addr() - LOG.debug("vectable_addr: %x", vectable_addr) + LOG.debug("vectable_addr: %s", hex(vectable_addr) if (vectable_addr is not None) else "None") vectable = None imageentry = None From 314dcdaa585cf06c2b0ab1775dbfd976b01d32b4 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 1 Dec 2021 12:26:34 -0600 Subject: [PATCH 033/123] docs: update link to join Slack. This link should be non-expiring. 
--- README.md | 4 ++-- docs/developers_guide.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 1c7d04b49..ebd365b5f 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ pyOCD ### News - pyOCD has several new community resources: the [pyocd.io](https://pyocd.io/) website, - a [Slack workspace](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg), + a [Slack workspace](https://join.slack.com/t/pyocd/shared_invite/zt-zqjv6zr5-ZfGAXl_mFCGGmFlB_8riHA), and a [mailing list](https://groups.google.com/g/pyocd) for announcements. - Branch configuration changes: the default branch `master` has been renamed to `main`, and a `develop` branch has been added to be used for active development. New pull requests should generally target `develop`. See [this discussion](https://github.com/pyocd/pyOCD/discussions/1169) for more information about this change. @@ -187,7 +187,7 @@ Contributions We welcome contributions in any area, even if you just create an issue. If you would like to get involved but aren't sure what to start with, just ask on -[Slack](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg) or [GitHub +[Slack](https://join.slack.com/t/pyocd/shared_invite/zt-zqjv6zr5-ZfGAXl_mFCGGmFlB_8riHA) or [GitHub discussions](https://github.com/pyocd/pyOCD/discussions) and we'll be happy to help you. Or you can look for an open issue. Any work on major changes should be discussed with the maintainers to make everyone is aligned. diff --git a/docs/developers_guide.md b/docs/developers_guide.md index f00b655d3..5ff49b917 100644 --- a/docs/developers_guide.md +++ b/docs/developers_guide.md @@ -70,7 +70,7 @@ See the [branch policy](#branch-configuration-policy) below for details about br work from. 
If you'd like suggestions for something to work on, from small to large, the -[Slack](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg) workspace is a great +[Slack](https://join.slack.com/t/pyocd/shared_invite/zt-zqjv6zr5-ZfGAXl_mFCGGmFlB_8riHA) workspace is a great way to engage with the community and maintainers. **Step 5.** Test @@ -123,5 +123,5 @@ The branch that your changes should be made against depends on the type and comp Maintainers will cherry-pick commits between `main` and `develop` as necessary to keep fixes in sync. If you have any questions about how best to submit changes or the branch policy, please ask in the -[Slack](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg) workspace or +[Slack](https://join.slack.com/t/pyocd/shared_invite/zt-zqjv6zr5-ZfGAXl_mFCGGmFlB_8riHA) workspace or [GitHub Discussions](https://github.com/pyocd/pyOCD/discussions). We'll be happy to help. From bc54e8108d55e4c63dc9f88c4a659b230e5af0b0 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Thu, 2 Dec 2021 10:21:03 -0600 Subject: [PATCH 034/123] Color logging (#1250) * utilities: add color log formatter. * Color logging: command line support. * Commander: colorise the "Connected to" message. * docs: color logging. --- docs/configuring_logging.md | 17 ++++++- pyocd/__main__.py | 26 ++++++++-- pyocd/commands/commander.py | 15 +++--- pyocd/subcommands/base.py | 2 + pyocd/utility/color_log.py | 98 +++++++++++++++++++++++++++++++++++++ 5 files changed, 146 insertions(+), 12 deletions(-) create mode 100644 pyocd/utility/color_log.py diff --git a/docs/configuring_logging.md b/docs/configuring_logging.md index 7eb662ff3..a890e2bfe 100644 --- a/docs/configuring_logging.md +++ b/docs/configuring_logging.md @@ -36,6 +36,7 @@ Subcommand | Default level `list` | INFO `pack` | INFO `reset` | WARNING +`rtt` | INFO `server` | INFO @@ -47,6 +48,20 @@ logging verbosity level. 
For example, a single `--verbose` moves `pyocd flash` f level of WARNING to INFO. +## Color logging + +By default, log output to the console is colorised. Control over colorised log output is possible two ways. + +The command-line `--color` argument accepts an optional parameter that must be one of `auto`, `always`, or `never`. +The default is `auto`, which will enable color only when outputting to a tty. + +Another option for controlling color output is the `PYOCD_COLOR` environment variable. It should be set to one of the +same values supported by `--color`, or left empty. This environment variable changes the default color output setting, +and is overridden by `--color` on the command line. + +Currently, due to limitations in the colorisation support, `always` behaves the same as `auto`. + + ## Loggers Each module in pyOCD uses its own module-specific logger with a name matching the dotted module @@ -61,7 +76,7 @@ its package structure. ### Trace loggers Certain modules define additional sub-module loggers that output debug trace logs. These loggers always have the -suffix ".trace" and are set to critical log level by default. +suffix ".trace" and are disabled by default. This ensures the trace messages won't be seen unless explicitly enabled by the `--log-level` / `-L` argument described in the following section. Currently defined trace loggers: diff --git a/pyocd/__main__.py b/pyocd/__main__.py index 15fe028e8..f79e414dd 100644 --- a/pyocd/__main__.py +++ b/pyocd/__main__.py @@ -18,6 +18,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os import sys import logging import argparse @@ -30,6 +31,7 @@ from .core import exceptions from .probe.pydapaccess import DAPAccess from .core import options +from .utility.color_log import ColorFormatter from .subcommands.base import SubcommandBase from .subcommands.commander_cmd import CommanderSubcommand from .subcommands.erase_cmd import EraseSubcommand @@ -42,9 +44,6 @@ from .subcommands.server_cmd import ServerSubcommand from .subcommands.rtt_cmd import RTTSubcommand -## @brief Default log format for all subcommands. -LOG_FORMAT = "%(relativeCreated)07d:%(levelname)s:%(module)s:%(message)s" - ## @brief Logger for this module. LOG = logging.getLogger("pyocd.tool") @@ -99,12 +98,31 @@ def build_parser(self) -> argparse.ArgumentParser: def _setup_logging(self) -> None: """! @brief Configure the logging module. + The color log formatter is set up, based on the --color argument and `PYOCD_COLOR` env variable. The --color + argument overrides `PYOCD_COLOR`. + The quiet and verbose argument counts are used to set the log verbosity level. Log level for specific loggers are also configured here. """ + is_tty = sys.stderr.isatty() + color_setting = ((hasattr(self._args, 'color') and self._args.color) or os.environ.get('PYOCD_COLOR', 'auto')) + use_color = (color_setting == "always") or (color_setting == "auto" and is_tty) + + # Compute global log level. level = max(1, self._args.command_class.DEFAULT_LOG_LEVEL + self._get_log_level_delta()) - logging.basicConfig(level=level, format=LOG_FORMAT) + + # Create handler to output logging to stderr. + console = logging.StreamHandler() + + # Create the color formatter and attach to our stream handler. + color_formatter = ColorFormatter(ColorFormatter.FORMAT, use_color, is_tty) + console.setFormatter(color_formatter) + + # Set stream handler and log level on root logger. 
+ root_logger = logging.getLogger() + root_logger.addHandler(console) + root_logger.setLevel(level) # Handle settings for individual loggers from --log-level arguments. for logger_setting in self._args.log_level: diff --git a/pyocd/commands/commander.py b/pyocd/commands/commander.py index 5f9ac220b..47a18998f 100755 --- a/pyocd/commands/commander.py +++ b/pyocd/commands/commander.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import colorama import logging import os import traceback @@ -76,16 +77,16 @@ def run(self): status = self.session.target.get_state().name.capitalize() except (AttributeError, KeyError): status = "" - - # Say what we're connected to. - print("Connected to %s [%s]: %s" % (self.context.target.part_number, - status, self.session.board.unique_id)) except exceptions.TransferFaultError: - pass + status = "" else: # Say what we're connected to, but without status. - print("Connected to %s [no init mode]: %s" % (self.context.target.part_number, - self.session.board.unique_id)) + status = "no init mode" + + # Say what we're connected to. + print(colorama.Fore.GREEN + f"Connected to {self.context.target.part_number} " + + colorama.Fore.CYAN + f"[{status}]" + + colorama.Style.RESET_ALL + f": {self.session.board.unique_id}") # Run the REPL interface. console = PyocdRepl(self.context) diff --git a/pyocd/subcommands/base.py b/pyocd/subcommands/base.py index f0c55e6c2..d9f4c54af 100644 --- a/pyocd/subcommands/base.py +++ b/pyocd/subcommands/base.py @@ -48,6 +48,8 @@ class CommonOptions: help="Set log level of loggers whose name matches any of the comma-separated list of glob-style " "patterns. Log level must be one of (critical, error, warning, info, debug). Can be " "specified multiple times. Example: -L*.trace,pyocd.core.*=debug") + LOGGING_GROUP.add_argument('--color', choices=("always", "auto", "never"), default=None, nargs='?', + const="auto", help="Control color logging. 
Default is auto.") # Define config related options for all subcommands. CONFIG = argparse.ArgumentParser(description='common', add_help=False) diff --git a/pyocd/utility/color_log.py b/pyocd/utility/color_log.py new file mode 100644 index 000000000..4a5b45514 --- /dev/null +++ b/pyocd/utility/color_log.py @@ -0,0 +1,98 @@ +# pyOCD debugger +# Copyright (c) 2018-2020 Arm Limited +# Copyright (c) 2021 Chris Reed +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from colorama import (Fore, Style) +import logging +from shutil import get_terminal_size + +class ColorFormatter(logging.Formatter): + """@brief Log formatter that applies colours based on the record's log level.""" + + FORMAT = "{timecolor}{relativeCreated:07.0f}{_reset} {lvlcolor:s}{levelname:<{levelnamewidth}.{levelnamewidth}s}{_reset} {msgcolor}{message} {_dim}[{module:s}]{_reset}" + + ## Colors for the log level name. + LEVEL_COLORS = { + 'CRITICAL': Style.BRIGHT + Fore.LIGHTRED_EX, + 'ERROR': Fore.LIGHTRED_EX, + 'WARNING': Fore.LIGHTYELLOW_EX, + 'INFO': Fore.CYAN, + 'DEBUG': Style.DIM, + } + + ## Colors for the rest of the log message. + MESSAGE_COLORS = { + 'CRITICAL': Fore.LIGHTRED_EX, + 'ERROR': Fore.RED, + 'WARNING': Fore.YELLOW, + 'DEBUG': Style.DIM + Fore.LIGHTWHITE_EX, + } + + ## Fixed maximum length of the log level name in log messages. 
+ MAX_LEVELNAME_WIDTH = 1 + + def __init__(self, msg, use_color: bool, is_tty: bool) -> None: + super().__init__(msg, style='{') + self._use_color = use_color + self._is_tty = is_tty + + # TODO: Handle resizing of terminal? + self._term_width = get_terminal_size()[0] + + # Note: we can't set the type of `record` param to LogRecord because that causes type errors for + # each time below when an attribute is set on the record. + def format(self, record) -> str: + # Capture and remove exc_info and stack_info so the superclass format() doesn't + # print it and we can control the formatting. + exc_info = record.exc_info + record.exc_info = None + stack_info = record.stack_info + record.stack_info = None + + # Add colors to the record. + if self._use_color: + record.lvlcolor = self.LEVEL_COLORS.get(record.levelname, '') + + # Colorise the line. + record.msgcolor = self.MESSAGE_COLORS.get(record.levelname, '') + + # Fixed colors. + record.timecolor = Fore.BLUE + record._reset = Style.RESET_ALL + record._dim = Style.DIM + else: + record.lvlcolor = "" + record.msgcolor = "" + record.timecolor = "" + record._reset = "" + record._dim = "" + + record.message = record.getMessage() + + # Add levelname alignment to record. + record.levelname_align = " " * max(self.MAX_LEVELNAME_WIDTH - len(record.levelname), 0) + record.levelnamewidth = self.MAX_LEVELNAME_WIDTH + + # Let superclass handle formatting. + log_msg = super().format(record) + + # Append uncolored exception/stack info. 
+ if exc_info: + log_msg += "\n" + Style.DIM + self.formatException(exc_info) + Style.RESET_ALL + if stack_info: + log_msg += "\n" + Style.DIM + self.formatStack(stack_info) + Style.RESET_ALL + + return log_msg From 41a86b31d8aad2d3d3d7ef06238768f5677339e8 Mon Sep 17 00:00:00 2001 From: Ciro Cattuto Date: Fri, 3 Dec 2021 21:43:51 +0100 Subject: [PATCH 035/123] RTT host-to-target communication (#1253) Initial implementation of RTT host-to-target communication with minimal terminal features in the "rtt" subcommand. --- pyocd/subcommands/rtt_cmd.py | 72 ++++++++++++++++- pyocd/utility/kbhit.py | 147 +++++++++++++++++++++++++++++++++++ 2 files changed, 215 insertions(+), 4 deletions(-) create mode 100644 pyocd/utility/kbhit.py diff --git a/pyocd/subcommands/rtt_cmd.py b/pyocd/subcommands/rtt_cmd.py index c1c0067f8..1b40aba80 100644 --- a/pyocd/subcommands/rtt_cmd.py +++ b/pyocd/subcommands/rtt_cmd.py @@ -1,5 +1,7 @@ # pyOCD debugger # Copyright (c) 2021 mikisama +# Copyright (C) 2021 Ciro Cattuto +# Copyright (C) 2021 Simon D. 
Levy # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,8 +24,10 @@ from pyocd.core.soc_target import SoCTarget from pyocd.subcommands.base import SubcommandBase from pyocd.utility.cmdline import convert_session_options, int_base_0 +from pyocd.utility.kbhit import KBHit from ctypes import Structure, c_char, c_int32, c_uint32, sizeof + LOG = logging.getLogger(__name__) @@ -134,14 +138,23 @@ def invoke(self) -> int: rtt_cb = SEGGER_RTT_CB.from_buffer(bytearray(data[pos:])) up_addr = rtt_cb_addr + SEGGER_RTT_CB.aUp.offset - # down_addr = up_addr + sizeof(SEGGER_RTT_BUFFER_UP) * rtt_cb.MaxNumUpBuffers + down_addr = up_addr + sizeof(SEGGER_RTT_BUFFER_UP) * rtt_cb.MaxNumUpBuffers LOG.info(f"_SEGGER_RTT @ {rtt_cb_addr:#08x} with {rtt_cb.MaxNumUpBuffers} aUp and {rtt_cb.MaxNumDownBuffers} aDown") + # some targets might need this here + #target.reset_and_halt() + target.resume() - while True: + # set up terminal input + kb = KBHit() + # byte array to send via RTT + cmd = bytes() + + while True: + # read data from up buffers (target -> host) data = target.read_memory_block8(up_addr, sizeof(SEGGER_RTT_BUFFER_UP)) up = SEGGER_RTT_BUFFER_UP.from_buffer(bytearray(data)) @@ -152,7 +165,7 @@ def invoke(self) -> int: """ data = target.read_memory_block8(up.pBuffer + up.RdOff, up.WrOff - up.RdOff) target.write_memory(up_addr + SEGGER_RTT_BUFFER_UP.RdOff.offset, up.WrOff) - print(bytes(data).decode(), end="") + print(bytes(data).decode(), end="", flush=True) elif up.WrOff < up.RdOff: """ @@ -162,10 +175,61 @@ def invoke(self) -> int: data = target.read_memory_block8(up.pBuffer + up.RdOff, up.SizeOfBuffer - up.RdOff) data += target.read_memory_block8(up.pBuffer, up.WrOff) target.write_memory(up_addr + SEGGER_RTT_BUFFER_UP.RdOff.offset, up.WrOff) - print(bytes(data).decode(), end="") + print(bytes(data).decode(), end="", flush=True) + + else: # up buffer is empty + + # try and fetch character + if not kb.kbhit(): + continue 
+ c = kb.getch() + + if ord(c) == 8 or ord(c) == 127: # process backspace + print("\b \b", end="", flush=True) + cmd = cmd[:-1] + continue + elif ord(c) == 27: # process ESC + break + else: + print(c, end="", flush=True) + cmd += c.encode() + + # keep accumulating until we see CR or LF + if not c in "\r\n": + continue + + # SEND TO TARGET + + data = target.read_memory_block8(down_addr, sizeof(SEGGER_RTT_BUFFER_DOWN)) + down = SEGGER_RTT_BUFFER_DOWN.from_buffer(bytearray(data)) + + # compute free space in down buffer + if down.WrOff >= down.RdOff: + num_avail = down.SizeOfBuffer - (down.WrOff - down.RdOff) + else: + num_avail = down.RdOff - down.WrOff - 1 + + # wait until there's space for the entire string in the RTT down buffer + if (num_avail < len(cmd)): + continue + + # write data to down buffer (host -> target), char by char + for i in range(len(cmd)): + target.write_memory_block8(down.pBuffer + down.WrOff, cmd[i:i+1]) + down.WrOff += 1 + if down.WrOff == down.SizeOfBuffer: + down.WrOff = 0; + target.write_memory(down_addr + SEGGER_RTT_BUFFER_DOWN.WrOff.offset, down.WrOff) + + # clear it and start anew + cmd = bytes() + + except KeyboardInterrupt: + pass finally: if session: session.close() + kb.set_normal_term() return 0 diff --git a/pyocd/utility/kbhit.py b/pyocd/utility/kbhit.py new file mode 100644 index 000000000..b2a6c61dd --- /dev/null +++ b/pyocd/utility/kbhit.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 Simon D. Levy +# https://github.com/simondlevy/kbhit +# +# MIT License +# +# Copyright (c) 2021 Simon D. 
Levy +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +''' +A Python class implementing KBHIT, the standard keyboard-interrupt poller. +Works transparently on Windows and Posix (Linux, Mac OS X). Doesn't work +with IDLE. + +Copyright (c) 2021 Simon D. Levy + +MIT License +''' + +import os + +# Windows +if os.name == 'nt': + import msvcrt + +# Posix (Linux, OS X) +else: + import sys + import termios + import atexit + from select import select + + +class KBHit: + + def __init__(self): + '''Creates a KBHit object that you can call to do various keyboard things. 
+ ''' + + if os.name == 'nt': + pass + + else: + + # Save the terminal settings + self.fd = sys.stdin.fileno() + self.new_term = termios.tcgetattr(self.fd) + self.old_term = termios.tcgetattr(self.fd) + + # New terminal setting unbuffered + self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO) + termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term) + + # Support normal-terminal reset at exit + atexit.register(self.set_normal_term) + + + def set_normal_term(self): + ''' Resets to normal terminal. On Windows this is a no-op. + ''' + + if os.name == 'nt': + pass + + else: + termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term) + + + def getch(self): + ''' Returns a keyboard character after kbhit() has been called. + Should not be called in the same program as getarrow(). + ''' + + if os.name == 'nt': + return msvcrt.getch().decode('utf-8') + + else: + return sys.stdin.read(1) + + + def getarrow(self): + ''' Returns an arrow-key code after kbhit() has been called. Codes are + 0 : up + 1 : right + 2 : down + 3 : left + Should not be called in the same program as getch(). + ''' + + if os.name == 'nt': + msvcrt.getch() # skip 0xE0 + c = msvcrt.getch() + vals = [72, 77, 80, 75] + + else: + c = sys.stdin.read(3)[2] + vals = [65, 67, 66, 68] + + return vals.index(ord(c.decode('utf-8'))) + + + def kbhit(self): + ''' Returns True if keyboard character was hit, False otherwise. + ''' + if os.name == 'nt': + return msvcrt.kbhit() + + else: + dr,dw,de = select([sys.stdin], [], [], 0) + return dr != [] + + +# Test +if __name__ == "__main__": + + kb = KBHit() + + print('Hit any key, or ESC to exit') + + while True: + + if kb.kbhit(): + c = kb.getch() + if ord(c) == 27: # ESC + break + print(c) + + kb.set_normal_term() + From c17d3654237f7b17d0781eb5cb71383123d117d2 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 3 Dec 2021 17:04:28 -0600 Subject: [PATCH 036/123] flash: remove the useless part of the flash algo blob header. 
(#1256) - Update both PackFlashAlgo and generate_flash_algo.py. - Update unit test that has fixed expected address offsets. --- pyocd/target/pack/flash_algo.py | 17 ++++++++++++----- scripts/generate_flash_algo.py | 23 +++++++++++++++-------- test/unit/test_pack.py | 10 +++++----- 3 files changed, 32 insertions(+), 18 deletions(-) diff --git a/pyocd/target/pack/flash_algo.py b/pyocd/target/pack/flash_algo.py index 656a0b752..830ced8a2 100644 --- a/pyocd/target/pack/flash_algo.py +++ b/pyocd/target/pack/flash_algo.py @@ -64,11 +64,18 @@ class PackFlashAlgo(object): ("PrgData", "SHT_NOBITS"), ) - ## @brief Standard flash blob header that starts with a breakpoint instruction. - _FLASH_BLOB_HEADER = [ - 0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, - 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x04770D1F - ] + ## @brief Standard flash blob header a breakpoint instruction. + # + # This header consists of two instructions: + # + # ``` + # bkpt #0 + # b .-2 # branch to the bkpt + # ``` + # + # Before running a flash algo operation, LR is set to the address of the `bkpt` instruction, + # so when the operation function returns it will halt the CPU. + _FLASH_BLOB_HEADER = [ 0xE7FDBE00 ] ## @brief Size of the flash blob header in bytes. 
_FLASH_BLOB_HEADER_SIZE = len(_FLASH_BLOB_HEADER) * 4 diff --git a/scripts/generate_flash_algo.py b/scripts/generate_flash_algo.py index c4e71b348..e98c00b25 100755 --- a/scripts/generate_flash_algo.py +++ b/scripts/generate_flash_algo.py @@ -25,10 +25,17 @@ import jinja2 from pyocd.target.pack.flash_algo import PackFlashAlgo -# TODO -# FIXED LENGTH - remove and these (shrink offset to 4 for bkpt only) -BLOB_HEADER = '0xe00abe00,' #, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,' -HEADER_SIZE = 4 #0x20 +# This header consists of two instructions: +# +# ``` +# bkpt #0 +# b .-2 # branch to the bkpt +# ``` +# +# Before running a flash algo operation, LR is set to the address of the `bkpt` instruction, +# so when the operation function returns it will halt the CPU. +BLOB_HEADER = '0xe7fdbe00,' +HEADER_SIZE = 4 STACK_SIZE = 0x200 @@ -176,11 +183,11 @@ def main(): "template for pyocd).") parser.add_argument('-c', '--copyright', help="Set copyright owner.") args = parser.parse_args() - + if not args.copyright: print(f"{colorama.Fore.YELLOW}Warning! No copyright owner was specified. Defaulting to \"PyOCD Authors\". 
" f"Please set via --copyright, or edit output.{colorama.Style.RESET_ALL}") - + if args.template: with open(args.template, "r") as tmpl_file: tmpl = tmpl_file.read() @@ -212,10 +219,10 @@ def main(): } text = algo.process_template(tmpl, data_dict) - + with open(args.output, "w") as file_handle: file_handle.write(text) - + print(f"Wrote flash algo dict to {args.output}") if __name__ == '__main__': diff --git a/test/unit/test_pack.py b/test/unit/test_pack.py index 1608331b5..d29a01d06 100644 --- a/test/unit/test_pack.py +++ b/test/unit/test_pack.py @@ -185,11 +185,11 @@ def test_algo_dict(self, k64algo, k64f1m0): # print(d) STACK_SIZE = 0x200 assert d['load_address'] == ram.start + STACK_SIZE - assert d['pc_init'] == ram.start + STACK_SIZE + 0x21 - assert d['pc_unInit'] == ram.start + STACK_SIZE + 0x71 - assert d['pc_eraseAll'] == ram.start + STACK_SIZE + 0x95 - assert d['pc_erase_sector'] == ram.start + STACK_SIZE + 0xcb - assert d['pc_program_page'] == ram.start + STACK_SIZE + 0xdf + assert d['pc_init'] == ram.start + STACK_SIZE + 0x5 + assert d['pc_unInit'] == ram.start + STACK_SIZE + 0x55 + assert d['pc_eraseAll'] == ram.start + STACK_SIZE + 0x79 + assert d['pc_erase_sector'] == ram.start + STACK_SIZE + 0xaf + assert d['pc_program_page'] == ram.start + STACK_SIZE + 0xc3 def has_overlapping_regions(memmap): return any((len(memmap.get_intersecting_regions(r.start, r.end)) > 1) for r in memmap.regions) From 3476de4cd6b77ec561d2ccdf9f2143bca43584c8 Mon Sep 17 00:00:00 2001 From: Mathias Brossard Date: Thu, 9 Dec 2021 15:02:32 -0600 Subject: [PATCH 037/123] Add new micro:bit board IDs (#1261) --- pyocd/board/board_ids.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyocd/board/board_ids.py b/pyocd/board/board_ids.py index dde7089d2..d7ca23be6 100644 --- a/pyocd/board/board_ids.py +++ b/pyocd/board/board_ids.py @@ -271,6 +271,8 @@ def __init__(self, name, target, binary): "9901": BoardInfo( "micro:bit", "nrf51", "l1_microbit.bin", ), "9903": BoardInfo( 
"micro:bit v2", "nrf52833", "microbitv2.bin", ), "9904": BoardInfo( "micro:bit v2", "nrf52833", "microbitv2.bin", ), + "9905": BoardInfo( "micro:bit v2", "nrf52833", "microbitv2.bin", ), + "9906": BoardInfo( "micro:bit v2", "nrf52833", "microbitv2.bin", ), "C004": BoardInfo( "tinyK20", "k20d50m", "l1_k20d50m.bin", ), "C006": BoardInfo( "VBLUno51", "nrf51", "l1_nrf51.bin", ), } From c5c18a76d6457112b5665dcbd0ff0ecaa89aa250 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 8 Dec 2021 12:19:34 -0600 Subject: [PATCH 038/123] utility: cmdline: fix bool session option issues. (#1259) - Make lowercase before testing value. - Log warning on invalid value. --- pyocd/utility/cmdline.py | 6 +++++- test/unit/test_cmdline.py | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/pyocd/utility/cmdline.py b/pyocd/utility/cmdline.py index 62ec13da4..6b8a2c7ad 100644 --- a/pyocd/utility/cmdline.py +++ b/pyocd/utility/cmdline.py @@ -108,7 +108,11 @@ def convert_session_options(option_list: Iterable[str]) -> Dict[str, Any]: continue # Convert string value to option type. 
elif info.type is bool: - value = value in ("true", "1", "yes", "on") + if value.lower() in ("true", "1", "yes", "on", "false", "0", "no", "off"): + value = value.lower() in ("true", "1", "yes", "on") + else: + LOG.warning("invalid value for option '%s'", name) + continue elif info.type is int: try: value = int(value, base=0) diff --git a/test/unit/test_cmdline.py b/test/unit/test_cmdline.py index 9b66bedd4..93932036d 100644 --- a/test/unit/test_cmdline.py +++ b/test/unit/test_cmdline.py @@ -96,7 +96,11 @@ def test_bool(self): assert convert_session_options(['auto_unlock=on']) == {'auto_unlock': True} assert convert_session_options(['auto_unlock=0']) == {'auto_unlock': False} assert convert_session_options(['auto_unlock=false']) == {'auto_unlock': False} - assert convert_session_options(['auto_unlock=anything-goes-here']) == {'auto_unlock': False} + assert convert_session_options(['auto_unlock=True']) == {'auto_unlock': True} + assert convert_session_options(['auto_unlock=False']) == {'auto_unlock': False} + assert convert_session_options(['auto_unlock=YES']) == {'auto_unlock': True} + assert convert_session_options(['auto_unlock=oFF']) == {'auto_unlock': False} + assert convert_session_options(['auto_unlock=anything-goes-here']) == {} def test_noncasesense(self): # Test separate paths for with and without a value. From 5d1eca6c4538aeb913853ebea2f44898a8bc2bf1 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 8 Dec 2021 17:00:03 -0600 Subject: [PATCH 039/123] commander: fix broken semicolon-separated commands from -c argument. (#1260) Add test case to commander_test.py, plus cleanup of that file. 
--- pyocd/commands/execution_context.py | 24 +++++++++----------- test/commander_test.py | 35 +++++++++++++---------------- 2 files changed, 25 insertions(+), 34 deletions(-) diff --git a/pyocd/commands/execution_context.py b/pyocd/commands/execution_context.py index 905104ab5..4751cf60f 100755 --- a/pyocd/commands/execution_context.py +++ b/pyocd/commands/execution_context.py @@ -307,22 +307,18 @@ def parse_command_line(self, line): def _split_commands(self, line): """! @brief Generator yielding commands separated by semicolons.""" - result = '' - i = 0 - while i < len(line): - c = line[i] - # Don't split on escaped semicolons. - if (c == '\\') and (i < len(line) - 1) and (line[i + 1] == ';'): - i += 1 - result += ';' - elif c == ';': - yield result - result = '' + # FIXME This is a big, inefficient hack to work around a bug splitting on quoted semicolons. Practically, + # though, it will never be noticeable. + parts = split_command_line(line) + result = [] + for p in parts: + if p == ';': + yield " ".join(f'"{a}"' for a in result) + result = [] else: - result += c - i += 1 + result.append(p) if result: - yield result + yield " ".join(f'"{a}"' for a in result) def parse_command(self, cmdline): """! @brief Create a CommandInvocation from a single command.""" diff --git a/test/commander_test.py b/test/commander_test.py index 0281ce022..51932b359 100644 --- a/test/commander_test.py +++ b/test/commander_test.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2020 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,28 +14,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function import argparse import sys import traceback import logging -import six +from types import SimpleNamespace import os -from collections import UserDict -from pyocd.core.helpers import ConnectHelper from pyocd.probe.pydapaccess import DAPAccess -from pyocd.utility.mask import round_up_div -from pyocd.utility import conversion -from pyocd.core.memory_map import MemoryType from pyocd.commands.commander import PyOCDCommander -from pyocd.commands import commands from test_util import ( Test, TestResult, - get_session_options, - binary_to_elf_file, PYOCD_DIR, ) @@ -74,6 +66,9 @@ def commander_test(board_id): ["halt"], ["status"], + # semicolon separated + ["status", ";", "halt", ";", "continue", ";", "halt"], + # commander command group - these are not tested by commands_test.py. ["list"], ["exit"], # Must be last command! @@ -82,7 +77,7 @@ def commander_test(board_id): print("\n------ Testing commander ------\n") # Set up commander args. - args = UserDict() + args = SimpleNamespace() args.no_init = False args.frequency = 1000000 args.options = {} #get_session_options() @@ -103,19 +98,19 @@ def commander_test(board_id): cmdr.run() test_pass_count += 1 print("TEST PASSED") + + test_count += 1 + print("Testing exit code") + print("Exit code:", cmdr.exit_code) + if cmdr.exit_code == 0: + test_pass_count += 1 + print("TEST PASSED") + else: + print("TEST FAILED") except Exception: print("TEST FAILED") traceback.print_exc() - test_count += 1 - print("Testing exit code") - print("Exit code:", cmdr.exit_code) - if cmdr.exit_code == 0: - test_pass_count += 1 - print("TEST PASSED") - else: - print("TEST FAILED") - print("\n\nTest Summary:") print("Pass count %i of %i tests" % (test_pass_count, test_count)) if failed_commands: From 08fe203060dadb879af90ea0937686611c071c0a Mon Sep 17 00:00:00 2001 From: Ciro Cattuto Date: Sat, 11 Dec 2021 18:32:00 +0100 Subject: [PATCH 040/123] Exit gracefully if RTT block is not found (#1265) --- 
pyocd/subcommands/rtt_cmd.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pyocd/subcommands/rtt_cmd.py b/pyocd/subcommands/rtt_cmd.py index 1b40aba80..29a0d8c0b 100644 --- a/pyocd/subcommands/rtt_cmd.py +++ b/pyocd/subcommands/rtt_cmd.py @@ -92,6 +92,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: def invoke(self) -> int: session = None + kb = None try: session = ConnectHelper.session_with_chosen_probe( @@ -132,7 +133,7 @@ def invoke(self) -> int: if pos == -1: LOG.error("No RTT control block available") - return + return 1 rtt_cb_addr = rtt_range_start + pos @@ -230,6 +231,7 @@ def invoke(self) -> int: finally: if session: session.close() + if kb: kb.set_normal_term() return 0 From 6f430bf247d5f7cbdbf770d33bd747b6d941b090 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 21 Nov 2021 15:44:42 -0600 Subject: [PATCH 041/123] docs: semihosting, swo/swv, env vars additions; some smaller changes. --- docs/debug_probes.md | 9 +- docs/developers_guide.md | 10 +- docs/env_vars.md | 40 +++ docs/resources/semihosting.svg | 485 +++++++++++++++++++++++++++++++++ docs/semihosting.md | 170 ++++++++++++ docs/swo_swv.md | 81 ++++++ 6 files changed, 789 insertions(+), 6 deletions(-) create mode 100644 docs/env_vars.md create mode 100644 docs/resources/semihosting.svg create mode 100644 docs/semihosting.md create mode 100644 docs/swo_swv.md diff --git a/docs/debug_probes.md b/docs/debug_probes.md index ecb39a7d6..8472ea71b 100644 --- a/docs/debug_probes.md +++ b/docs/debug_probes.md @@ -136,7 +136,7 @@ PyOCD supports automatic target type identification for debug probes built with ### STLink
-Note! Recent STLink firmware versions will only allow access to STM32 targets. If you are using a target +Recent STLink firmware versions will only allow access to STM32 targets. If you are using a target from a silicon vendor other than ST Micro, please use a different debug probe.
@@ -162,7 +162,12 @@ and Windows. [Firmware and driver installer and updates](https://www.segger.com/downloads/jlink/) -On macOS, you can install the `segger-jlink` cask with Homebrew to get automatic driver updates. +On macOS, you can install the `segger-jlink` cask with Homebrew to get managed driver updates. + +Please note that flash programming performance using a J-Link through pyOCD is currently slower than using the J-Link +software directly (or compared to CMSIS-DAP). This is because pyOCD uses the low-level DAP commands provided by J-Link, +which are inherently slower than higher level commands (which are less flexible and more difficult and complex to +integrate). #### Session options diff --git a/docs/developers_guide.md b/docs/developers_guide.md index 5ff49b917..e87c56151 100644 --- a/docs/developers_guide.md +++ b/docs/developers_guide.md @@ -2,13 +2,15 @@ title: Developers' guide --- -## Setup -
-Please familiarise yourself with the [contributing guide](https://github.com/pyocd/pyOCD/blob/main/CONTRIBUTING.md) -before beginning any development on pyOCD or related projects. +

+Please familiarise yourself with the +contributing guide before beginning any development on pyOCD or related projects. +

+## Setup + PyOCD developers are strongly recommended to setup a working environment using either [virtualenv](https://virtualenv.pypa.io/en/latest/) or the built-in `venv` module (only use of `venv` is shown below, but the two are equivalent). After cloning the code, you can setup a virtualenv and install the pyOCD diff --git a/docs/env_vars.md b/docs/env_vars.md new file mode 100644 index 000000000..c6dde0bf9 --- /dev/null +++ b/docs/env_vars.md @@ -0,0 +1,40 @@ +--- +title: Environment variables +--- + +
### News +- pyOCD has several new community resources: the [pyocd.io](https://pyocd.io/) website, + a [Slack workspace](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg), + and a [mailing list](https://groups.google.com/g/pyocd) for announcements. - Branch configuration changes: the default branch `master` has been renamed to `main`, and a `develop` branch has been added to be used for active development. New pull requests should generally target `develop`. See [this discussion](https://github.com/pyocd/pyOCD/discussions/1169) for more information about this change. See the [wiki news page](https://github.com/pyocd/pyOCD/wiki/News) for all recent news. @@ -19,8 +24,8 @@ A command line tool is provided that covers most use cases, or you can make use API to enable low-level target control. A common use for the Python API is to run and control CI tests. -Upwards of 70 popular MCUs are supported built-in. In addition, through the use of CMSIS-Packs, -[nearly every Cortex-M device](https://www.keil.com/dd2/pack/) on the market is supported. +Support for more than 70 popular MCUs is built-in. In addition, through the use of CMSIS Device +Family Packs, [nearly every Cortex-M device](https://www.keil.com/dd2/pack/) on the market is supported. The `pyocd` command line tool gives you total control over your device with these subcommands: @@ -79,16 +84,15 @@ Requirements Status ------ -PyOCD is functionally reliable and fully useable. +PyOCD is beta quality. -The Python API is considered partially unstable as we are restructuring and cleaning it up prior to -releasing version 1.0. +The Python API is considered stable for version 0.x, but will be changed in version 1.0. Documentation ------------- -The pyOCD documentation is located in [the docs directory](docs/). +The pyOCD documentation is available on the [pyocd.io website](https://pyocd.io/docs). 
In addition to user guides, you can generate reference documentation using Doxygen with the supplied [config file](docs/Doxyfile). @@ -97,6 +101,11 @@ supplied [config file](docs/Doxyfile). Installing ---------- +**The full installation guide is available [in the documentation](https://pyocd.io/docs/installing).** + +For notes about installing and using on non-x86 systems such as Raspberry Pi, see the +[relevant documentation](https://pyocd.io/docs/installing-on-non-x86). + The latest stable version of pyOCD may be installed via [pip](https://pip.pypa.io/en/stable/index.html) as follows: @@ -132,9 +141,6 @@ You have a few options here: 4. Run the command in a [virtualenv](https://virtualenv.pypa.io/en/latest/) local to a specific project working set. -For notes about installing and using on non-x86 systems such as Raspberry Pi, see the -[relevant documentation](docs/installing_on_non_x86.md). - ### libusb installation [pyusb](https://github.com/pyusb/pyusb) and its backend library [libusb](https://libusb.info/) are @@ -164,72 +170,45 @@ instructions. ### Target support -See the [target support documentation](docs/target_support.md) for information on how to check if +See the [target support documentation](https://pyocd.io/docs/target-support) for information on how to check if the MCU(s) you are using have built-in support, and how to install support for additional MCUs via CMSIS-Packs. -Standalone GDB server ---------------------- - -After you install pyOCD via pip or setup.py, you will be able to execute the following in order to -start a GDB server powered by pyOCD: - -``` -$ pyocd gdbserver -``` - -You can get additional help by running ``pyocd gdbserver --help``. +Using GDB +--------- -Example command line GDB session showing how to connect to a running `pyocd gdbserver` and load -firmware: +See the [GDB setup](https://pyocd.io/docs/gdb-setup) documentation for a guide for setting up +and using pyocd with gdb and IDEs. 
-``` -$ arm-none-eabi-gdb application.elf - - target remote localhost:3333 - load - monitor reset -``` -The `pyocd gdbserver` subcommand is also usable as a drop in place replacement for OpenOCD in -existing setups. The primary difference is the set of gdb monitor commands. +Community resources +------------------- +Join the pyOCD community! -Recommended GDB and IDE setup ------------------------------ +[pyocd.io website](https://pyocd.io) \ +[Documentation](https://pyocd.io/docs) \ +[Issues](https://github.com/pyocd/pyOCD/issues) \ +[Discussions](https://github.com/pyocd/pyOCD/discussions) \ +[Wiki](https://github.com/pyocd/pyOCD/wiki) \ +[Mailing list](https://groups.google.com/g/pyocd) for announcements -The recommended toolchain for embedded Arm Cortex-M development is [GNU Arm -Embedded](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm), -provided by Arm. GDB is included with this toolchain. - -For [Visual Studio Code](https://code.visualstudio.com), the -[cortex-debug](https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug) plugin is available -that supports pyOCD. - -The GDB server also works well with [Eclipse Embedded CDT](https://projects.eclipse.org/projects/iot.embed-cdt), -previously known as [GNU MCU/ARM Eclipse](https://gnu-mcu-eclipse.github.io/). It fully supports pyOCD with -an included pyOCD debugging plugin. - -To view peripheral register values either the built-in Eclipse Embedded CDT register view can be used, or -the Embedded System Register Viewer plugin can be installed. The latter can be installed from inside -Eclipse adding `http://embsysregview.sourceforge.net/update` as a software update server URL -under the "Help -> Install New Software..." menu item. +In order to foster a healthy and safe environment, we expect contributors and all members of the community to +follow our [Code of Conduct](https://github.com/pyocd/pyOCD/tree/main/CODE_OF_CONDUCT.md). 
Contributions ------------- -Join the pyOCD community! We welcome contributions in any area. Please see the [contribution -guidelines](CONTRIBUTING.md) for detailed requirements. In order to foster a healthy -and safe environment, we expect contributors and all members of the community to follow the -[code of conduct](CODE_OF_CONDUCT.md). - -To report bugs, please [create an issue](https://github.com/pyocd/pyOCD/issues/new) in the -GitHub project. +We welcome contributions in any area, even if you just create an issue. If you would like to get involved but +aren't sure what to start with, just ask on +[Slack](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg) or [GitHub +discussions](https://github.com/pyocd/pyOCD/discussions) and we'll be happy to help you. Or you can look for +an open issue. Any work on major changes should be discussed with the maintainers to make everyone is aligned. -Please see the [Developers' Guide](docs/developers_guide.md) for instructions on how to set up a -development environment for pyOCD. +Please see the [contribution guidelines](https://github.com/pyocd/pyOCD/tree/main/CONTRIBUTING.md) for detailed requirements. The [developers' +Guide](https://pyocd.io/docs/developers-guide) has instructions on how to set up a development environment for pyOCD. New pull requests should be [created](https://github.com/pyocd/pyOCD/pull/new/develop) against the `develop` branch. @@ -237,7 +216,8 @@ New pull requests should be [created](https://github.com/pyocd/pyOCD/pull/new/de License ------- -PyOCD is licensed with the permissive Apache 2.0 license. See the [LICENSE](LICENSE) file for the -full text of the license. +PyOCD is licensed with the permissive Apache 2.0 license. See the +[LICENSE](https://github.com/pyocd/pyOCD/tree/main/LICENSE) file for the full text of the license. All +documentation and the website are licensed with [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/). 
Copyright © 2006-2021 PyOCD Authors From f1d1558dc2abd68723957307f3b7160c85e1ba05 Mon Sep 17 00:00:00 2001 From: Haly Date: Fri, 15 Oct 2021 21:59:32 +0800 Subject: [PATCH 010/123] LPC55xx: update flash algo for lpc55s36 (#1226) --- pyocd/target/builtin/target_LPC55S36.py | 133 +++++++++++++++++------- 1 file changed, 98 insertions(+), 35 deletions(-) diff --git a/pyocd/target/builtin/target_LPC55S36.py b/pyocd/target/builtin/target_LPC55S36.py index 0a3c626d5..f8871317b 100644 --- a/pyocd/target/builtin/target_LPC55S36.py +++ b/pyocd/target/builtin/target_LPC55S36.py @@ -17,7 +17,7 @@ from ...core.memory_map import (FlashRegion, RamRegion, RomRegion, MemoryMap) from ...debug.svd.loader import SVDFile -# Note: the DFP has both S and NS flash algos, but they are exactly the same except for the address range. + FLASH_ALGO = { 'load_address' : 0x20000000, @@ -26,30 +26,28 @@ 0xe00abe00, 0xf240b580, 0xf2c00004, 0xf6420000, 0xf84961e0, 0xf2401000, 0xf2c52000, 0x21000000, 0x1080f8c0, 0x1084f8c0, 0x1180f8c0, 0x71fbf647, 0xf6406001, 0x21ff6004, 0x0000f2c5, 0x01def2cc, 0xf04f6001, - 0x210240a0, 0xf2407001, 0xf2c0000c, 0x44480000, 0xf874f000, 0xbf182800, 0xbd802001, 0x47702000, - 0xf240b580, 0xf2c0000c, 0xf2460000, 0x4448636c, 0xf6c62100, 0xf44f3365, 0xf0003260, 0x2800f86d, + 0x210240a0, 0xf2407001, 0xf2c0000c, 0x44480000, 0xf860f000, 0xbf182800, 0xbd802001, 0x47702000, + 0xf240b580, 0xf2c0000c, 0xf2460000, 0x4448636c, 0xf6c62100, 0xf44f3365, 0xf0003276, 0x2800f859, 0x2001bf18, 0xbf00bd80, 0xf020b580, 0xf2404170, 0xf2c0000c, 0xf2460000, 0x4448636c, 0x3365f6c6, - 0x4200f44f, 0xf858f000, 0xbf182800, 0xbd802001, 0xb081b5f0, 0x070cf240, 0x460d4614, 0xf0200441, - 0xf2c04670, 0xd10a0700, 0x636cf246, 0x0007eb09, 0xf6c64631, 0xf44f3365, 0xf0004200, 0xf5b5f83d, - 0xbf987f00, 0x7500f44f, 0x0007eb09, 0x46224631, 0xf000462b, 0x2800f847, 0x2001bf18, 0xbdf0b001, - 0x460cb5b0, 0xf0204605, 0x46114070, 0xf0004622, 0x2800f8b8, 0x4425bf08, 0xbdb04628, 0x460ab580, - 0x4170f020, 0x000cf240, 
0x0000f2c0, 0xf0004448, 0x2800f83f, 0x2001bf18, 0x0000bd80, 0x0108f240, - 0x0100f2c0, 0xf8092201, 0xf64f2001, 0xf2c101dc, 0x68093102, 0xbf004708, 0x0c08f240, 0x0c00f2c0, - 0xc00cf819, 0x0f00f1bc, 0xf64abf07, 0xf2c13c4f, 0xf64f3c00, 0xf2c10ce0, 0xbf183c02, 0xc000f8dc, - 0xbf004760, 0x0c08f240, 0x0c00f2c0, 0xc00cf819, 0x0f00f1bc, 0xf248bf07, 0xf2c17c9b, 0xf64f3c02, - 0xf2c10ce4, 0xbf183c02, 0xc000f8dc, 0xbf004760, 0x0308f240, 0x0300f2c0, 0x3003f819, 0xbf072b00, - 0x3381f64a, 0x3300f2c1, 0x03e8f64f, 0x3302f2c1, 0x681bbf18, 0xbf004718, 0x0c08f240, 0x0c00f2c0, - 0xc00cf819, 0x0f00f1bc, 0xf64abf07, 0xf2c14ca5, 0xf64f3c00, 0xf2c10cec, 0xbf183c02, 0xc000f8dc, - 0xbf004760, 0x03f0f64f, 0x3302f2c1, 0x4718681b, 0x01f4f64f, 0x3102f2c1, 0x47086809, 0x01f8f64f, - 0x3102f2c1, 0x47086809, 0x03fcf64f, 0x3302f2c1, 0x4718681b, 0x1c04f64f, 0x3c02f2c1, 0xc000f8dc, - 0xbf004760, 0x1208f64f, 0x3202f2c1, 0x47106812, 0x120cf64f, 0x3202f2c1, 0x47106812, 0x1310f64f, - 0x3302f2c1, 0x4718681b, 0x1200f64f, 0x3202f2c1, 0x47106812, 0x1c18f64f, 0x3c02f2c1, 0xc000f8dc, - 0xea404760, 0xb5100301, 0xd10f079b, 0xd30d2a04, 0xc908c810, 0x429c1f12, 0xba20d0f8, 0x4288ba19, - 0x2001d901, 0xf04fbd10, 0xbd1030ff, 0x07d3b11a, 0x1c52d003, 0x2000e007, 0xf810bd10, 0xf8113b01, - 0x1b1b4b01, 0xf810d107, 0xf8113b01, 0x1b1b4b01, 0x1e92d101, 0x4618d1f1, 0x0000bd10, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x7200f44f, 0xf844f000, 0xbf182800, 0xbd802001, 0x460bb580, 0x4170f020, 0x000cf240, 0x0000f2c0, + 0xf5b34448, 0xbf987f00, 0x7300f44f, 0xf846f000, 0xbf182800, 0xbd802001, 0x460cb5b0, 0xf0204605, + 0x46114070, 0xf0004622, 0x2800f8b8, 0x4425bf08, 0xbdb04628, 0x460ab580, 0x4170f020, 0x000cf240, + 0x0000f2c0, 0xf0004448, 0x2800f83f, 0x2001bf18, 0x0000bd80, 0x0108f240, 0x0100f2c0, 0xf8092201, + 0xf64f2001, 0xf2c101dc, 0x68093102, 0xbf004708, 0x0c08f240, 0x0c00f2c0, 0xc00cf819, 0x0f00f1bc, + 0xf64abf07, 0xf2c13c4f, 0xf64f3c00, 0xf2c10ce0, 0xbf183c02, 
0xc000f8dc, 0xbf004760, 0x0c08f240, + 0x0c00f2c0, 0xc00cf819, 0x0f00f1bc, 0xf248bf07, 0xf2c17c9b, 0xf64f3c02, 0xf2c10ce4, 0xbf183c02, + 0xc000f8dc, 0xbf004760, 0x0308f240, 0x0300f2c0, 0x3003f819, 0xbf072b00, 0x3381f64a, 0x3300f2c1, + 0x03e8f64f, 0x3302f2c1, 0x681bbf18, 0xbf004718, 0x0c08f240, 0x0c00f2c0, 0xc00cf819, 0x0f00f1bc, + 0xf64abf07, 0xf2c14ca5, 0xf64f3c00, 0xf2c10cec, 0xbf183c02, 0xc000f8dc, 0xbf004760, 0x03f0f64f, + 0x3302f2c1, 0x4718681b, 0x01f4f64f, 0x3102f2c1, 0x47086809, 0x01f8f64f, 0x3102f2c1, 0x47086809, + 0x03fcf64f, 0x3302f2c1, 0x4718681b, 0x1c04f64f, 0x3c02f2c1, 0xc000f8dc, 0xbf004760, 0x1208f64f, + 0x3202f2c1, 0x47106812, 0x120cf64f, 0x3202f2c1, 0x47106812, 0x1310f64f, 0x3302f2c1, 0x4718681b, + 0x1200f64f, 0x3202f2c1, 0x47106812, 0x1c18f64f, 0x3c02f2c1, 0xc000f8dc, 0xea404760, 0xb5100301, + 0xd10f079b, 0xd30d2a04, 0xc908c810, 0x429c1f12, 0xba20d0f8, 0x4288ba19, 0x2001d901, 0xf04fbd10, + 0xbd1030ff, 0x07d3b11a, 0x1c52d003, 0x2000e007, 0xf810bd10, 0xf8113b01, 0x1b1b4b01, 0xf810d107, + 0xf8113b01, 0x1b1b4b01, 0x1e92d101, 0x4618d1f1, 0x0000bd10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 ], # Relative function addresses @@ -59,8 +57,8 @@ 'pc_erase_sector': 0x2000008d, 'pc_eraseAll': 0x20000065, - 'static_base' : 0x20000000 + 0x00000004 + 0x000002dc, - 'begin_stack' : 0x20000500, + 'static_base' : 0x20000000 + 0x00000004 + 0x000002b4, + 'begin_stack' : 0x200004d0, 'begin_data' : 0x20000000 + 0x1000, 'page_size' : 0x200, 'analyzer_supported' : False, @@ -68,41 +66,106 @@ 'page_buffers' : [0x20001000, 0x20001200], # Enable double buffering 'min_program_length' : 0x200, + # Relative region addresses and sizes 'ro_start': 0x0, - 'ro_size': 0x2dc, - 'rw_start': 0x2dc, + 'ro_size': 0x2b4, + 'rw_start': 0x2b4, 'rw_size': 0x4, - 'zi_start': 0x2e0, + 'zi_start': 0x2b8, 
'zi_size': 0x44, # Flash information 'flash_start': 0x0, - 'flash_size': 0x40000, + 'flash_size': 0x3d800, 'sector_sizes': ( - (0x0, 0x8000), + (0x0, 0x200), ) } +S_FLASH_ALGO = { + 'load_address' : 0x20000000, + + # Flash algorithm as a hex string + 'instructions': [ + 0xe00abe00, + 0xf240b580, 0xf2c00004, 0xf6420000, 0xf84961e0, 0xf2401000, 0xf2c52000, 0x21000000, 0x1080f8c0, + 0x1084f8c0, 0x1180f8c0, 0x71fbf647, 0xf6406001, 0x21ff6004, 0x0000f2c5, 0x01def2cc, 0xf04f6001, + 0x210240a0, 0xf2407001, 0xf2c0000c, 0x44480000, 0xf860f000, 0xbf182800, 0xbd802001, 0x47702000, + 0xf240b580, 0xf2c0000c, 0xf2460000, 0x4448636c, 0xf6c62100, 0xf44f3365, 0xf0003276, 0x2800f859, + 0x2001bf18, 0xbf00bd80, 0xf020b580, 0xf2404170, 0xf2c0000c, 0xf2460000, 0x4448636c, 0x3365f6c6, + 0x7200f44f, 0xf844f000, 0xbf182800, 0xbd802001, 0x460bb580, 0x4170f020, 0x000cf240, 0x0000f2c0, + 0xf5b34448, 0xbf987f00, 0x7300f44f, 0xf846f000, 0xbf182800, 0xbd802001, 0x460cb5b0, 0xf0204605, + 0x46114070, 0xf0004622, 0x2800f8b8, 0x4425bf08, 0xbdb04628, 0x460ab580, 0x4170f020, 0x000cf240, + 0x0000f2c0, 0xf0004448, 0x2800f83f, 0x2001bf18, 0x0000bd80, 0x0108f240, 0x0100f2c0, 0xf8092201, + 0xf64f2001, 0xf2c101dc, 0x68093102, 0xbf004708, 0x0c08f240, 0x0c00f2c0, 0xc00cf819, 0x0f00f1bc, + 0xf64abf07, 0xf2c13c4f, 0xf64f3c00, 0xf2c10ce0, 0xbf183c02, 0xc000f8dc, 0xbf004760, 0x0c08f240, + 0x0c00f2c0, 0xc00cf819, 0x0f00f1bc, 0xf248bf07, 0xf2c17c9b, 0xf64f3c02, 0xf2c10ce4, 0xbf183c02, + 0xc000f8dc, 0xbf004760, 0x0308f240, 0x0300f2c0, 0x3003f819, 0xbf072b00, 0x3381f64a, 0x3300f2c1, + 0x03e8f64f, 0x3302f2c1, 0x681bbf18, 0xbf004718, 0x0c08f240, 0x0c00f2c0, 0xc00cf819, 0x0f00f1bc, + 0xf64abf07, 0xf2c14ca5, 0xf64f3c00, 0xf2c10cec, 0xbf183c02, 0xc000f8dc, 0xbf004760, 0x03f0f64f, + 0x3302f2c1, 0x4718681b, 0x01f4f64f, 0x3102f2c1, 0x47086809, 0x01f8f64f, 0x3102f2c1, 0x47086809, + 0x03fcf64f, 0x3302f2c1, 0x4718681b, 0x1c04f64f, 0x3c02f2c1, 0xc000f8dc, 0xbf004760, 0x1208f64f, + 0x3202f2c1, 0x47106812, 0x120cf64f, 0x3202f2c1, 
0x47106812, 0x1310f64f, 0x3302f2c1, 0x4718681b, + 0x1200f64f, 0x3202f2c1, 0x47106812, 0x1c18f64f, 0x3c02f2c1, 0xc000f8dc, 0xea404760, 0xb5100301, + 0xd10f079b, 0xd30d2a04, 0xc908c810, 0x429c1f12, 0xba20d0f8, 0x4288ba19, 0x2001d901, 0xf04fbd10, + 0xbd1030ff, 0x07d3b11a, 0x1c52d003, 0x2000e007, 0xf810bd10, 0xf8113b01, 0x1b1b4b01, 0xf810d107, + 0xf8113b01, 0x1b1b4b01, 0x1e92d101, 0x4618d1f1, 0x0000bd10, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 + ], + + # Relative function addresses + 'pc_init': 0x20000005, + 'pc_unInit': 0x20000061, + 'pc_program_page': 0x200000b5, + 'pc_erase_sector': 0x2000008d, + 'pc_eraseAll': 0x20000065, + + 'static_base' : 0x20000000 + 0x00000004 + 0x000002b4, + 'begin_stack' : 0x200004d0, + 'begin_data' : 0x20000000 + 0x1000, + 'page_size' : 0x200, + 'analyzer_supported' : False, + 'analyzer_address' : 0x00000000, + 'page_buffers' : [0x20001000, 0x20001200], # Enable double buffering + 'min_program_length' : 0x200, + + # Relative region addresses and sizes + 'ro_start': 0x0, + 'ro_size': 0x2b4, + 'rw_start': 0x2b4, + 'rw_size': 0x4, + 'zi_start': 0x2b8, + 'zi_size': 0x44, + + # Flash information + 'flash_start': 0x10000000, + 'flash_size': 0x3d800, + 'sector_sizes': ( + (0x0, 0x200), + ) +} + class LPC55S36(LPC5500Family): MEMORY_MAP = MemoryMap( - FlashRegion(name='nsflash', start=0x00000000, length=0x040000, access='rx', + FlashRegion(name='nsflash', start=0x00000000, length=0x3d800, access='rx', page_size=0x200, - sector_size=0x8000, + sector_size=0x200, is_boot_memory=True, are_erased_sectors_readable=False, algo=FLASH_ALGO), RomRegion( name='nsrom', start=0x03000000, length=0x020000, access='rx'), RamRegion( name='nscoderam', start=0x04000000, length=0x4000, access='rwx', default=False), - FlashRegion(name='sflash', start=0x10000000, length=0x040000, access='rx', + 
FlashRegion(name='sflash', start=0x10000000, length=0x3d800, access='rx', page_size=0x200, - sector_size=0x8000, + sector_size=0x200, is_boot_memory=True, are_erased_sectors_readable=False, - algo=FLASH_ALGO, + algo=S_FLASH_ALGO, alias='nsflash'), RomRegion( name='srom', start=0x13000000, length=0x020000, access='srx', alias='nsrom'), From 566dd376001dcb54dab496423c5383f4700548f0 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 17 Oct 2021 12:41:49 -0500 Subject: [PATCH 011/123] Packaging: bump pyyaml to 6.0; other version increments; Python 3.10 classifier. (#1229) - Bump minimum pyyaml version to 6.0, maximum <7.0. - Set minimum version for hidapi. - Increment pylink-square minimum to 0.11.1. - Add Python 3.10 to classifiers. --- setup.cfg | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index 47e9339b2..67d07b11d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -22,6 +22,7 @@ classifiers = Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 Topic :: Software Development Topic :: Software Development :: Debuggers Topic :: Software Development :: Embedded Systems @@ -43,16 +44,16 @@ install_requires = cmsis-pack-manager>=0.3.0 colorama<1.0 dataclasses; python_version < "3.7" - hidapi; platform_system != "Linux" + hidapi>=0.10.1,<1.0; platform_system != "Linux" intelhex>=2.0,<3.0 intervaltree>=3.0.2,<4.0 naturalsort>=1.5,<2.0 prettytable>=2.0,<3.0 pyelftools<1.0 - pylink-square>=0.8.2,<1.0 + pylink-square>=0.11.1,<1.0 pyocd_pemicro>=1.0.6 pyusb>=1.2.1,<2.0 - pyyaml>=5.1,<6.0 + pyyaml>=6.0,<7.0 six>=1.15.0,<2.0 [options.extras_require] From d5f5d542c6f8b3fbf44514d3f6b4fbc1137fa977 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 17 Oct 2021 13:14:33 -0500 Subject: [PATCH 012/123] Packs: correct which vendor is output for find and show. 
(#1230) - For find, report the family Dvendor and combined the pack vendor into the pack name column. - For show, remove the Vendor column and move pack vendor to the Pack column to match the find command. --- pyocd/subcommands/pack_cmd.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/pyocd/subcommands/pack_cmd.py b/pyocd/subcommands/pack_cmd.py index c087188cf..650c45357 100644 --- a/pyocd/subcommands/pack_cmd.py +++ b/pyocd/subcommands/pack_cmd.py @@ -135,11 +135,10 @@ def invoke(self) -> int: cache = self._get_cache() packs = pack_target.ManagedPacks.get_installed_packs(cache) - pt = self._get_pretty_table(["Vendor", "Pack", "Version"]) + pt = self._get_pretty_table(["Pack", "Version"]) for ref in packs: pt.add_row([ - ref.vendor, - ref.pack, + f"{ref.vendor}.{ref.pack}", ref.version, ]) print(pt) @@ -198,8 +197,8 @@ def invoke(self) -> int: ref, = cache.packs_for_devices([info]) pt.add_row([ info['name'], - ref.vendor, - ref.pack, + info['vendor'].split(':')[0], + f"{ref.vendor}.{ref.pack}", ref.version, info['name'].lower() in installed_target_names, ]) @@ -348,8 +347,8 @@ def invoke(self) -> int: ref, = cache.packs_for_devices([info]) pt.add_row([ info['name'], - ref.vendor, - ref.pack, + info['vendor'].split(':')[0], + f"{ref.vendor}.{ref.pack}", ref.version, info['name'].lower() in installed_target_names, ]) From fb41c1be18bc2384159a24c088ad02c4986a4959 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 10 Oct 2021 14:57:09 -0500 Subject: [PATCH 013/123] Docs: a few updates and minor corrections. - Correct capitalisation on dev guide and user scripts titles. - User scripts: insert some missing line breaks. - Command-reference: regenerate. 
--- docs/command_reference.md | 81 +++++++++++++++++++++++++-------------- docs/developers_guide.md | 2 +- docs/user_scripts.md | 13 ++++--- 3 files changed, 61 insertions(+), 35 deletions(-) diff --git a/docs/command_reference.md b/docs/command_reference.md index 9a7020e2b..9870b5912 100644 --- a/docs/command_reference.md +++ b/docs/command_reference.md @@ -17,11 +17,10 @@ precedence even when it is a prefix of multiple other commands. - All commands ------------ - +
@@ -357,6 +356,16 @@ ADDR DATA+ Write 8-bit bytes to memory. + + + + + + + + + +""") + +def gen_targets() -> None: + for target_name in sorted(BUILTIN_TARGETS.keys()): + target = BUILTIN_TARGETS[target_name] + gen_one_target(target_name, target) + + +def main() -> None: + print("""--- +title: Built-in targets +--- + +
CommandArgumentsDescription
Openocd_compatibility
+init + +init + +Ignored; for OpenOCD compatibility. +
Registers
@@ -474,6 +483,14 @@ command can be read, written, or both.
ValueAccessDescription
+aps + +read-only + +List discovered Access Ports. +
cores @@ -633,37 +650,37 @@ Commands ##### `break` **Usage**: ADDR \ -Set a breakpoint address. +Set a breakpoint address. ##### `lsbreak` **Usage**: \ -List breakpoints. +List breakpoints. ##### `lswatch` **Usage**: \ -List watchpoints. +List watchpoints. ##### `rmbreak` **Usage**: ADDR \ -Remove a breakpoint. +Remove a breakpoint. ##### `rmwatch` **Usage**: ADDR \ -Remove a watchpoint. +Remove a watchpoint. ##### `watch` **Usage**: ADDR [r|w|rw] [1|2|4] \ -Set a watchpoint address, and optional access type (default rw) and size (4). +Set a watchpoint address, and optional access type (default rw) and size (4). ### Bringup @@ -672,7 +689,7 @@ These commands are meant to be used when starting up Commander in no-init mode. ##### `initdp` **Usage**: \ -Init DP and power up debug. +Init DP and power up debug. ##### `makeap` @@ -684,7 +701,7 @@ Creates a new AP object for the given APSEL. The type of AP, MEM-AP or generic, ##### `reinit` **Usage**: \ -Reinitialize the target object. +Reinitialize the target object. ### Commander @@ -693,13 +710,13 @@ Reinitialize the target object. **Aliases**: `quit` \ **Usage**: \ -Quit pyocd commander. +Quit pyocd commander. ##### `list` **Usage**: \ -Show available targets. +Show available targets. ### Core @@ -714,21 +731,21 @@ Resume execution of the target. The target's state is read back after resuming. ##### `core` **Usage**: [NUM] \ -Select CPU core by number or print selected core. +Select CPU core by number or print selected core. ##### `halt` **Aliases**: `h` \ **Usage**: \ -Halt the target. +Halt the target. ##### `step` **Aliases**: `s` \ **Usage**: [COUNT] \ -Step one or more instructions. +Step one or more instructions. ### Dap @@ -737,28 +754,28 @@ Step one or more instructions. **Aliases**: `rap` \ **Usage**: [APSEL] ADDR \ -Read AP register. +Read AP register. ##### `readdp` **Aliases**: `rdp` \ **Usage**: ADDR \ -Read DP register. +Read DP register. 
##### `writeap` **Aliases**: `wap` \ **Usage**: [APSEL] ADDR DATA \ -Write AP register. +Write AP register. ##### `writedp` **Aliases**: `wdp` \ **Usage**: ADDR DATA \ -Write DP register. +Write DP register. ### Device @@ -772,7 +789,7 @@ Reset the target, optionally specifying the reset type. The reset type must be o ##### `unlock` **Usage**: \ -Unlock security on the target. +Unlock security on the target. ### General @@ -781,7 +798,7 @@ Unlock security on the target. **Aliases**: `?` \ **Usage**: [CMD] \ -Show help for commands. +Show help for commands. ### Memory @@ -803,7 +820,7 @@ Disassemble instructions at an address. Only available if the capstone library i ##### `erase` **Usage**: [ADDR] [COUNT] \ -Erase all internal flash or a range of sectors. +Erase all internal flash or a range of sectors. ##### `fill` @@ -821,7 +838,7 @@ Search for a value in memory within the given address range. A pattern of any nu ##### `load` **Usage**: FILENAME [ADDR] \ -Load a binary, hex, or elf file with optional base address. +Load a binary, hex, or elf file with optional base address. ##### `loadmem` @@ -861,7 +878,7 @@ Read 8-bit bytes. Optional length parameter is the number of bytes to read. If t ##### `savemem` **Usage**: ADDR LEN FILENAME \ -Save a range of memory to a binary file. +Save a range of memory to a binary file. ##### `write16` @@ -892,6 +909,14 @@ Write 64-bit double-words to memory. The data arguments are 64-bit words in big- Write 8-bit bytes to memory. The data arguments are 8-bit bytes. Can write to both RAM and flash. Flash writes are subject to minimum write size and alignment, and the flash page must have been previously erased. +### Openocd_compatibility + +##### `init` + +**Usage**: init \ +Ignored; for OpenOCD compatibility. + + ### Registers ##### `reg` @@ -949,7 +974,7 @@ Show symbol, file, and line for address. The symbol name, source file path, and **Aliases**: `st` \ **Usage**: \ -Show the target's current state. 
+Show the target's current state. ### Threads @@ -957,7 +982,7 @@ Show the target's current state. ##### `threads` **Usage**: {flush,enable,disable,status} \ -Control thread awareness. +Control thread awareness. ### Values @@ -965,11 +990,11 @@ Control thread awareness. ##### `set` **Usage**: NAME VALUE \ -Set a value. +Set a value. ##### `show` **Usage**: NAME \ -Display a value. +Display a value. diff --git a/docs/developers_guide.md b/docs/developers_guide.md index 1422c0aab..142911e46 100644 --- a/docs/developers_guide.md +++ b/docs/developers_guide.md @@ -1,5 +1,5 @@ --- -title: Developers' Guide +title: Developers' guide --- ## Setup diff --git a/docs/user_scripts.md b/docs/user_scripts.md index 2ec04a2fa..d89e4789e 100644 --- a/docs/user_scripts.md +++ b/docs/user_scripts.md @@ -1,5 +1,5 @@ --- -title: User Scripts +title: User scripts --- ## Introduction @@ -146,7 +146,7 @@ This section documents all functions that user scripts can provide to modify pyO Hook to enable debug for the given core. *core* - A `CortexM` object about to be initialized.
- **Result** - *True* Do not perform the normal procedure to start core debug. + **Result** - *True* Do not perform the normal procedure to start core debug. \ *False/None* Continue with normal behaviour. - `did_start_debug_core(core)`
@@ -159,7 +159,7 @@ This section documents all functions that user scripts can provide to modify pyO Pre-cleanup hook for the core. *core* - A `CortexM` object.
- **Result** - *True* Do not perform the normal procedure to disable core debug. + **Result** - *True* Do not perform the normal procedure to disable core debug. \ *False/None* Continue with normal behaviour. - `did_stop_debug_core(core)`
@@ -187,7 +187,8 @@ This section documents all functions that user scripts can provide to modify pyO *core* - A CortexM instance.
*reset_type* - One of the `Target.ResetType` enumerations.
- **Result** - *True* The hook performed the reset. *False/None* Caller should perform the normal + **Result** - *True* The hook performed the reset. \ + *False/None* Caller should perform the normal reset procedure. - `did_reset(core, reset_type)`
@@ -202,7 +203,7 @@ This section documents all functions that user scripts can provide to modify pyO *core* - A CortexM instance.
*reset_type* - One of the `Target.ResetType` enumerations.
- **Result** - *True* This hook handled setting up reset catch, caller should do nothing. + **Result** - *True* This hook handled setting up reset catch, caller should do nothing. \ *False/None* Perform the default reset catch set using vector catch. - `clear_reset_catch(core, reset_type)`
@@ -216,7 +217,7 @@ This section documents all functions that user scripts can provide to modify pyO Hook to override mass erase. *target* - A `CoreSightTarget` object.
- **Result** - *True* Indicate that mass erase was performed by the hook. + **Result** - *True* Indicate that mass erase was performed by the hook. \ *False/None* Mass erase was not overridden and the caller should proceed with the standard mass erase procedure. From 03de5f74f875394dbab646fdea2beaf3bada1c0c Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 10 Oct 2021 14:58:19 -0500 Subject: [PATCH 014/123] Docs: generate_builtin_target_docs.py script and output. --- docs/builtin-targets.md | 726 ++++++++++++++++++++++++ scripts/generate_builtin_target_docs.py | 53 ++ 2 files changed, 779 insertions(+) create mode 100644 docs/builtin-targets.md create mode 100755 scripts/generate_builtin_target_docs.py diff --git a/docs/builtin-targets.md b/docs/builtin-targets.md new file mode 100644 index 000000000..e01e78c2f --- /dev/null +++ b/docs/builtin-targets.md @@ -0,0 +1,726 @@ +--- +title: Built-in targets +--- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Target Type NameVendorName
cc3220sfTexas InstrumentsCC3220SF
cortex_mGenericCoreSightTarget
cy8c64_sysapCypresscy8c64_sysap
cy8c64x5_cm0Cypresscy8c64x5_cm0
cy8c64x5_cm0_full_flashCypresscy8c64x5_cm0_full_flash
cy8c64x5_cm4Cypresscy8c64x5_cm4
cy8c64x5_cm4_full_flashCypresscy8c64x5_cm4_full_flash
cy8c64xa_cm0Cypresscy8c64xA_cm0
cy8c64xa_cm0_full_flashCypresscy8c64xA_cm0_full_flash
cy8c64xa_cm4Cypresscy8c64xA_cm4
cy8c64xa_cm4_full_flashCypresscy8c64xA_cm4_full_flash
cy8c64xx_cm0Cypresscy8c64xx_cm0
cy8c64xx_cm0_full_flashCypresscy8c64xx_cm0_full_flash
cy8c64xx_cm0_nosmifCypresscy8c64xx_cm0_nosmif
cy8c64xx_cm0_s25hx512tCypresscy8c64xx_cm0_s25hx512t
cy8c64xx_cm4Cypresscy8c64xx_cm4
cy8c64xx_cm4_full_flashCypresscy8c64xx_cm4_full_flash
cy8c64xx_cm4_nosmifCypresscy8c64xx_cm4_nosmif
cy8c64xx_cm4_s25hx512tCypresscy8c64xx_cm4_s25hx512t
cy8c6xx5CypressCY8C6xx5
cy8c6xx7CypressCY8C6xx7
cy8c6xx7_nosmifCypressCY8C6xx7_nosmif
cy8c6xx7_s25fs512sCypressCY8C6xx7_S25FS512S
cy8c6xxaCypressCY8C6xxA
hc32f003HDSCHC32F003
hc32f005HDSCHC32F005
hc32f030HDSCHC32F030
hc32f072HDSCHC32F072
hc32f120x6HDSCHC32F120x6TA
hc32f120x8HDSCHC32F120x8TA
hc32f160xaHDSCHC32F160xA
hc32f160xcHDSCHC32F160xC
hc32f190HDSCHC32F190
hc32f196HDSCHC32F196
hc32f460xcHDSCHC32F460xC
hc32f460xeHDSCHC32F460xE
hc32f4a0xgHDSCHC32F4A0xG
hc32f4a0xiHDSCHC32F4A0xI
hc32l072HDSCHC32L072
hc32l073HDSCHC32L073
hc32l110HDSCHC32L110
hc32l130HDSCHC32L130
hc32l136HDSCHC32L136
hc32l190HDSCHC32L190
hc32l196HDSCHC32L196
hc32m120HDSCHC32M120
hc32m423xaHDSCHC32M423xA
k20d50mNXPK20D50M
k22fNXPK22F
k22fa12NXPK22FA12
k28f15NXPK28F15
k32l2b3NXPK32L2B3
k32w042sNXPK32W042S
k64fNXPK64F
k66f18NXPK66F18
k82f25615NXPK82F25615
ke15z7NXPKE15Z7
ke17z7NXPKE17Z7
ke18f16NXPKE18F16
kinetisNXPKinetis
kl02zNXPKL02Z
kl05zNXPKL05Z
kl25zNXPKL25Z
kl26zNXPKL26Z
kl27z4NXPKL27Z4
kl28zNXPKL28x
kl43z4NXPKL43Z4
kl46zNXPKL46Z
kl82z7NXPKL82Z7
kv10z7NXPKV10Z7
kv11z7NXPKV11Z7
kw01z4NXPKW01Z4
kw24d5NXPKW24D5
kw36z4NXPKW36Z4
kw40z4NXPKW40Z4
kw41z4NXPKW41Z4
lpc11u24NXPLPC11U24
lpc11xx_32NXPLPC11XX_32
lpc1768NXPLPC1768
lpc4088NXPLPC4088
lpc4088dmNXPLPC4088dm
lpc4088qsbNXPLPC4088qsb
lpc4330NXPLPC4330
lpc54114NXPLPC54114
lpc54608NXPLPC54608
lpc5526NXPLPC5526
lpc55s28NXPLPC55S28
lpc55s36NXPLPC55S36
lpc55s69NXPLPC55S69
lpc800NXPLPC800
lpc824NXPLPC824
lpc845NXPLPC845
m2354kjfaeNuvotonM2354KJFAE
m252kg6aeNuvotonM252KG6AE
m263kiaaeNuvotonM263KIAAE
m487jidaeNuvotonM487JIDAE
max32600MaximMAX32600
max32620MaximMAX32620
max32625MaximMAX32625
max32630MaximMAX32630
mimxrt1010NXPMIMXRT1011xxxxx
mimxrt1015NXPMIMXRT1015xxxxx
mimxrt1020NXPMIMXRT1021xxxxx
mimxrt1024NXPMIMXRT1024xxxxx
mimxrt1050NXPMIMXRT1052xxxxB_hyperflash
mimxrt1050_hyperflashNXPMIMXRT1052xxxxB_hyperflash
mimxrt1050_quadspiNXPMIMXRT1052xxxxB_quadspi
mimxrt1060NXPMIMXRT1062xxxxA
mimxrt1064NXPMIMXRT1064xxxxA
mimxrt1170_cm4NXPMIMXRT1176xxxxx_CM4
mimxrt1170_cm7NXPMIMXRT1176xxxxx_CM7
mps3_an522ArmAN522
mps3_an540ArmAN540
musca_a1ArmMuscaA1
musca_b1ArmMuscaB1
musca_s1ArmMuscaS1
ncs36510ONSemiconductorNCS36510
nrf51Nordic SemiconductorNRF51
nrf51822Nordic SemiconductorNRF51
nrf52Nordic SemiconductorNRF52832
nrf52832Nordic SemiconductorNRF52832
nrf52833Nordic SemiconductorNRF52833
nrf52840Nordic SemiconductorNRF52840
rp2040Raspberry PiRP2040Core0
rp2040_core0Raspberry PiRP2040Core0
rp2040_core1Raspberry PiRP2040Core1
rtl8195amRealtek SemiconductorRTL8195AM
s5js100SamsungS5JS100
stm32f051STMicroelectronicsSTM32F051
stm32f103rcSTMicroelectronicsSTM32F103RC
stm32f412xeSTMicroelectronicsSTM32F412xE
stm32f412xgSTMicroelectronicsSTM32F412xG
stm32f429xgSTMicroelectronicsSTM32F429xG
stm32f429xiSTMicroelectronicsSTM32F429xI
stm32f439xgSTMicroelectronicsSTM32F439xG
stm32f439xiSTMicroelectronicsSTM32F439xI
stm32f767ziSTMicroelectronicsSTM32F767xx
stm32l031x6STMicroelectronicsSTM32L031x6
stm32l432kcSTMicroelectronicsSTM32L432xC
stm32l475xcSTMicroelectronicsSTM32L475xC
stm32l475xeSTMicroelectronicsSTM32L475xE
stm32l475xgSTMicroelectronicsSTM32L475xG
w7500WIZnetW7500
+ diff --git a/scripts/generate_builtin_target_docs.py b/scripts/generate_builtin_target_docs.py new file mode 100755 index 000000000..343e4637a --- /dev/null +++ b/scripts/generate_builtin_target_docs.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2021 Chris Reed +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pyocd.core.soc_target import SoCTarget +from pyocd.target.builtin import BUILTIN_TARGETS + + +def gen_one_target(name: str, target: type[SoCTarget]) -> None: + print(f"""
{name.lower()}{target.VENDOR}{target.__name__}
+ + +""") + gen_targets() + print(""" +
Target Type NameVendorName
+""") + + +if __name__ == '__main__': + main() + + From e37b6fdf8a7db50e93123bc205e23d1831773b2f Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 18 Oct 2021 14:03:26 -0500 Subject: [PATCH 015/123] Packaging: bump cmsis-pack-manager to v0.4.0 release. (#1231) This also restricts CPM to <1.0. --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 67d07b11d..1b3449262 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,7 +41,7 @@ python_requires = >=3.6.0 # Use hidapi on macOS and Windows, not needed on Linux. install_requires = capstone>=4.0,<5.0 - cmsis-pack-manager>=0.3.0 + cmsis-pack-manager>=0.4.0,<1.0 colorama<1.0 dataclasses; python_version < "3.7" hidapi>=0.10.1,<1.0; platform_system != "Linux" From 0388837d04ba3af2e3f4fb7fd05f6b542c5c81a8 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 15 Oct 2021 09:12:13 -0500 Subject: [PATCH 016/123] Workflows: add Python 3.10 to basic test. --- .github/workflows/basic_test.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/basic_test.yaml b/.github/workflows/basic_test.yaml index e95140806..de7ab4b2b 100644 --- a/.github/workflows/basic_test.yaml +++ b/.github/workflows/basic_test.yaml @@ -12,7 +12,12 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.6, 3.7, 3.8, 3.9] + python-version: + - 3.6 + - 3.7 + - 3.8 + - 3.9 + - "3.10" steps: - uses: actions/checkout@v2 From 2af2e4ba77bdb8a5a501a86fbc62d13d974f4699 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 25 Oct 2021 10:32:37 -0500 Subject: [PATCH 017/123] RTOS: remove unused pkg_resources import. 
(#1233) --- pyocd/rtos/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyocd/rtos/__init__.py b/pyocd/rtos/__init__.py index f8424ee0d..1bd594670 100644 --- a/pyocd/rtos/__init__.py +++ b/pyocd/rtos/__init__.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2016,2020 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pkg_resources - from .provider import ThreadProvider from .argon import ArgonThreadProvider from .freertos import FreeRTOSThreadProvider From 4135275b9dcd588f2151886a99677991151e789e Mon Sep 17 00:00:00 2001 From: mikisama <41532794+mikisama@users.noreply.github.com> Date: Thu, 28 Oct 2021 21:44:28 +0800 Subject: [PATCH 018/123] Add Basic SEGGER RTT Real Time Transfer Support. (#1234) * Add SEGGER RTT Real Time Transfer Support. * few minor changes --- pyocd/__main__.py | 2 + pyocd/subcommands/rtt_cmd.py | 171 +++++++++++++++++++++++++++++++++++ 2 files changed, 173 insertions(+) create mode 100644 pyocd/subcommands/rtt_cmd.py diff --git a/pyocd/__main__.py b/pyocd/__main__.py index da09f310d..140252c8b 100644 --- a/pyocd/__main__.py +++ b/pyocd/__main__.py @@ -40,6 +40,7 @@ from .subcommands.pack_cmd import PackSubcommand from .subcommands.reset_cmd import ResetSubcommand from .subcommands.server_cmd import ServerSubcommand +from .subcommands.rtt_cmd import RTTSubcommand ## @brief Default log format for all subcommands. LOG_FORMAT = "%(relativeCreated)07d:%(levelname)s:%(module)s:%(message)s" @@ -64,6 +65,7 @@ class PyOCDTool(SubcommandBase): PackSubcommand, ResetSubcommand, ServerSubcommand, + RTTSubcommand, ] ## @brief Logging level names. 
diff --git a/pyocd/subcommands/rtt_cmd.py b/pyocd/subcommands/rtt_cmd.py new file mode 100644 index 000000000..c1c0067f8 --- /dev/null +++ b/pyocd/subcommands/rtt_cmd.py @@ -0,0 +1,171 @@ +# pyOCD debugger +# Copyright (c) 2021 mikisama +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +from typing import List +import logging +from pyocd.core.helpers import ConnectHelper +from pyocd.core.memory_map import MemoryMap, MemoryRegion, MemoryType +from pyocd.core.soc_target import SoCTarget +from pyocd.subcommands.base import SubcommandBase +from pyocd.utility.cmdline import convert_session_options, int_base_0 +from ctypes import Structure, c_char, c_int32, c_uint32, sizeof + +LOG = logging.getLogger(__name__) + + +class SEGGER_RTT_BUFFER_UP(Structure): + """! @brief `SEGGER RTT Ring Buffer` target to host.""" + + _fields_ = [ + ("sName", c_uint32), + ("pBuffer", c_uint32), + ("SizeOfBuffer", c_uint32), + ("WrOff", c_uint32), + ("RdOff", c_uint32), + ("Flags", c_uint32), + ] + + +class SEGGER_RTT_BUFFER_DOWN(Structure): + """! @brief `SEGGER RTT Ring Buffer` host to target.""" + + _fields_ = [ + ("sName", c_uint32), + ("pBuffer", c_uint32), + ("SizeOfBuffer", c_uint32), + ("WrOff", c_uint32), + ("RdOff", c_uint32), + ("Flags", c_uint32), + ] + + +class SEGGER_RTT_CB(Structure): + """! @brief `SEGGER RTT control block` structure. 
""" + + _fields_ = [ + ("acID", c_char * 16), + ("MaxNumUpBuffers", c_int32), + ("MaxNumDownBuffers", c_int32), + ("aUp", SEGGER_RTT_BUFFER_UP * 3), + ("aDown", SEGGER_RTT_BUFFER_DOWN * 3), + ] + + +class RTTSubcommand(SubcommandBase): + """! @brief `pyocd rtt` subcommand.""" + + NAMES = ["rtt"] + HELP = "SEGGER RTT Viewer." + + @classmethod + def get_args(cls) -> List[argparse.ArgumentParser]: + """! @brief Add this subcommand to the subparsers object.""" + + rtt_parser = argparse.ArgumentParser(cls.HELP, add_help=False) + + rtt_options = rtt_parser.add_argument_group("rtt options") + rtt_options.add_argument("-a", "--address", type=int_base_0, default=None, + help="Start address of RTT control block search range.") + rtt_options.add_argument("-s", "--size", type=int_base_0, default=None, + help="Size of RTT control block search range.") + + return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, rtt_parser] + + def invoke(self) -> int: + + session = None + + try: + session = ConnectHelper.session_with_chosen_probe( + project_dir=self._args.project_dir, + config_file=self._args.config, + user_script=self._args.script, + no_config=self._args.no_config, + pack=self._args.pack, + unique_id=self._args.unique_id, + target_override=self._args.target_override, + frequency=self._args.frequency, + blocking=(not self._args.no_wait), + connect_mode=self._args.connect_mode, + options=convert_session_options(self._args.options)) + + if session is None: + LOG.error("No target device available") + return 1 + + with session: + + target: SoCTarget = session.board.target + + memory_map: MemoryMap = target.get_memory_map() + ram_region: MemoryRegion = memory_map.get_default_region_of_type(MemoryType.RAM) + + if self._args.address is None or self._args.size is None: + rtt_range_start = ram_region.start + rtt_range_size = ram_region.length + elif ram_region.start <= self._args.address and self._args.size <= ram_region.length: + rtt_range_start = self._args.address + 
rtt_range_size = self._args.size + + LOG.info(f"RTT control block search range [{rtt_range_start:#08x}, {rtt_range_size:#08x}]") + + data = target.read_memory_block8(rtt_range_start, rtt_range_size) + pos = bytes(data).find(b"SEGGER RTT") + + if pos == -1: + LOG.error("No RTT control block available") + return + + rtt_cb_addr = rtt_range_start + pos + + rtt_cb = SEGGER_RTT_CB.from_buffer(bytearray(data[pos:])) + up_addr = rtt_cb_addr + SEGGER_RTT_CB.aUp.offset + # down_addr = up_addr + sizeof(SEGGER_RTT_BUFFER_UP) * rtt_cb.MaxNumUpBuffers + + LOG.info(f"_SEGGER_RTT @ {rtt_cb_addr:#08x} with {rtt_cb.MaxNumUpBuffers} aUp and {rtt_cb.MaxNumDownBuffers} aDown") + + target.resume() + + while True: + + data = target.read_memory_block8(up_addr, sizeof(SEGGER_RTT_BUFFER_UP)) + up = SEGGER_RTT_BUFFER_UP.from_buffer(bytearray(data)) + + if up.WrOff > up.RdOff: + """ + |oooooo|xxxxxxxxxxxx|oooooo| + 0 rdOff WrOff SizeOfBuffer + """ + data = target.read_memory_block8(up.pBuffer + up.RdOff, up.WrOff - up.RdOff) + target.write_memory(up_addr + SEGGER_RTT_BUFFER_UP.RdOff.offset, up.WrOff) + print(bytes(data).decode(), end="") + + elif up.WrOff < up.RdOff: + """ + |xxxxxx|oooooooooooo|xxxxxx| + 0 WrOff RdOff SizeOfBuffer + """ + data = target.read_memory_block8(up.pBuffer + up.RdOff, up.SizeOfBuffer - up.RdOff) + data += target.read_memory_block8(up.pBuffer, up.WrOff) + target.write_memory(up_addr + SEGGER_RTT_BUFFER_UP.RdOff.offset, up.WrOff) + print(bytes(data).decode(), end="") + + finally: + if session: + session.close() + + return 0 From 74e5d3e2a4c05c311cb07cfa74610a93857021bf Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 1 Nov 2021 13:03:46 -0500 Subject: [PATCH 019/123] utility: hex: correct alignment of ascii column with ragged end. 
(#1235) --- pyocd/utility/hex.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pyocd/utility/hex.py b/pyocd/utility/hex.py index 1e2ea77b8..3ceb858b9 100644 --- a/pyocd/utility/hex.py +++ b/pyocd/utility/hex.py @@ -83,6 +83,13 @@ def dump_hex_data(data, start_address=0, width=8, output=None, print_ascii=True) line_width = 4 elif width == 64: line_width = 2 + else: + raise RuntimeError(f"unsupported width of {width}") + + def line_width_in_chars(elements: int) -> int: + return elements * ((2 * width // 8) + 1) + + max_line_width = line_width_in_chars(line_width) i = 0 while i < len(data): if start_address is not None: @@ -104,7 +111,8 @@ def dump_hex_data(data, start_address=0, width=8, output=None, print_ascii=True) output.write("%016x " % d) if i % line_width == 0: break - + actual_line_width = line_width_in_chars(i - start_i) + if print_ascii: s = "|" for n in range(start_i, start_i + line_width): @@ -117,7 +125,7 @@ def dump_hex_data(data, start_address=0, width=8, output=None, print_ascii=True) d = conversion.nbit_le_list_to_byte_list([d], width) d.reverse() s += "".join((chr(b) if (chr(b) in _PRINTABLE) else '.') for b in d) - output.write(" " + s + "|") + output.write(" " * (max_line_width - actual_line_width) + " " + s + "|") output.write("\n") From 310b42b894fe1294a74e2925872709cdf9ef175a Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 2 Nov 2021 15:12:52 -0500 Subject: [PATCH 020/123] udev: add rule for Cypress KitProg2 CMSIS-DAP mode PID f148. 
--- udev/50-cmsis-dap.rules | 3 +++ 1 file changed, 3 insertions(+) diff --git a/udev/50-cmsis-dap.rules b/udev/50-cmsis-dap.rules index 435bead70..9e32cb7b9 100644 --- a/udev/50-cmsis-dap.rules +++ b/udev/50-cmsis-dap.rules @@ -1,6 +1,9 @@ # 04b4:f138 Cypress KitProg1/KitProg2 CMSIS-DAP mode SUBSYSTEM=="usb", ATTR{idVendor}=="04b4", ATTR{idProduct}=="f138", MODE:="666" +# 04b4:f148 Cypress KitProg1/KitProg2 CMSIS-DAP mode +SUBSYSTEM=="usb", ATTR{idVendor}=="04b4", ATTR{idProduct}=="f148", MODE:="666" + # 04b4:f151 Cypress MiniProg4 CMSIS-DAPv2 Bulk + I2C/SPI/UART SUBSYSTEM=="usb", ATTR{idVendor}=="04b4", ATTR{idProduct}=="f151", MODE:="666" From 2112be39ef6f4551bcce8f1fe64dfdd2103c6937 Mon Sep 17 00:00:00 2001 From: michieldwitte Date: Tue, 9 Nov 2021 18:48:38 +0100 Subject: [PATCH 021/123] Fix STM32l0x6 erase (#1237) Co-authored-by: Michiel De Witte --- pyocd/target/builtin/target_STM32L031x6.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyocd/target/builtin/target_STM32L031x6.py b/pyocd/target/builtin/target_STM32L031x6.py index bc3c42d5b..fed49eff8 100644 --- a/pyocd/target/builtin/target_STM32L031x6.py +++ b/pyocd/target/builtin/target_STM32L031x6.py @@ -82,7 +82,7 @@ class STM32L031x6(CoreSightTarget): VENDOR = "STMicroelectronics" MEMORY_MAP = MemoryMap( - FlashRegion(name='Flash', start=0x08000000, length=0x8000, blocksize=0x1000, is_boot_memory=True, algo=FLASH_ALGO), + FlashRegion(name='Flash', start=0x08000000, length=0x8000, blocksize=0x80, is_boot_memory=True, algo=FLASH_ALGO), RamRegion(name='RAM', start=0x20000000, length=0x2000), FlashRegion(name='EEPROM', start=0x08080000, length=0x400, blocksize=0x400, algo=FLASH_ALGO) ) From 8500a7801f405c15582ad5582bbb26aa94a9f7e5 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 24 Oct 2021 14:37:47 -0500 Subject: [PATCH 022/123] docs: installing requirements. 
--- docs/installing.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/installing.md b/docs/installing.md index ae8f25d1c..82d75a92f 100644 --- a/docs/installing.md +++ b/docs/installing.md @@ -2,6 +2,9 @@ title: Installing --- +PyOCD requires [Python](https://python.org/) 3.6 or later, and a recent version of [libusb](https://libusb.info/). It runs on macOS, +Linux, FreeBSD, and Windows platforms. + The latest stable version of pyOCD may be installed via [pip](https://pip.pypa.io/en/stable/index.html) as follows: From 3de94e9fcd1dac40f53e595fd2c1ce8005f7179a Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 2 Nov 2021 09:34:11 -0500 Subject: [PATCH 023/123] docs: configuring logging: add little note about no wildcards for 'logging' option. --- docs/configuring_logging.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/configuring_logging.md b/docs/configuring_logging.md index b7b40546f..7eb662ff3 100644 --- a/docs/configuring_logging.md +++ b/docs/configuring_logging.md @@ -77,7 +77,7 @@ Trace logger | Trace output `pyocd.probe.stlink.usb.trace` | STLink USB transfers `pyocd.probe.tcp_client_probe.trace` | Remote probe client requests and responses `pyocd.probe.tcp_probe_server.trace` | Remote probe server requests and responses -`pyocd.utility.notification.trace` | Sent notifications +`pyocd.utility.notification.trace` | Sent notifications ## Logger-level control @@ -149,6 +149,9 @@ parent logger such as `pyocd` will set the level for all children—this is an easy way to control the log level for all of pyOCD. +Note that because the `logging` option is passed to and handled by the Python logging module, it does not support +wildcard matching against loggers like the `--log-level` argument. + ### Full control From 5480226b04ee97fb88212842427fc56efb1a6c7f Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 24 Oct 2021 14:38:34 -0500 Subject: [PATCH 024/123] docs: debug probes. 
--- docs/debug_probes.md | 181 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 181 insertions(+) create mode 100644 docs/debug_probes.md diff --git a/docs/debug_probes.md b/docs/debug_probes.md new file mode 100644 index 000000000..ecb39a7d6 --- /dev/null +++ b/docs/debug_probes.md @@ -0,0 +1,181 @@ +--- +title: Debug probes +--- + +The debug probe is the interface between pyOCD and the target, and it drives the SWD or JTAG signals that control +the target. By way of the connection between the debug probe and target, selecting the debug probe implicitly controls +which target pyOCD debugs. + +There are two major flavours of debug probe: + +- **On-board probes**. Many evaluation boards include an on-board debug probe, so you can plug it in and start using + it without needing any other devices. +- **Standalone probes**. For debugging custom hardware you typically need a standalone probe that connects via an + SWD/JTAG cable. Most commercially available debug probes, such as the SEGGER J-Link or Arm ULINKplus, are standalone. + + +PyOCD uses debug probe driver plug-ins to enable support for different kinds of debug probes. It comes with plug-ins for +these types of debug probes: + + Plug-in Name | Debug Probe Type +---------------------|-------------------- +`cmsisdap` | [CMSIS-DAP](http://www.keil.com/pack/doc/CMSIS/DAP/html/index.html) +`pemicro` | [PE Micro](https://pemicro.com/) Cyclone and Multilink +`picoprobe` | Raspberry Pi [Picoprobe](https://github.com/raspberrypi/picoprobe) +`jlink` | [SEGGER](https://segger.com/) [J-Link](https://www.segger.com/products/debug-trace-probes/) +`stlink` | [STMicro](https://st.com/) [STLinkV2](https://www.st.com/en/development-tools/st-link-v2.html) and [STLinkV3](https://www.st.com/en/development-tools/stlink-v3set.html) +`remote` | pyOCD [remote debug probe client]({% link _docs/remote_probe_access.md %}) + + +## Unique IDs + +Every debug probe has a **unique ID**. 
For debug probes that connect with USB, this is nominally the same as +its USB serial number. However, every debug probe plugin determines for itself what the unique ID means. Some +debug probes types are not connected with USB but are accessed across the network. In this case, the unique ID +is the probe's network address. + +The unique ID parameter is actually a simple form of URL. It can be prefixed with the name of a debug probe plugin +followed by a colon, e.g. `cmsisdap:`, to restrict the type of debug probe that will match. This form is also a +requirement for certain probe types, such as the remote probe client, where the unique ID is a host address rather than +serial number. + + +## Auto target type identification + +Certain types of on-board debug probes can report the type of the target to which they are connected. + +Debug probes that support automatic target type reporting: + +- CMSIS-DAP probes based on the DAPLink firmware +- STLinkV2-1 and STLinkV3 + + +## Listing available debug probes + +To view the connected probes and their unique IDs, run `pyocd list`. This command will produce output looking like this: + + # Probe Unique ID + ------------------------------------------------------------------------------------------ + 0 Arm LPC55xx DAPLink CMSIS-DAP 000000803f7099a85fdf51158d5dfcaa6102ef474c504355 + 1 Arm Musca-B1 [musca_b1] 500700001c16fcd400000000000000000000000097969902 + +For those debug probes that support automatic target type reporting, the default target type is visible in brackets +next to the probe's name. In addition, the name of the board is printed instead of the type of debug probe. This can be +seen in the example output above for the "Arm Musca-B1" board, which has a default target type of `musca_b1`. + +If no target type appears in brackets, as can be seen above for the "Arm LPC55xx DAPLink CMSIS-DAP" probe (because it +is a standalone probe), it means the debug probe does not report the type of its connected target. 
In this +case, the target type must be manually specified either on the command line with the `-t` / `--target` +argument, or by setting the `target_override` session option (possibly in a config file). + +Note that the printed list includes only those probes that pyOCD can actively query for, which currently means only USB +based probes. + + +## Selecting the debug probe + +All of the pyOCD subcommands that communicate with a target require the user to either implicitly or explicitly +specify a debug probe. + +There are three ways the debug probe is selected: + +1. Implicitly, if only one probe is connected to the host, pyOCD can use it automatically without further configuration. + +2. If there is more than one probe connected and pyOCD is not told which to use, it will ask on the console. It presents + the same list of probes reported by `pyocd list`, plus this question: + + Enter the number of the debug probe or 'q' to quit> + + and waits until a probe index is entered. + +3. Explicitly, with the use of `-u UID` / `--uid=UID` / `--probe=UID` command line arguments. These arguments accept + either a whole or partial unique ID. + +If no probes are currently connected and pyOCD is executed without explicitly specifying the probe to use, it will +by default print a message asking for a probe to be connected and wait. If the `-W` / `--no-wait` argument is passed, +pyOCD will exit with an error instead. + + + +## Probe driver plug-in notes + +This section contains notes on the use of different types of debug probes and the corresponding driver plug-ins. + +### CMSIS-DAP + +There are two major versions of CMSIS-DAP, which use different USB classes: + +- v1: USB HID. This version is slower than v2. Still the most common version. +- v2: USB vendor-specific using bulk pipes. Higher performance than v1. WinUSB-enabled to allow driverless usage on Windows 8 and above. Can be used with Windows 7 only if a driver is installed with a tool such as Zadig. 
+ +These are several commercial probes using the CMSIS-DAP protocol: + +- Microchip EDBG/nEDBG +- Microchip Atmel-ICE +- Cypress KitProg3 +- Cypress MiniProg4 +- Keil ULINKplus +- NXP LPC-LinkII +- NXP MCU-Link +- NXP MCU-Link Pro +- NXP OpenSDA + +In addition, there are numerous other commercial and open source debug probes based on CMSIS-DAP. + +PyOCD supports automatic target type identification for debug probes built with the +[DAPLink](https://github.com/ARMmbed/DAPLink) firmware. + +#### Session options + +- `cmsis_dap.deferred_transfers` (bool, default True) Whether to use deferred transfers in the CMSIS-DAP probe backend. + By disabling deferred transfers, all writes take effect immediately. However, performance is negatively affected. +- `cmsis_dap.limit_packets` (bool, default False) Restrict CMSIS-DAP backend to using a single in-flight command at a + time. This is useful on some systems where USB is problematic, in particular virtual machines. + + +### STLink + +
+Note! Recent STLink firmware versions will only allow access to STM32 targets. If you are using a target +from a silicon vendor other than ST Micro, please use a different debug probe. +
+ +No host resident drivers need to be installed to use STLink probes; only libusb is required. (This may not be true for Windows 7, but has not been verified.) + +The minimum supported STLink firmware version is V2J24, or any V3 version. However, upgrading to the latest version +is strongly recommended. Numerous bugs have been fixed, and new commands added for feature and performance improvements. + +- V2J26: Adds 16-bit transfer support. If not supported, pyOCD will fall back to 8-bit transfers—it is possible this + will produce unexpected behaviour if used to access Device memory (e.g. memory mapped registers). +- V2J28: Minimum version for multicore target support. +- V2J32/V3J6: Allows access to banked DP registers. Usually not needed. + +[Firmware updates](https://www.st.com/en/development-tools/stsw-link007.html) + +PyOCD supports automatic target type identification for on-board STLink probes that report a board ID. + + +### J-Link + +To use a Segger J-Link probe, the driver package must be installed. Segger makes drivers available for Linux, macOS, +and Windows. + +[Firmware and driver installer and updates](https://www.segger.com/downloads/jlink/) + +On macOS, you can install the `segger-jlink` cask with Homebrew to get automatic driver updates. + +#### Session options + +- `jlink.device` (str, no default) + If this option is set to a supported J-Link device name, then the J-Link will be asked connect + using this name. Otherwise, the J-Link is configured for only the low-level CoreSight operations + required by pyOCD. Ordinarily, it does not need to be set. +- `jlink.power` (bool, default True) + Enable target power when connecting via a J-Link probe, and disable power when + disconnecting. +- `jlink.non_interactive` (bool, default True) + Controls whether the J-Link DLL is allowed to present UI dialog boxes and its control + panel. 
Note that dialog boxes will actually still be visible, but the default option + will be chosen automatically after 5 seconds. + + From e3e7e3900a99aea64da7b038cbc3b69a2525002a Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 13 Nov 2021 17:06:34 -0600 Subject: [PATCH 025/123] STLink: use command to read board ID faster. (#1236) * STLink: use command to read board ID faster. * stlink: fix some typos in exception error messages. --- pyocd/probe/stlink/constants.py | 13 ++++++- pyocd/probe/stlink/stlink.py | 68 ++++++++++++++++++++++++++------- pyocd/probe/stlink/usb.py | 12 ++++++ pyocd/probe/stlink_probe.py | 53 +++++++++++++++---------- 4 files changed, 112 insertions(+), 34 deletions(-) diff --git a/pyocd/probe/stlink/constants.py b/pyocd/probe/stlink/constants.py index f4a151fe6..61c538795 100644 --- a/pyocd/probe/stlink/constants.py +++ b/pyocd/probe/stlink/constants.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2018-2019 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -56,10 +57,20 @@ class Commands: JTAG_WRITE_DAP_REG = 0x46 # From V2J24 JTAG_READMEM_16BIT = 0x47 # From V2J26 JTAG_WRITEMEM_16BIT = 0x48 # From V2J26 + JTAG_BLINK_LED = 0x49 # From V2J28 + JTAG_GET_DISK_NAME = 0x4a # From V2J28 JTAG_INIT_AP = 0x4b # From V2J28 JTAG_CLOSE_AP_DBG = 0x4c # From V2J28 + JTAG_WRITEMEM_32BIT_NO_ADDR_INC = 0x50 # From V2J26 + JTAG_READWRITEMISC_OUT = 0x51 # From V2J32 or from V3J2 + JTAG_READWRITEMISC_IN = 0x52 # Internal from V2J32 or from V3J2 + JTAG_READWRITEMISC_GET_MAX = 0x53 # Internal from V2J32 or from V3J2 + JTAG_READMEM_32BIT_NO_ADDR_INC = 0x54 # From V2J32 or from V3J2 + JTAG_WRITE_DFTREG = 0x55 # From V2J35 or from V3J5 + JTAG_GET_BOARD_IDENTIFIERS = 0x56 # From V2J36 or from V3J6 SET_COM_FREQ = 0x61 # V3 only, replaces SWD/JTAG_SET_FREQ GET_COM_FREQ = 0x62 # V3 only + SWITCH_STLINK_FREQ = 0x63 # V3 only # Parameters for JTAG_ENTER2. 
JTAG_ENTER_SWD = 0xa3 @@ -78,7 +89,7 @@ class Commands: JTAG_STLINK_SWD_COM = 0x00 JTAG_STLINK_JTAG_COM = 0x01 -class Status(object): +class Status: """! @brief STLink status codes and messages. """ diff --git a/pyocd/probe/stlink/stlink.py b/pyocd/probe/stlink/stlink.py index 1a390ff32..18a1c5f39 100644 --- a/pyocd/probe/stlink/stlink.py +++ b/pyocd/probe/stlink/stlink.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2018-2020 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,16 +15,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .constants import (Commands, Status, SWD_FREQ_MAP, JTAG_FREQ_MAP) -from ...core import exceptions -from ...coresight import dap -from ...utility import conversion -from ...utility.mask import bfx import logging import struct import six import threading from enum import Enum +from typing import Optional +import usb.core + +from .constants import (Commands, Status, SWD_FREQ_MAP, JTAG_FREQ_MAP) +from ...core import exceptions +from ...coresight import dap +from ...utility import conversion +from ...utility.mask import bfx LOG = logging.getLogger(__name__) @@ -56,6 +60,11 @@ class Protocol(Enum): # # Keys are the hardware version, value is the minimum JTAG version. MIN_JTAG_VERSION_DPBANKSEL = {2: 32, 3: 2} + + ## Firmware version that supports JTAG_GET_BOARD_IDENTIFIERS. + # + # Keys are the hardware version, value is the minimum JTAG version. + MIN_JTAG_VERSION_GET_BOARD_IDS = {2: 36, 3: 6} ## Port number to use to indicate DP registers. DP_PORT = 0xffff @@ -110,6 +119,38 @@ def close(self): self.enter_idle() self._device.close() + def get_board_id(self) -> Optional[str]: + """@brief Return the Mbed board ID by command. + + If the device is not already open, it will be temporarily opened in order to read the board ID. + + @retval Board ID as a 4-character string. 
+ @retval None is returned if the board ID cannot be read. + """ + with self._lock: + did_open = False + try: + if not self._device.is_open: + self.open() + did_open = True + if self._jtag_version < self.MIN_JTAG_VERSION_GET_BOARD_IDS[self._hw_version]: + return None + response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_GET_BOARD_IDENTIFIERS], + readSize=36) + self._check_status(response[:2]) + + # Extract and return the board ID. If the ID field consists of all 0 bytes then we didn't + # get a valid ID, so return None instead. + board_id = response[2:6] + if board_id == b'\x00\x00\x00\x00': + return None + return board_id.decode('ascii') + except usb.core.USBError: + return None + finally: + if did_open: + self.close() + def get_version(self): # GET_VERSION response structure: # Byte 0-1: @@ -144,12 +185,12 @@ def get_version(self): # Check versions. if self._jtag_version == 0: - raise exceptions.ProbeError("%s firmware does not support JTAG/SWD. Please update" - "to a firmware version that supports JTAG/SWD" % (self._version_str)) + raise exceptions.ProbeError(f"{self._version_str} firmware does not support JTAG/SWD. Please update" + "to a firmware version that supports JTAG/SWD.") if not self._check_version(self.MIN_JTAG_VERSION): - raise exceptions.ProbeError("STLink %s is using an unsupported, older firmware version. " - "Please update to the latest STLink firmware. Current version is %s, must be at least version v2J%d.)" - % (self.serial_number, self._version_str, self.MIN_JTAG_VERSION)) + raise exceptions.ProbeError(f"STLink {self.serial_number} is using an unsupported, older firmware version. " + f"Please update to the latest STLink firmware. 
Current version is {self._version_str}, must be at " + f"least version v2J{self.MIN_JTAG_VERSION}.") def _check_version(self, min_version): return (self._hw_version >= 3) or (self._jtag_version >= min_version) @@ -263,6 +304,8 @@ def enter_debug(self, protocol): protocolParam = Commands.JTAG_ENTER_SWD elif protocol == self.Protocol.JTAG: protocolParam = Commands.JTAG_ENTER_JTAG_NO_CORE_RESET + else: + raise ValueError(protocol) response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_ENTER2, protocolParam, 0], readSize=2) self._check_status(response) self._protocol = protocol @@ -418,9 +461,8 @@ def _check_dp_bank(self, port, addr): doesn't support that. """ if ((port == self.DP_PORT) and ((addr & 0xf0) != 0) and not self.supports_banked_dp): - raise exceptions.ProbeError("this STLinkV%d firmware version does not support accessing" - " banked DP registers; please upgrade to the latest STLinkV%d firmware release", - self._hw_version, self._hw_version) + raise exceptions.ProbeError(f"this STLinkV{self._hw_version} firmware version does not support accessing" + f" banked DP registers; please upgrade to the latest STLinkV{self._hw_version} firmware release") def read_dap_register(self, port, addr): assert (addr >> 16) == 0, "register address must be 16-bit" diff --git a/pyocd/probe/stlink/usb.py b/pyocd/probe/stlink/usb.py index be96a10e9..61139497d 100644 --- a/pyocd/probe/stlink/usb.py +++ b/pyocd/probe/stlink/usb.py @@ -101,6 +101,7 @@ def get_all_connected_devices(cls): common.show_no_libusb_warning() return [] + assert devices is not None intfList = [] for dev in devices: try: @@ -178,6 +179,10 @@ def close(self): self._ep_out = None self._ep_in = None + @property + def is_open(self) -> bool: + return not self._closed + @property def serial_number(self): return self._serial_number @@ -199,6 +204,8 @@ def max_packet_size(self): return self._max_packet_size def _flush_rx(self): + assert self._ep_in + # Flush the RX buffers by reading until timeout 
exception try: while True: @@ -208,12 +215,16 @@ def _flush_rx(self): pass def _read(self, size, timeout=1000): + assert self._ep_in + # Minimum read size is the maximum packet size. read_size = max(size, self._max_packet_size) data = self._ep_in.read(read_size, timeout) return bytearray(data)[:size] def transfer(self, cmd, writeData=None, readSize=None, timeout=1000): + assert self._ep_out + # Pad command to required 16 bytes. assert len(cmd) <= self.CMD_SIZE paddedCmd = bytearray(self.CMD_SIZE) @@ -242,6 +253,7 @@ def transfer(self, cmd, writeData=None, readSize=None, timeout=1000): return None def read_swv(self, size, timeout=1000): + assert self._ep_swv return bytearray(self._ep_swv.read(size, timeout)) def __repr__(self): diff --git a/pyocd/probe/stlink_probe.py b/pyocd/probe/stlink_probe.py index d07bef543..b75dd9b98 100644 --- a/pyocd/probe/stlink_probe.py +++ b/pyocd/probe/stlink_probe.py @@ -16,6 +16,7 @@ # limitations under the License. from time import sleep +from typing import (List, Optional) from .debug_probe import DebugProbe from ..core.memory_interface import MemoryInterface @@ -32,43 +33,54 @@ class StlinkProbe(DebugProbe): """! 
@brief Wraps an STLink as a DebugProbe.""" @classmethod - def get_all_connected_probes(cls, unique_id=None, is_explicit=False): + def get_all_connected_probes(cls, unique_id: Optional[str] = None, + is_explicit: bool = False) -> List["StlinkProbe"]: return [cls(dev) for dev in STLinkUSBInterface.get_all_connected_devices()] @classmethod - def get_probe_with_id(cls, unique_id, is_explicit=False): + def get_probe_with_id(cls, unique_id: str, is_explicit: bool = False) -> Optional["StlinkProbe"]: for dev in STLinkUSBInterface.get_all_connected_devices(): if dev.serial_number == unique_id: return cls(dev) return None - def __init__(self, device): - super(StlinkProbe, self).__init__() + def __init__(self, device: STLinkUSBInterface) -> None: + super().__init__() self._link = STLink(device) self._is_open = False self._is_connected = False self._nreset_state = False self._memory_interfaces = {} self._mbed_info = None - self._board_id = None + self._board_id = self._get_board_id() self._caps = set() - # Try to detect associated board info via the STLinkV2-1 MSD volume. - detector = create_mbed_detector() - for info in detector.list_mbeds(): - if info['target_id_usb_id'] == self._link.serial_number: - self._mbed_info = info - - # Some STLink probes provide an MSD volume, but not the mbed.htm file. - # We can live without the board ID, so just ignore any error. - try: - self._board_id = info['target_id_mbed_htm'][0:4] - except KeyError: - pass - break - + def _get_board_id(self) -> Optional[str]: + # Try to get the board ID first by sending a command, since it is much faster. This requires + # opening the USB device, however, and requires a recent STLink firmware version. + board_id = self._link.get_board_id() + if board_id is None: + # Try to detect associated board info via the STLinkV2-1 MSD volume. 
+ detector = create_mbed_detector() + if detector is not None: + for info in detector.list_mbeds(): + if info['target_id_usb_id'] == self._link.serial_number: + self._mbed_info = info + + # Some STLink probes provide an MSD volume, but not the mbed.htm file. + # We can live without the board ID, so just ignore any error. + try: + board_id = info['target_id_mbed_htm'][0:4] + except KeyError: + pass + break + return board_id + @property - def description(self): + def description(self) -> str: + if self._board_id is None: + return self.product_name + try: board_info = BOARD_ID_TO_INFO[self._board_id] except KeyError: @@ -147,6 +159,7 @@ def set_clock(self, frequency): self._link.set_swd_frequency(frequency) def reset(self): + assert self.session self._link.drive_nreset(True) sleep(self.session.options.get('reset.hold_time')) self._link.drive_nreset(False) From ce05b146b3ec01fe4855c2f0e89b5e01f3a0bb1d Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 13 Nov 2021 18:17:13 -0600 Subject: [PATCH 026/123] Use libusb-package as wrapper around pyusb. (#1240) * usb: use libusb-package as wrapper around pyusb. * docs: remove libusb installation instructions. --- README.md | 16 ---------------- docs/installing.md | 18 ------------------ pyocd/probe/picoprobe.py | 3 ++- .../pydapaccess/interface/pyusb_backend.py | 5 +++-- .../pydapaccess/interface/pyusb_v2_backend.py | 5 +++-- pyocd/probe/stlink/usb.py | 3 ++- setup.cfg | 1 + 7 files changed, 11 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index 315c46eab..dd15d2ce5 100644 --- a/README.md +++ b/README.md @@ -141,22 +141,6 @@ You have a few options here: 4. Run the command in a [virtualenv](https://virtualenv.pypa.io/en/latest/) local to a specific project working set. -### libusb installation - -[pyusb](https://github.com/pyusb/pyusb) and its backend library [libusb](https://libusb.info/) are -dependencies on all supported operating systems. 
pyusb is a regular Python package and will be -installed along with pyOCD. However, libusb is a binary shared library that does not get installed -automatically via pip dependency management. - -How to install libusb depends on your OS: - -- macOS: use Homebrew: `brew install libusb` -- Linux and BSD: should already be installed. -- Windows: download libusb from [libusb.info](https://libusb.info/) and place the .dll file in your Python - installation folder next to python.exe. Make sure to use the same 32- or 64-bit architecture as - your Python installation. The latest release is [available on GitHub](https://github.com/libusb/libusb/releases); - download the .7z archive under Assets. Use the library from the VS2019 folder in the archive. - ### udev rules on Linux On Linux, particularly Ubuntu 16.04+, you must configure udev rules to allow pyOCD to access debug diff --git a/docs/installing.md b/docs/installing.md index 82d75a92f..ccc487587 100644 --- a/docs/installing.md +++ b/docs/installing.md @@ -44,24 +44,6 @@ For notes about installing and using on non-x86 systems such as Raspberry Pi, se [relevant documentation]({% link _docs/installing_on_non_x86.md %}). -libusb installation -------------------- - -[pyusb](https://github.com/pyusb/pyusb) and its backend library [libusb](https://libusb.info/) are -dependencies on all supported operating systems. pyusb is a regular Python package and will be -installed along with pyOCD. However, libusb is a binary shared library that does not get installed -automatically via pip dependency management. - -How to install libusb depends on your OS: - -- macOS: use Homebrew: `brew install libusb` -- Linux: should already be installed. -- Windows: download libusb from [libusb.info](https://libusb.info/) and place the .dll file in your Python - installation folder next to python.exe. Make sure to use the same 32- or 64-bit architecture as - your Python installation. 
The latest release is [available on GitHub](https://github.com/libusb/libusb/releases); - download the .7z archive under Assets. Use the library from the VS2019 folder in the archive. - - udev rules on Linux ------------------- diff --git a/pyocd/probe/picoprobe.py b/pyocd/probe/picoprobe.py index 6b0aea0c4..87f29898a 100644 --- a/pyocd/probe/picoprobe.py +++ b/pyocd/probe/picoprobe.py @@ -19,6 +19,7 @@ from time import sleep from usb import core, util +import libusb_package import platform import errno @@ -107,7 +108,7 @@ def enumerate_picoprobes(cls, uid=None) -> List["PicoLink"]: """! @brief Find and return all Picoprobes """ try: # Use a custom matcher to make sure the probe is a Picoprobe and accessible. - return [PicoLink(probe) for probe in core.find(find_all=True, custom_match=FindPicoprobe(uid))] + return [PicoLink(probe) for probe in libusb_package.find(find_all=True, custom_match=FindPicoprobe(uid))] except core.NoBackendError: show_no_libusb_warning() return [] diff --git a/pyocd/probe/pydapaccess/interface/pyusb_backend.py b/pyocd/probe/pydapaccess/interface/pyusb_backend.py index 0727781c9..812780787 100644 --- a/pyocd/probe/pydapaccess/interface/pyusb_backend.py +++ b/pyocd/probe/pydapaccess/interface/pyusb_backend.py @@ -35,6 +35,7 @@ LOG = logging.getLogger(__name__) try: + import libusb_package import usb.core import usb.util except ImportError: @@ -68,7 +69,7 @@ def open(self): assert self.closed is True # Get device handle - dev = usb.core.find(custom_match=FindDap(self.serial_number)) + dev = libusb_package.find(custom_match=FindDap(self.serial_number)) if dev is None: raise DAPAccessIntf.DeviceError("Device %s not found" % self.serial_number) @@ -153,7 +154,7 @@ def get_all_connected_interfaces(): """ # find all cmsis-dap devices try: - all_devices = usb.core.find(find_all=True, custom_match=FindDap()) + all_devices = libusb_package.find(find_all=True, custom_match=FindDap()) except usb.core.NoBackendError: if not 
PyUSB.did_show_no_libusb_warning: LOG.warning("CMSIS-DAPv1 probes may not be detected because no libusb library was found.") diff --git a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py index 7109ea872..ec60a9ea0 100644 --- a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py +++ b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py @@ -36,6 +36,7 @@ LOG = logging.getLogger(__name__) try: + import libusb_package import usb.core import usb.util except ImportError: @@ -78,7 +79,7 @@ def open(self): assert self.closed is True # Get device handle - dev = usb.core.find(custom_match=HasCmsisDapv2Interface(self.serial_number)) + dev = libusb_package.find(custom_match=HasCmsisDapv2Interface(self.serial_number)) if dev is None: raise DAPAccessIntf.DeviceError("Device %s not found" % self.serial_number) @@ -176,7 +177,7 @@ def get_all_connected_interfaces(): """! @brief Returns all the connected devices with a CMSIS-DAPv2 interface.""" # find all cmsis-dap devices try: - all_devices = usb.core.find(find_all=True, custom_match=HasCmsisDapv2Interface()) + all_devices = libusb_package.find(find_all=True, custom_match=HasCmsisDapv2Interface()) except usb.core.NoBackendError: common.show_no_libusb_warning() return [] diff --git a/pyocd/probe/stlink/usb.py b/pyocd/probe/stlink/usb.py index 61139497d..5f98a60dc 100644 --- a/pyocd/probe/stlink/usb.py +++ b/pyocd/probe/stlink/usb.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import libusb_package import usb.core import usb.util import logging @@ -96,7 +97,7 @@ def _usb_match(cls, dev): @classmethod def get_all_connected_devices(cls): try: - devices = usb.core.find(find_all=True, custom_match=cls._usb_match) + devices = libusb_package.find(find_all=True, custom_match=cls._usb_match) except usb.core.NoBackendError: common.show_no_libusb_warning() return [] diff --git a/setup.cfg b/setup.cfg index 1b3449262..ade269d37 100644 --- a/setup.cfg +++ b/setup.cfg @@ -47,6 +47,7 @@ install_requires = hidapi>=0.10.1,<1.0; platform_system != "Linux" intelhex>=2.0,<3.0 intervaltree>=3.0.2,<4.0 + libusb-package>=1.0,<2.0 naturalsort>=1.5,<2.0 prettytable>=2.0,<3.0 pyelftools<1.0 From e94bda4a646d2a2fe27915406be50ef6eb3d0f1f Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 21 Nov 2021 13:38:20 -0600 Subject: [PATCH 027/123] Gdbserver fault resilience (#1244) * gdbserver: fixed some simple type errors. * gdbserver: retry with timeout for faults waiting for target to stop running. After getting a transfer error or fault inside GDBServer.resume() while checking the target status, a timeout timer is started. A successful read of target status disables the timeout. If the timeout expires without a successful read, the resume() call exits and tells gdb the target got an error. Added a 'debug.status_fault_retry_timeout' option to control the retry timeout in seconds. The default is 1 second, so the user- visible behaviour is currently very similar to before this change. --- docs/options.md | 9 ++++ pyocd/core/options.py | 4 ++ pyocd/gdbserver/gdbserver.py | 80 ++++++++++++++++++++++++++++-------- 3 files changed, 75 insertions(+), 18 deletions(-) diff --git a/docs/options.md b/docs/options.md index 27c8f11de..4b2a88ac9 100644 --- a/docs/options.md +++ b/docs/options.md @@ -396,6 +396,15 @@ Whether to enable SWV printf output over the semihosting console. Requires the < option to be set. The SWO baud rate can be controlled with the swv_clock option.
debug.status_fault_retry_timeoutfloat1 +Duration in seconds that a failed target status check will be retried before an error is raised. Only +applies while the target is running after a resume operation in the debugger and pyOCD is waiting for +it to halt again. +
gdbserver_port int 3333
+ + + + + + + + + + + +
VariableDescription
+

PYOCD_PROJECT_DIR

+
+

Sets the path to pyOCD's project directory. This variable acts as a fallback if the project_dir +session option is not specified.

+
+

PYOCD_USB_BACKEND

+
+

This variable overrides the default selection of the USB backend for CMSIS-DAP v1 probes. The accepted +values are hidapiusb, pyusb, and pywinusb. An empty value is the same as +unset. CMSIS-DAP v2 probes are unaffected by the environment variable; pyusb is always used.

+

Forcing the USB backend is really only useful on Windows, because both hidapiusb and +pywinusb backends are available. Note that pyOCD only installs the hidapiusb backend +by default.

+
+

PYOCD_HISTORY

+
+

Path to the pyocd commander command history file. The default is ~/.pyocd_history.

+
+

PYOCD_HISTORY_LENGTH

+
+

Maximum number of entries in the command history file. Set to -1 for unlimited. Default is 1000.

+
+ diff --git a/docs/resources/semihosting.svg b/docs/resources/semihosting.svg new file mode 100644 index 000000000..119789555 --- /dev/null +++ b/docs/resources/semihosting.svg @@ -0,0 +1,485 @@ + + + + + + + + + + + + + + Produced by OmniGraffle 7.19.2\n2021-11-25 20:07:21 +0000 + + semihosting (text as shapes) + + + Layer 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 3. printf request is output + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2. debugger handles breakpoint + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1. function call + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Target + + + + + + + + + + + + + + + + + + + + + Host + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/semihosting.md b/docs/semihosting.md new file mode 100644 index 000000000..3c97118cc --- /dev/null +++ b/docs/semihosting.md @@ -0,0 +1,170 @@ +--- +title: Semihosting +--- + +Semihosting is a technique for routing I/O requests performed on the target to the host. In addition to I/O, some other +operations are supported. Most commonly, this means integration of the standard C library with the host's console and +file system. It allows you to log debug messages before you have peripheral drivers fully functioning or on hardware +that does not have other debug output capabilities. 
It can be used to redirect `printf()` and other standard I/O to or +from the host, read keyboard input, access configuration files, transfer data to and from the target, and so on. + +The primary downside to semihosting is that it can be quite slow, depending on how it is used and considerations such +as the debug probe's latency and SWD/JTAG clock speed. + +The [Arm semihosting documentation](https://developer.arm.com/documentation/dui0471/i/semihosting/what-is-semihosting-?lang=en) +has more information about semihosting. + + +### How it works + +To implement semihosting, the debugger intercepts requests from code running on the target and performs +the specified action on the target's behalf. Figure 1 below shows how semihosting requests work. + +![](resources/semihosting.svg)\ +**Figure 1**. Semihosting block diagram. + +The figure demonstrates and example of how a C standard library call is made to `printf()`. This is routed to a +semihosting handler, which prepares the request parameters and triggers the debugger to perform the request, in this +case writing "hello world" to the console. + +For Arm Cortex-M devices, a `BKPT` instruction with a parameter of 0xAB serves as the trigger detected by the debugger. + + +### Enabling + +To use semihosting with pyOCD, it first must be enabled. This can be done in several ways: + +- Pass `-S`/`--semihosting` on the `pyocd gdbserver` command line. +- Set the `enable_semihosting` session option to true. +- From gdb, issue a `monitor arm semihosting enable` command. This command is supported for compatibility with OpenOCD. + +Assuming the default routing options and telnet port number are acceptable, no further configuration is required. When + +
+Currently semihosting only works when pyOCD's gdbserver is running and the target is resumed, with the gdbserver +waiting for it to halt. However, the pyocd.debug.semihost.SemihostAgent class is reusable if a custom +target control loop is implemented. +
+ + +### Routing + +There several ways to route semihosting I/O requests in pyOCD, depending on the file on which the request is operating. + +First, an I/O request is classified as either "console" or "syscall". Console requests are those that operate on the +first three file descriptions: stdin (0), stdout (1), and stderr (2). Certain semihosting requests are always console. +All other requests, which are always for explicitly opened files, are classified as syscalls. + +Console requests have two options for routing, controlled with the `semihost_console_type` session option's value. + +- "telnet": I/O is connected to the "telnet" server. (In fact, it's not a true telnet server because it doesn't implement + the telnet control requests.) This server runs on the port indicated by the `telnet_port` session option or the + `-T`/`--telnet-port` command line argument of the gdbserver subcommand. +- "console": pyOCD's standard I/O is used. + +Syscall requests also have two routing options. The `semihost_use_syscalls` option controls this depending on whether it +is true or false. + +- _false_: File I/O requests are handled by pyOCD itself, and therefore are performed on the system on which the pyOCD + process is running. +- _true_: File I/O is passed to gdb for handling. + +Relative paths passed to the `SYS_OPEN` request are interpreted relative to the syscall handler's working directory. +In addition, if pyOCD and gdb are running on different systems, absolute paths will be interpreter according to the +appropriate system's root filesystem. + +Defaults are for console to be routed to telnet and syscalls handled by gdb. + + +### Building into firmware + +The blog articles listed earlier have details of how to include semihosting support when linking firmware with gcc +and newlib. These are the steps in short: + +- On the linker command line, add `--specs=rdimon.specs` and ensure `-nostartfiles` is not present. 
+- Call `initialise_monitor_handles()` from your firmware before using semihosting. + +This small example shows how to init and use semihosting from gcc. + +```c +#include + +extern void initialise_monitor_handles(void); + +void main(void) { + // init semihosting console + initialise_monitor_handles(); + + // say hello via semihosting + printf("hello, world!\n"); +} +``` + +These blog articles are also very helpful for gcc: + +- [Introduction to ARM Semihosting](https://interrupt.memfault.com/blog/arm-semihosting) is an excellent + overall introduction, including how to link in semihosting support for gcc and newlib-based projects. +- [Semihosting with GNU ARM Embedded (LaunchPad) and GNU ARM Eclipse Debug Plugins](https://mcuoneclipse.com/2014/09/11/semihosting-with-gnu-arm-embedded-launchpad-and-gnu-arm-eclipse-debug-plugins/) + +For other toolchains, please see the vendor's documentation. + + +### Trace logging + +The semihosting agent supports a trace logger, `pyocd.debug.semihost.trace` that will output a log message every time a +semihosting request is processed. This can be enabled on the command line with `-Lpyocd.debug.semihost.trace=debug`. + + +### Session options + +These are the session options that control semihosting: + +- `enable_semihosting` - Set to true to handle semihosting requests. +- `semihost_console_type` - If set to 'telnet' then the semihosting telnet server will be started. If set to 'console' then semihosting will print to pyOCD's console. +- `semihost_use_syscalls` - Whether to use GDB syscalls for semihosting file access operations, or to have pyOCD perform the operations.) +- `telnet_port` - Base TCP port number for the semihosting telnet server. The core number, which will be 0 for the primary core, is added to this value. + + +### Supported requests + +The majority of standard Arm-defined semihosting requests are supported by pyOCD. 
+ +- `SYS_OPEN` (0x01): syscall + - The special file name `:tt` is used to open standard I/O files, per the Arm semihosting specification. The open + mode selects which standard I/O file is opened. "r" (0) is stdin, "w" (4) is stdout, "a" (8) is stderr. With pyOCD's + implementation, explicitly opening the standard I/O files is not required. + - Standard I/O files opened via this request are only accessible when not routing console to the telnet server. +- `SYS_CLOSE` (0x02): syscall +- `SYS_WRITEC` (0x03): console +- `SYS_WRITE0` (0x04): console +- `SYS_WRITE` (0x05): console for stdout and stderr, otherwise syscall +- `SYS_READ` (0x06): console for stdin, otherwise syscall +- `SYS_READC` (0x07): console +- `SYS_ISTTY` (0x09): syscall +- `SYS_SEEK` (0x0a): syscall +- `SYS_FLEN` (0x0c): syscall +- `SYS_REMOVE` (0x0e): syscall +- `SYS_RENAME` (0x0f): syscall +- `SYS_CLOCK` (0x10): returns the number of centiseconds since pyOCD started (technically, since the semihosting agent + object was created, so this will not line up with timestamps in pyOCD's log output) +- `SYS_TIME` (0x11): returns the number of seconds since midnight, January 1, 1970 +- `SYS_ERRNO` (0x13): syscall + + +The following semihosting requests are not supported. If invoked, the return code is -1 and pyOCD logs a +warning message, such as "Semihost: unimplemented request pc=\ r0=\ r1=\". + +- `SYS_ISERROR` (0x08) +- `SYS_TMPNAM` (0x0d) +- `SYS_SYSTEM` (0x12) +- `SYS_GET_CMDLINE` (0x15) +- `SYS_HEAPINFO` (0x16) +- `angel_SWIreason_EnterSVC` (0x17) +- `SYS_EXIT` (0x18), also called `angel_SWIreason_ReportException` +- `SYS_ELAPSED` (0x30) +- `SYS_TICKFREQ` (0x31) + +The [Arm semihosting operations](https://developer.arm.com/documentation/dui0471/i/semihosting/semihosting-operations?lang=en) +documentation has the full specification of each request. 
+ + diff --git a/docs/swo_swv.md b/docs/swo_swv.md new file mode 100644 index 000000000..5d0e25ecd --- /dev/null +++ b/docs/swo_swv.md @@ -0,0 +1,81 @@ +--- +title: SWO/SWV +--- + +The Arm Cortex-M and CoreSight architectures support a single-wire trace output called Serial Wire Output (SWO). +This SWO trace feature can be used for everything from printf debugging to PC-sampling based profiling and various +performance measurements. + +SWO supports two wire protocols, asynchronous UART and Manchester encoding, although in practise UART is used almost +exclusively. (SWO is also available as a standalone CoreSight component, but this is relatively rare.) + +The Arm Cortex-M DWT and ITM core peripherals generate packets that can be output over SWO when a configurable set of +events occur. The combination of DWT/ITM packets transmitted via SWO is called the Serial Wire Viewer (SWV). A common +use case for SWV is printf-style log output, so much so that "SWV" has more or less come to mean exactly that. + +The major features are: + +- The gdbserver supports SWV printf-style log output to console or telnet, muxed with semihosting stdout. +- Raw SWO data can be served through a TCP port while the gdbserver is running, allowing other tools such as + [Orbuculum](https://github.com/orbcode/orbuculum) to process it. +- The Python API has a set of classes for building a trace event data flow graph. + + +### SWO support + +PyOCD supports SWO and SWV for those debug probes that support it. This includes CMSIS-DAP, J-Link, and STLink. + +Be aware that even if a probe type supports SWO, the MCU (and its CPU) must also support SWO, and the board must route +the SWO signal from the MCU to the debug header. In a surprising number of cases, even for silicon vendor evaluation +kits, the probe and MCU support it but the signal simply wasn't routed. + +Not all versions of the Arm M-profile architecture support SWO. 
The Arm v7-M and Arm v8-M Mainline architectures do +support SWO, while the Arm v6-M and Arm v8-M Baseline, architectures do not. + + + Core | Architecture | Supports SWO +----------------|-------------------|-------------- + Cortex-M0 | v6-M | - + Cortex-M0+ | v6-M | - + Cortex-M1 | v6-M | - + Cortex-M3 | v7-M | ✓ + Cortex-M4 | v7-M | ✓ + Cortex-M7 | v7-M | ✓ + Cortex-M23 | v8.0-M Baseline | - + Cortex-M33 | v8.0-M Mainline | ✓ + Cortex-M55 | v8.1-M Mainline | ✓ + + + +### Configuration + +If `enable_swv` is true, pyOCD will set up ITM and TPIU to output ITM stimulus ports over SWO at the specified baud +rate. Currently, [semihosting]({% link _docs/semihosting.md %}) must also be enabled for SWV to work, so the +`enable_semihosting` option must be on. A thread reads the data from the probe in the background and parses it. + +The SWV stream from ITM port 0 will be output to the semihosting console (see the [Routing]({% link _docs/semihosting.md +%}#routing) section of the [semihosting documentation]({% link _docs/semihosting.md %})), which is either the telnet +server or stdout depending on the `semihost_console_type` option. + + + + +An example of running the gdbserver with SWV output is: + +``` +pyocd gdb -S -Oenable_swv=1 -Oswv_system_clock=80000000 -Osemihost_console_type=console +``` + +This will turn on semihosting and SWV with the default 1 MHz baud rate, an 80 MHz system clock, and output to stdout. + + +### Session options + +Several session options are used to control and configure SWV: + +- `enable_swv` - Flag to enable SWV output. +- `swv_clock` - Optional baud rate for SWO, which defaults to 1 MHz if not set. +- `swv_system_clock` - Required system clock frequency. Used to compute TPIU baud rate divider. +- `swv_raw_enable` - Enable flag for the raw SWV stream server. +- `swv_raw_port` - TCP port number for the raw SWV stream server. The default port is 3443, which is the default port for the Orbuculum client. 
+ From c254663d3504505f2ecf521598a15e3772e5fdf1 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 12 Dec 2021 12:44:42 -0600 Subject: [PATCH 042/123] trace: fix invalid reference to collections.Iterable. (#1266) --- pyocd/trace/sink.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pyocd/trace/sink.py b/pyocd/trace/sink.py index c909259b6..8047c1db6 100644 --- a/pyocd/trace/sink.py +++ b/pyocd/trace/sink.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2017-2019 Arm Limited +# COpyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import collections +import collections.abc class TraceEventSink(object): """! @brief Abstract interface for a trace event sink.""" @@ -46,7 +47,7 @@ def receive(self, event): """ event = self.filter(event) if (event is not None) and (self._sink is not None): - if isinstance(event, collections.Iterable): + if isinstance(event, collections.abc.Iterable): for event_item in event: self._sink.receive(event_item) else: @@ -75,7 +76,7 @@ def connect(self, sinks): downstream trace event sinks. If it is an iterable (list, tuple, etc.), then it will completely replace the current list of trace event sinks. """ - if isinstance(sinks, collections.Iterable): + if isinstance(sinks, collections.abc.Iterable): self._sinks = sinks elif sinks not in self._sinks: self._sinks.append(sinks) From a70c3140698d6632a67299da24be47ff1d314cc7 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 14 Dec 2021 19:52:46 +0000 Subject: [PATCH 043/123] Fix ROM table negative addresses (#1267) * coresight: rom_table: adjust component addresses that go negative. This is for cases where the address is intended to wrap around from the ROM table's base. 
For instance, the Microchip SAML11's root ROM table is at 0x41003000, containing an entry with offset -0x60f04000 to the standard Cortex-M23 ROM table at 0xe00ff000. Without the adjustment, the M23 table base is -0x1ff01000. Somewhat magically, it still worked without this adjustment, as the Python bit-and operator converts to unsigned. * coresight: rom_table: debug logs for skipped ROM table entries. --- pyocd/coresight/rom_table.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pyocd/coresight/rom_table.py b/pyocd/coresight/rom_table.py index 3128fa2d1..9bdf9a713 100644 --- a/pyocd/coresight/rom_table.py +++ b/pyocd/coresight/rom_table.py @@ -384,9 +384,11 @@ def _power_component(self, number, powerid, entry): def _handle_table_entry(self, entry, number): # Nonzero entries can still be disabled, so check the present bit before handling. if (entry & self.ROM_TABLE_ENTRY_PRESENT_MASK) == 0: + LOG.debug("%s[%d]<%08x not present>", self.depth_indent, number, entry) return # Verify the entry format is 32-bit. if (entry & self.ROM_TABLE_32BIT_FORMAT_MASK) == 0: + LOG.debug("%s[%d]<%08x unsupported 8-bit format>", self.depth_indent, number, entry) return # Get the component's top 4k address. @@ -394,6 +396,9 @@ def _handle_table_entry(self, entry, number): if (entry & self.ROM_TABLE_ADDR_OFFSET_NEG_MASK) != 0: offset = ~bit_invert(offset) address = self.address + offset + # Handle address going negative, since python doesn't have unsigned ints. + if address < 0: + address = 0x100000000 + address # Check power ID. 
if (entry & self.ROM_TABLE_POWERIDVALID_MASK) != 0: @@ -551,6 +556,8 @@ def _read_table(self): LOG.error("Error attempting to probe CoreSight component referenced by " "ROM table entry #%d: %s", entryNumber, err, exc_info=self.ap.dp.session.get_current().log_tracebacks) + else: + LOG.debug("%s[%d]<%08x not present>", self.depth_indent, entryNumber, entry) entryAddress += 4 * entrySizeMultiplier entryNumber += 1 @@ -582,6 +589,9 @@ def _handle_table_entry(self, entry, number): if (entry & self.ROM_TABLE_ADDR_OFFSET_NEG_MASK[self._width]) != 0: offset = ~bit_invert(offset, width=self._width) address = self.address + offset + # Handle address going negative, since python doesn't have unsigned ints. + if address < 0: + address = (1 << self._width) + address # Check power ID. if (entry & self.ROM_TABLE_ENTRY_POWERIDVALID_MASK) != 0: From d7698b8b074bf4241da906b8b57fe1ef38dfa365 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 15 Dec 2021 16:23:25 -0600 Subject: [PATCH 044/123] setup.cfg: update urls and classifiers. 
--- setup.cfg | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/setup.cfg b/setup.cfg index ade269d37..4efc2c38d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -9,13 +9,17 @@ url = https://github.com/pyocd/pyOCD keywords = embedded debugger arm gdbserver license = Apache 2.0 classifiers = - Development Status :: 4 - Beta + Development Status :: 5 - Production/Stable + Environment :: Console Intended Audience :: Developers + Intended Audience :: Manufacturing + Intended Audience :: Science/Research License :: OSI Approved :: Apache Software License - Operating System :: MacOS + Operating System :: MacOS :: MacOS X Operating System :: Microsoft :: Windows Operating System :: POSIX - Operating System :: Unix + Operating System :: POSIX :: BSD + Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 @@ -26,9 +30,12 @@ classifiers = Topic :: Software Development Topic :: Software Development :: Debuggers Topic :: Software Development :: Embedded Systems + Topic :: Software Development :: Testing Topic :: Utilities project_urls = - Documentation = https://github.com/pyocd/pyOCD/docs + Website = https://pyocd.io/ + Documentation = https://pyocd.io//docs + Source = https://github.com/pyocd/pyOCD Issues = https://github.com/pyocd/pyOCD/issues Discussions = https://github.com/pyocd/pyOCD/discussions Releases = https://github.com/pyocd/pyOCD/releases From c72f2e2d5b52ce488a45187793c3d53007706fdd Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 19 Dec 2021 01:26:37 +0000 Subject: [PATCH 045/123] Commander: command files and some fixes (#1268) * commands: type annotations and type fixes. * utility: cmdline: replace shlex with custom lexer. The new lexer is simpler and more predictable, while better meeting the needs of pyocd. It splits words on whitespace and any of ';!@#$%^&*()+=[]{}|<>,?'. 
Single and double quotes are supported, and backslash-escapes are allowed. * commander: --execute arg; fix parsing issues. - Add -x/--execute arg to run commands from a file. - Add -i/--interactive arg to enable staying in REPL after running commands from the command line or files. - Reworked PyOCDCommander.run() to support --interactive option. - Add process_command_file() to CommandExecutionContext. - Fix broken Python and system commands. * test: update commander_test and commands_test. - Add command file test to commander_test. - Add more commands to test. - Test semicolon separated commands. --- pyocd/commands/base.py | 22 +++-- pyocd/commands/commander.py | 108 ++++++++++++++-------- pyocd/commands/execution_context.py | 134 ++++++++++++++++++---------- pyocd/subcommands/commander_cmd.py | 22 +++-- pyocd/utility/cmdline.py | 62 ++++++++++++- test/commander_test.py | 72 +++++++++++++-- test/commands_test.py | 6 +- test/unit/test_cmdline.py | 15 +++- 8 files changed, 331 insertions(+), 110 deletions(-) diff --git a/pyocd/commands/base.py b/pyocd/commands/base.py index 8301aad38..eaa2479c9 100755 --- a/pyocd/commands/base.py +++ b/pyocd/commands/base.py @@ -17,6 +17,7 @@ import logging import textwrap +from typing import (Any, Dict, Set, Tuple, Type, Union) from ..core import exceptions from ..utility import conversion @@ -25,7 +26,7 @@ LOG = logging.getLogger(__name__) ## @brief Dict of command group names to a set of command classes. -ALL_COMMANDS = {} +ALL_COMMANDS: Dict[str, Set[Union["CommandBase", "ValueBase"]]] = {} class CommandMeta(type): """! @brief Metaclass for commands. @@ -34,13 +35,13 @@ class CommandMeta(type): "ALL_COMMANDS" table. """ - def __new__(mcs, name, bases, dict): + def __new__(mcs: Type, name: str, bases: Tuple[type, ...], objdict: Dict[str, Any]) -> "CommandMeta": # Create the new type. - new_type = type.__new__(mcs, name, bases, dict) + new_type = type.__new__(mcs, name, bases, objdict) # The Command base class won't have an INFO. 
- if 'INFO' in dict: - info = dict['INFO'] + if 'INFO' in objdict: + info = objdict['INFO'] # Validate the INFO dict. assert (('names' in info) @@ -69,6 +70,16 @@ class CommandBase(metaclass=CommandMeta): - `extra_help`: Optional key for a string with more detailed help. """ + ## Default, empty info dict. + INFO = { + 'names': [], + 'group': '', + 'category': '', + 'nargs': 0, + 'usage': "", + 'help': "", + } + def __init__(self, context): """! @brief Constructor.""" self._context = context @@ -124,6 +135,7 @@ def _convert_value(self, arg): '[r3,8]'. The offset can be positive or negative, and any supported base. """ try: + offset = 0 deref = (arg[0] == '[') if deref: if not self.context.selected_core: diff --git a/pyocd/commands/commander.py b/pyocd/commands/commander.py index 47a18998f..eadb3ef3c 100755 --- a/pyocd/commands/commander.py +++ b/pyocd/commands/commander.py @@ -16,9 +16,11 @@ # limitations under the License. import colorama +import io import logging import os import traceback +from typing import (IO, Optional, Sequence, TYPE_CHECKING, Union) from ..core.helpers import ConnectHelper from ..core import (exceptions, session) @@ -27,12 +29,15 @@ from ..commands.repl import (PyocdRepl, ToolExitException) from ..commands.execution_context import CommandExecutionContext +if TYPE_CHECKING: + import argparse + LOG = logging.getLogger(__name__) ## Default SWD clock in Hz. DEFAULT_CLOCK_FREQ_HZ = 1000000 -class PyOCDCommander(object): +class PyOCDCommander: """! @brief Manages the commander interface. Responsible for connecting the execution context, REPL, and commands, and handles connection. @@ -47,24 +52,46 @@ class PyOCDCommander(object): @todo Replace use of args from argparse with something cleaner. """ - def __init__(self, args, cmds=None): + CommandsListType = Sequence[Union[str, IO[str]]] + + ## Commands that can run without requiring a connection. 
+ _CONNECTIONLESS_COMMANDS = ('list', 'help', 'exit') + + def __init__( + self, + args: "argparse.Namespace", + cmds: Optional[CommandsListType] = None + ) -> None: """! @brief Constructor.""" # Read command-line arguments. self.args = args - self.cmds = cmds + self.cmds: PyOCDCommander.CommandsListType = cmds or [] self.context = CommandExecutionContext(no_init=self.args.no_init) self.context.command_set.add_command_group('commander') - self.session = None - self.exit_code = 0 + self.session: Optional[session.Session] = None + self.exit_code: int = 0 - def run(self): + def run(self) -> int: """! @brief Main entry point.""" try: - # If no commands, enter interactive mode. - if self.cmds is None: - if not self.connect(): - return self.exit_code + # If no commands, enter interactive mode. If there are commands, use the --interactive arg. + enter_interactive = (not self.cmds) or self.args.interactive + + # Connect unless we are only running commands that don't require a connection. + do_connect = enter_interactive or self._commands_require_connect() + if do_connect and not self.connect(): + return self.exit_code + + # Run the list of commands we were given. + if self.cmds: + self.run_commands() + + # Enter the interactive REPL. + if enter_interactive: + assert self.session + assert self.session.board + assert self.context.target # Print connected message, unless not initing. if not self.args.no_init: @@ -84,7 +111,7 @@ def run(self): status = "no init mode" # Say what we're connected to. - print(colorama.Fore.GREEN + f"Connected to {self.context.target.part_number} " + + print(colorama.Fore.GREEN + f"Connected to {self.session.target.part_number} " + colorama.Fore.CYAN + f"[{status}]" + colorama.Style.RESET_ALL + f": {self.session.board.unique_id}") @@ -92,11 +119,6 @@ def run(self): console = PyocdRepl(self.context) console.run() - # Otherwise, run the list of commands we were given and exit. 
We only connect when - # there is a command that requires a connection (most do). - else: - self.run_commands() - except ToolExitException: self.exit_code = 0 except exceptions.TransferError: @@ -115,34 +137,42 @@ def run(self): return self.exit_code - def run_commands(self): - """! @brief Run commands specified on the command line.""" - did_connect = False - + def _commands_require_connect(self) -> bool: + """@brief Determine whether a connection is needed to run commands.""" for args in self.cmds: - # Extract the command name. - cmd = args[0].lower() + # Always assume connection required for command files. + if isinstance(args, io.IOBase): + return True + + # Check for connectionless commands. + else: + assert isinstance(args, str) + + if not ((len(args) == 1) and (args[0].lower() in self._CONNECTIONLESS_COMMANDS)): + return True - # Handle certain commands without connecting. - needs_connect = (cmd not in ('list', 'help', 'exit')) + # No command was found that needs a connection. + return False - # For others, connect first. - if needs_connect and not did_connect: - if not self.connect(): - return self.exit_code - did_connect = True + def run_commands(self) -> None: + """! @brief Run commands specified on the command line.""" + for args in self.cmds: + # Open file containing commands. + if isinstance(args, io.IOBase) and not isinstance(args, str): + self.context.process_command_file(args) + + # List of command and argument strings. + else: + assert isinstance(args, str) - # Merge commands args back to one string. - # FIXME this is overly complicated - cmdline = " ".join('"{}"'.format(a) for a in args) + # Skip empty args lists. + if len(args) == 0: + continue - # Invoke action handler. - result = self.context.process_command_line(cmdline) - if result is not None: - self.exit_code = result - break + # Run the command line. + self.context.process_command_line(args) - def connect(self): + def connect(self) -> bool: """! 
@brief Connect to the probe.""" if (self.args.frequency is not None) and (self.args.frequency != DEFAULT_CLOCK_FREQ_HZ): self.context.writei("Setting SWD clock to %d kHz", self.args.frequency // 1000) @@ -199,7 +229,7 @@ def connect(self): self.exit_code = 1 return result - def _post_connect(self): + def _post_connect(self) -> bool: """! @brief Finish the connect process. The session is opened. The `no_init` parameter passed to the constructor determines whether the diff --git a/pyocd/commands/execution_context.py b/pyocd/commands/execution_context.py index 4751cf60f..9bd154e8f 100755 --- a/pyocd/commands/execution_context.py +++ b/pyocd/commands/execution_context.py @@ -16,11 +16,10 @@ # limitations under the License. import logging -import os import sys +from typing import (IO, Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Sequence, TYPE_CHECKING) import six import pprint -from collections import namedtuple import subprocess from shutil import get_terminal_size @@ -29,9 +28,12 @@ from ..utility.strings import UniquePrefixMatcher from ..utility.cmdline import split_command_line +if TYPE_CHECKING: + from ..debug.svd.model import SVDPeripheral + LOG = logging.getLogger(__name__) -class CommandSet(object): +class CommandSet: """! @brief Holds a set of command classes.""" ## Whether command and infos modules have been loaded yet. @@ -95,7 +97,7 @@ def add_commands(self, commands): @param self The command set. @param commands List of command classes. """ - from .base import (CommandBase, ValueBase) + from .base import ValueBase value_classes = {klass for klass in commands if issubclass(klass, ValueBase)} cmd_classes = commands - value_classes cmd_names = {name: klass for klass in cmd_classes for name in klass.INFO['names']} @@ -108,21 +110,24 @@ def add_commands(self, commands): self._value_classes.update(value_classes) self._value_matcher.add_items(value_names.keys()) -CommandInvocation = namedtuple('CommandInvocation', ['cmd', 'args', 'handler']) -"""! 
@brief Groups the command name with an iterable of args and a handler function. +class CommandInvocation(NamedTuple): + """! @brief Groups the command name with an iterable of args and a handler function. -The handler is a callable that will evaluate the command. It accepts a single argument of the -CommandInvocation instance. -""" + The handler is a callable that will evaluate the command. It accepts a single argument of the + CommandInvocation instance. + """ + cmd: str + args: Sequence[str] + handler: Callable[["CommandInvocation"], None] # type:ignore # mypy doesn't support recursive types yet! -class CommandExecutionContext(object): +class CommandExecutionContext: """! @brief Manages command execution. This class holds persistent state for command execution, and provides the interface for executing commands and command lines. """ - def __init__(self, no_init=False, output_stream=None): + def __init__(self, no_init: bool = False, output_stream: Optional[IO[str]] = None): """! @brief Constructor. @param self This object. @param no_init Whether the board and target will be initialized when attach_session() is called. @@ -133,14 +138,14 @@ def __init__(self, no_init=False, output_stream=None): """ self._no_init = no_init self._output = output_stream or sys.stdout - self._python_namespace = None + self._python_namespace: Dict[str, Any] = {} self._command_set = CommandSet() # State attributes. self._session = None self._selected_core = None self._selected_ap_address = None - self._peripherals = {} + self._peripherals: Dict[str, "SVDPeripheral"] = {} self._loaded_peripherals = False # Add in the standard commands. @@ -204,6 +209,7 @@ def attach_session(self, session): assert self._session is None assert session.is_open or self._no_init self._session = session + assert self.target # Select the first core's MEM-AP by default. if not self._no_init: @@ -255,6 +261,7 @@ def command_set(self): @property def peripherals(self): """! 
@brief Dict of SVD peripherals.""" + assert self.target if self.target.svd_device and not self._loaded_peripherals: for p in self.target.svd_device.peripherals: self._peripherals[p.name.lower()] = p @@ -291,54 +298,88 @@ def selected_ap(self): if self.selected_ap_address is None: return None else: + assert self.target return self.target.aps[self.selected_ap_address] - def process_command_line(self, line): - """! @brief Run a command line consisting of one or more semicolon-separated commands.""" - for invoc in self.parse_command_line(line): + def process_command_line(self, line: str) -> None: + """! @brief Run a command line consisting of one or more semicolon-separated commands. + + @param self + @param line Complete command line string. + """ + for args in self._split_commands(line): + assert args + invoc = self.parse_command(args) invoc.handler(invoc) - def parse_command_line(self, line): - """! @brief Generator yielding CommandInvocations for commands separated by semicolons.""" - for cmd in self._split_commands(line): - invoc = self.parse_command(cmd) - if invoc is not None: - yield invoc - - def _split_commands(self, line): - """! @brief Generator yielding commands separated by semicolons.""" - # FIXME This is a big, inefficient hack to work around a bug splitting on quoted semicolons. Practically, - # though, it will never be noticeable. - parts = split_command_line(line) - result = [] + def process_command_file(self, cmd_file: IO[str]) -> None: + """! @brief Run commands contained in a file. + + @param self + @param cmd_file File object containing commands to run. Must be opened in text mode. When this method returns, + the file will be closed. This is true even if an exception is raised during command execution. + """ + try: + for line in cmd_file: + line = line.strip() + + # Skip empty or comment lines. 
+ if (len(line) == 0) or (line[0] == '#'): + continue + + self.process_command_line(line) + finally: + cmd_file.close() + + def _split_commands(self, line: str) -> Iterator[List[str]]: + """! @brief Generator yielding commands separated by semicolons. + + Python and system commands are handled specially. For these we yield a list of 2 elements: the command, + either "$" or "!", followed by the unmodified remainder of the command line. For these commands, + splitting on semicolons is not supported. + """ + parts = split_command_line(line.strip()) + + # Check for Python or system command. For these we yield a list of 2 elements: the command + # followed by the rest of the command line as it was originally. + if parts and (parts[0] in '$!'): + line_remainder = line.removeprefix(parts[0]).strip() + yield [parts[0], line_remainder] + return + + result: List[str] = [] + for p in parts: if p == ';': - yield " ".join(f'"{a}"' for a in result) - result = [] + if result: + yield result + result = [] else: result.append(p) if result: - yield " ".join(f'"{a}"' for a in result) + yield result - def parse_command(self, cmdline): + def parse_command(self, cmdline: List[str]) -> CommandInvocation: """! @brief Create a CommandInvocation from a single command.""" - cmdline = cmdline.strip() - - # Check for Python or shell command lines. + # Check for Python or system command lines. first_char = cmdline[0] if first_char in '$!': - cmdline = cmdline[1:] + # cmdline parameters that are for Python and system commands must be a 2-element list, + # as generated by _split_commands(). + assert len(cmdline) == 2 + + # Return the invocation instance with the handler set appropriately. 
if first_char == '$': - return CommandInvocation(cmdline, None, self.handle_python) + return CommandInvocation(cmdline[1], [], self.handle_python) elif first_char == '!': - return CommandInvocation(cmdline, None, self.handle_system) + return CommandInvocation(cmdline[1], [], self.handle_system) # Split command into words. args = split_command_line(cmdline) cmd = args[0].lower() args = args[1:] - # Look up shorted unambiguous match for the command name. + # Look up shortened unambiguous match for the command name. matched_command = self._command_set.command_matcher.find_one(cmd) # Check for valid command. @@ -352,7 +393,7 @@ def parse_command(self, cmdline): return CommandInvocation(matched_command, args, self.execute_command) - def execute_command(self, invocation): + def execute_command(self, invocation: CommandInvocation) -> None: """! @brief Execute a single command.""" # Must have an attached session to run commands, except for certain commands. assert (self.session is not None) or (invocation.cmd in ('list', 'help', 'exit')) @@ -364,9 +405,10 @@ def execute_command(self, invocation): cmd_object.parse(invocation.args) cmd_object.execute() - def _build_python_namespace(self): + def _build_python_namespace(self) -> None: """! @brief Construct the dictionary used as the namespace for python commands.""" import pyocd + assert self.target self._python_namespace = { 'session': self.session, 'board': self.board, @@ -379,11 +421,11 @@ def _build_python_namespace(self): 'pyocd': pyocd, } - def handle_python(self, invocation): + def handle_python(self, invocation: CommandInvocation) -> None: """! @brief Evaluate a python expression.""" try: # Lazily build the python environment. 
- if self._python_namespace is None: + if not self._python_namespace: self._build_python_namespace() result = eval(invocation.cmd, globals(), self._python_namespace) @@ -395,11 +437,11 @@ def handle_python(self, invocation): self.write(pprint.pformat(result, indent=2, width=w, depth=10)) except Exception as e: # Log the traceback before raising the exception. - if self.session.log_tracebacks: + if self.session and self.session.log_tracebacks: LOG.error("Exception while executing expression: %s", e, exc_info=True) raise exceptions.CommandError("exception while executing expression: %s" % e) - def handle_system(self, invocation): + def handle_system(self, invocation: CommandInvocation) -> None: """! @brief Evaluate a system call command.""" try: output = subprocess.check_output(invocation.cmd, stderr=subprocess.STDOUT, shell=True) diff --git a/pyocd/subcommands/commander_cmd.py b/pyocd/subcommands/commander_cmd.py index 44c68ce16..a7730cedc 100644 --- a/pyocd/subcommands/commander_cmd.py +++ b/pyocd/subcommands/commander_cmd.py @@ -16,14 +16,11 @@ import argparse import logging +import io from typing import List from .base import SubcommandBase from ..commands.commander import PyOCDCommander -from ..utility.cmdline import ( - flatten_args, - split_command_line, -) class CommanderSubcommand(SubcommandBase): """! @brief `pyocd commander` subcommand.""" @@ -32,6 +29,13 @@ class CommanderSubcommand(SubcommandBase): HELP = "Interactive command console." DEFAULT_LOG_LEVEL = logging.WARNING + EPILOG = """Commands specified by the -c/--command and -x/--execute arguments are run in the order they are listed + on the command line, and the two types can be mixed freely and in any order. Normally, pyOCD will exit after + running such commands. If the -i/--interactive flag is set, then the interactive REPL will be instead be + started when the commands have finished. In command files each line is either a complete command, a comment + started with '#', or empty. 
+ """ + @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: """! @brief Add this subcommand to the subparsers object.""" @@ -46,6 +50,11 @@ def get_args(cls) -> List[argparse.ArgumentParser]: help="Optionally specify ELF file being debugged.") commander_options.add_argument("-c", "--command", dest="commands", metavar="CMD", action='append', nargs='+', help="Run commands.") + commander_options.add_argument("-x", "--execute", dest="commands", metavar="FILE", action='append', + type=argparse.FileType('r'), + help="Execute commands from file. Pass - for stdin.") + commander_options.add_argument("-i", "--interactive", action="store_true", + help="Stay in interactive mode after running commands specified from command line or file.") return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, commander_parser] @@ -55,7 +64,10 @@ def invoke(self) -> int: if self._args.commands is not None: cmds = [] for cmd in self._args.commands: - cmds.append(flatten_args(split_command_line(arg) for arg in cmd)) + if isinstance(cmd, io.IOBase): + cmds.append(cmd) + else: + cmds.append(" ".join(cmd)) else: cmds = None diff --git a/pyocd/utility/cmdline.py b/pyocd/utility/cmdline.py index 6b8a2c7ad..fc1057d07 100644 --- a/pyocd/utility/cmdline.py +++ b/pyocd/utility/cmdline.py @@ -16,7 +16,6 @@ # limitations under the License. import logging -import shlex from typing import (Any, Dict, Iterable, List, Union) from ..core.target import Target @@ -25,15 +24,72 @@ LOG = logging.getLogger(__name__) +def split_command(cmd: str) -> List[str]: + """! 
@brief Split command by whitespace, supporting quoted strings.""" + result: List[str] = [] + state = 0 + word = '' + open_quote = '' + pos = 0 + while pos < len(cmd): + c = cmd[pos] + pos += 1 + if state == 0: + if c.isspace(): + if word: + result.append(word) + word = '' + elif c in ('"', "'"): + if word: + result.append(word) + word = '' + open_quote = c + state = 1 + elif c in ';!@#$%^&*()+=[]{}|<>,?': + if word: + result.append(word) + word = c + state = 2 + elif c == '\\': + if pos < len(cmd): + c = cmd[pos] + pos += 1 + word += c + else: + word += c + elif state == 1: + if c == open_quote: + result.append(word) + word = '' + state = 0 + # Only honour escapes in double quotes. + elif open_quote == '"' and c == '\\': + if pos < len(cmd): + c = cmd[pos] + pos += 1 + word += c + else: + word += c + elif state == 2: + if word: + result.append(word) + # Back up to reprocess this char in state 0. + word = '' + pos -= 1 + state = 0 + if word: + result.append(word) + return result + def split_command_line(cmd_line: Union[str, List[str]]) -> List[str]: """! @brief Split command line by whitespace, supporting quoted strings.""" - result = [] + result: List[str] = [] if isinstance(cmd_line, str): args = [cmd_line] else: args = cmd_line for cmd in args: - result += shlex.split(cmd) + result += split_command(cmd) return result ## Map of vector char characters to masks. 
diff --git a/test/commander_test.py b/test/commander_test.py index 51932b359..8d7467db3 100644 --- a/test/commander_test.py +++ b/test/commander_test.py @@ -21,6 +21,7 @@ import logging from types import SimpleNamespace import os +import tempfile from pyocd.probe.pydapaccess import DAPAccess from pyocd.commands.commander import PyOCDCommander @@ -61,21 +62,25 @@ def commander_test(board_id): COMMANDS_TO_TEST = [ # general commands - ["continue"], - ["status"], - ["halt"], - ["status"], + "continue", + "status", + "halt", + "status", # semicolon separated - ["status", ";", "halt", ";", "continue", ";", "halt"], + "status ; halt ; continue", + "halt;continue", + "halt; continue", + + # Python and shell + "$ 2+2", + "!echo 'hi mom'" # commander command group - these are not tested by commands_test.py. - ["list"], - ["exit"], # Must be last command! + "list", + "exit", # Must be last command! ] - print("\n------ Testing commander ------\n") - # Set up commander args. args = SimpleNamespace() args.no_init = False @@ -91,6 +96,12 @@ def commander_test(board_id): args.unique_id = board_id args.target_override = None args.elf = GDB_TEST_ELF + args.interactive = False + + # + # Test basic functionality. + # + print("\n------ Testing basic functionality ------\n") test_count += 1 try: @@ -111,6 +122,49 @@ def commander_test(board_id): print("TEST FAILED") traceback.print_exc() + # + # Test running command files. + # + print("\n------ Testing command files ------\n") + + with tempfile.NamedTemporaryFile('w+') as cmdfile: + cmdfile.write("""# here is a comment +halt +reg +continue + +# semicolons +halt ; status + +# Python and system +$ {'a': 1, 'b': 2} +!echo "hello, world!" +$target.part_number +!echo first ; echo second +""") + + # Jump back to the start of the file. 
+ cmdfile.seek(0, 0) + + test_count += 1 + try: + cmdr = PyOCDCommander(args, [cmdfile.file]) + cmdr.run() + test_pass_count += 1 + print("TEST PASSED") + + test_count += 1 + print("Testing exit code") + print("Exit code:", cmdr.exit_code) + if cmdr.exit_code == 0: + test_pass_count += 1 + print("TEST PASSED") + else: + print("TEST FAILED") + except Exception: + print("TEST FAILED") + traceback.print_exc() + print("\n\nTest Summary:") print("Pass count %i of %i tests" % (test_pass_count, test_count)) if failed_commands: diff --git a/test/commands_test.py b/test/commands_test.py index c6e94baad..88d120af0 100644 --- a/test/commands_test.py +++ b/test/commands_test.py @@ -186,11 +186,15 @@ def commands_test(board_id): # Semicolon-separated commands. 'rw 0x%08x ; rw 0x%08x' % (ram_base, ram_base + 4), + 'rb 0x%08x;rb 0x%08x' % (ram_base, ram_base + 1), + 'rb 0x%08x; rb 0x%08x' % (ram_base, ram_base + 1), # Python and system commands. '$2+ 2', + '$ target', '!echo hello', - '!echo hi \; echo there', # using escaped semicolon in a sytem command + '!echo hi ; echo there', # semicolon in a sytem command (because semicolon separation is not supported for Python/system command lines) + '! ls -d .', # Commands not tested: # "list", diff --git a/test/unit/test_cmdline.py b/test/unit/test_cmdline.py index 93932036d..d9ee08fd0 100644 --- a/test/unit/test_cmdline.py +++ b/test/unit/test_cmdline.py @@ -50,13 +50,24 @@ def test_split_whitespace(self): (r'\h\e\l\l\o', ['hello']), (r'"\"hello\""', ['"hello"']), ('x "a\\"b" y', ['x', 'a"b', 'y']), - ('hello"there"', ['hellothere']), + ('hello"there"', ['hello', 'there']), (r"'raw\string'", [r'raw\string']), - ('"foo said \\"hi\\"" and \'C:\\baz\'', ['foo said "hi"', 'and', 'C:\\baz']) + ('"foo said \\"hi\\"" and \'C:\\baz\'', ['foo said "hi"', 'and', 'C:\\baz']), + ("foo;bar", ["foo", ";", "bar"]), ]) def test_em(self, input, result): assert split_command_line(input) == result + # ;!@#$%^&*()+=[]{}|<>,? 
+ @pytest.mark.parametrize("sep", + list(c for c in ';!@#$%^&*()+=[]{}|<>,?') + ) + def test_word_separators(self, sep): + assert split_command_line(f"foo{sep}bar") == ["foo", sep, "bar"] + assert split_command_line(f"foo{sep} bar") == ["foo", sep, "bar"] + assert split_command_line(f"foo {sep}bar") == ["foo", sep, "bar"] + assert split_command_line(f"foo {sep} bar") == ["foo", sep, "bar"] + class TestConvertVectorCatch(object): def test_none_str(self): assert convert_vector_catch('none') == 0 From 2ec15c83c263440e81850d244e754e4b11a83e65 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 19 Dec 2021 21:44:01 +0000 Subject: [PATCH 046/123] Fix flash unmodified page detection regression; change keep_unwritten default to false (#1269) * flash: builder: fix _FlashPage.same attribute default value. Also fix some type issues by annotating _FlashPage and _FlashSector attributes. * options: change default value of 'keep_unwritten' to false. --- docs/options.md | 4 ++-- pyocd/core/options.py | 6 +++--- pyocd/flash/builder.py | 28 ++++++++++++++-------------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/options.md b/docs/options.md index 4b2a88ac9..2cc66c320 100644 --- a/docs/options.md +++ b/docs/options.md @@ -217,9 +217,9 @@ Disables flash programming progress bar when True. keep_unwritten bool -True +False -Whether to load existing flash content for ranges of sectors that will be erased but not written +Whether to preserve existing flash content for ranges of sectors that will be erased but not written with new data. 
diff --git a/pyocd/core/options.py b/pyocd/core/options.py index 0392117c6..c64d2873f 100644 --- a/pyocd/core/options.py +++ b/pyocd/core/options.py @@ -76,9 +76,9 @@ "SWD/JTAG frequency in Hertz."), OptionInfo('hide_programming_progress', bool, False, "Disables flash programming progress bar."), - OptionInfo('keep_unwritten', bool, True, - "Whether to load existing flash content for ranges of sectors that will be erased but not " - "written with new data. Default is True."), + OptionInfo('keep_unwritten', bool, False, + "Whether to preserve existing flash content for ranges of sectors that will be erased but not " + "written with new data. Default is False."), OptionInfo('logging', (str, dict), None, "Logging configuration dictionary, or path to YAML file containing logging configuration."), OptionInfo('no_config', bool, False, diff --git a/pyocd/flash/builder.py b/pyocd/flash/builder.py index 469a93929..82dd95017 100644 --- a/pyocd/flash/builder.py +++ b/pyocd/flash/builder.py @@ -20,7 +20,7 @@ from dataclasses import dataclass from time import time from binascii import crc32 -from typing import (Any, Union) +from typing import (Any, List, Optional, Union) from ..core.target import Target from ..core.exceptions import (FlashFailure, FlashProgramFailure) @@ -95,11 +95,11 @@ def _stub_progress(percent): class _FlashSector: """! @brief Info about an erase sector and all pages to be programmed within it.""" def __init__(self, sector_info): - self.addr = sector_info.base_addr - self.size = sector_info.size - self.max_page_count = 0 - self.page_list = [] - self.erase_weight = sector_info.erase_weight + self.addr: int = sector_info.base_addr + self.size: int = sector_info.size + self.max_page_count: int = 0 + self.page_list: List[_FlashPage] = [] + self.erase_weight: float = sector_info.erase_weight def add_page(self, page): # The first time a page is added, compute the page count for this sector. This @@ -128,14 +128,14 @@ def __repr__(self): class _FlashPage: """! 
@brief A page to be programmed and its data.""" def __init__(self, page_info): - self.addr = page_info.base_addr - self.size = page_info.size - self.data = [] - self.program_weight = page_info.program_weight - self.erased = None # Whether the data all matches the erased value. - self.same = False - self.crc = 0 - self.cached_estimate_data = None + self.addr: int = page_info.base_addr + self.size: int = page_info.size + self.data: List[int] = [] + self.program_weight: float = page_info.program_weight + self.erased: Optional[bool] = None # Whether the data all matches the erased value. + self.same: Optional[bool] = None + self.crc: int = 0 + self.cached_estimate_data: Optional[List[int]] = None def get_program_weight(self): """! @brief Get time to program a page including the data transfer.""" From 4b106c01904da7349d8a9450a638383e283168c1 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 19 Dec 2021 21:44:12 +0000 Subject: [PATCH 047/123] commands: replace use of str.removeprefix() that isn't available pre-Python 3.9. (#1270) --- pyocd/commands/execution_context.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyocd/commands/execution_context.py b/pyocd/commands/execution_context.py index 9bd154e8f..c43c7e7b2 100755 --- a/pyocd/commands/execution_context.py +++ b/pyocd/commands/execution_context.py @@ -343,7 +343,11 @@ def _split_commands(self, line: str) -> Iterator[List[str]]: # Check for Python or system command. For these we yield a list of 2 elements: the command # followed by the rest of the command line as it was originally. if parts and (parts[0] in '$!'): - line_remainder = line.removeprefix(parts[0]).strip() + # Remove the Python/system command prefix from the command line. Can't use str.removeprefix() + # since it was added in 3.9. 
+ line_remainder = line.strip() + assert line_remainder.find(parts[0]) == 0 + line_remainder = line_remainder[len(parts[0]):].strip() yield [parts[0], line_remainder] return From cffcc86a7c7b7c96ab4c859aa2c2e491931215bc Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 18 Dec 2021 18:52:58 -0600 Subject: [PATCH 048/123] test: fix some small functional test issues. --- test/automated_test.py | 5 ++++- test/gdb_test.py | 1 + test/gdb_test_script.py | 6 +++++- test/json_lists_test.py | 2 +- test/probeserver_test.py | 1 + test/test_util.py | 13 ++++++++----- 6 files changed, 20 insertions(+), 8 deletions(-) diff --git a/test/automated_test.py b/test/automated_test.py index e18c19f53..b9786fc26 100755 --- a/test/automated_test.py +++ b/test/automated_test.py @@ -171,6 +171,9 @@ def print_test_header(output_file, board, test): print(header, file=output_file) print(divider, file=output_file) +def clean_board_name(name: str) -> str: + return "".join((c if c.isalnum() else "_") for c in name) + def test_board(board_id, n, loglevel, logToConsole, commonLogFile): """! @brief Run all tests on a given board. @@ -198,7 +201,7 @@ def test_board(board_id, n, loglevel, logToConsole, commonLogFile): # Set up board-specific output file. A previously existing file is removed. env_name = (("_" + os.environ['TOX_ENV_NAME']) if ('TOX_ENV_NAME' in os.environ) else '') - name_info = "{}_{}_{}".format(env_name, board.name, n) + name_info = "{}_{}_{}".format(env_name, clean_board_name(board.name), n) log_filename = os.path.join(TEST_OUTPUT_DIR, LOG_FILE_TEMPLATE.format(name_info)) if os.path.exists(log_filename): os.remove(log_filename) diff --git a/test/gdb_test.py b/test/gdb_test.py index 154e1c7a1..3bb66be4b 100644 --- a/test/gdb_test.py +++ b/test/gdb_test.py @@ -162,6 +162,7 @@ def test_gdb(board_id=None, n=0): "--uid=%s" % board_id, ] server = PyOCDTool() + server._setup_logging = lambda: None # Disable logging setup so we don't have duplicate log output. 
LOG.info('Starting gdbserver: %s', ' '.join(server_args)) server_thread = threading.Thread(target=server.run, args=[server_args]) server_thread.daemon = True diff --git a/test/gdb_test_script.py b/test/gdb_test_script.py index 8f081c88e..19d9499f6 100644 --- a/test/gdb_test_script.py +++ b/test/gdb_test_script.py @@ -478,7 +478,11 @@ def run_test(): test_result_filename = os.path.join("output", "gdb_test_results%s_%d.txt" % (env_file_name, testn)) with open(test_result_filename, "wb") as f: f.write(json.dumps(test_result)) - gdb_execute("detach") + # Ignore errors detaching, in case connecting failed. + try: + gdb_execute("detach") + except gdb.error: + pass gdb_execute("quit %i" % fail_count) diff --git a/test/json_lists_test.py b/test/json_lists_test.py index 7556e8775..152b4d95f 100644 --- a/test/json_lists_test.py +++ b/test/json_lists_test.py @@ -37,7 +37,7 @@ class JsonListsTestResult(TestResult): def __init__(self): super(JsonListsTestResult, self).__init__(None, None, None) - self.name = "json_lsits" + self.name = "json_lists" class JsonListsTest(Test): def __init__(self): diff --git a/test/probeserver_test.py b/test/probeserver_test.py index 0efebf19d..fce0edce4 100644 --- a/test/probeserver_test.py +++ b/test/probeserver_test.py @@ -134,6 +134,7 @@ def test_probeserver(board_id=None, n=0): binary_file ] client = PyOCDTool() + client._setup_logging = lambda: None # Disable logging setup so we don't have duplicate log output. 
LOG.info('Starting client: %s', ' '.join(client_args)) client_thread = threading.Thread(target=client.run, args=[client_args]) client_thread.daemon = True diff --git a/test/test_util.py b/test/test_util.py index 9494e861a..e3c644557 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -68,7 +68,7 @@ def get_session_options(): def get_target_test_params(session): target_type = session.board.target_type error_on_invalid_access = True - if target_type in ("nrf51", "nrf52", "nrf52840"): + if target_type.startswith("nrf"): # Override clock since 10MHz is too fast test_clock = 1000000 error_on_invalid_access = False @@ -76,9 +76,8 @@ def get_target_test_params(session): # Override clock since 10MHz is too fast test_clock = 1000000 else: - # Default of 10 MHz. Most probes will not actually run this fast, but this - # sets them to their max supported frequency. - test_clock = 10000000 + # Default of 4 MHz. + test_clock = 4000000 return { 'test_clock': test_clock, 'error_on_invalid_access': error_on_invalid_access, @@ -167,6 +166,9 @@ def flush(self): for out in self.outputs: out.flush() + def isatty(self): + return False + class RecordingLogHandler(logging.Handler): def __init__(self, iostream, level=logging.NOTSET): super(RecordingLogHandler, self).__init__(level) @@ -205,7 +207,7 @@ def get_test_case(self): else: classname = "{}.{}.{}".format(self.board_name, self.board, self.name) case = ElementTree.Element('testcase', - name=self.name, + name=classname, classname=classname, status=("passed" if self.passed else "failed"), time="%.3f" % self.time @@ -217,6 +219,7 @@ def get_test_case(self): message="failure", type="failure" ) + failed.text = self.filter_output(self.output) system_out = ElementTree.SubElement(case, 'system-out') system_out.text = self.filter_output(self.output) return case From e7214ee18c1e0dcdbc359841dd7d2ad03cff7751 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Thu, 16 Dec 2021 19:18:15 -0600 Subject: [PATCH 049/123] test: flash_test: crc test 
and chip/page erase decision fixes. - Skip fast verify test if CRC analyser isn't supported by the flash algo. - Change fast verify test pass predicate to just check the flash result info's analyser type. - The chip and sector erase decision tests no longer require a particular erase type, since that is highly flaky over a wide range of test targets. Instead, the total byte count is checked. --- test/flash_test.py | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/test/flash_test.py b/test/flash_test.py index d49de584b..b52fbe2f3 100644 --- a/test/flash_test.py +++ b/test/flash_test.py @@ -246,13 +246,16 @@ def flash_test(board_id): test_count += 1 print("\n------ Test Fast Verify ------") - info = flash.flash_block(addr, new_data, progress_cb=print_progress(), fast_verify=True) - if info.program_type == FlashBuilder.FLASH_SECTOR_ERASE: - print("TEST PASSED") - test_pass_count += 1 + if flash.get_flash_info().crc_supported: + info = flash.flash_block(addr, new_data, progress_cb=print_progress(), fast_verify=True) + if info.analyze_type == FlashBuilder.FLASH_ANALYSIS_CRC32: + print("TEST PASSED") + test_pass_count += 1 + else: + print("TEST FAILED") + test_count += 1 else: - print("TEST FAILED") - test_count += 1 + print("Skipping because analyser isn't supported for this algo") print("\n------ Test Offset Write ------") addr = rom_start + rom_size // 2 @@ -339,8 +342,11 @@ def flash_test(board_id): print("\n------ Test Chip Erase Decision ------") new_data = list(data) new_data.extend([flash.region.erased_byte_value] * unused) # Pad with erased value + data_size = len(new_data) info = flash.flash_block(addr, new_data, progress_cb=print_progress()) - if info.program_type == FlashBuilder.FLASH_CHIP_ERASE: + print(f"Selected erase type is {info.program_type}") + print(f"Total byte count = {info.total_byte_count} (expected {data_size})") + if info.total_byte_count == data_size: print("TEST PASSED") test_pass_count += 
1 result.chip_erase_rate_erased = float(len(new_data)) / float(info.program_time) @@ -351,8 +357,11 @@ def flash_test(board_id): print("\n------ Test Chip Erase Decision 2 ------") new_data = list(data) new_data.extend([unerasedValue] * unused) # Pad with unerased value + data_size = len(new_data) info = flash.flash_block(addr, new_data, progress_cb=print_progress()) - if info.program_type == FlashBuilder.FLASH_CHIP_ERASE: + print(f"Selected erase type is {info.program_type}") + print(f"Total byte count = {info.total_byte_count} (expected {data_size})") + if info.total_byte_count == data_size: print("TEST PASSED") test_pass_count += 1 result.chip_erase_rate = float(len(new_data)) / float(info.program_time) @@ -363,8 +372,11 @@ def flash_test(board_id): print("\n------ Test Page Erase Decision ------") new_data = list(data) new_data.extend([unerasedValue] * unused) # Pad with unerased value + data_size = len(new_data) info = flash.flash_block(addr, new_data, progress_cb=print_progress()) - if info.program_type == FlashBuilder.FLASH_SECTOR_ERASE: + print(f"Selected erase type is {info.program_type}") + print(f"Total byte count = {info.total_byte_count} (expected {data_size})") + if info.total_byte_count == data_size: print("TEST PASSED") test_pass_count += 1 result.page_erase_rate_same = float(len(new_data)) / float(info.program_time) @@ -381,8 +393,11 @@ def flash_test(board_id): size_differ = unused - size_same new_data.extend([unerasedValue] * size_same) # Pad 5/6 with unerased value and 1/6 with 0x55 new_data.extend([0x55] * size_differ) + data_size = len(new_data) info = flash.flash_block(addr, new_data, progress_cb=print_progress()) - if info.program_type == FlashBuilder.FLASH_SECTOR_ERASE: + print(f"Selected erase type is {info.program_type}") + print(f"Total byte count = {info.total_byte_count} (expected {data_size})") + if info.total_byte_count == data_size: print("TEST PASSED") test_pass_count += 1 else: From 0b6a82afc6cc04bdd5d3ac1b2decb33754331bb4 Mon Sep 
17 00:00:00 2001 From: Chris Reed Date: Sun, 19 Dec 2021 12:46:05 -0600 Subject: [PATCH 050/123] test: automated_test: fix parallel testing. - Needed to pass runtime list of tests to the test_board() function. - Replace test_board() parameters with a BoardTestConfig dataclass. --- test/automated_test.py | 47 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/test/automated_test.py b/test/automated_test.py index b9786fc26..29c05d0e9 100755 --- a/test/automated_test.py +++ b/test/automated_test.py @@ -1,6 +1,7 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # pyOCD debugger # Copyright (c) 2015-2020 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +15,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function import os import sys @@ -24,10 +24,11 @@ from xml.etree import ElementTree import multiprocessing as mp import io +from dataclasses import dataclass +from typing import (IO, List, Optional) from pyocd.core.session import Session from pyocd.core.helpers import ConnectHelper -from pyocd.utility.conversion import float32_to_u32 from pyocd.probe.aggregator import DebugProbeAggregator from test_util import ( @@ -174,7 +175,16 @@ def print_test_header(output_file, board, test): def clean_board_name(name: str) -> str: return "".join((c if c.isalnum() else "_") for c in name) -def test_board(board_id, n, loglevel, logToConsole, commonLogFile): +@dataclass +class BoardTestConfig: + board_id: str + n: int + loglevel: int + log_to_console: bool + common_log_file: Optional[IO[str]] + test_list: List[Test] + +def test_board(config: BoardTestConfig): """! @brief Run all tests on a given board. 
When multiple test jobs are being used, this function is the entry point executed in @@ -191,6 +201,12 @@ def test_board(board_id, n, loglevel, logToConsole, commonLogFile): @param logToConsole Boolean indicating whether output should be copied to sys.stdout. @param commonLogFile If not None, an open file object to which output should be copied. """ + board_id = config.board_id + n = config.n + loglevel = config.loglevel + logToConsole = config.log_to_console + commonLogFile = config.common_log_file + probe = DebugProbeAggregator.get_probe_with_id(board_id) assert probe is not None session = Session(probe, **get_session_options()) @@ -235,7 +251,7 @@ def test_board(board_id, n, loglevel, logToConsole, commonLogFile): print_board_header(originalStdout, board, n, logToConsole, includeLeadingNewline=(n != 0)) # Run all tests on this board. - for test in test_list: + for test in config.test_list: print("{} #{}: starting {}...".format(board.name, n, test.name), file=originalStdout) # Set the test number on the test object. Used to get a unique port for the GdbTest. @@ -358,19 +374,32 @@ def main(): if args.board: board_id_list = [b for b in board_id_list if any(c for c in args.board if c.lower() in b.lower())] + # Generate board test configs. + test_configs = [ + BoardTestConfig( + board_id=board_id, + n=n, + loglevel=level, + log_to_console=logToConsole, + common_log_file=commonLogFile, + test_list=test_list, + ) + for n, board_id in enumerate(board_id_list) + ] + # If only 1 job was requested, don't bother spawning processes. start = time() if args.jobs == 1: - for n, board_id in enumerate(board_id_list): - result_list += test_board(board_id, n, level, logToConsole, commonLogFile) + for config in test_configs: + result_list += test_board(config) else: # Create a pool of processes to run tests. try: pool = mp.Pool(args.jobs) # Issue board test job to process pool. 
- async_results = [pool.apply_async(test_board, (board_id, n, level, logToConsole, commonLogFile)) - for n, board_id in enumerate(board_id_list)] + async_results = [pool.apply_async(test_board, (config,)) + for config in test_configs] # Gather results. for r in async_results: From fd80e77bb40dad0d01e8319485e5f0b57cf60b0f Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 19 Dec 2021 13:24:51 -0600 Subject: [PATCH 051/123] test: commands: add and fix test cases. - More test cases for Python and system commands. - Add 64-bit memory command tests. - Enable disassembler command tests. - Fix paths issues for some commands on Windows. --- test/commander_test.py | 3 ++- test/commands_test.py | 36 ++++++++++++++++++++++++------------ 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/test/commander_test.py b/test/commander_test.py index 8d7467db3..bc8c36a6d 100644 --- a/test/commander_test.py +++ b/test/commander_test.py @@ -74,7 +74,8 @@ def commander_test(board_id): # Python and shell "$ 2+2", - "!echo 'hi mom'" + "!echo 'hi mom'", + " $ target.vendor", # commander command group - these are not tested by commands_test.py. 
"list", diff --git a/test/commands_test.py b/test/commands_test.py index 88d120af0..ae732e401 100644 --- a/test/commands_test.py +++ b/test/commands_test.py @@ -20,6 +20,7 @@ import traceback import logging import tempfile +import platform from pyocd.core.helpers import ConnectHelper from pyocd.probe.pydapaccess import DAPAccess @@ -59,6 +60,13 @@ def run(self, board): result.test = self return result +def fix_windows_path(path: str) -> str: + """Double backslashes in paths on Windows.""" + if platform.system() == "Windows": + return path.replace('\\', '\\\\') + else: + return path + def commands_test(board_id): with ConnectHelper.session_with_chosen_probe(unique_id=board_id, **get_session_options()) as session: board = session.board @@ -106,24 +114,27 @@ def commands_test(board_id): "reg all", "reg r0", "wreg r0 0x12345678", -# "d pc", # Disable disasm because capstone is not installed by default. -# "d --center pc 32", - "read32 0x%08x" % (boot_start_addr + boot_blocksize), - "read16 0x%08x" % (boot_start_addr + boot_blocksize), + "d pc", + "d --center pc 32", + "read64 0x%08x" % ((boot_start_addr + boot_blocksize) & ~7), + "read32 0x%08x" % ((boot_start_addr + boot_blocksize) & ~3), + "read16 0x%08x" % ((boot_start_addr + boot_blocksize) & ~1), "read8 0x%08x" % (boot_start_addr + boot_blocksize), + "rd 0x%08x 16" % ram_base, "rw 0x%08x 16" % ram_base, "rh 0x%08x 16" % ram_base, "rb 0x%08x 16" % ram_base, + "write64 0x%08x 0x1122334455667788 0xaabbccddeeff0011" % ram_base, "write32 0x%08x 0x11223344 0x55667788" % ram_base, "write16 0x%08x 0xabcd" % (ram_base + 8), "write8 0x%08x 0 1 2 3 4 5 6" % (ram_base + 10), - "savemem 0x%08x 128 '%s'" % (boot_start_addr, temp_bin_file), - "loadmem 0x%08x '%s'" % (ram_base, temp_bin_file), - "loadmem 0x%08x '%s'" % (boot_start_addr, binary_file), - "load '%s'" % temp_test_hex_name, - "load '%s' 0x%08x" % (binary_file, boot_start_addr), - "compare 0x%08x '%s'" % (ram_base, temp_bin_file), - "compare 0x%08x 32 '%s'" % 
'!echo hi ; echo there', # semicolon in a system command (because semicolon separation is not supported for Python/system command lines)
+IGNORE_FAILURE_TESTS = [ + "Connect Test", + "Gdb Test", + ] + def print_summary(test_list, result_list, test_time, output_file=None): for test in test_list: test.print_perf_info(result_list, output_file=output_file) - Test.print_results(result_list, output_file=output_file) + Test.print_results(result_list, output_file=output_file, ignored=IGNORE_FAILURE_TESTS) print("", file=output_file) print("Test Time: %.3f" % test_time, file=output_file) if Test.all_tests_pass(result_list): @@ -416,7 +422,7 @@ def main(): print_summary(test_list, result_list, test_time, output_file) generate_xml_results(result_list) - exit_val = 0 if Test.all_tests_pass(result_list) else -1 + exit_val = 0 if Test.all_tests_pass(result_list, ignored=IGNORE_FAILURE_TESTS) else -1 exit(exit_val) #TODO - check if any threads are still running? diff --git a/test/test_util.py b/test/test_util.py index e3c644557..f83f92b31 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -265,7 +265,7 @@ def print_perf_info(self, result_list, output_file=None): pass @staticmethod - def print_results(result_list, output_file=None): + def print_results(result_list, output_file=None, ignored=[]): msg_format_str = "{:<15}{:<21}{:<15}{:<15}" print("\n\n------ TEST RESULTS ------") print(msg_format_str .format("Target", "Test", "Result", "Time"), @@ -273,16 +273,18 @@ def print_results(result_list, output_file=None): print("", file=output_file) for result in result_list: status_str = "Pass" if result.passed else "Fail" + if not result.passed and result.test.name in ignored: + status_str += " [ignored]" print(msg_format_str.format(result.board, result.test.name, status_str, "%.3f" % result.time), file=output_file) @staticmethod - def all_tests_pass(result_list): + def all_tests_pass(result_list, ignored=[]): passed = True for result in result_list: - if not result.passed: + if not result.passed and result.test.name not in ignored: passed = False break if len(result_list) <= 0: From 
43da56d6a3e8dcdca9ae750f09a0f3f587966219 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 19 Dec 2021 19:28:36 -0600 Subject: [PATCH 053/123] test: automated_test: add missing user_script_test. --- test/automated_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/automated_test.py b/test/automated_test.py index a44a998db..bc442432c 100755 --- a/test/automated_test.py +++ b/test/automated_test.py @@ -55,6 +55,7 @@ from commands_test import CommandsTest from commander_test import CommanderTest from probeserver_test import ProbeserverTest +from user_script_test import UserScriptTest XML_RESULTS_TEMPLATE = "test_results{}.xml" LOG_FILE_TEMPLATE = "automated_test_result{}.txt" @@ -79,6 +80,7 @@ CommandsTest(), CommanderTest(), ProbeserverTest(), + UserScriptTest(), ] # Actual list used at runtime, filted by command line args. From de9ff18ac85adc6c92bb7d9df601e826e9ad9240 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 15 Mar 2021 17:12:05 -0500 Subject: [PATCH 054/123] test: add Azure pipeline for functional tests. --- .azure/functional-test-pipeline.yml | 116 ++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 .azure/functional-test-pipeline.yml diff --git a/.azure/functional-test-pipeline.yml b/.azure/functional-test-pipeline.yml new file mode 100644 index 000000000..453df7505 --- /dev/null +++ b/.azure/functional-test-pipeline.yml @@ -0,0 +1,116 @@ +# pyocd functional test pipeline + +trigger: + branches: + include: + - '*' + paths: + include: + - 'pyocd' + - 'test' + +pr: + branches: + include: + - '*' + paths: + include: + - 'pyocd' + - 'test' + +jobs: +- job: functional_tests + displayName: "Functional tests" + timeoutInMinutes: 200 + + # Fully clean workspaces before testing. + workspace: + clean: all + + # Matrix. + strategy: + matrix: + Mac: + test_os: Darwin + Linux: + test_os: Linux + Win: + test_os: Windows_NT + + # Agent pool and requirements. 
+ pool: + name: functional-test + demands: + - Agent.OS -equals $(test_os) + + steps: + # Configure shallow checkout. + - checkout: self + fetchDepth: 1 + + # Linux/Mac: Activate venv and install dependencies. + - script: | + python3 -m venv .venv + source .venv/bin/activate + python -m pip install --upgrade pip setuptools wheel + pip install .[test] + name: install_dependencies_posix + displayName: 'Install dependencies (Posix)' + condition: and(in(variables['agent.os'], 'Darwin', 'Linux'), succeeded()) + + # Windows: Activate venv and install dependencies. + - script: | + python -m venv .venv + call .venv\Scripts\activate.bat + python -m pip install --upgrade pip setuptools wheel + pip install .[test] + name: install_dependencies_win + displayName: 'Install dependencies (Win)' + condition: and(eq(variables['agent.os'], 'Windows_NT'), succeeded()) + + # Download pyocd.yaml config file into test directory. + - task: UniversalPackages@0 + name: install_test_config + inputs: + command: download + vstsFeed: 'pyocd/config' + vstsFeedPackage: 'pyocd-test-config' + vstsPackageVersion: '*' + downloadDirectory: 'test' + displayName: 'Install test config' + + # Linux/Mac: Activate venv and run automated test suite. + - script: | + source .venv/bin/activate + cd test + python ./automated_test.py --quiet + name: run_tests_posix + displayName: 'Run tests (Posix)' + condition: and(in(variables['agent.os'], 'Darwin', 'Linux'), succeeded()) + + # Windows: Activate venv and run automated test suite. + - script: | + call .venv\Scripts\activate.bat + cd test + python automated_test.py --quiet + name: run_tests_win + displayName: 'Run tests (Win)' + condition: and(eq(variables['agent.os'], 'Windows_NT'), succeeded()) + + # Publish JUnit-format test results. + - task: PublishTestResults@2 + inputs: + testResultsFiles: test/output/*.xml + testRunTitle: "$(Agent.OS) functional tests" + platform: "$(Agent.OS)" # Doesn't show up anywhere in Azure UI. 
+ condition: succeededOrFailed() + displayName: "Publish test results" + + # Publish test outputs. + - task: PublishPipelineArtifact@1 + inputs: + targetPath: "test/output" + artifactName: outputs_$(test_os) + condition: succeededOrFailed() + displayName: "Publish test output files" + From 2b8b273b65eff01fac3ad9a7db517a641ebd96b2 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 19 Dec 2021 18:15:56 -0600 Subject: [PATCH 055/123] docs: update readme and automated_tests with AZP info and full functional test list. --- README.md | 4 +++- docs/automated_tests.md | 23 +++++++++++++++++++++-- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index ebd365b5f..5c0e08ce9 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,14 @@ pyOCD ===== -[\[pyocd.io\]](https://pyocd.io/) [\[Docs\]](https://pyocd.io/docs) [\[Slack\]](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg) [\[Mailing list\]](https://groups.google.com/g/pyocd) +[\[pyocd.io\]](https://pyocd.io/) [\[Docs\]](https://pyocd.io/docs) [\[Slack\]](https://join.slack.com/t/pyocd/shared_invite/zt-wmy3zvg5-nRLj1GBWYh708TVfIx9Llg) [\[Mailing list\]](https://groups.google.com/g/pyocd) [\[CI results\]](https://dev.azure.com/pyocd/pyocd/_build?definitionId=1&_a=summary) + + + + + diff --git a/pyocd/core/options.py b/pyocd/core/options.py index c64d2873f..03f51140f 100644 --- a/pyocd/core/options.py +++ b/pyocd/core/options.py @@ -39,6 +39,9 @@ "Whether the CMSIS-DAP probe backend will use deferred transfers for improved performance."), OptionInfo('cmsis_dap.limit_packets', bool, False, "Restrict CMSIS-DAP backend to using a single in-flight command at a time."), + OptionInfo('cmsis_dap.prefer_v1', bool, False, + "If a device provides both CMSIS-DAP v1 and v2 interfaces, use the v1 interface in preference of v2. " + "Normal behaviour is to prefer the v2 interface. 
This option is primarily intended for testing."), OptionInfo('commander.history_length', int, 1000, "Number of entries in the pyOCD Commander command history. Set to -1 for unlimited. Default is 1000."), OptionInfo('config_file', str, None, diff --git a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py index bee56a335..0c2907426 100644 --- a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py +++ b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py @@ -70,11 +70,18 @@ def _get_interfaces(): # Get CMSIS-DAPv2 interfaces. v2_interfaces = INTERFACE[USB_BACKEND_V2].get_all_connected_interfaces() - # Prefer v2 over v1 if a device provides both. - devices_in_both = [v1 for v1 in v1_interfaces for v2 in v2_interfaces - if _get_unique_id(v1) == _get_unique_id(v2)] - for dev in devices_in_both: - v1_interfaces.remove(dev) + # Prefer v2 over v1 if a device provides both, unless the 'cmsis_dap.prefer_v1' option is set. + prefer_v1 = session.Session.get_current().options.get('cmsis_dap.prefer_v1') + if prefer_v1: + devices_in_both = [v2 for v2 in v2_interfaces for v1 in v1_interfaces + if _get_unique_id(v1) == _get_unique_id(v2)] + for dev in devices_in_both: + v2_interfaces.remove(dev) + else: + devices_in_both = [v1 for v1 in v1_interfaces for v2 in v2_interfaces + if _get_unique_id(v1) == _get_unique_id(v2)] + for dev in devices_in_both: + v1_interfaces.remove(dev) # Return the combined list. return v1_interfaces + v2_interfaces From 0446fed6749d926d9c781f9fbb34492659740b08 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 15 Dec 2021 16:07:39 -0600 Subject: [PATCH 062/123] CMSIS-DAP: pydapaccess: backend: timeouts for reads. 
+ # Spin for a while if there's no data available yet. 100 µs sleep between checks.
+ # Spin for a while if there's no data available yet.
+ with Timeout(timeout, sleeptime=0.0001) as t_o: + while t_o.check(): + if len(self.rcv_data) != 0: + break + else: + raise DAPAccessIntf.DeviceError(f"Timeout reading from device {self.serial_number}") if self.rcv_data[0] is None: raise DAPAccessIntf.DeviceError("Device %s read thread exited unexpectedly" % self.serial_number) diff --git a/pyocd/probe/pydapaccess/interface/pywinusb_backend.py b/pyocd/probe/pydapaccess/interface/pywinusb_backend.py index 1549309f8..76f44bb17 100644 --- a/pyocd/probe/pydapaccess/interface/pywinusb_backend.py +++ b/pyocd/probe/pydapaccess/interface/pywinusb_backend.py @@ -17,7 +17,6 @@ import logging import collections -from time import sleep from .interface import Interface from .common import ( @@ -71,7 +70,7 @@ def open(self): # Note - this operation must be retried since # other instances of pyOCD listing board can prevent # opening this device with exclusive access. - with Timeout(OPEN_TIMEOUT_S) as t_o: + with Timeout(OPEN_TIMEOUT_S, sleeptime=0.25) as t_o: while t_o.check(): # Attempt to open the device try: @@ -151,14 +150,14 @@ def write(self, data): data.extend([0] * (self.packet_size - len(data))) self.report.send([0] + data) - def read(self, timeout=20.0): + def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): """! @brief Read data on the IN endpoint associated to the HID interface """ - with Timeout(timeout) as t_o: + # Spin for a while if there's not data available yet. 100 µs sleep between checks. + with Timeout(timeout, sleeptime=0.0001) as t_o: while t_o.check(): if len(self.rcv_data): break - sleep(0) else: # Read operations should typically take ~1-2ms. # If this exception occurs, then it could indicate @@ -167,7 +166,7 @@ def read(self, timeout=20.0): # 2. CMSIS-DAP firmware problem cause a dropped read or write # 3. 
CMSIS-DAP is performing a long operation or is being # halted in a debugger - raise DAPAccessIntf.DeviceError("Read timed out") + raise DAPAccessIntf.DeviceError(f"Timeout reading from device {self.serial_number}") # Trace when the higher layer actually gets a packet previously read. if TRACE.isEnabledFor(logging.DEBUG): From bddbd19b1544f855ee120d6e5a88d818c26ae595 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 15 Dec 2021 16:11:40 -0600 Subject: [PATCH 063/123] CMSIS-DAP: pydapaccess: hidapi_backend: read thread, max packet count limit. This change introduces a background read thread for the HidApiUSB backend for CMSIS-DAP. Similar to the other backends, it reads and queues incoming command responses as soon as they are available. Also added a packet count maximum to deal with the arbitrary limit of 30 queued IN reports in the Mac version of hidapi. --- .../pydapaccess/interface/hidapi_backend.py | 69 +++++++++++++++++-- 1 file changed, 64 insertions(+), 5 deletions(-) diff --git a/pyocd/probe/pydapaccess/interface/hidapi_backend.py b/pyocd/probe/pydapaccess/interface/hidapi_backend.py index e2639ffed..fd118b20e 100644 --- a/pyocd/probe/pydapaccess/interface/hidapi_backend.py +++ b/pyocd/probe/pydapaccess/interface/hidapi_backend.py @@ -15,8 +15,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import collections import logging import six +import threading from .interface import Interface from .common import ( @@ -25,6 +27,7 @@ ) from ..dap_access_api import DAPAccessIntf from ....utility.compatibility import to_str_safe +from ....utility.timeout import Timeout LOG = logging.getLogger(__name__) TRACE = LOG.getChild("trace") @@ -43,11 +46,23 @@ class HidApiUSB(Interface): isAvailable = IS_AVAILABLE + HIDAPI_MAX_PACKET_COUNT = 30 + def __init__(self): - super(HidApiUSB, self).__init__() + super().__init__() # Vendor page and usage_id = 2 self.device = None self.device_info = None + self.thread = None + self.read_sem = threading.Semaphore(0) + self.closed_event = threading.Event() + self.received_data = collections.deque() + + def set_packet_count(self, count): + # hidapi for macos has an arbitrary limit on the number of packets it will queue for reading. + # Even though we have a read thread, it doesn't hurt to limit the packet count since the limit + # is fairly high. + self.packet_count = min(count, self.HIDAPI_MAX_PACKET_COUNT) def open(self): try: @@ -55,6 +70,29 @@ def open(self): except IOError as exc: raise DAPAccessIntf.DeviceError("Unable to open device: " + str(exc)) from exc + self.closed_event.clear() + + # Start RX thread + self.thread = threading.Thread(target=self.rx_task) + self.thread.daemon = True + self.thread.start() + + def rx_task(self): + try: + while not self.closed_event.is_set(): + self.read_sem.acquire() + if not self.closed_event.is_set(): + read_data = self.device.read(self.packet_size) + + if TRACE.isEnabledFor(logging.DEBUG): + # Strip off trailing zero bytes to reduce clutter. + TRACE.debug(" USB IN < (%d) %s", len(read_data), ' '.join([f'{i:02x}' for i in bytes(read_data).rstrip(b'\x00')])) + + self.received_data.append(read_data) + finally: + # Set last element of rcv_data to None on exit + self.received_data.append(None) + @staticmethod def get_all_connected_interfaces(): """! 
@brief Returns all the connected devices with CMSIS-DAP in the name. @@ -109,19 +147,40 @@ def write(self, data): if TRACE.isEnabledFor(logging.DEBUG): TRACE.debug(" USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data])) data.extend([0] * (self.packet_size - len(data))) + self.read_sem.release() self.device.write([0] + data) - def read(self, timeout=-1): + def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): """! @brief Read data on the IN endpoint associated to the HID interface """ - data = self.device.read(self.packet_size) + # Spin for a while if there's not data available yet. 100 µs sleep between checks. + with Timeout(timeout, sleeptime=0.0001) as t_o: + while t_o.check(): + if len(self.received_data) != 0: + break + else: + raise DAPAccessIntf.DeviceError(f"Timeout reading from device {self.serial_number}") + + if self.received_data[0] is None: + raise DAPAccessIntf.DeviceError(f"Device {self.serial_number} read thread exited") + + # Trace when the higher layer actually gets a packet previously read. if TRACE.isEnabledFor(logging.DEBUG): # Strip off trailing zero bytes to reduce clutter. - TRACE.debug(" USB IN < (%d) %s", len(data), ' '.join([f'{i:02x}' for i in bytes(data).rstrip(b'\x00')])) - return data + TRACE.debug(" USB RD < (%d) %s", len(self.received_data[0]), + ' '.join([f'{i:02x}' for i in bytes(self.received_data[0]).rstrip(b'\x00')])) + + return self.received_data.popleft() + def close(self): """! @brief Close the interface """ + assert not self.closed_event.is_set() + LOG.debug("closing interface") + self.closed_event.set() + self.read_sem.release() + self.thread.join() + self.thread = None self.device.close() From b0e8de8db7b5fd3e66fac54d816949f74dd56716 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 20 Dec 2021 23:26:49 +0000 Subject: [PATCH 064/123] AZP: fix pyocd.yaml artifact access from fork PR builds (#1273) * azp: enable verbose logging for config artifact download. 
* azp: expand path filters to .azure/ and package metadata files. --- .azure/functional-test-pipeline.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.azure/functional-test-pipeline.yml b/.azure/functional-test-pipeline.yml index 453df7505..5464118c8 100644 --- a/.azure/functional-test-pipeline.yml +++ b/.azure/functional-test-pipeline.yml @@ -6,8 +6,12 @@ trigger: - '*' paths: include: + - '.azure' - 'pyocd' - 'test' + - 'pyproject.toml' + - 'setup.cfg' + - 'setup.py' pr: branches: @@ -15,8 +19,12 @@ pr: - '*' paths: include: + - '.azure' - 'pyocd' - 'test' + - 'pyproject.toml' + - 'setup.cfg' + - 'setup.py' jobs: - job: functional_tests @@ -77,6 +85,7 @@ jobs: vstsFeedPackage: 'pyocd-test-config' vstsPackageVersion: '*' downloadDirectory: 'test' + verbosity: 'debug' displayName: 'Install test config' # Linux/Mac: Activate venv and run automated test suite. From c72c3914916b7f945da731257e4a9038a01948b0 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 24 Dec 2021 17:55:21 +0000 Subject: [PATCH 065/123] CMSIS-DAP: hidapi_backend: don't use receive thread on Windows. (#1277) With the receive thread running on Windows, there are packet corruption errors. The receive thread also seems to not terminate in all cases. --- .../pydapaccess/interface/hidapi_backend.py | 45 ++++++++++++++----- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/pyocd/probe/pydapaccess/interface/hidapi_backend.py b/pyocd/probe/pydapaccess/interface/hidapi_backend.py index fd118b20e..f0fd9abe5 100644 --- a/pyocd/probe/pydapaccess/interface/hidapi_backend.py +++ b/pyocd/probe/pydapaccess/interface/hidapi_backend.py @@ -17,6 +17,7 @@ import collections import logging +import platform import six import threading @@ -40,6 +41,10 @@ else: IS_AVAILABLE = True +# OS flags. +_IS_DARWIN = (platform.system() == 'Darwin') +_IS_WINDOWS = (platform.system() == 'Windows') + class HidApiUSB(Interface): """! @brief CMSIS-DAP USB interface class using hidapi backend. 
""" @@ -62,7 +67,9 @@ def set_packet_count(self, count): # hidapi for macos has an arbitrary limit on the number of packets it will queue for reading. # Even though we have a read thread, it doesn't hurt to limit the packet count since the limit # is fairly high. - self.packet_count = min(count, self.HIDAPI_MAX_PACKET_COUNT) + if _IS_DARWIN: + count = min(count, self.HIDAPI_MAX_PACKET_COUNT) + self.packet_count = count def open(self): try: @@ -70,12 +77,15 @@ def open(self): except IOError as exc: raise DAPAccessIntf.DeviceError("Unable to open device: " + str(exc)) from exc - self.closed_event.clear() + # Windows does not use the receive thread because it causes packet corruption for some reason. + if not _IS_WINDOWS: + # Make certain the closed event is clear. + self.closed_event.clear() - # Start RX thread - self.thread = threading.Thread(target=self.rx_task) - self.thread.daemon = True - self.thread.start() + # Start RX thread + self.thread = threading.Thread(target=self.rx_task) + self.thread.daemon = True + self.thread.start() def rx_task(self): try: @@ -147,12 +157,24 @@ def write(self, data): if TRACE.isEnabledFor(logging.DEBUG): TRACE.debug(" USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data])) data.extend([0] * (self.packet_size - len(data))) - self.read_sem.release() + if not _IS_WINDOWS: + self.read_sem.release() self.device.write([0] + data) def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): """! @brief Read data on the IN endpoint associated to the HID interface """ + # Windows doesn't use the read thread, so read directly. + if _IS_WINDOWS: + read_data = self.device.read(self.packet_size) + + if TRACE.isEnabledFor(logging.DEBUG): + # Strip off trailing zero bytes to reduce clutter. + TRACE.debug(" USB IN < (%d) %s", len(read_data), ' '.join([f'{i:02x}' for i in bytes(read_data).rstrip(b'\x00')])) + + return read_data + + # Other OSes use the read thread, so we check for and pull data from the queue. 
# Spin for a while if there's not data available yet. 100 µs sleep between checks. with Timeout(timeout, sleeptime=0.0001) as t_o: while t_o.check(): @@ -179,8 +201,9 @@ def close(self): assert not self.closed_event.is_set() LOG.debug("closing interface") - self.closed_event.set() - self.read_sem.release() - self.thread.join() - self.thread = None + if not _IS_WINDOWS: + self.closed_event.set() + self.read_sem.release() + self.thread.join() + self.thread = None self.device.close() From 9d2bc426daab30477ed9fadcaee511520bf26261 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 24 Dec 2021 21:54:11 +0000 Subject: [PATCH 066/123] core: session: fix probe specific config issues. (#1278) - Probe-specific config takes precedence over global config. - Warn if multiple probe uids in config matche the probe, and only apply the first matching config. --- pyocd/core/session.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/pyocd/core/session.py b/pyocd/core/session.py index f0d41e784..f298d9303 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -155,17 +155,24 @@ def __init__(self, probe, auto_open=True, options=None, option_defaults=None, ** self._project_dir = os.path.abspath(os.path.expanduser(self.options.get('project_dir'))) LOG.debug("Project directory: %s", self.project_dir) - # Apply common configuration settings from the config file. + # Load options from the config file. config = self._get_config() - probesConfig = config.pop('probes', None) - self._options.add_back(config) + probes_config = config.pop('probes', None) - # Pick up any config file options for this board. - if (probe is not None) and (probesConfig is not None): - for uid, settings in probesConfig.items(): + # Pick up any config file options for this probe. These have priority over global options. 
+ if (probe is not None) and (probes_config is not None): + did_match_probe = False + for uid, settings in probes_config.items(): if str(uid).lower() in probe.unique_id.lower(): - LOG.info("Using config settings for probe %s" % (probe.unique_id)) + if did_match_probe: + LOG.warning("Multiple probe config options match probe ID %s", probe.unique_id) + break + LOG.info("Using config options for probe %s" % (probe.unique_id)) self._options.add_back(settings) + did_match_probe = True + + # Add global config options. + self._options.add_back(config) # Merge in lowest priority options. self._options.add_back(option_defaults) From 0acb568b12d615bd1507b80b0beedb28d925ef6c Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 26 Nov 2021 14:39:29 -0600 Subject: [PATCH 067/123] types: annotate several utilities modules. --- pyocd/utility/autoflush.py | 18 ++++++++---- pyocd/utility/cmdline.py | 6 ++-- pyocd/utility/columns.py | 14 ++++++---- pyocd/utility/compatibility.py | 9 +++--- pyocd/utility/concurrency.py | 5 ++-- pyocd/utility/conversion.py | 51 ++++++++++++++++++---------------- pyocd/utility/graph.py | 49 +++++++++++++++++--------------- pyocd/utility/hex.py | 14 ++++++++-- pyocd/utility/mask.py | 35 +++++++++++------------ pyocd/utility/timeout.py | 13 +++++---- 10 files changed, 123 insertions(+), 91 deletions(-) diff --git a/pyocd/utility/autoflush.py b/pyocd/utility/autoflush.py index 2330eda01..0f38b86f4 100644 --- a/pyocd/utility/autoflush.py +++ b/pyocd/utility/autoflush.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2020 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,9 +15,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import (Any, TYPE_CHECKING) + from ..core import exceptions -class Autoflush(object): +if TYPE_CHECKING: + from ..core.target import Target + from types import TracebackType + +class Autoflush: """! @brief Context manager for performing flushes. Pass a Target instance to the constructor, and when the context exits, the target will be @@ -27,7 +34,7 @@ class Autoflush(object): due to Python's dynamic dispatch. """ - def __init__(self, target): + def __init__(self, target: "Target") -> None: """! @brief Constructor. @param self The object. @@ -36,10 +43,11 @@ def __init__(self, target): """ self._target = target - def __enter__(self): + def __enter__(self) -> "Autoflush": return self - def __exit__(self, type, value, traceback): - if type is None or not issubclass(type, exceptions.TransferError): + def __exit__(self, exc_type: type, value: Any, traceback: "TracebackType") -> bool: + if exc_type is None or not issubclass(exc_type, exceptions.TransferError): self._target.flush() return False + diff --git a/pyocd/utility/cmdline.py b/pyocd/utility/cmdline.py index fc1057d07..2518ca7d0 100644 --- a/pyocd/utility/cmdline.py +++ b/pyocd/utility/cmdline.py @@ -16,7 +16,7 @@ # limitations under the License. import logging -from typing import (Any, Dict, Iterable, List, Union) +from typing import (Any, Dict, Iterable, List, Optional, Union) from ..core.target import Target from ..core.options import OPTIONS_INFO @@ -186,7 +186,7 @@ def convert_session_options(option_list: Iterable[str]) -> Dict[str, Any]: return options ## Map to convert from reset type names to enums. 
-RESET_TYPE_MAP = { +RESET_TYPE_MAP: Dict[str, Optional[Target.ResetType]] = { 'default': None, 'hw': Target.ResetType.HW, 'sw': Target.ResetType.SW, @@ -200,7 +200,7 @@ def convert_session_options(option_list: Iterable[str]) -> Dict[str, Any]: 'emulated': Target.ResetType.SW_EMULATED, } -def convert_reset_type(value: str) -> Target.ResetType: +def convert_reset_type(value: str) -> Optional[Target.ResetType]: """! @brief Convert a reset_type session option value to the Target.ResetType enum. @param value The value of the reset_type session option. @exception ValueError Raised if an unknown reset_type value is passed. diff --git a/pyocd/utility/columns.py b/pyocd/utility/columns.py index 833d4c52e..8336976f4 100644 --- a/pyocd/utility/columns.py +++ b/pyocd/utility/columns.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2020 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +18,11 @@ import sys import logging from shutil import get_terminal_size +from typing import (IO, Iterable, List, Optional, Tuple) LOG = logging.getLogger(__name__) -class ColumnFormatter(object): +class ColumnFormatter: """! @brief Formats a set of values in multiple columns. The value_list must be a list of bi-tuples (name, value) sorted in the desired display order. @@ -29,7 +31,7 @@ class ColumnFormatter(object): will be printed in column major order. """ - def __init__(self, maxwidth=None, inset=2): + def __init__(self, maxwidth: Optional[int] = None, inset: int = 2) -> None: """! @brief Constructor. @param self The object. @param maxwidth Number of characters to which the output width must be constrained. 
If not provided, @@ -39,11 +41,11 @@ def __init__(self, maxwidth=None, inset=2): """ self._inset = inset self._term_width = (maxwidth or get_terminal_size()[0]) - inset * 4 - self._items = [] + self._items: List[Tuple[str, str]] = [] self._max_name_width = 0 self._max_value_width = 0 - def add_items(self, item_list): + def add_items(self, item_list: Iterable[Tuple[str, str]]) -> None: """! @brief Add items to the output. @param self The object. @param item_list Must be a list of bi-tuples (name, value) sorted in the desired display order. @@ -55,7 +57,7 @@ def add_items(self, item_list): self._max_name_width = max(self._max_name_width, len(name)) self._max_value_width = max(self._max_value_width, len(value)) - def format(self): + def format(self) -> str: """! @brief Return the formatted columns as a string. @param self The object. @return String containing the output of the column printer. @@ -78,7 +80,7 @@ def format(self): txt += "\n" return txt - def write(self, output_file=None): + def write(self, output_file: IO[str] = None) -> None: """! @brief Write the formatted columns to stdout or the specified file. @param self The object. @param output_file Optional file to which the column printer output will be written. If no specified, diff --git a/pyocd/utility/compatibility.py b/pyocd/utility/compatibility.py index 9b7a35d37..0b141e8b4 100644 --- a/pyocd/utility/compatibility.py +++ b/pyocd/utility/compatibility.py @@ -16,6 +16,7 @@ # limitations under the License. import functools +from typing import Union # iter_single_bytes() returns an iterator over a bytes object that produces # single-byte bytes objects for each byte in the passed in value. Normally on @@ -26,8 +27,8 @@ # to_bytes_safe() converts a unicode string to a bytes object by encoding as # latin-1. It will also accept a value that is already a bytes object and # return it unmodified. 
-def to_bytes_safe(v): - if type(v) is str: +def to_bytes_safe(v: Union[str, bytes]) -> bytes: + if isinstance(v, str): return v.encode('utf-8') else: return v @@ -35,8 +36,8 @@ def to_bytes_safe(v): # to_str_safe() converts a bytes object to a unicode string by decoding from # latin-1. It will also accept a value that is already a str object and # return it unmodified. -def to_str_safe(v): - if type(v) is str: +def to_str_safe(v: Union[str, bytes]) -> str: + if isinstance(v, str): return v else: return v.decode('utf-8') diff --git a/pyocd/utility/concurrency.py b/pyocd/utility/concurrency.py index d92e657f0..30f6fd57e 100644 --- a/pyocd/utility/concurrency.py +++ b/pyocd/utility/concurrency.py @@ -15,14 +15,15 @@ # limitations under the License. from functools import wraps +from typing import (Any, Callable) -def locked(func): +def locked(func: Callable) -> Callable: """! @brief Decorator to automatically lock a method of a class. The class is required to have `lock()` and `unlock()` methods. """ @wraps(func) - def _locking(self, *args, **kwargs): + def _locking(self, *args: Any, **kwargs: Any) -> Any: try: self.lock() return func(self, *args, **kwargs) diff --git a/pyocd/utility/conversion.py b/pyocd/utility/conversion.py index bcdfd6314..dc891c494 100644 --- a/pyocd/utility/conversion.py +++ b/pyocd/utility/conversion.py @@ -17,11 +17,14 @@ import struct import binascii +from typing import (Any, Iterator, Sequence, Tuple, cast) import six from .mask import align_up -def byte_list_to_nbit_le_list(data, bitwidth, pad=0x00): +ByteList = Sequence[int] + +def byte_list_to_nbit_le_list(data: ByteList, bitwidth: int, pad: int = 0x00) -> Sequence[int]: """! 
@brief Convert a list of bytes to a list of n-bit integers (little endian) If the length of the data list is not a multiple of `bitwidth` // 8, then the pad value is used @@ -44,7 +47,7 @@ def byte_list_to_nbit_le_list(data, bitwidth, pad=0x00): res.append(sum((padded_data[i] << (i * 8)) for i in range(bytewidth))) return res -def nbit_le_list_to_byte_list(data, bitwidth): +def nbit_le_list_to_byte_list(data: Sequence[int], bitwidth: int) -> ByteList: """! @brief Convert a list of n-bit values into a byte list. @param data List of n-bit values. @@ -53,7 +56,7 @@ def nbit_le_list_to_byte_list(data, bitwidth): """ return [(x >> shift) & 0xff for x in data for shift in range(0, bitwidth, 8)] -def byte_list_to_u32le_list(data, pad=0x00): +def byte_list_to_u32le_list(data: ByteList, pad: int = 0x00) -> Sequence[int]: """! @brief Convert a list of bytes to a list of 32-bit integers (little endian) If the length of the data list is not a multiple of 4, then the pad value is used @@ -71,7 +74,7 @@ def byte_list_to_u32le_list(data, pad=0x00): res += byte_list_to_u32le_list(list(data[-remainder:]) + [pad] * padCount) return res -def u32le_list_to_byte_list(data): +def u32le_list_to_byte_list(data: Sequence[int]) -> ByteList: """! @brief Convert a word array into a byte array""" res = [] for x in data: @@ -81,41 +84,41 @@ def u32le_list_to_byte_list(data): res.append((x >> 24) & 0xff) return res -def u16le_list_to_byte_list(data): +def u16le_list_to_byte_list(data: Sequence[int]) -> ByteList: """! @brief Convert a halfword array into a byte array""" byteData = [] for h in data: byteData.extend([h & 0xff, (h >> 8) & 0xff]) return byteData -def byte_list_to_u16le_list(byteData): +def byte_list_to_u16le_list(byteData: ByteList) -> Sequence[int]: """! 
@brief Convert a byte array into a halfword array""" data = [] for i in range(0, len(byteData), 2): data.append(byteData[i] | (byteData[i + 1] << 8)) return data -def u32_to_float32(data): +def u32_to_float32(data: int) -> float: """! @brief Convert a 32-bit int to an IEEE754 float""" d = struct.pack(">I", data & 0xffff_ffff) return struct.unpack(">f", d)[0] -def float32_to_u32(data): +def float32_to_u32(data: float) -> int: """! @brief Convert an IEEE754 float to a 32-bit int""" d = struct.pack(">f", data) return struct.unpack(">I", d)[0] -def u64_to_float64(data): +def u64_to_float64(data: int) -> float: """! @brief Convert a 64-bit int to an IEEE754 float""" d = struct.pack(">Q", data & 0xffff_ffff_ffff_ffff) return struct.unpack(">d", d)[0] -def float64_to_u64(data): +def float64_to_u64(data: float) -> int: """! @brief Convert an IEEE754 float to a 64-bit int""" d = struct.pack(">d", data) return struct.unpack(">Q", d)[0] -def uint_to_hex_le(value, width): +def uint_to_hex_le(value: int, width: int) -> str: """! @brief Create an n-digit hexadecimal string from an integer value. @param value Integer value to format. @param width The width in bits. @@ -125,7 +128,7 @@ def uint_to_hex_le(value, width): """ return ''.join("%02x" % ((value >> b) & 0xff) for b in range(0, align_up(width, 8), 8)) -def hex_le_to_uint(value, width): +def hex_le_to_uint(value: str, width: int) -> int: """! @brief Create an an integer value from an n-digit hexadecimal string. @param value String consisting of pairs of hex digits with no intervening whitespace. Must have at least enough hex bytes to meet the desired width. The first hex byte is the LSB. @@ -135,61 +138,61 @@ def hex_le_to_uint(value, width): """ return sum((int(value[i:i+2], base=16) << (i * 4)) for i in range(0, align_up(width, 8) // 4, 2)) -def u32_to_hex8le(val): +def u32_to_hex8le(val: int) -> str: """! 
@brief Create 8-digit hexadecimal string from 32-bit register value""" return uint_to_hex_le(val, 32) -def u64_to_hex16le(val): +def u64_to_hex16le(val: int) -> str: """! @brief Create 16-digit hexadecimal string from 64-bit register value""" return uint_to_hex_le(val, 64) -def hex8_to_u32be(data): +def hex8_to_u32be(data: str) -> int: """! @brief Build 32-bit register value from big-endian 8-digit hexadecimal string @note Endianness in this function name is backwards. """ return hex_le_to_uint(data, 32) -def hex16_to_u64be(data): +def hex16_to_u64be(data: str) -> int: """! @brief Build 64-bit register value from big-endian 16-digit hexadecimal string @note Endianness in this function name is backwards. """ return hex_le_to_uint(data, 64) -def hex8_to_u32le(data): +def hex8_to_u32le(data: str) -> int: """! @brief Build 32-bit register value from little-endian 8-digit hexadecimal string @note Endianness in this function name is backwards. """ return int(data[0:8], 16) -def hex16_to_u64le(data): +def hex16_to_u64le(data: str) -> int: """! @brief Build 64-bit register value from little-endian 16-digit hexadecimal string @note Endianness in this function name is backwards. """ return int(data[0:16], 16) -def byte_to_hex2(val): +def byte_to_hex2(val: int) -> str: """! @brief Create 2-digit hexadecimal string from 8-bit value""" return "%02x" % int(val) -def hex_to_byte_list(data): +def hex_to_byte_list(data: str) -> ByteList: """! @brief Convert string of hex bytes to list of integers""" return list(six.iterbytes(binascii.unhexlify(data))) -def hex_decode(cmd): +def hex_decode(cmd: str) -> bytes: """! @brief Return the binary data represented by the hexadecimal string.""" return binascii.unhexlify(cmd) -def hex_encode(string): +def hex_encode(string: bytes) -> bytes: """! @brief Return the hexadecimal representation of the binary data.""" return binascii.hexlify(string) -def pairwise(iterable): +def pairwise(iterable: Iterator[Any]) -> Iterator[Tuple[Any, Any]]: """! 
s -> (s0,s1), (s2,s3), (s3, s4), ...""" r = [] for x in iterable: r.append(x) if len(r) == 2: - yield tuple(r) + yield cast(Tuple[Any, Any], tuple(r)) r = [] if len(r) > 0: yield (r[0], r[1]) diff --git a/pyocd/utility/graph.py b/pyocd/utility/graph.py index 9677e58c8..a004b91a1 100644 --- a/pyocd/utility/graph.py +++ b/pyocd/utility/graph.py @@ -15,7 +15,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -class GraphNode(object): +from typing import (Callable, List, Optional, Sequence, Type, Union) + +class GraphNode: """! @brief Simple graph node. All nodes have a parent, which is None for a root node, and zero or more children. @@ -23,40 +25,43 @@ class GraphNode(object): Supports indexing and iteration over children. """ - def __init__(self): + def __init__(self) -> None: """! @brief Constructor.""" - super(GraphNode, self).__init__() - self._parent = None - self._children = [] + super().__init__() + self._parent: Optional[GraphNode] = None + self._children: List[GraphNode] = [] @property - def parent(self): + def parent(self) -> Optional["GraphNode"]: """! @brief This node's parent in the object graph.""" return self._parent @property - def children(self): + def children(self) -> Sequence["GraphNode"]: """! @brief Child nodes in the object graph.""" return self._children @property - def is_leaf(self): + def is_leaf(self) -> bool: """! @brief Returns true if the node has no children.""" return len(self.children) == 0 - def add_child(self, node): + def add_child(self, node: "GraphNode") -> None: """! @brief Link a child node onto this object.""" node._parent = self self._children.append(node) - def find_root(self): + def find_root(self) -> "GraphNode": """! 
@brief Returns the root node of the object graph.""" root = self while root.parent is not None: root = root.parent return root - def find_children(self, predicate, breadth_first=True): + def find_children(self, + predicate: Callable[["GraphNode"], bool], + breadth_first: bool = True + ) -> Sequence["GraphNode"]: """! @brief Recursively search for children that match a given predicate. @param self @param predicate A callable accepting a single argument for the node to examine. If the @@ -66,25 +71,25 @@ def find_children(self, predicate, breadth_first=True): @param breadth_first Whether to search breadth first. Pass False to search depth first. @returns List of matching child nodes, or an empty list if no matches were found. """ - def _search(node, klass): - results = [] - childrenToExamine = [] + def _search(node: GraphNode): + results: List[GraphNode] = [] + childrenToExamine: List[GraphNode] = [] for child in node.children: if predicate(child): results.append(child) elif not breadth_first: - results.extend(_search(child, klass)) + results.extend(_search(child)) elif breadth_first: childrenToExamine.append(child) if breadth_first: for child in childrenToExamine: - results.extend(_search(child, klass)) + results.extend(_search(child)) return results - return _search(self, predicate) + return _search(self) - def get_first_child_of_type(self, klass): + def get_first_child_of_type(self, klass: Type) -> Optional["GraphNode"]: """! @brief Breadth-first search for a child of the given class. @param self @param klass The class type to search for. The first child at any depth that is an instance @@ -98,7 +103,7 @@ def get_first_child_of_type(self, klass): else: return None - def __getitem__(self, key): + def __getitem__(self, key: Union[int, slice]) -> Union["GraphNode", List["GraphNode"]]: """! @brief Returns the indexed child. Slicing is supported. @@ -109,11 +114,11 @@ def __iter__(self): """! 
@brief Iterate over the node's children.""" return iter(self.children) - def _dump_desc(self): + def _dump_desc(self) -> str: """! @brief Similar to __repr__ by used for dump_to_str().""" return str(self) - def dump_to_str(self): + def dump_to_str(self) -> str: """! @brief Returns a string describing the object graph.""" def _dump(node, level): @@ -124,6 +129,6 @@ def _dump(node, level): return _dump(self, 0) - def dump(self): + def dump(self) -> None: """! @brief Pretty print the object graph to stdout.""" print(self.dump_to_str()) diff --git a/pyocd/utility/hex.py b/pyocd/utility/hex.py index 4fe7e73a4..a0b09d004 100644 --- a/pyocd/utility/hex.py +++ b/pyocd/utility/hex.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2018-2020 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +18,14 @@ import sys import string import io +from typing import (IO, Optional, Sequence) from . import conversion ## ASCII printable characters not including whitespace that changes line position. _PRINTABLE = string.digits + string.ascii_letters + string.punctuation + ' ' -def format_hex_width(value, width): +def format_hex_width(value: int, width: int) -> str: """! @brief Formats the value as hex of the specified bit width. @param value Integer value to be formatted. @@ -41,7 +43,13 @@ def format_hex_width(value, width): else: raise ValueError("unrecognized register width (%d)" % width) -def dump_hex_data(data, start_address=0, width=8, output=None, print_ascii=True): +def dump_hex_data( + data: Sequence[int], + start_address: int = 0, + width: int = 8, + output: Optional[IO[str]] = None, + print_ascii: bool = True + ) -> None: """! @brief Prints a canonical hex dump of the given data. 
Each line of the output consists of an address column, the data as hex, and a printable ASCII @@ -122,7 +130,7 @@ def line_width_in_chars(elements: int) -> int: if width == 8: d = [d] else: - d = conversion.nbit_le_list_to_byte_list([d], width) + d = list(conversion.nbit_le_list_to_byte_list([d], width)) d.reverse() s += "".join((chr(b) if (chr(b) in _PRINTABLE) else '.') for b in d) output.write(" " * (max_line_width - actual_line_width) + " " + s + "|") diff --git a/pyocd/utility/mask.py b/pyocd/utility/mask.py index cb004b347..97dccc76a 100644 --- a/pyocd/utility/mask.py +++ b/pyocd/utility/mask.py @@ -16,8 +16,9 @@ import operator from functools import reduce +from typing import (Any, Optional, Sequence, Tuple, Union) -def bitmask(*args): +def bitmask(*args: Union[int, Sequence[int], Tuple[int, int]]) -> int: """! @brief Returns a mask with specified bit ranges set. An integer mask is generated based on the bits and bit ranges specified by the @@ -54,7 +55,7 @@ def bitmask(*args): return mask -def bit_invert(value, width=32): +def bit_invert(value: int, width: int = 32) -> int: """! @brief Return the bitwise inverted value of the argument given a specified width. @param value Integer value to be inverted. @@ -66,37 +67,37 @@ def bit_invert(value, width=32): invert32 = bit_invert """! @brief Return the 32-bit inverted value of the argument.""" -def bfx(value, msb, lsb): +def bfx(value: int, msb: int, lsb: int) -> int: """! @brief Extract a value from a bitfield.""" mask = bitmask((msb, lsb)) return (value & mask) >> lsb -def bfxw(value, lsb, width): +def bfxw(value: int, lsb: int, width: int) -> int: """! @brief Extract a value from a bitfield given the LSb and width.""" mask = bitmask((lsb + width, lsb)) return (value & mask) >> lsb -def bfi(value, msb, lsb, field): +def bfi(value: int, msb: int, lsb: int, field: int) -> int: """! 
@brief Change a bitfield value.""" mask = bitmask((msb, lsb)) value &= ~mask value |= (field << lsb) & mask return value -class Bitfield(object): +class Bitfield: """! @brief Represents a bitfield of a register.""" - def __init__(self, msb, lsb=None, name=None): + def __init__(self, msb: int, lsb: Optional[int] = None, name: Optional[str] = None): self._msb = msb self._lsb = lsb if (lsb is not None) else msb self._name = name assert self._msb >= self._lsb @property - def width(self): + def width(self) -> int: return self._msb - self._lsb + 1 - def get(self, value): + def get(self, value: int) -> int: """! @brief Extract the bitfield value from a register value. @param self The Bitfield object. @param value Integer register value. @@ -104,7 +105,7 @@ def get(self, value): """ return bfx(value, self._msb, self._lsb) - def set(self, register_value, field_value): + def set(self, register_value: int, field_value: int) -> int: """! @brief Modified the bitfield in a register value. @param self The Bitfield object. @param register_value Integer register value. @@ -113,10 +114,10 @@ def set(self, register_value, field_value): """ return bfi(register_value, self._msb, self._lsb, field_value) - def __repr__(self): + def __repr__(self) -> str: return "<{}@{:x} name={} {}:{}>".format(self.__class__.__name__, id(self), self._name, self._msb, self._lsb) -def msb(n): +def msb(n: int) -> int: """! @brief Return the bit number of the highest set bit.""" ndx = 0 while ( 1 < n ): @@ -124,7 +125,7 @@ def msb(n): ndx += 1 return ndx -def same(d1, d2): +def same(d1: Sequence[Any], d2: Sequence[Any]) -> bool: """! @brief Test whether two sequences contain the same values. Unlike a simple equality comparison, this function works as expected when the two sequences @@ -138,19 +139,19 @@ def same(d1, d2): return False return True -def align_down(value, multiple): +def align_down(value: int, multiple: int) -> int: """! 
@brief Return value aligned down to multiple.""" return value // multiple * multiple -def align_up(value, multiple): +def align_up(value: int, multiple: int) -> int: """! @brief Return value aligned up to multiple.""" return (value + multiple - 1) // multiple * multiple -def round_up_div(value, divisor): +def round_up_div(value: int, divisor: int) -> int: """! @brief Return value divided by the divisor, rounding up to the nearest multiple of the divisor.""" return (value + divisor - 1) // divisor -def parity32_high(n): +def parity32_high(n: int) -> int: """! @brief Compute parity over a 32-bit value. This function is intended to be used for computing parity over a 32-bit value transferred in an Arm diff --git a/pyocd/utility/timeout.py b/pyocd/utility/timeout.py index 9577f6721..0fbdbcb68 100644 --- a/pyocd/utility/timeout.py +++ b/pyocd/utility/timeout.py @@ -16,7 +16,10 @@ # limitations under the License. from time import (time, sleep) -from typing import Optional +from typing import (Any, Optional, TYPE_CHECKING) + +if TYPE_CHECKING: + from types import TracebackType class Timeout: """! @brief Timeout helper context manager. @@ -73,12 +76,12 @@ def __init__(self, timeout: Optional[float], sleeptime: float = 0) -> None: self._is_first_check = True self._is_running = False - def __enter__(self): + def __enter__(self) -> "Timeout": self.start() return self - def __exit__(self, exc_type, exc_val, exc_tb): - pass + def __exit__(self, exc_type: type, value: Any, traceback: "TracebackType") -> bool: + return False def start(self) -> None: """! @brief Start or restart the timeout timer. @@ -93,7 +96,7 @@ def start(self) -> None: self._timed_out = False self._is_first_check = True - def clear(self): + def clear(self) -> None: """! @brief Reset the timeout back to initial, non-running state. The timeout can be made to run again by calling start(). 
From 7f514b28523e155eee17bd96f6b9f058d2604a2c Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 8 Aug 2021 13:40:31 -0500 Subject: [PATCH 068/123] types: annotate core classes. --- pyocd/core/core_registers.py | 95 ++++--- pyocd/core/core_target.py | 47 ++++ pyocd/core/helpers.py | 47 +++- pyocd/core/memory_interface.py | 126 ++++++++-- pyocd/core/memory_map.py | 278 ++++++++++++++------- pyocd/core/options_manager.py | 61 +++-- pyocd/core/plugin.py | 29 ++- pyocd/core/session.py | 179 +++++++------ pyocd/core/soc_target.py | 229 ++++++++++------- pyocd/core/target.py | 112 +++++---- pyocd/core/target_delegate.py | 62 +++-- pyocd/coresight/coresight_target.py | 11 + pyocd/coresight/cortex_m.py | 3 +- pyocd/coresight/cortex_m_core_registers.py | 28 ++- setup.cfg | 1 + 15 files changed, 863 insertions(+), 445 deletions(-) create mode 100644 pyocd/core/core_target.py diff --git a/pyocd/core/core_registers.py b/pyocd/core/core_registers.py index ad6372544..7eb92e518 100644 --- a/pyocd/core/core_registers.py +++ b/pyocd/core/core_registers.py @@ -17,12 +17,17 @@ import logging from copy import copy +from typing import (Any, Callable, Dict, Iterator, List, Optional, Sequence, Set, Union) from ..utility import conversion LOG = logging.getLogger(__name__) -class CoreRegisterInfo(object): +# Core register related types. +CoreRegisterNameOrNumberType = Union[str, int] +CoreRegisterValueType = Union[int, float] + +class CoreRegisterInfo: """! @brief Useful information about a core register. Provides properties for classification of the register, and utilities to convert to and from @@ -34,25 +39,25 @@ class CoreRegisterInfo(object): ## Map of register name to info. # - # This is just a placeholder. The architecture-specific subclass should override the definition. Its - # value is set to None to cause an exception if used. - _NAME_MAP = None + # This is just the type declaration. 
The architecture-specific subclass must define the attribute, so + # that architectures + _NAME_MAP: Dict[str, "CoreRegisterInfo"] ## Map of register index to info. # # This is just a placeholder. The architecture-specific subclass should override the definition. Its # value is set to None to cause an exception if used. - _INDEX_MAP = None + _INDEX_MAP: Dict[int, "CoreRegisterInfo"] @classmethod - def add_to_map(cls, all_regs): + def add_to_map(cls, all_regs: Sequence["CoreRegisterInfo"]) -> None: """! @brief Build info map from list of CoreRegisterInfo instance.""" for reg in all_regs: cls._NAME_MAP[reg.name] = reg cls._INDEX_MAP[reg.index] = reg @classmethod - def get(cls, reg): + def get(cls, reg: "CoreRegisterNameOrNumberType") -> "CoreRegisterInfo": """! @brief Return the CoreRegisterInfo instance for a register. @param reg Either a register name or internal register number. @return CoreRegisterInfo @@ -67,7 +72,16 @@ def get(cls, reg): except KeyError as err: raise KeyError('unknown core register %s' % reg) from err - def __init__(self, name, index, bitsize, reg_type, reg_group, reg_num=None, feature=None): + def __init__( + self, + name: str, + index: int, + bitsize: int, + reg_type: str, + reg_group: str, + reg_num: Optional[int] = None, + feature: Optional[str] = None + ) -> None: """! @brief Constructor.""" self._name = name self._index = index @@ -78,65 +92,66 @@ def __init__(self, name, index, bitsize, reg_type, reg_group, reg_num=None, feat self._gdb_feature = feature @property - def name(self): + def name(self) -> str: """! @brief Name of the register. Always lowercase.""" return self._name @property - def index(self): + def index(self) -> int: """! @brief Integer index of the register.""" return self._index @property - def bitsize(self): + def bitsize(self) -> int: """! @brief Bit width of the register..""" return self._bitsize @property - def group(self): + def group(self) -> str: """! 
@brief Named group the register is contained within.""" return self._group @property - def gdb_type(self): + def gdb_type(self) -> str: """! @brief Value type specific to gdb.""" return self._gdb_type @property - def gdb_regnum(self): + def gdb_regnum(self) -> Optional[int]: """! @brief Register number specific to gdb.""" return self._gdb_regnum @property - def gdb_feature(self): + def gdb_feature(self) -> Optional[str]: """! @brief GDB architecture feature to which the register belongs.""" return self._gdb_feature @property - def is_float_register(self): + def is_float_register(self) -> bool: """! @brief Returns true for registers single or double precision float registers (but not, say, FPSCR).""" return self.is_single_float_register or self.is_double_float_register @property - def is_single_float_register(self): + def is_single_float_register(self) -> bool: """! @brief Returns true for registers holding single-precision float values""" return self.gdb_type == 'ieee_single' @property - def is_double_float_register(self): + def is_double_float_register(self) -> bool: """! @brief Returns true for registers holding double-precision float values""" return self.gdb_type == 'ieee_double' - def from_raw(self, value): + def from_raw(self, value: int) -> "CoreRegisterValueType": """! @brief Convert register value from raw (integer) to canonical type.""" # Convert int to float. if self.is_single_float_register: - value = conversion.u32_to_float32(value) + return conversion.u32_to_float32(value) elif self.is_double_float_register: - value = conversion.u64_to_float64(value) - return value + return conversion.u64_to_float64(value) + else: + return value - def to_raw(self, value): + def to_raw(self, value: "CoreRegisterValueType") -> int: """! @brief Convert register value from canonical type to raw (integer).""" # Convert float to int. 
if isinstance(value, float): @@ -148,20 +163,20 @@ def to_raw(self, value): raise TypeError("non-float register value has float type") return value - def clone(self): + def clone(self) -> "CoreRegisterInfo": """! @brief Return a copy of the register info.""" return copy(self) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, CoreRegisterInfo) and (self.index == other.index) - def __hash__(self): + def __hash__(self) -> int: return hash(self.index) - def __repr__(self): + def __repr__(self) -> str: return "<{}@{:#x} {}={} {}-bit>".format(self.__class__.__name__, id(self), self.name, self.index, self.bitsize) -class CoreRegistersIndex(object): +class CoreRegistersIndex: """! @brief Class to hold indexes of available core registers. This class is meant to be used by a core to hold the set of core registers that are actually present on @@ -170,38 +185,38 @@ class CoreRegistersIndex(object): """ def __init__(self): - self._groups = set() - self._all = set() - self._by_name = {} - self._by_index = {} - self._by_feature = {} + self._groups: Set[str] = set() + self._all: Set[CoreRegisterInfo] = set() + self._by_name: Dict[str, CoreRegisterInfo] = {} + self._by_index: Dict[int, CoreRegisterInfo] = {} + self._by_feature: Dict[str, List[CoreRegisterInfo]] = {} @property - def groups(self): + def groups(self) -> Set[str]: """! @brief Set of unique register group names.""" return self._groups @property - def as_set(self): + def as_set(self) -> Set[CoreRegisterInfo]: """! @brief Set of available registers as CoreRegisterInfo objects.""" return self._all @property - def by_name(self): + def by_name(self) -> Dict[str, CoreRegisterInfo]: """! @brief Dict of (register name) -> CoreRegisterInfo.""" return self._by_name @property - def by_index(self): + def by_index(self) -> Dict[int, CoreRegisterInfo]: """! 
@brief Dict of (register index) -> CoreRegisterInfo.""" return self._by_index @property - def by_feature(self): + def by_feature(self) -> Dict[str, List[CoreRegisterInfo]]: """! @brief Dict of (register gdb feature) -> List[CoreRegisterInfo].""" return self._by_feature - def iter_matching(self, predicate): + def iter_matching(self, predicate: Callable[[CoreRegisterInfo], bool]) -> Iterator[CoreRegisterInfo]: """! @brief Iterate over registers matching a given predicate callable. @param self The object. @param predicate Callable accepting a single argument, a CoreRegisterInfo, and returning a boolean. @@ -211,7 +226,7 @@ def iter_matching(self, predicate): if predicate(reg): yield reg - def add_group(self, regs): + def add_group(self, regs: Sequence[CoreRegisterInfo]) -> None: """! @brief Add a list of registers. @param self The object. @param regs Iterable of CoreRegisterInfo objects. The objects are copied as they are added. diff --git a/pyocd/core/core_target.py b/pyocd/core/core_target.py new file mode 100644 index 000000000..24df195e6 --- /dev/null +++ b/pyocd/core/core_target.py @@ -0,0 +1,47 @@ +# pyOCD debugger +# Copyright (c) 2021 Chris Reed +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import (Optional, TYPE_CHECKING) + +from .target import (Target, TargetGraphNode) + +if TYPE_CHECKING: + from ..debug.context import DebugContext + from ..debug.elf.elf import ELFBinaryFile + +class CoreTarget(TargetGraphNode): + """@brief Target base class for CPU cores.""" + + @property + def core_number(self) -> int: + raise NotImplementedError() + + @property + def elf(self) -> Optional["ELFBinaryFile"]: + raise NotImplementedError() + + @elf.setter + def elf(self, filename: "ELFBinaryFile") -> None: + raise NotImplementedError() + + def set_reset_catch(self, reset_type: Target.ResetType) -> None: + raise NotImplementedError() + + def clear_reset_catch(self, reset_type: Target.ResetType) -> None: + raise NotImplementedError() + + def set_target_context(self, context: "DebugContext") -> None: + raise NotImplementedError() diff --git a/pyocd/core/helpers.py b/pyocd/core/helpers.py index 5c6815d7c..f2d8b4bc4 100644 --- a/pyocd/core/helpers.py +++ b/pyocd/core/helpers.py @@ -18,6 +18,13 @@ from time import sleep import colorama import prettytable +from typing import (Any, List, Mapping, Optional, Sequence, TYPE_CHECKING) + +from .session import Session +from ..probe.aggregator import DebugProbeAggregator + +if TYPE_CHECKING: + from ..probe.debug_probe import DebugProbe from .session import Session from ..probe.aggregator import DebugProbeAggregator @@ -25,7 +32,7 @@ # Init colorama here since this is currently the only module that uses it. colorama.init() -class ConnectHelper(object): +class ConnectHelper: """! @brief Helper class for streamlining the probe discovery and session creation process. 
This class provides several static methods that wrap the DebugProbeAggregator methods @@ -34,7 +41,12 @@ class ConnectHelper(object): """ @staticmethod - def get_sessions_for_all_connected_probes(blocking=True, unique_id=None, options=None, **kwargs): + def get_sessions_for_all_connected_probes( + blocking: bool = True, + unique_id: Optional[str] = None, + options: Optional[Mapping[str, Any]] = None, + **kwargs + ) -> List[Session]: """! @brief Return a list of Session objects for all connected debug probes. This method is useful for listing detailed information about connected probes, especially @@ -60,7 +72,11 @@ def get_sessions_for_all_connected_probes(blocking=True, unique_id=None, options return sessions @staticmethod - def get_all_connected_probes(blocking=True, unique_id=None, print_wait_message=True): + def get_all_connected_probes( + blocking: bool = True, + unique_id: Optional[str] = None, + print_wait_message: bool = True + ) -> List["DebugProbe"]: """! @brief Return a list of DebugProbe objects for all connected debug probes. The returned list of debug probes is always sorted by the combination of the probe's @@ -100,8 +116,8 @@ def get_all_connected_probes(blocking=True, unique_id=None, print_wait_message=T return sortedProbes @staticmethod - def list_connected_probes(): - """! @brief List the connected debug probes. + def list_connected_probes() -> None: + """! @brief List the connected debug probes. Prints a list of all connected probes to stdout. If no probes are connected, a message saying as much is printed instead. @@ -114,7 +130,11 @@ def list_connected_probes(): print(colorama.Style.RESET_ALL, end='') @staticmethod - def choose_probe(blocking=True, return_first=False, unique_id=None): + def choose_probe( + blocking: bool = True, + return_first: bool = False, + unique_id: str = None + ) -> Optional["DebugProbe"]: """! @brief Return a debug probe possibly chosen by the user. 
This method provides an easy to use command line interface for selecting one of the @@ -164,6 +184,7 @@ def choose_probe(blocking=True, return_first=False, unique_id=None): # Ask user to select boards if there is more than 1 left if len(allProbes) > 1: + ch = 0 ConnectHelper._print_probe_list(allProbes) while True: print(colorama.Style.RESET_ALL) @@ -179,7 +200,7 @@ def choose_probe(blocking=True, return_first=False, unique_id=None): pass if not valid: print(colorama.Fore.YELLOW + "Invalid choice: %s\n" % line) - Session._print_probe_list(allProbes) + ConnectHelper._print_probe_list(allProbes) else: break allProbes = allProbes[ch:ch + 1] @@ -188,8 +209,14 @@ def choose_probe(blocking=True, return_first=False, unique_id=None): return allProbes[0] @staticmethod - def session_with_chosen_probe(blocking=True, return_first=False, unique_id=None, - auto_open=True, options=None, **kwargs): + def session_with_chosen_probe( + blocking: bool = True, + return_first: bool = False, + unique_id: Optional[str] = None, + auto_open: bool = True, + options: Optional[Mapping[str, Any]] = None, + **kwargs + ) -> Optional[Session]: """! @brief Create a session with a probe possibly chosen by the user. 
This method provides an easy to use command line interface for selecting one of the @@ -242,7 +269,7 @@ def session_with_chosen_probe(blocking=True, return_first=False, unique_id=None, return Session(probe, auto_open=auto_open, options=options, **kwargs) @staticmethod - def _print_probe_list(probes): + def _print_probe_list(probes: Sequence["DebugProbe"]) -> None: pt = prettytable.PrettyTable(["#", "Probe", "Unique ID"]) pt.align = 'l' pt.header = True diff --git a/pyocd/core/memory_interface.py b/pyocd/core/memory_interface.py index f5251cf27..702124751 100644 --- a/pyocd/core/memory_interface.py +++ b/pyocd/core/memory_interface.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2018-2020 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,64 +15,147 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import (Callable, Sequence, Union, cast, overload) +from typing_extensions import Literal + from ..utility import conversion -class MemoryInterface(object): +class MemoryInterface: """! @brief Interface for memory access.""" - def write_memory(self, addr, data, transfer_size=32): + def write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None: """! @brief Write a single memory location. By default the transfer size is a word.""" raise NotImplementedError() - def read_memory(self, addr, transfer_size=32, now=True): + @overload + def read_memory(self, addr: int, transfer_size: int = 32) -> int: + ... + + @overload + def read_memory(self, addr: int, transfer_size: int = 32, now: Literal[True] = True) -> int: + ... + + @overload + def read_memory(self, addr: int, transfer_size: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Callable[[], int]]: + ... 
+ + def read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Read a memory location. By default, a word will be read.""" raise NotImplementedError() - def write_memory_block32(self, addr, data): + def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: """! @brief Write an aligned block of 32-bit words.""" raise NotImplementedError() - def read_memory_block32(self, addr, size): + def read_memory_block32(self, addr: int, size) -> Sequence[int]: """! @brief Read an aligned block of 32-bit words.""" raise NotImplementedError() - def write64(self, addr, value): + def write64(self, addr: int, value: int) -> None: """! @brief Shorthand to write a 64-bit word.""" self.write_memory(addr, value, 64) - def write32(self, addr, value): + def write32(self, addr: int, value: int) -> None: """! @brief Shorthand to write a 32-bit word.""" self.write_memory(addr, value, 32) - def write16(self, addr, value): + def write16(self, addr: int, value: int) -> None: """! @brief Shorthand to write a 16-bit halfword.""" self.write_memory(addr, value, 16) - def write8(self, addr, value): + def write8(self, addr: int, value: int) -> None: """! @brief Shorthand to write a byte.""" self.write_memory(addr, value, 8) - def read64(self, addr, now=True): + @overload + def read64(self, addr: int) -> int: + ... + + @overload + def read64(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read64(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read64(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: + ... + + def read64(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Shorthand to read a 64-bit word.""" return self.read_memory(addr, 64, now) - def read32(self, addr, now=True): + @overload + def read32(self, addr: int) -> int: + ... 
+ + @overload + def read32(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read32(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read32(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: + ... + + def read32(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Shorthand to read a 32-bit word.""" return self.read_memory(addr, 32, now) - def read16(self, addr, now=True): + @overload + def read16(self, addr: int) -> int: + ... + + @overload + def read16(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read16(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read16(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: + ... + + def read16(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Shorthand to read a 16-bit halfword.""" return self.read_memory(addr, 16, now) - def read8(self, addr, now=True): + @overload + def read8(self, addr: int) -> int: + ... + + @overload + def read8(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read8(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read8(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: + ... + + def read8(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Shorthand to read a byte.""" return self.read_memory(addr, 8, now) - def read_memory_block8(self, addr, size): + def read_memory_block8(self, addr: int, size: int) -> Sequence[int]: """! @brief Read a block of unaligned bytes in memory. 
@return an array of byte values """ @@ -86,7 +170,7 @@ def read_memory_block8(self, addr, size): # try to read 16bits data if (size > 1) and (addr & 0x02): - mem = self.read16(addr) + mem = cast(int, self.read16(addr)) res.append(mem & 0xff) res.append((mem >> 8) & 0xff) size -= 2 @@ -94,13 +178,13 @@ def read_memory_block8(self, addr, size): # try to read aligned block of 32bits if (size >= 4): - mem = self.read_memory_block32(addr, size // 4) - res += conversion.u32le_list_to_byte_list(mem) - size -= 4*len(mem) - addr += 4*len(mem) + data32 = self.read_memory_block32(addr, size // 4) + res += conversion.u32le_list_to_byte_list(data32) + size -= 4*len(data32) + addr += 4*len(data32) if (size > 1): - mem = self.read16(addr) + mem = cast(int, self.read16(addr)) res.append(mem & 0xff) res.append((mem >> 8) & 0xff) size -= 2 @@ -112,7 +196,7 @@ def read_memory_block8(self, addr, size): return res - def write_memory_block8(self, addr, data): + def write_memory_block8(self, addr: int, data: Sequence[int]) -> None: """! @brief Write a block of unaligned bytes in memory.""" size = len(data) idx = 0 diff --git a/pyocd/core/memory_map.py b/pyocd/core/memory_map.py index dbbcfb564..b47777039 100644 --- a/pyocd/core/memory_map.py +++ b/pyocd/core/memory_map.py @@ -19,9 +19,14 @@ import collections.abc import copy from functools import total_ordering +from typing import (Any, Dict, Iterable, Iterator, List, Optional, TYPE_CHECKING, Sequence, Tuple, Type, Union) from ..utility.strings import uniquify_name +if TYPE_CHECKING: + from ..target.pack.flash_algo import PackFlashAlgo + from ..flash.flash import Flash + class MemoryType(Enum): """! 
@brief Known types of memory.""" OTHER = 0 @@ -30,26 +35,37 @@ class MemoryType(Enum): FLASH = 3 DEVICE = 4 -def check_range(start, end=None, length=None, range=None): - assert (start is not None) and ((isinstance(start, MemoryRange) or range is not None) or +def check_range( + start: Union[int, "MemoryRangeBase"], + end: Optional[int] = None, + length: Optional[int] = None, + range: Optional["MemoryRangeBase"] = None + ) -> Tuple[int, int]: + assert (start is not None) and ((isinstance(start, MemoryRangeBase) or range is not None) or ((end is not None) ^ (length is not None))) - if isinstance(start, MemoryRange): + if isinstance(start, MemoryRangeBase): range = start if range is not None: - start = range.start - end = range.end - elif end is None: - end = start + length - 1 - return start, end + actual_start = range.start + actual_end = range.end + else: + assert not isinstance(start, MemoryRangeBase) + actual_start = start + if end is None: + assert length is not None + actual_end = actual_start + length - 1 + else: + actual_end = end + return actual_start, actual_end @total_ordering -class MemoryRangeBase(object): +class MemoryRangeBase: """! @brief Base class for a range of memory. This base class provides the basic address range support and methods to test for containment or intersection with another range. 
""" - def __init__(self, start=0, end=0, length=None): + def __init__(self, start: int = 0, end: int = 0, length: Optional[int] = None) -> None: self._start = start if length is not None: self._end = self._start + length - 1 @@ -58,65 +74,89 @@ def __init__(self, start=0, end=0, length=None): assert self._end >= (self._start - 1) @property - def start(self): + def start(self) -> int: return self._start @property - def end(self): + def end(self) -> int: return self._end @property - def length(self): + def length(self) -> int: return self._end - self._start + 1 - def contains_address(self, address): + def contains_address(self, address: int) -> bool: return (address >= self.start) and (address <= self.end) - def contains_range(self, start, end=None, length=None, range=None): + def contains_range( + self, + start: Union[int, "MemoryRangeBase"], + end: Optional[int] = None, + length: Optional[int] = None, + range: Optional["MemoryRangeBase"] = None + ) -> bool: """! @return Whether the given range is fully contained by the region.""" start, end = check_range(start, end, length, range) return self.contains_address(start) and self.contains_address(end) - def contained_by_range(self, start, end=None, length=None, range=None): + def contained_by_range( + self, + start: Union[int, "MemoryRangeBase"], + end: Optional[int] = None, + length: Optional[int] = None, + range: Optional["MemoryRangeBase"] = None + ) -> bool: """! @return Whether the region is fully within the bounds of the given range.""" start, end = check_range(start, end, length, range) return start <= self.start and end >= self.end - def intersects_range(self, start, end=None, length=None, range=None): + def intersects_range( + self, + start: Union[int, "MemoryRangeBase"], + end: Optional[int] = None, + length: Optional[int] = None, + range: Optional["MemoryRangeBase"] = None + ) -> bool: """! 
@return Whether the region and the given range intersect at any point.""" start, end = check_range(start, end, length, range) return (start <= self.start and end >= self.start) or (start <= self.end and end >= self.end) \ or (start >= self.start and end <= self.end) - def __hash__(self): + def __hash__(self) -> int: return hash("%08x%08x%08x" % (self.start, self.end, self.length)) - def __eq__(self, other): + def __eq__(self, other: "MemoryRangeBase") -> bool: return self.start == other.start and self.length == other.length - def __lt__(self, other): + def __lt__(self, other: "MemoryRangeBase") -> bool: return self.start < other.start or (self.start == other.start and self.length == other.length) class MemoryRange(MemoryRangeBase): """! @brief A range of memory optionally tied to a region.""" - def __init__(self, start=0, end=0, length=None, region=None): - super(MemoryRange, self).__init__(start=start, end=end, length=length) + def __init__( + self, + start: int = 0, + end: int = 0, + length: Optional[int] = None, + region: Optional["MemoryRegion"] = None + ) -> None: + super().__init__(start=start, end=end, length=length) self._region = region @property - def region(self): + def region(self) -> Optional["MemoryRegion"]: return self._region - def __hash__(self): - h = super(MemoryRange, self).__hash__() + def __hash__(self) -> int: + h = super().__hash__() if self.region is not None: h ^= hash(self.region) return h - def __eq__(self, other): + def __eq__(self, other: "MemoryRange") -> bool: return self.start == other.start and self.length == other.length and self.region == other.region - def __repr__(self): + def __repr__(self) -> str: return "<%s@0x%x start=0x%x end=0x%x length=0x%x region=%s>" % (self.__class__.__name__, id(self), self.start, self.end, self.length, self.region) @@ -156,7 +196,7 @@ class MemoryRegion(MemoryRangeBase): """ ## Default attribute values for all memory region types. 
- DEFAULT_ATTRS = { + DEFAULT_ATTRS: Dict[str, Any] = { 'name': lambda r: r.type.name.lower(), 'access': 'rwx', 'alias': None, @@ -178,7 +218,14 @@ class MemoryRegion(MemoryRangeBase): 'is_nonsecure': lambda r: not r.is_secure, } - def __init__(self, type=MemoryType.OTHER, start=0, end=0, length=None, **attrs): + def __init__( + self, + type: MemoryType = MemoryType.OTHER, + start: int = 0, + end: int = 0, + length: Optional[int] = None, + **attrs: Any + ) -> None: """! Memory region constructor. Memory regions are required to have non-zero lengths, unlike memory ranges. @@ -191,10 +238,10 @@ def __init__(self, type=MemoryType.OTHER, start=0, end=0, length=None, **attrs): - is_powered_on_boot - is_testable """ - super(MemoryRegion, self).__init__(start=start, end=end, length=length) + super().__init__(start=start, end=end, length=length) assert self.length > 0, "Memory regions must have a non-zero length." assert isinstance(type, MemoryType) - self._map = None + self._map: Optional[MemoryMap] = None self._type = type self._attributes = attrs @@ -204,23 +251,23 @@ def __init__(self, type=MemoryType.OTHER, start=0, end=0, length=None, **attrs): self._attributes[k] = v @property - def map(self): + def map(self) -> Optional["MemoryMap"]: return self._map @map.setter - def map(self, the_map): + def map(self, the_map: Optional["MemoryMap"]) -> None: self._map = the_map @property - def type(self): + def type(self) -> MemoryType: return self._type @property - def attributes(self): + def attributes(self) -> Dict[str, Any]: return self._attributes @property - def alias(self): + def alias(self) -> Any: # Resolve alias reference. 
alias_value = self._attributes['alias'] if isinstance(alias_value, str) and self._map is not None: @@ -232,7 +279,7 @@ def alias(self): else: return alias_value - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: try: v = self._attributes[name] except KeyError: @@ -243,7 +290,7 @@ def __getattr__(self, name): v = v(self) return v - def _get_attributes_for_clone(self): + def _get_attributes_for_clone(self) -> Dict[str, Any]: """@brief Return a dict containing all the attributes of this region. This method must be overridden by subclasses to include in the returned dict any instance attributes @@ -251,32 +298,38 @@ def _get_attributes_for_clone(self): """ return dict(start=self.start, length=self.length, **self._attributes) - def clone_with_changes(self, **modified_attrs): + def clone_with_changes(self, **modified_attrs: Any) -> Any: # Have to return Any because Self isn't available yet. """@brief Create a duplicate this region with some of its attributes modified.""" new_attrs = self._get_attributes_for_clone() new_attrs.update(modified_attrs) return self.__class__(**new_attrs) - def __copy__(self): + def __copy__(self) -> Any: return self.clone_with_changes() # Need to redefine __hash__ since we redefine __eq__. __hash__ = MemoryRangeBase.__hash__ - def __eq__(self, other): + def __eq__(self, other: "MemoryRegion") -> bool: # Include type and attributes in equality comparison. return self.start == other.start and self.length == other.length \ and self.type == other.type and self.attributes == other.attributes - def __repr__(self): + def __repr__(self) -> str: return "<%s@0x%x name=%s type=%s start=0x%x end=0x%x length=0x%x access=%s>" % (self.__class__.__name__, id(self), self.name, self.type, self.start, self.end, self.length, self.access) class RamRegion(MemoryRegion): """! 
@brief Contiguous region of RAM.""" - def __init__(self, start=0, end=0, length=None, **attrs): + def __init__( + self, + start: int = 0, + end: int = 0, + length: Optional[int] = None, + **attrs: Any + ) -> None: attrs['type'] = MemoryType.RAM - super(RamRegion, self).__init__(start=start, end=end, length=length, **attrs) + super().__init__(start=start, end=end, length=length, **attrs) class RomRegion(MemoryRegion): """! @brief Contiguous region of ROM.""" @@ -287,9 +340,15 @@ class RomRegion(MemoryRegion): 'access': 'rx', # ROM is by definition not writable. }) - def __init__(self, start=0, end=0, length=None, **attrs): + def __init__( + self, + start: int = 0, + end: int = 0, + length: Optional[int] = None, + **attrs: Any + ) -> None: attrs['type'] = MemoryType.ROM - super(RomRegion, self).__init__(start=start, end=end, length=length, **attrs) + super().__init__(start=start, end=end, length=length, **attrs) class DefaultFlashWeights: """! @brief Default weights for flash programming operations.""" @@ -337,13 +396,24 @@ class FlashRegion(MemoryRegion): 'are_erased_sectors_readable': True, }) - def __init__(self, start=0, end=0, length=None, **attrs): + _algo: Optional[Dict[str, Any]] + _flm: Optional[Union[str, "PackFlashAlgo"]] + _flash: Optional["Flash"] + _flash_class: Type["Flash"] + + def __init__( + self, + start: int = 0, + end: int = 0, + length: Optional[int] = None, + **attrs: Any + ) -> None: # Import locally to prevent import loops. 
from ..flash.flash import Flash assert ('blocksize' in attrs) or ('sector_size' in attrs) or ('flm' in attrs) attrs['type'] = MemoryType.FLASH - super(FlashRegion, self).__init__(start=start, end=end, length=length, **attrs) + super().__init__(start=start, end=end, length=length, **attrs) self._algo = attrs.get('algo', None) self._flm = attrs.get('flm', None) self._flash = None @@ -369,38 +439,38 @@ def __init__(self, start=0, end=0, length=None, **attrs): pass @property - def algo(self): + def algo(self) -> Optional[Dict[str, Any]]: return self._algo @algo.setter - def algo(self, flash_algo): + def algo(self, flash_algo: Dict[str, Any]) -> None: self._algo = flash_algo @property - def flm(self): + def flm(self) -> Optional[Union[str, "PackFlashAlgo"]]: return self._flm @flm.setter - def flm(self, flm_path): + def flm(self, flm_path: str) -> None: self._flm = flm_path @property - def flash_class(self): + def flash_class(self) -> Type["Flash"]: return self._flash_class @flash_class.setter - def flash_class(self, klass): + def flash_class(self, klass: Type["Flash"]) -> None: self._flash_class = klass @property - def flash(self): + def flash(self) -> Optional["Flash"]: return self._flash @flash.setter - def flash(self, flash_instance): + def flash(self, flash_instance: "Flash") -> None: self._flash = flash_instance - def is_data_erased(self, d): + def is_data_erased(self, d: Iterable[int]) -> bool: """! @brief Helper method to check if a block of data is erased. @param self @param d List of data or bytearray. @@ -413,7 +483,7 @@ def is_data_erased(self, d): return False return True - def _get_attributes_for_clone(self): + def _get_attributes_for_clone(self) -> Dict[str, Any]: """@brief Return a dict containing all the attributes of this region.""" d = super()._get_attributes_for_clone() d.update( @@ -426,12 +496,12 @@ def _get_attributes_for_clone(self): # Need to redefine __hash__ since we redefine __eq__. 
__hash__ = MemoryRegion.__hash__ - def __eq__(self, other): + def __eq__(self, other: "FlashRegion") -> bool: # Include flash algo, class, and flm in equality test. - return super(FlashRegion, self).__eq__(other) and self.algo == other.algo and \ + return super().__eq__(other) and self.algo == other.algo and \ self.flash_class == other.flash_class and self.flm == other.flm - def __repr__(self): + def __repr__(self) -> str: return "<%s@0x%x name=%s type=%s start=0x%x end=0x%x length=0x%x access=%s blocksize=0x%x>" % ( self.__class__.__name__, id(self), self.name, self.type, self.start, self.end, self.length, self.access, self.blocksize) @@ -447,12 +517,18 @@ class DeviceRegion(MemoryRegion): 'is_testable': False, }) - def __init__(self, start=0, end=0, length=None, **attrs): + def __init__( + self, + start: int = 0, + end: int = 0, + length: Optional[int] = None, + **attrs: Any + ) -> None: attrs['type'] = MemoryType.DEVICE - super(DeviceRegion, self).__init__(start=start, end=end, length=length, **attrs) + super().__init__(start=start, end=end, length=length, **attrs) ## @brief Map from memory type to class. -MEMORY_TYPE_CLASS_MAP = { +MEMORY_TYPE_CLASS_MAP: Dict[MemoryType, Type[MemoryRegion]] = { MemoryType.OTHER: MemoryRegion, MemoryType.RAM: RamRegion, MemoryType.ROM: RomRegion, @@ -486,7 +562,9 @@ class MemoryMap(collections.abc.Sequence): MemoryMap objects implement the collections.abc.Sequence interface. """ - def __init__(self, *more_regions): + _regions: List[MemoryRegion] + + def __init__(self, *more_regions: Union[Sequence[MemoryRegion], MemoryRegion]) -> None: """! @brief Constructor. All parameters passed to the constructor are assumed to be MemoryRegion instances, and @@ -499,7 +577,7 @@ def __init__(self, *more_regions): self.add_regions(*more_regions) @property - def regions(self): + def regions(self) -> List[MemoryRegion]: """! @brief List of all memory regions. Regions in the returned list are sorted by start address. 
@@ -507,11 +585,11 @@ def regions(self): return self._regions @property - def region_count(self): + def region_count(self) -> int: """! @brief Number of memory regions in the map.""" return len(self._regions) - def clone(self): + def clone(self) -> "MemoryMap": """! @brief Create a duplicate of the memory map. The duplicate memory map contains shallow copies of each of the regions. This is intended @@ -520,7 +598,7 @@ def clone(self): """ return MemoryMap(*[copy.copy(r) for r in self.regions]) - def add_regions(self, *more_regions): + def add_regions(self, *more_regions: Union[Sequence[MemoryRegion], MemoryRegion]) -> None: """! @brief Add multiple regions to the memory map. There are two options for passing the list of regions to be added. The first is to pass @@ -534,15 +612,16 @@ def add_regions(self, *more_regions): passed as separate parameters. """ if len(more_regions): - if isinstance(more_regions[0], (list, tuple)): - regionsToAdd = more_regions[0] + if isinstance(more_regions[0], collections.abc.Sequence): + regions_to_add = more_regions[0] else: - regionsToAdd = more_regions + regions_to_add = more_regions - for newRegion in regionsToAdd: - self.add_region(newRegion) + for new_region in regions_to_add: + assert isinstance(new_region, MemoryRegion) + self.add_region(new_region) - def add_region(self, new_region): + def add_region(self, new_region: MemoryRegion) -> None: """! @brief Add one new region to the map. The region list is resorted after adding the provided region. @@ -560,7 +639,7 @@ def add_region(self, new_region): self._regions.append(new_region) self._regions.sort() - def remove_region(self, region): + def remove_region(self, region: MemoryRegion) -> None: """! @brief Removes a memory region from the map. @param self @param region The region to remove. 
The region to remove is matched by identity, not value, @@ -570,7 +649,7 @@ def remove_region(self, region): if r is region: del self._regions[i] - def get_boot_memory(self): + def get_boot_memory(self) -> Optional[MemoryRegion]: """! @brief Returns the first region marked as boot memory. @param self @@ -581,7 +660,7 @@ def get_boot_memory(self): return r return None - def get_region_for_address(self, address): + def get_region_for_address(self, address: int) -> Optional[MemoryRegion]: """! @brief Returns the first region containing the given address. @param self @@ -593,7 +672,7 @@ def get_region_for_address(self, address): return r return None - def is_valid_address(self, address): + def is_valid_address(self, address: int) -> bool: """! @brief Determines whether an address is contained by any region. @param self @@ -602,7 +681,13 @@ def is_valid_address(self, address): """ return self.get_region_for_address(address) is not None - def get_contained_regions(self, start, end=None, length=None, range=None): + def get_contained_regions( + self, + start: Union[int, "MemoryRangeBase"], + end: Optional[int] = None, + length: Optional[int] = None, + range: Optional["MemoryRangeBase"] = None + ) -> List[MemoryRegion]: """! @brief Get all regions fully contained by an address range. @param self @@ -616,7 +701,13 @@ def get_contained_regions(self, start, end=None, length=None, range=None): start, end = check_range(start, end, length, range) return [r for r in self._regions if r.contained_by_range(start, end)] - def get_intersecting_regions(self, start, end=None, length=None, range=None): + def get_intersecting_regions( + self, + start: Union[int, "MemoryRangeBase"], + end: Optional[int] = None, + length: Optional[int] = None, + range: Optional["MemoryRangeBase"] = None + ) -> List[MemoryRegion]: """! @brief Get all regions intersected by an address range. 
@param self @@ -630,7 +721,7 @@ def get_intersecting_regions(self, start, end=None, length=None, range=None): start, end = check_range(start, end, length, range) return [r for r in self._regions if r.intersects_range(start, end)] - def iter_matching_regions(self, **kwargs): + def iter_matching_regions(self, **kwargs: Any) -> Iterator[MemoryRegion]: """! @brief Iterate over regions matching given criteria. Useful attributes to match on include 'type', 'name', 'is_default', and others. @@ -654,7 +745,7 @@ def iter_matching_regions(self, **kwargs): yield r - def get_first_matching_region(self, **kwargs): + def get_first_matching_region(self, **kwargs: Any) -> Optional[MemoryRegion]: """! @brief Get the first region matching a given memory type. The region of given type with the lowest start address is returned. If there are no regions @@ -668,7 +759,7 @@ def get_first_matching_region(self, **kwargs): return r return None - def get_default_region_of_type(self, type): + def get_default_region_of_type(self, type: MemoryType) -> Optional[MemoryRegion]: """! @brief Get the default region of a given memory type. If there are multiple regions of the specified type marked as default, then the one with @@ -681,29 +772,32 @@ def get_default_region_of_type(self, type): """ return self.get_first_matching_region(type=type, is_default=True) - def __eq__(self, other): + def __eq__(self, other: "MemoryMap") -> bool: return isinstance(other, MemoryMap) and (self._regions == other._regions) - def __iter__(self): + def __iter__(self) -> Iterator[MemoryRegion]: """! @brief Enable iteration over the memory map.""" return iter(self._regions) - def __reversed__(self): + def __reversed__(self) -> Iterator[MemoryRegion]: """! @brief Reverse iteration over the memory map.""" return reversed(self._regions) - def __getitem__(self, key): + def __getitem__(self, key: Union[int, str]) -> MemoryRegion: """! 
@brief Return a region indexed by name or number.""" if isinstance(key, str): - return self.get_first_matching_region(name=key) + result = self.get_first_matching_region(name=key) + if result is None: + raise IndexError(key) + return result else: return self._regions[key] - def __len__(self): + def __len__(self) -> int: """! @brief Return the number of regions.""" return len(self._regions) - def __contains__(self, key): + def __contains__(self, key: Union[int, str, MemoryRegion]) -> bool: if isinstance(key, int): return self.is_valid_address(key) elif isinstance(key, str): @@ -711,7 +805,7 @@ def __contains__(self, key): else: return key in self._regions - def __repr__(self): + def __repr__(self) -> str: return "" % (id(self), repr(self._regions)) diff --git a/pyocd/core/options_manager.py b/pyocd/core/options_manager.py index 162f76891..4e9403e3c 100644 --- a/pyocd/core/options_manager.py +++ b/pyocd/core/options_manager.py @@ -17,26 +17,29 @@ import logging from functools import partial -from collections import namedtuple +from typing import (Any, Callable, Dict, List, Mapping, NamedTuple, Optional) from .options import OPTIONS_INFO from ..utility.notification import Notifier LOG = logging.getLogger(__name__) -## @brief Data for an option value change notification. -# -# Instances of this class are used for the data attribute of the @ref -# pyocd.utility.notification.Notification "Notification" sent to subscribers when an option's value -# is changed. -# -# An instance of this class has two attributes: -# - `new_value`: The new, current value of the option. -# - `old_value`: The previous value of the option. -OptionChangeInfo = namedtuple('OptionChangeInfo', 'new_value old_value') +class OptionChangeInfo(NamedTuple): + """@brief Data for an option value change notification. + + Instances of this class are used for the data attribute of the @ref + pyocd.utility.notification.Notification "Notification" sent to subscribers when an option's value + is changed. 
+ + An instance of this class has two attributes: + - `new_value`: The new, current value of the option. + - `old_value`: The previous value of the option. + """ + new_value: Any + old_value: Any class OptionsManager(Notifier): - """! @brief Handles session option management for a session. + """@brief Handles session option management for a session. The options manager supports multiple layers of option priority. When an option's value is accessed, the highest priority layer that contains a value for the option is used. This design @@ -50,13 +53,17 @@ class OptionsManager(Notifier): old value is the option's default. """ - def __init__(self): + LayerType = Mapping[str, Any] + + _layers: List[Dict[str, Any]] + + def __init__(self) -> None: """! @brief Option manager constructor. """ - super(OptionsManager, self).__init__() + super().__init__() self._layers = [] - def _update_layers(self, new_options, update_operation): + def _update_layers(self, new_options: Optional[LayerType], update_operation: Callable[[LayerType], None]) -> None: """! @brief Internal method to add a new layer dictionary. @param self @@ -72,7 +79,7 @@ def _update_layers(self, new_options, update_operation): new_values = {name: self.get(name) for name in filtered_options.keys()} self._notify_changes(previous_values, new_values) - def add_front(self, new_options): + def add_front(self, new_options: Optional[LayerType]) -> None: """! @brief Add a new highest priority layer of option values. @param self @@ -80,7 +87,7 @@ def add_front(self, new_options): """ self._update_layers(new_options, partial(self._layers.insert, 0)) - def add_back(self, new_options): + def add_back(self, new_options: Optional[LayerType]) -> None: """! @brief Add a new lowest priority layer of option values. 
@param self @@ -88,7 +95,7 @@ def add_back(self, new_options): """ self._update_layers(new_options, self._layers.append) - def _convert_options(self, new_options): + def _convert_options(self, new_options: LayerType) -> LayerType: """! @brief Prepare a dictionary of session options for use by the manager. 1. Strip dictionary entries with a value of None. @@ -104,7 +111,7 @@ def _convert_options(self, new_options): output[name] = value return output - def is_set(self, key): + def is_set(self, key: str) -> bool: """! @brief Return whether a value is set for the specified option. This method returns True as long as any layer has a value set for the option, even if the @@ -116,46 +123,46 @@ def is_set(self, key): return True return False - def get_default(self, key): + def get_default(self, key: str) -> Any: """! @brief Return the default value for the specified option.""" if key in OPTIONS_INFO: return OPTIONS_INFO[key].default else: return None - def get(self, key): + def get(self, key: str) -> Any: """! @brief Return the highest priority value for the option, or its default.""" for layer in self._layers: if key in layer: return layer[key] return self.get_default(key) - def set(self, key, value): + def set(self, key: str, value: Any) -> None: """! @brief Set an option in the current highest priority layer.""" self.update({key: value}) - def update(self, new_options): + def update(self, new_options: LayerType) -> None: """! @brief Set multiple options in the current highest priority layer.""" filtered_options = self._convert_options(new_options) previous_values = {name: self.get(name) for name in filtered_options.keys()} self._layers[0].update(filtered_options) self._notify_changes(previous_values, filtered_options) - def _notify_changes(self, previous, options): + def _notify_changes(self, previous: LayerType, options: LayerType) -> None: """! 
@brief Send notifications that the specified options have changed.""" for name, new_value in options.items(): previous_value = previous[name] if new_value != previous_value: self.notify(name, data=OptionChangeInfo(new_value, previous_value)) - def __contains__(self, key): + def __contains__(self, key: str) -> bool: """! @brief Returns whether the named option has a non-default value.""" return self.is_set(key) - def __getitem__(self, key): + def __getitem__(self, key: str) -> Any: """! @brief Return the highest priority value for the option, or its default.""" return self.get(key) - def __setitem__(self, key, value): + def __setitem__(self, key: str, value: Any) -> None: """! @brief Set an option in the current highest priority layer.""" self.set(key, value) diff --git a/pyocd/core/plugin.py b/pyocd/core/plugin.py index 5ec856684..63defaa79 100644 --- a/pyocd/core/plugin.py +++ b/pyocd/core/plugin.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2020 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,13 +17,21 @@ import pkg_resources import logging +from typing import ( + Any, + Dict, + List, + ) from .._version import version as pyocd_version -from .options import add_option_set +from .options import ( + add_option_set, + OptionInfo, + ) LOG = logging.getLogger(__name__) -class Plugin(object): +class Plugin: """! @brief Class that describes a plugin for pyOCD. Each plugin vends a subclass of Plugin that describes itself and provides meta-actions. @@ -32,11 +41,11 @@ class Plugin(object): will always load, and does nothing when loaded. """ - def should_load(self): + def should_load(self) -> bool: """! @brief Whether the plugin should be loaded.""" return True - def load(self): + def load(self) -> Any: """! @brief Load the plugin and return the plugin implementation. 
This method can perform any actions required to load the plugin beyond simply returning @@ -47,15 +56,15 @@ def load(self): pass @property - def options(self): + def options(self) -> List[OptionInfo]: """! @brief A list of options added by the plugin. @return List of @ref pyocd.core.options.OptionInfo "OptionInfo" objects. """ return [] @property - def version(self): - """! @brief Current version of the plugin. + def version(self) -> str: + """! @brief Current release version of the plugin. The default implementation returns pyOCD's version. @@ -64,16 +73,16 @@ def version(self): return pyocd_version @property - def name(self): + def name(self) -> str: """! @brief Name of the plugin.""" raise NotImplementedError() @property - def description(self): + def description(self) -> str: """! @brief Short description of the plugin.""" return "" -def load_plugin_classes_of_type(plugin_group, plugin_dict, base_class): +def load_plugin_classes_of_type(plugin_group: str, plugin_dict: Dict[str, Any], base_class: type) -> None: """! @brief Helper method to load plugins. Plugins are expected to return an implementation class from their Plugin.load() method. This diff --git a/pyocd/core/session.py b/pyocd/core/session.py index f298d9303..c87efd049 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -21,12 +21,20 @@ import os import weakref from inspect import getfullargspec +from typing import (Any, cast, Dict, List, Mapping, Optional, TYPE_CHECKING) from . import exceptions from .options_manager import OptionsManager from ..board.board import Board from ..utility.notification import Notifier +if TYPE_CHECKING: + from types import TracebackType + from .soc_target import SoCTarget + from ..probe.debug_probe import DebugProbe + from ..probe.tcp_probe_server import DebugProbeServer + from ..gdbserver.gdbserver import GDBServer + LOG = logging.getLogger(__name__) ## @brief Set of default config filenames to search for. 
@@ -81,10 +89,10 @@ class Session(Notifier): """ ## @brief Weak reference to the most recently created session. - _current_session = None + _current_session: Optional[weakref.ref] = None @classmethod - def get_current(cls): + def get_current(cls) -> "Session": """! @brief Return the most recently created Session instance or a default Session. By default this method will return the most recently created Session object that is @@ -102,7 +110,14 @@ def get_current(cls): return Session(None) - def __init__(self, probe, auto_open=True, options=None, option_defaults=None, **kwargs): + def __init__( + self, + probe: Optional["DebugProbe"], + auto_open: bool = True, + options: Optional[Mapping[str, Any]] = None, + option_defaults: Optional[Mapping[str, Any]] = None, + **kwargs + ) -> None: """! @brief Session constructor. Creates a new session using the provided debug probe. Session options are merged from the @@ -125,20 +140,20 @@ def __init__(self, probe, auto_open=True, options=None, option_defaults=None, ** defaults for option if they are not set through any other method. @param kwargs Session options passed as keyword arguments. """ - super(Session, self).__init__() + super().__init__() Session._current_session = weakref.ref(self) self._probe = probe - self._closed = True - self._inited = False - self._user_script_namespace = None - self._user_script_proxy = None - self._delegate = None + self._closed: bool = True + self._inited: bool = False + self._user_script_namespace: Dict[str, Any] = {} + self._user_script_proxy: Optional[UserScriptDelegateProxy] = None + self._delegate: Optional[Any] = None self._auto_open = auto_open self._options = OptionsManager() - self._gdbservers = {} - self._probeserver = None + self._gdbservers: Dict[int, "GDBServer"] = {} + self._probeserver: Optional["DebugProbeServer"] = None # Set this session on the probe, if we were given a probe. 
if probe is not None: @@ -150,9 +165,9 @@ def __init__(self, probe, auto_open=True, options=None, option_defaults=None, ** # Init project directory. if self.options.get('project_dir') is None: - self._project_dir = os.environ.get('PYOCD_PROJECT_DIR') or os.getcwd() + self._project_dir: str = os.environ.get('PYOCD_PROJECT_DIR') or os.getcwd() else: - self._project_dir = os.path.abspath(os.path.expanduser(self.options.get('project_dir'))) + self._project_dir: str = os.path.abspath(os.path.expanduser(self.options.get('project_dir'))) LOG.debug("Project directory: %s", self.project_dir) # Load options from the config file. @@ -191,7 +206,7 @@ def __init__(self, probe, auto_open=True, options=None, option_defaults=None, ** # Ask the probe if it has an associated board, and if not then we create a generic one. self._board = probe.create_associated_board() or Board(self) - def _get_config(self): + def _get_config(self) -> Dict[str, Any]: # Load config file if one was provided via options, and no_config option was not set. if not self.options.get('no_config'): configPath = self.find_user_file('config_file', _CONFIG_FILE_NAMES) @@ -214,7 +229,7 @@ def _get_config(self): return {} - def find_user_file(self, option_name, filename_list): + def find_user_file(self, option_name: Optional[str], filename_list: List[str]) -> Optional[str]: """! @brief Search the project directory for a file. @retval None No matching file was found. @@ -241,14 +256,14 @@ def find_user_file(self, option_name, filename_list): return filePath - def _configure_logging(self): + def _configure_logging(self) -> None: """! @brief Load a logging config dict or file.""" # Get logging config that could have been loaded from the config file. - config = self.options.get('logging') + config_value = self.options.get('logging') # Allow logging setting to refer to another file. 
- if isinstance(config, str): - loggingConfigPath = self.find_user_file(None, [config]) + if isinstance(config_value, str): + loggingConfigPath = self.find_user_file(None, [config_value]) if loggingConfigPath is not None: try: @@ -256,8 +271,13 @@ def _configure_logging(self): config = yaml.safe_load(configFile) LOG.debug("Using logging configuration from: %s", config) except IOError as err: - LOG.warning("Error attempting to load logging config file '%s': %s", config, err) + LOG.warning("Error attempting to load logging config file '%s': %s", config_value, err) return + else: + LOG.warning("Logging config file '%s' does not exist", config_value) + return + else: + config = config_value if config is not None: # Stuff a version key if it's missing, to make it easier to use. @@ -276,74 +296,74 @@ def _configure_logging(self): LOG.warning("Error applying logging configuration: %s", err) @property - def is_open(self): + def is_open(self) -> bool: """! @brief Boolean of whether the session has been opened.""" return self._inited and not self._closed @property - def probe(self): + def probe(self) -> Optional["DebugProbe"]: """! @brief The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" instance.""" return self._probe @property - def board(self): + def board(self) -> Optional[Board]: """! @brief The @ref pyocd.board.board.Board "Board" object.""" return self._board @property - def target(self): + def target(self) -> Optional["SoCTarget"]: """! @brief The @ref pyocd.core.target.soc_target "SoCTarget" object representing the SoC. This is the @ref pyocd.core.target.soc_target "SoCTarget" instance owned by the board. """ - return self.board.target + return self.board.target if self.board else None @property - def options(self): + def options(self) -> OptionsManager: """! @brief The @ref pyocd.core.options_manager.OptionsManager "OptionsManager" object.""" return self._options @property - def project_dir(self): + def project_dir(self) -> str: """! 
@brief Path to the project directory.""" return self._project_dir @property - def delegate(self): + def delegate(self) -> Any: """! @brief An optional delegate object for customizing behaviour.""" return self._delegate @delegate.setter - def delegate(self, new_delegate): + def delegate(self, new_delegate: Any) -> None: """! @brief Setter for the `delegate` property.""" self._delegate = new_delegate @property - def user_script_proxy(self): + def user_script_proxy(self) -> Optional["UserScriptDelegateProxy"]: """! @brief The UserScriptDelegateProxy object for a loaded user script.""" return self._user_script_proxy @property - def gdbservers(self): + def gdbservers(self) -> Dict[int, "GDBServer"]: """! @brief Dictionary of core numbers to @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances.""" return self._gdbservers @property - def probeserver(self): + def probeserver(self) -> Optional["DebugProbeServer"]: """! @brief A @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer" instance.""" return self._probeserver @probeserver.setter - def probeserver(self, server): + def probeserver(self, server: "DebugProbeServer") -> None: """! @brief Setter for the `probeserver` property.""" self._probeserver = server @property - def log_tracebacks(self): + def log_tracebacks(self) -> bool: """! @brief Quick access to debug.traceback option since it is widely used.""" - return self.options.get('debug.traceback') + return cast(bool, self.options.get('debug.traceback')) - def __enter__(self): + def __enter__(self) -> "Session": assert self._probe is not None if self._auto_open: try: @@ -353,11 +373,11 @@ def __enter__(self): raise return self - def __exit__(self, type, value, traceback): + def __exit__(self, exc_type: type, value: Any, traceback: "TracebackType") -> bool: self.close() return False - def _init_user_script_namespace(self, user_script_path): + def _init_user_script_namespace(self, user_script_path: str) -> None: """! 
@brief Create the namespace dict used for user scripts. This initial namespace has only those objects that are available very early in the @@ -366,33 +386,37 @@ def _init_user_script_namespace(self, user_script_path): later on. """ import pyocd - import pyocd.flash.file_programmer + from . import target + from . import memory_map + from ..flash import file_programmer + from ..flash import eraser + from ..flash import loader self._user_script_namespace = { # Modules and classes 'pyocd': pyocd, - 'exceptions': pyocd.core.exceptions, - 'Error': pyocd.core.exceptions.Error, - 'TransferError': pyocd.core.exceptions.TransferError, - 'TransferFaultError': pyocd.core.exceptions.TransferFaultError, - 'Target': pyocd.core.target.Target, - 'State': pyocd.core.target.Target.State, - 'SecurityState': pyocd.core.target.Target.SecurityState, - 'BreakpointType': pyocd.core.target.Target.BreakpointType, - 'WatchpointType': pyocd.core.target.Target.WatchpointType, - 'VectorCatch': pyocd.core.target.Target.VectorCatch, - 'Event': pyocd.core.target.Target.Event, - 'RunType': pyocd.core.target.Target.RunType, - 'HaltReason': pyocd.core.target.Target.HaltReason, - 'ResetType': pyocd.core.target.Target.ResetType, - 'MemoryType': pyocd.core.memory_map.MemoryType, - 'MemoryMap': pyocd.core.memory_map.MemoryMap, - 'RamRegion': pyocd.core.memory_map.RamRegion, - 'RomRegion': pyocd.core.memory_map.RomRegion, - 'FlashRegion': pyocd.core.memory_map.FlashRegion, - 'DeviceRegion': pyocd.core.memory_map.DeviceRegion, - 'FileProgrammer': pyocd.flash.file_programmer.FileProgrammer, - 'FlashEraser': pyocd.flash.eraser.FlashEraser, - 'FlashLoader': pyocd.flash.loader.FlashLoader, + 'exceptions': exceptions, + 'Error': exceptions.Error, + 'TransferError': exceptions.TransferError, + 'TransferFaultError': exceptions.TransferFaultError, + 'Target': target.Target, + 'State': target.Target.State, + 'SecurityState': target.Target.SecurityState, + 'BreakpointType': target.Target.BreakpointType, + 
'WatchpointType': target.Target.WatchpointType, + 'VectorCatch': target.Target.VectorCatch, + 'Event': target.Target.Event, + 'RunType': target.Target.RunType, + 'HaltReason': target.Target.HaltReason, + 'ResetType': target.Target.ResetType, + 'MemoryType': memory_map.MemoryType, + 'MemoryMap': memory_map.MemoryMap, + 'RamRegion': memory_map.RamRegion, + 'RomRegion': memory_map.RomRegion, + 'FlashRegion': memory_map.FlashRegion, + 'DeviceRegion': memory_map.DeviceRegion, + 'FileProgrammer': file_programmer.FileProgrammer, + 'FlashEraser': eraser.FlashEraser, + 'FlashLoader': loader.FlashLoader, # User script info '__name__': os.path.splitext(os.path.basename(user_script_path))[0], '__file__': user_script_path, @@ -402,18 +426,18 @@ def _init_user_script_namespace(self, user_script_path): 'LOG': logging.getLogger('pyocd.user_script'), } - def _update_user_script_namespace(self): + def _update_user_script_namespace(self) -> None: """! @brief Add objects available only after init to the user script namespace.""" if self._user_script_namespace is not None: self._user_script_namespace.update({ 'probe': self.probe, 'board': self.board, 'target': self.target, - 'dp': self.target.dp, - 'aps': self.target.aps, + 'dp': getattr(self.target, "dp", None), + 'aps': getattr(self.target, "aps", None), }) - def _load_user_script(self): + def _load_user_script(self) -> None: scriptPath = self.find_user_file('user_script', _USER_SCRIPT_NAMES) if scriptPath is not None: @@ -440,7 +464,7 @@ def _load_user_script(self): except IOError as err: LOG.warning("Error attempting to load user script '%s': %s", scriptPath, err) - def open(self, init_board=True): + def open(self, init_board: bool = True) -> None: """! @brief Open the session. This method does everything necessary to begin a debug session. It first loads the user @@ -468,7 +492,7 @@ def open(self, init_board=True): self._board.init() self._inited = True - def close(self): + def close(self) -> None: """! @brief Close the session. 
Uninits the board and disconnects then closes the probe. @@ -477,10 +501,13 @@ def close(self): return self._closed = True + # Should not have been able to open the session with either _probe or _board being None. + assert (self._probe is not None) and (self._board is not None) + LOG.debug("uninit session %s", self) if self._inited: try: - self.board.uninit() + self._board.uninit() self._inited = False except exceptions.Error: LOG.error("exception during board uninit:", exc_info=self.log_tracebacks) @@ -495,31 +522,31 @@ def close(self): except exceptions.Error: LOG.error("probe exception during close:", exc_info=self.log_tracebacks) -class UserScriptFunctionProxy(object): +class UserScriptFunctionProxy: """! @brief Proxy for user script functions. This proxy makes arguments to user script functions optional. """ - def __init__(self, fn): + def __init__(self, fn) -> None: self._fn = fn self._spec = getfullargspec(fn) - def __call__(self, **kwargs): + def __call__(self, **kwargs) -> Any: args = {} for arg in self._spec.args: if arg in kwargs: args[arg] = kwargs[arg] self._fn(**args) -class UserScriptDelegateProxy(object): +class UserScriptDelegateProxy: """! @brief Delegate proxy for user scripts.""" - def __init__(self, script_namespace): - super(UserScriptDelegateProxy, self).__init__() + def __init__(self, script_namespace: Dict) -> None: + super().__init__() self._script = script_namespace - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: if name in self._script: fn = self._script[name] return UserScriptFunctionProxy(fn) diff --git a/pyocd/core/soc_target.py b/pyocd/core/soc_target.py index b61bd4d42..1815c2782 100644 --- a/pyocd/core/soc_target.py +++ b/pyocd/core/soc_target.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2020 Arm Limited +# Copyright (c) Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,18 +16,27 @@ # limitations under the License. 
import logging +from typing import (Callable, Dict, List, Optional, overload, Sequence, Union, TYPE_CHECKING) +from typing_extensions import Literal -from .target import Target +from .target import (Target, TargetGraphNode) +from .core_target import CoreTarget from ..flash.eraser import FlashEraser from ..debug.cache import CachingDebugContext from ..debug.elf.elf import ELFBinaryFile from ..debug.elf.elf_reader import ElfReaderContext -from ..utility.graph import GraphNode from ..utility.sequencer import CallSequence +if TYPE_CHECKING: + from .session import Session + from .memory_map import MemoryMap + from .core_registers import (CoreRegistersIndex, CoreRegisterNameOrNumberType, CoreRegisterValueType) + from ..debug.context import DebugContext + from ..debug.breakpoints.provider import Breakpoint + LOG = logging.getLogger(__name__) -class SoCTarget(Target, GraphNode): +class SoCTarget(TargetGraphNode): """! @brief Represents a microcontroller system-on-chip. An instance of this class is the root of the chip-level object graph. 
It has child @@ -41,40 +51,53 @@ class SoCTarget(Target, GraphNode): VENDOR = "Generic" - def __init__(self, session, memory_map=None): - Target.__init__(self, session, memory_map) - GraphNode.__init__(self) - self.vendor = self.VENDOR - self.part_families = getattr(self, 'PART_FAMILIES', []) - self.part_number = getattr(self, 'PART_NUMBER', self.__class__.__name__) - self._cores = {} - self._selected_core = None + def __init__(self, session: "Session", memory_map: Optional["MemoryMap"] = None) -> None: + super().__init__(session, memory_map) + self.vendor: str = self.VENDOR + self.part_families: List[str] = getattr(self, 'PART_FAMILIES', []) + self.part_number: str = getattr(self, 'PART_NUMBER', self.__class__.__name__) + self._cores: Dict[int, CoreTarget] = {} + self._selected_core: int = -1 self._new_core_num = 0 self._elf = None @property - def cores(self): + def cores(self) -> Dict[int, CoreTarget]: return self._cores @property - def selected_core(self): - if self._selected_core is None: + def selected_core(self) -> Optional[CoreTarget]: + """@brief Get the selected CPU core object.""" + if self._selected_core == -1: return None return self.cores[self._selected_core] @selected_core.setter - def selected_core(self, core_number): + def selected_core(self, core_number: int) -> None: # type:ignore # core_number int type is not the same + # as selected_core property return type + """@brief Set the selected CPU core object.""" if core_number not in self.cores: - raise ValueError("invalid core number %d" % core_number) + raise ValueError("invalid core number %d" % core_number) # TODO should be a KeyError LOG.debug("selected core #%d" % core_number) self._selected_core = core_number @property - def elf(self): + def selected_core_or_raise(self) -> CoreTarget: + """@brief Get the selected CPU core object. + + Like selected_core but will raise an exception if no core is selected rather than returning None. + @exception KeyError The selected_core property is None. 
+ """ + if self._selected_core == -1: + raise KeyError("SoCTarget has no selected core") + return self.cores[self._selected_core] + + @property + def elf(self) -> Optional[ELFBinaryFile]: return self._elf @elf.setter - def elf(self, filename): + def elf(self, filename: str) -> None: # type:ignore # filename str type is not same as elf property return type if filename is None: self._elf = None else: @@ -86,27 +109,28 @@ def elf(self, filename): ElfReaderContext(self.cores[core_number].get_target_context(), self._elf)) @property - def supported_security_states(self): - return self.selected_core.supported_security_states + def supported_security_states(self) -> Sequence[Target.SecurityState]: + return self.selected_core_or_raise.supported_security_states @property - def core_registers(self): - return self.selected_core.core_registers + def core_registers(self) -> "CoreRegistersIndex": + return self.selected_core_or_raise.core_registers - def add_core(self, core): + def add_core(self, core: CoreTarget) -> None: core.delegate = self.delegate core.set_target_context(CachingDebugContext(core)) self.cores[core.core_number] = core self.add_child(core) - if self._selected_core is None: - self._selected_core = core.core_number + # Select first added core. + if self.selected_core is None: + self.selected_core = core.core_number - def create_init_sequence(self): + def create_init_sequence(self) -> CallSequence: # Return an empty call sequence. The subclass must override this. return CallSequence() - def init(self): + def init(self) -> None: # If we don't have a delegate installed yet but there is a session delegate, use it. if (self.delegate is None) and (self.session.delegate is not None): self.delegate = self.session.delegate @@ -117,14 +141,14 @@ def init(self): seq.invoke() self.call_delegate('did_init_target', target=self) - def post_connect_hook(self): + def post_connect_hook(self) -> None: """! @brief Hook function called after post_connect init task. 
This hook lets the target subclass configure the target as necessary. """ pass - def disconnect(self, resume=True): + def disconnect(self, resume: bool = True) -> None: self.session.notify(Target.Event.PRE_DISCONNECT, self) self.call_delegate('will_disconnect', target=self, resume=resume) for core in self.cores.values(): @@ -132,109 +156,128 @@ def disconnect(self, resume=True): self.call_delegate('did_disconnect', target=self, resume=resume) @property - def run_token(self): - return self.selected_core.run_token + def run_token(self) -> int: + return self.selected_core_or_raise.run_token - def halt(self): - return self.selected_core.halt() + def halt(self) -> None: + return self.selected_core_or_raise.halt() - def step(self, disable_interrupts=True, start=0, end=0, hook_cb=None): - return self.selected_core.step(disable_interrupts, start, end, hook_cb) + def step(self, disable_interrupts: bool = True, start: int = 0, end: int = 0, + hook_cb: Optional[Callable[[], bool]] = None) -> None: + return self.selected_core_or_raise.step(disable_interrupts, start, end, hook_cb) - def resume(self): - return self.selected_core.resume() + def resume(self) -> None: + return self.selected_core_or_raise.resume() - def mass_erase(self): + def mass_erase(self) -> None: if not self.call_delegate('mass_erase', target=self): # The default mass erase implementation is to simply perform a chip erase. FlashEraser(self.session, FlashEraser.Mode.CHIP).erase() - return True - def write_memory(self, addr, value, transfer_size=32): - return self.selected_core.write_memory(addr, value, transfer_size) + def write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None: + return self.selected_core_or_raise.write_memory(addr, data, transfer_size) + + @overload + def read_memory(self, addr: int, transfer_size: int = 32) -> int: + ... + + @overload + def read_memory(self, addr: int, transfer_size: int = 32, now: Literal[True] = True) -> int: + ... 
- def read_memory(self, addr, transfer_size=32, now=True): - return self.selected_core.read_memory(addr, transfer_size, now) + @overload + def read_memory(self, addr: int, transfer_size: int, now: Literal[False]) -> Callable[[], int]: + ... - def write_memory_block8(self, addr, value): - return self.selected_core.write_memory_block8(addr, value) + @overload + def read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Callable[[], int]]: + ... - def write_memory_block32(self, addr, data): - return self.selected_core.write_memory_block32(addr, data) + def read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]: + return self.selected_core_or_raise.read_memory(addr, transfer_size, now) - def read_memory_block8(self, addr, size): - return self.selected_core.read_memory_block8(addr, size) + def write_memory_block8(self, addr: int, data: Sequence[int]) -> None: + return self.selected_core_or_raise.write_memory_block8(addr, data) - def read_memory_block32(self, addr, size): - return self.selected_core.read_memory_block32(addr, size) + def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: + return self.selected_core_or_raise.write_memory_block32(addr, data) - def read_core_register(self, id): - return self.selected_core.read_core_register(id) + def read_memory_block8(self, addr: int, size: int) -> Sequence[int]: + return self.selected_core_or_raise.read_memory_block8(addr, size) - def write_core_register(self, id, data): - return self.selected_core.write_core_register(id, data) + def read_memory_block32(self, addr: int, size) -> Sequence[int]: + return self.selected_core_or_raise.read_memory_block32(addr, size) - def read_core_register_raw(self, reg): - return self.selected_core.read_core_register_raw(reg) + def read_core_register(self, id: "CoreRegisterNameOrNumberType") -> "CoreRegisterValueType": + return self.selected_core_or_raise.read_core_register(id) - def 
read_core_registers_raw(self, reg_list): - return self.selected_core.read_core_registers_raw(reg_list) + def write_core_register(self, id: "CoreRegisterNameOrNumberType", data: "CoreRegisterValueType") -> None: + return self.selected_core_or_raise.write_core_register(id, data) - def write_core_register_raw(self, reg, data): - self.selected_core.write_core_register_raw(reg, data) + def read_core_register_raw(self, reg: "CoreRegisterNameOrNumberType") -> int: + return self.selected_core_or_raise.read_core_register_raw(reg) - def write_core_registers_raw(self, reg_list, data_list): - self.selected_core.write_core_registers_raw(reg_list, data_list) + def read_core_registers_raw(self, reg_list: Sequence["CoreRegisterNameOrNumberType"]) -> List[int]: + return self.selected_core_or_raise.read_core_registers_raw(reg_list) - def find_breakpoint(self, addr): - return self.selected_core.find_breakpoint(addr) + def write_core_register_raw(self, reg: "CoreRegisterNameOrNumberType", data: int) -> None: + self.selected_core_or_raise.write_core_register_raw(reg, data) - def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): - return self.selected_core.set_breakpoint(addr, type) + def write_core_registers_raw(self, reg_list: Sequence["CoreRegisterNameOrNumberType"], data_list: Sequence[int]) -> None: + self.selected_core_or_raise.write_core_registers_raw(reg_list, data_list) - def get_breakpoint_type(self, addr): - return self.selected_core.get_breakpoint_type(addr) + def find_breakpoint(self, addr: int) -> Optional["Breakpoint"]: + return self.selected_core_or_raise.find_breakpoint(addr) - def remove_breakpoint(self, addr): - return self.selected_core.remove_breakpoint(addr) + def set_breakpoint(self, addr: int, type: Target.BreakpointType = Target.BreakpointType.AUTO) -> bool: + return self.selected_core_or_raise.set_breakpoint(addr, type) - def set_watchpoint(self, addr, size, type): - return self.selected_core.set_watchpoint(addr, size, type) + def 
get_breakpoint_type(self, addr: int) -> Optional[Target.BreakpointType]: + return self.selected_core_or_raise.get_breakpoint_type(addr) - def remove_watchpoint(self, addr, size, type): - return self.selected_core.remove_watchpoint(addr, size, type) + def remove_breakpoint(self, addr: int) -> None: + return self.selected_core_or_raise.remove_breakpoint(addr) - def reset(self, reset_type=None): + def set_watchpoint(self, addr: int, size: int, type: Target.WatchpointType) -> bool: + return self.selected_core_or_raise.set_watchpoint(addr, size, type) + + def remove_watchpoint(self, addr: int, size: int, type: Target.WatchpointType) -> None: + return self.selected_core_or_raise.remove_watchpoint(addr, size, type) + + def reset(self, reset_type: Optional[Target.ResetType] = None) -> None: # Use the probe to reset to perform a hardware reset if there is not a core. if self.selected_core is None: # Use the probe to reset. (We can't use the DP here because that's a class layering violation; # the DP is only created by the CoreSightTarget subclass.) 
+ assert self.session.probe self.session.probe.reset() return - self.selected_core.reset(reset_type) + self.selected_core_or_raise.reset(reset_type) - def reset_and_halt(self, reset_type=None): - return self.selected_core.reset_and_halt(reset_type) + def reset_and_halt(self, reset_type: Optional[Target.ResetType] = None) -> None: + return self.selected_core_or_raise.reset_and_halt(reset_type) - def get_state(self): - return self.selected_core.get_state() + def get_state(self) -> Target.State: + return self.selected_core_or_raise.get_state() - def get_security_state(self): - return self.selected_core.get_security_state() + def get_security_state(self) -> Target.SecurityState: + return self.selected_core_or_raise.get_security_state() - def get_halt_reason(self): - return self.selected_core.get_halt_reason() + def get_halt_reason(self) -> Target.HaltReason: + return self.selected_core_or_raise.get_halt_reason() - def set_vector_catch(self, enableMask): - return self.selected_core.set_vector_catch(enableMask) + def set_vector_catch(self, enable_mask: int) -> None: + return self.selected_core_or_raise.set_vector_catch(enable_mask) - def get_vector_catch(self): - return self.selected_core.get_vector_catch() + def get_vector_catch(self) -> int: + return self.selected_core_or_raise.get_vector_catch() - def get_target_context(self, core=None): - if core is None: - core = self._selected_core - return self.cores[core].get_target_context() + def get_target_context(self, core: Optional[int] = None) -> "DebugContext": + if core is not None: + core_obj = self.cores[core] + else: + core_obj = self.selected_core_or_raise + return core_obj.get_target_context() def trace_start(self): self.call_delegate('trace_start', target=self, mode=0) diff --git a/pyocd/core/target.py b/pyocd/core/target.py index 98710449c..47c076b22 100644 --- a/pyocd/core/target.py +++ b/pyocd/core/target.py @@ -16,9 +16,20 @@ # limitations under the License. 
from enum import Enum +from typing import (Any, Callable, List, Optional, Sequence, TYPE_CHECKING) from .memory_interface import MemoryInterface from .memory_map import MemoryMap +from ..utility.graph import GraphNode + +if TYPE_CHECKING: + from .session import Session + from .core_registers import (CoreRegistersIndex, CoreRegisterNameOrNumberType, CoreRegisterValueType) + from ..debug.breakpoints.provider import Breakpoint + from ..debug.context import DebugContext + from ..debug.svd.loader import SVDFile + from ..debug.svd.model import SVDDevice + from ..utility.sequencer import CallSequence class Target(MemoryInterface): @@ -174,31 +185,31 @@ class HaltReason(Enum): ## PMU event. v8.1-M only. PMU = 7 - def __init__(self, session, memory_map=None): + def __init__(self, session: "Session", memory_map: Optional[MemoryMap] = None) -> None: self._session = session - self._delegate = None + self._delegate: Any = None # Make a target-specific copy of the memory map. This is safe to do without locking # because the memory map may not be mutated until target initialization. 
self.memory_map = memory_map.clone() if memory_map else MemoryMap() - self._svd_location = None - self._svd_device = None + self._svd_location: Optional[SVDFile] = None + self._svd_device: Optional[SVDDevice] = None @property - def session(self): + def session(self) -> "Session": return self._session @property - def delegate(self): + def delegate(self) -> Any: return self._delegate @delegate.setter - def delegate(self, the_delegate): + def delegate(self, the_delegate: Any) -> None: self._delegate = the_delegate - def delegate_implements(self, method_name): + def delegate_implements(self, method_name: str) -> bool: return (self._delegate is not None) and (hasattr(self._delegate, method_name)) - def call_delegate(self, method_name, *args, **kwargs): + def call_delegate(self, method_name: str, *args, **kwargs) -> None: if self.delegate_implements(method_name): return getattr(self._delegate, method_name)(*args, **kwargs) else: @@ -206,113 +217,122 @@ def call_delegate(self, method_name, *args, **kwargs): return None @property - def svd_device(self): + def svd_device(self) -> Optional["SVDDevice"]: return self._svd_device @property - def supported_security_states(self): + def supported_security_states(self) -> Sequence[SecurityState]: raise NotImplementedError() @property - def core_registers(self): + def core_registers(self) -> "CoreRegistersIndex": raise NotImplementedError() - def is_locked(self): + def is_locked(self) -> bool: return False - def create_init_sequence(self): + def create_init_sequence(self) -> "CallSequence": raise NotImplementedError() - def init(self): + def init(self) -> None: raise NotImplementedError() - def disconnect(self, resume=True): + def disconnect(self, resume: bool = True) -> None: pass - def flush(self): - self.session.probe.flush() + def flush(self) -> None: + if self.session.probe: + self.session.probe.flush() - def halt(self): + def halt(self) -> None: raise NotImplementedError() - def step(self, disable_interrupts=True, start=0, 
end=0, hook_cb=None): + def step(self, disable_interrupts: bool = True, start: int = 0, end: int = 0, + hook_cb: Optional[Callable[[], bool]] = None) -> None: raise NotImplementedError() - def resume(self): + def resume(self) -> None: raise NotImplementedError() - def mass_erase(self): + def mass_erase(self) -> None: raise NotImplementedError() - def read_core_register(self, id): + def read_core_register(self, id: "CoreRegisterNameOrNumberType") -> "CoreRegisterValueType": raise NotImplementedError() - def write_core_register(self, id, data): + def write_core_register(self, id: "CoreRegisterNameOrNumberType", data: "CoreRegisterValueType") -> None: raise NotImplementedError() - def read_core_register_raw(self, reg): + def read_core_register_raw(self, reg: "CoreRegisterNameOrNumberType") -> int: raise NotImplementedError() - def read_core_registers_raw(self, reg_list): + def read_core_registers_raw(self, reg_list: Sequence["CoreRegisterNameOrNumberType"]) -> List[int]: raise NotImplementedError() - def write_core_register_raw(self, reg, data): + def write_core_register_raw(self, reg: "CoreRegisterNameOrNumberType", data: int) -> None: raise NotImplementedError() - def write_core_registers_raw(self, reg_list, data_list): + def write_core_registers_raw(self, reg_list: Sequence["CoreRegisterNameOrNumberType"], data_list: Sequence[int]) -> None: raise NotImplementedError() - def find_breakpoint(self, addr): + def find_breakpoint(self, addr: int) -> Optional["Breakpoint"]: raise NotImplementedError() - def set_breakpoint(self, addr, type=BreakpointType.AUTO): + def set_breakpoint(self, addr: int, type: BreakpointType = BreakpointType.AUTO) -> bool: raise NotImplementedError() - def get_breakpoint_type(self, addr): + def get_breakpoint_type(self, addr: int) -> Optional[BreakpointType]: raise NotImplementedError() - def remove_breakpoint(self, addr): + def remove_breakpoint(self, addr: int) -> None: raise NotImplementedError() - def set_watchpoint(self, addr, size, type): 
+ def set_watchpoint(self, addr: int, size: int, type: WatchpointType) -> bool: raise NotImplementedError() - def remove_watchpoint(self, addr, size, type): + def remove_watchpoint(self, addr: int, size: int, type: WatchpointType) -> None: raise NotImplementedError() - def reset(self, reset_type=None): + def reset(self, reset_type: Optional[ResetType] = None) -> None: raise NotImplementedError() - def reset_and_halt(self, reset_type=None): + def reset_and_halt(self, reset_type: Optional[ResetType] = None) -> None: raise NotImplementedError() - def get_state(self): + def get_state(self) -> State: raise NotImplementedError() - def get_security_state(self): + def get_security_state(self) -> SecurityState: raise NotImplementedError() - def get_halt_reason(self): + def get_halt_reason(self) -> HaltReason: raise NotImplementedError() @property - def run_token(self): + def run_token(self) -> int: return 0 - def is_running(self): + def is_running(self) -> bool: return self.get_state() == Target.State.RUNNING - def is_halted(self): + def is_halted(self) -> bool: return self.get_state() == Target.State.HALTED - def get_memory_map(self): + def get_memory_map(self) -> MemoryMap: return self.memory_map - def set_vector_catch(self, enableMask): + def set_vector_catch(self, enable_mask: int) -> None: raise NotImplementedError() - def get_vector_catch(self): + def get_vector_catch(self) -> int: raise NotImplementedError() - def get_target_context(self, core=None): + def get_target_context(self, core: Optional[int] = None) -> "DebugContext": raise NotImplementedError() + +class TargetGraphNode(Target, GraphNode): + """@brief Abstract class for a target that is a graph node.""" + + def __init__(self, session: "Session", memory_map: Optional[MemoryMap] = None) -> None: + Target.__init__(self, session, memory_map) + GraphNode.__init__(self) diff --git a/pyocd/core/target_delegate.py b/pyocd/core/target_delegate.py index 64063319d..333a8ff80 100644 --- a/pyocd/core/target_delegate.py 
+++ b/pyocd/core/target_delegate.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2019 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,17 +15,34 @@ # See the License for the specific language governing permissions and # limitations under the License. -class TargetDelegateInterface(object): +from typing import (Optional, TYPE_CHECKING) + +if TYPE_CHECKING: + from .session import Session + from .soc_target import SoCTarget + from .target import Target + from ..board.board import Board + from ..utility.sequencer import CallSequence + +## @brief Return type for some delegate methods. +# +# For certain delegate methods, the delegate can reply with a boolean or None, where True means that +# it handled the actions and no further action is to be performed, and False or None means to continue +# processing. +DelegateResult = Optional[bool] + +class TargetDelegateInterface: """! @brief Abstract class defining the delegate interface for targets. Note that delegates don't actually have to derive from this class due to Python's - dynamic method dispatching. + dynamic method dispatching. The primary purpose of this class is for documentation + and type checking. """ - def __init__(self, session): + def __init__(self, session: "Session") -> None: self._session = session - def will_connect(self, board): + def will_connect(self, board: "Board") -> None: """! @brief Pre-init hook for the board. @param self @param board A Board instance that is about to be initialized. @@ -32,7 +50,7 @@ def will_connect(self, board): """ pass - def did_connect(self, board): + def did_connect(self, board: "Board") -> None: """! @brief Post-initialization hook for the board. @param self @param board A Board instance. 
@@ -40,25 +58,25 @@ def did_connect(self, board): """ pass - def will_init_target(self, target, init_sequence): + def will_init_target(self, target: "SoCTarget", init_sequence: "CallSequence") -> None: """! @brief Hook to review and modify init call sequence prior to execution. @param self - @param target A CoreSightTarget object about to be initialized. + @param target An SoCTarget object about to be initialized. @param init_sequence The CallSequence that will be invoked. Because call sequences are mutable, this parameter can be modified before return to change the init calls. @return Ignored. """ pass - def did_init_target(self, target): + def did_init_target(self, target: "SoCTarget") -> None: """! @brief Post-initialization hook. @param self - @param target A CoreSightTarget. + @param target An SoCTarget. @return Ignored. """ pass - def will_start_debug_core(self, core): + def will_start_debug_core(self, core: "Target") -> DelegateResult: """! @brief Hook to enable debug for the given core. @param self @param core A CortexM object about to be initialized. @@ -67,7 +85,7 @@ def will_start_debug_core(self, core): """ pass - def did_start_debug_core(self, core): + def did_start_debug_core(self, core: "Target") -> None: """! @brief Post-initialization hook. @param self @param core A CortexM object. @@ -75,7 +93,7 @@ def did_start_debug_core(self, core): """ pass - def will_stop_debug_core(self, core): + def will_stop_debug_core(self, core: "Target") -> DelegateResult: """! @brief Pre-cleanup hook for the core. @param self @param core A CortexM object. @@ -84,7 +102,7 @@ def will_stop_debug_core(self, core): """ pass - def did_stop_debug_core(self, core): + def did_stop_debug_core(self, core: "Target") -> None: """! @brief Post-cleanup hook for the core. @param self @param core A CortexM object. 
@@ -92,7 +110,7 @@ def did_stop_debug_core(self, core): """ pass - def will_disconnect(self, target, resume): + def will_disconnect(self, target: "SoCTarget", resume: bool) -> None: """! @brief Pre-disconnect hook. @param self @param target Either a CoreSightTarget or CortexM object. @@ -101,7 +119,7 @@ def will_disconnect(self, target, resume): """ pass - def did_disconnect(self, target, resume): + def did_disconnect(self, target: "SoCTarget", resume: bool) -> None: """! @brief Post-disconnect hook. @param self @param target Either a CoreSightTarget or CortexM object. @@ -109,7 +127,7 @@ def did_disconnect(self, target, resume): @return Ignored.""" pass - def will_reset(self, core, reset_type): + def will_reset(self, core: "Target", reset_type: "Target.ResetType") -> DelegateResult: """! @brief Pre-reset hook. @param self @param core A CortexM instance. @@ -119,7 +137,7 @@ def will_reset(self, core, reset_type): """ pass - def did_reset(self, core, reset_type): + def did_reset(self, core: "Target", reset_type: "Target.ResetType") -> None: """! @brief Post-reset hook. @param self @param core A CortexM instance. @@ -128,7 +146,7 @@ def did_reset(self, core, reset_type): """ pass - def set_reset_catch(self, core, reset_type): + def set_reset_catch(self, core: "Target", reset_type: "Target.ResetType") -> DelegateResult: """! @brief Hook to prepare target for halting on reset. @param self @param core A CortexM instance. @@ -138,7 +156,7 @@ def set_reset_catch(self, core, reset_type): """ pass - def clear_reset_catch(self, core, reset_type): + def clear_reset_catch(self, core: "Target", reset_type: "Target.ResetType") -> None: """! @brief Hook to clean up target after a reset and halt. @param self @param core A CortexM instance. @@ -147,7 +165,7 @@ def clear_reset_catch(self, core, reset_type): """ pass - def mass_erase(self, target): + def mass_erase(self, target: "SoCTarget") -> DelegateResult: """! @brief Hook to override mass erase. 
@param self @param target A CoreSightTarget object. @@ -157,7 +175,7 @@ def mass_erase(self, target): """ pass - def trace_start(self, target, mode): + def trace_start(self, target: "SoCTarget", mode: int) -> None: """! @brief Hook to prepare for tracing the target. @param self @param target A CoreSightTarget object. @@ -166,7 +184,7 @@ def trace_start(self, target, mode): """ pass - def trace_stop(self, target, mode): + def trace_stop(self, target: "SoCTarget", mode: int) -> None: """! @brief Hook to clean up after tracing the target. @param self @param target A CoreSightTarget object. diff --git a/pyocd/coresight/coresight_target.py b/pyocd/coresight/coresight_target.py index 0bd6aca4e..e9ae95e1d 100644 --- a/pyocd/coresight/coresight_target.py +++ b/pyocd/coresight/coresight_target.py @@ -242,6 +242,17 @@ def check_for_cores(self): LOG.error("No cores were discovered!") else: raise exceptions.DebugError("No cores were discovered!") + + def disconnect(self, resume: bool = True) -> None: + # Override this from SoCTarget so we can call power_down_debug() on the DP, which is + # created in this class and thus not (safely) accessible to SoCTarget. 
+ self.session.notify(Target.Event.PRE_DISCONNECT, self) + self.call_delegate('will_disconnect', target=self, resume=resume) + for core in self.cores.values(): + core.disconnect(resume) + self.dp.power_down_debug() + self.call_delegate('did_disconnect', target=self, resume=resume) + @property def irq_table(self): if (self._irq_table is None): diff --git a/pyocd/coresight/cortex_m.py b/pyocd/coresight/cortex_m.py index 96aef8357..3d64068f4 100644 --- a/pyocd/coresight/cortex_m.py +++ b/pyocd/coresight/cortex_m.py @@ -19,6 +19,7 @@ from time import sleep from ..core.target import Target +from ..core.core_target import CoreTarget from ..core import exceptions from ..core.core_registers import CoreRegistersIndex from ..utility import (cmdline, timeout) @@ -35,7 +36,7 @@ LOG = logging.getLogger(__name__) -class CortexM(Target, CoreSightCoreComponent): +class CortexM(CoreTarget, CoreSightCoreComponent): """! @brief CoreSight component for a v6-M or v7-M Cortex-M core. This class has basic functions to access a Cortex-M core: diff --git a/pyocd/coresight/cortex_m_core_registers.py b/pyocd/coresight/cortex_m_core_registers.py index 1a20816fe..61663a728 100644 --- a/pyocd/coresight/cortex_m_core_registers.py +++ b/pyocd/coresight/cortex_m_core_registers.py @@ -16,9 +16,13 @@ # limitations under the License. import logging +from typing import TYPE_CHECKING, cast from ..core.core_registers import CoreRegisterInfo +if TYPE_CHECKING: + from ..core.core_registers import CoreRegisterNameOrNumberType + LOG = logging.getLogger(__name__) # Program Status Register @@ -32,7 +36,8 @@ class CortexMCoreRegisterInfo(CoreRegisterInfo): For most registers, the index is the value written to the DCRSR register to read or write the core register. Other core registers not directly supported by DCRSR have special index values that are interpreted by the helper methods on this class and the core register read/write code in CortexM - and its subclasses. + and its subclasses. 
These artificial register index values and how they are interpreted are documented + in the register definitions in CoreRegisterGroups. """ ## Map of register name to info. @@ -42,7 +47,7 @@ class CortexMCoreRegisterInfo(CoreRegisterInfo): _INDEX_MAP = {} @classmethod - def register_name_to_index(cls, reg): + def register_name_to_index(cls, reg: "CoreRegisterNameOrNumberType") -> int: """! @brief Convert a register name to integer register index. @param reg Either a register name or internal register number. @return Internal register number. @@ -55,23 +60,32 @@ def register_name_to_index(cls, reg): raise KeyError('unknown core register name %s' % reg) from err return reg + @classmethod + def get(cls, reg: "CoreRegisterNameOrNumberType") -> "CortexMCoreRegisterInfo": + """! @brief Return the CoreRegisterInfo instance for a register. + @param reg Either a register name or internal register number. + @return CoreRegisterInfo + @exception KeyError + """ + return cast(CortexMCoreRegisterInfo, super().get(reg)) + @property - def is_fpu_register(self): + def is_fpu_register(self) -> bool: """! @brief Returns true for FPSCR, SP, or DP registers.""" return self.index == 33 or self.is_float_register @property - def is_cfbp_subregister(self): + def is_cfbp_subregister(self) -> bool: """! @brief Whether the register is one of those combined into CFBP by the DCSR.""" return -4 <= self.index <= -1 @property - def is_psr_subregister(self): + def is_psr_subregister(self) -> bool: """! @brief Whether the register is a combination of xPSR fields.""" return 0x100 <= self.index <= 0x107 @property - def psr_mask(self): + def psr_mask(self) -> int: """! @brief Generate a PSR mask based on bottom 3 bits of a MRS SYSm value""" mask = 0 if (self.index & 1) != 0: @@ -252,6 +266,6 @@ class CoreRegisterGroups: + CoreRegisterGroups.V81M_MVE_ONLY + CoreRegisterGroups.VFP_V5) -def index_for_reg(name): +def index_for_reg(name: str) -> int: """! 
@brief Utility to easily convert register name to index.""" return CortexMCoreRegisterInfo.get(name).index diff --git a/setup.cfg b/setup.cfg index 4efc2c38d..2d4dff981 100644 --- a/setup.cfg +++ b/setup.cfg @@ -63,6 +63,7 @@ install_requires = pyusb>=1.2.1,<2.0 pyyaml>=6.0,<7.0 six>=1.15.0,<2.0 + typing-extensions>=4.0,<5.0 [options.extras_require] test = From eb9ea158aeb30083fa6304de9b50b51b01bb0036 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 13 Jun 2021 15:34:24 -0500 Subject: [PATCH 069/123] types: annotate DebugProbe, CMSISDAPProbe, Board. - Also: STLink type error fix. --- pyocd/board/board.py | 63 ++++++++++---- pyocd/board/board_ids.py | 12 +-- pyocd/probe/cmsis_dap_probe.py | 146 +++++++++++++++++++++++---------- pyocd/probe/debug_probe.py | 139 ++++++++++++++++++++++--------- pyocd/probe/stlink/stlink.py | 2 +- 5 files changed, 256 insertions(+), 106 deletions(-) diff --git a/pyocd/board/board.py b/pyocd/board/board.py index 3be879f3d..12d601105 100644 --- a/pyocd/board/board.py +++ b/pyocd/board/board.py @@ -16,20 +16,45 @@ # limitations under the License. import logging +from typing import (Any, Optional, TYPE_CHECKING) from ..core import exceptions from ..target import TARGET from ..target.pack import pack_target from ..utility.graph import GraphNode +if TYPE_CHECKING: + from ..core.session import Session + LOG = logging.getLogger(__name__) class Board(GraphNode): - """! - @brief This class associates a target and flash to create a board. + """!@brief Represents the board containing the target and associated components. + + The board is the root of the runtime object graph. """ - def __init__(self, session, target=None): - super(Board, self).__init__() + def __init__(self, + session: "Session", + target: Optional[str] = None, + ) -> None: + """@brief Constructor + + This method is responsible for selecting the SoCTarget subclass for the SoC, implementing the target + type support. 
There are several possible sources for the target type name, with differing levels of + priority. + + 1. `target` parameter + 2. `target_override` session option + 3. `soft_target` parameter + 4. Last resort `cortex_m` target type. If this type is used, a warning is printed (unless the + `warning.cortex_m_default` option is disabled). + + @param self + @param session The session instance that owns us. + @param target Target type name to use. If this parameter is set, it overrides all other sources of the + target type. + """ + super().__init__() # Use the session option if no target type was given to us. if target is None: @@ -85,7 +110,7 @@ def __init__(self, session, target=None): self.add_child(self.target) - def init(self): + def init(self) -> None: """! @brief Initialize the board.""" # If we don't have a delegate set yet, see if there is a session delegate. if (self.delegate is None) and (self.session.delegate is not None): @@ -103,7 +128,7 @@ def init(self): if (self.delegate is not None) and hasattr(self.delegate, 'did_connect'): self.delegate.did_connect(board=self) - def uninit(self): + def uninit(self) -> None: """! @brief Uninitialize the board.""" if self._inited: LOG.debug("uninit board %s", self) @@ -115,34 +140,44 @@ def uninit(self): LOG.error("link exception during target disconnect: %s", err, exc_info=self._session.log_tracebacks) @property - def session(self): + def session(self) -> "Session": + """@brief The session that owns this board instance.""" return self._session @property - def delegate(self): + def delegate(self) -> Any: + """@brief Delegate object that will be inherited by the SoCTarget.""" return self._delegate @delegate.setter - def delegate(self, the_delegate): + def delegate(self, the_delegate: Any) -> None: + """@brief Set the delegate object that will be inherited by the SoCTarget.""" self._delegate = the_delegate @property - def unique_id(self): + def unique_id(self) -> str: + """@brief The probe's unique ID. + + Deprecated. 
Use the probe's `unique_id` property instead. + """ + assert self.session.probe return self.session.probe.unique_id @property - def target_type(self): + def target_type(self) -> str: + """@brief Target type name.""" return self._target_type @property - def test_binary(self): + def test_binary(self) -> Optional[str]: return self._test_binary @property - def name(self): + def name(self) -> str: return "generic" @property - def description(self): + def description(self) -> str: + assert self.session.probe return "Generic board via " + self.session.probe.vendor_name + " " \ + self.session.probe.product_name + " [" + self.target_type + "]" diff --git a/pyocd/board/board_ids.py b/pyocd/board/board_ids.py index d7ca23be6..e44c97c6b 100644 --- a/pyocd/board/board_ids.py +++ b/pyocd/board/board_ids.py @@ -15,11 +15,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -class BoardInfo(object): - def __init__(self, name, target, binary): - self.name = name - self.target = target - self.binary = binary +from typing import (NamedTuple, Optional) + +class BoardInfo(NamedTuple): + name: str + target: str + binary: Optional[str] = None + vendor: Optional[str] = None BOARD_ID_TO_INFO = { # Note: please keep board list sorted by ID! 
diff --git a/pyocd/probe/cmsis_dap_probe.py b/pyocd/probe/cmsis_dap_probe.py index 14d77191c..6939c3ae2 100644 --- a/pyocd/probe/cmsis_dap_probe.py +++ b/pyocd/probe/cmsis_dap_probe.py @@ -17,7 +17,10 @@ from time import sleep import logging -from typing import (Dict, Tuple) +from typing import (Callable, Collection, Dict, List, Optional, overload, Sequence, Set, TYPE_CHECKING, Tuple, Union) +from typing_extensions import Literal + +from pyocd.probe.pydapaccess.dap_access_api import DAPAccessIntf from .debug_probe import DebugProbe from ..core import exceptions @@ -26,6 +29,9 @@ from ..board.mbed_board import MbedBoard from ..board.board_ids import BOARD_ID_TO_INFO +if TYPE_CHECKING: + from ..board.board import Board + LOG = logging.getLogger(__name__) TRACE = LOG.getChild("trace") TRACE.setLevel(logging.CRITICAL) @@ -44,10 +50,12 @@ class CMSISDAPProbe(DebugProbe): # # Note that Protocol.DEFAULT gets mapped to PORT.SWD. We need a concrete port type because some # non-reference CMSIS-DAP implementations do not accept the default port type. 
- PORT_MAP = { + _PROTOCOL_TO_PORT: Dict[DebugProbe.Protocol, DAPAccess.PORT] = { DebugProbe.Protocol.DEFAULT: DAPAccess.PORT.SWD, DebugProbe.Protocol.SWD: DAPAccess.PORT.SWD, DebugProbe.Protocol.JTAG: DAPAccess.PORT.JTAG, + } + _PORT_TO_PROTOCOL: Dict[DAPAccess.PORT, DebugProbe.Protocol] = { DAPAccess.PORT.DEFAULT: DebugProbe.Protocol.DEFAULT, DAPAccess.PORT.SWD: DebugProbe.Protocol.SWD, DAPAccess.PORT.JTAG: DebugProbe.Protocol.JTAG, @@ -77,14 +85,14 @@ class CMSISDAPProbe(DebugProbe): DAPLINK_VIDPID = (0x0d28, 0x0204) @classmethod - def get_all_connected_probes(cls, unique_id=None, is_explicit=False): + def get_all_connected_probes(cls, unique_id: str = None, is_explicit: bool = False) -> Sequence["DebugProbe"]: try: return [cls(dev) for dev in DAPAccess.get_connected_devices()] except DAPAccess.Error as exc: raise cls._convert_exception(exc) from exc @classmethod - def get_probe_with_id(cls, unique_id, is_explicit=False): + def get_probe_with_id(cls, unique_id: str, is_explicit: bool = False) -> Optional["DebugProbe"]: try: dap_access = DAPAccess.get_device(unique_id) if dap_access is not None: @@ -94,16 +102,16 @@ def get_probe_with_id(cls, unique_id, is_explicit=False): except DAPAccess.Error as exc: raise cls._convert_exception(exc) from exc - def __init__(self, device): + def __init__(self, device: DAPAccessIntf) -> None: super(CMSISDAPProbe, self).__init__() self._link = device - self._supported_protocols = None - self._protocol = None + self._supported_protocols: List[DebugProbe.Protocol] = [] + self._protocol: Optional[DebugProbe.Protocol] = None self._is_open = False - self._caps = set() + self._caps: Set[DebugProbe.Capability] = set() @property - def board_id(self): + def board_id(self) -> Optional[str]: """! @brief Unique identifier for the board. Only board IDs for DAPLink firmware are supported. 
We can't assume other @@ -118,7 +126,7 @@ def board_id(self): return None @property - def description(self): + def description(self) -> str: try: # self.board_id may be None. board_info = BOARD_ID_TO_INFO[self.board_id] @@ -128,35 +136,35 @@ def description(self): return "{0} [{1}]".format(board_info.name, board_info.target) @property - def vendor_name(self): + def vendor_name(self) -> str: return self._link.vendor_name @property - def product_name(self): + def product_name(self) -> str: return self._link.product_name @property - def supported_wire_protocols(self): + def supported_wire_protocols(self) -> Collection[DebugProbe.Protocol]: """! @brief Only valid after opening.""" return self._supported_protocols @property - def unique_id(self): + def unique_id(self) -> str: return self._link.get_unique_id() @property - def wire_protocol(self): + def wire_protocol(self) -> Optional[DebugProbe.Protocol]: return self._protocol @property - def is_open(self): + def is_open(self) -> bool: return self._is_open @property - def capabilities(self): + def capabilities(self) -> Set[DebugProbe.Capability]: return self._caps - def create_associated_board(self): + def create_associated_board(self) -> Optional["Board"]: assert self.session is not None # Only support associated Mbed boards for DAPLink firmware. 
We can't assume other @@ -167,7 +175,8 @@ def create_associated_board(self): else: return None - def open(self): + def open(self) -> None: + assert self.session try: TRACE.debug("trace: open") @@ -196,7 +205,7 @@ def open(self): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def close(self): + def close(self) -> None: try: TRACE.debug("trace: close") @@ -208,7 +217,7 @@ def close(self): # ------------------------------------------- # # Target control functions # ------------------------------------------- # - def connect(self, protocol=None): + def connect(self, protocol: Optional[DebugProbe.Protocol] = None) -> None: TRACE.debug("trace: connect(%s)", protocol.name if (protocol is not None) else "None") # Convert protocol to port enum. @@ -216,7 +225,8 @@ def connect(self, protocol=None): # We must get a non-default port, since some CMSIS-DAP implementations do not accept the default # port. Note that the conversion of the default port type is contained in the PORT_MAP dict so it # is one location. - port = self.PORT_MAP.get(protocol, self.PORT_MAP[DebugProbe.Protocol.DEFAULT]) + port = (self._PROTOCOL_TO_PORT.get(protocol) + if protocol else self._PROTOCOL_TO_PORT[DebugProbe.Protocol.DEFAULT]) assert port is not DAPAccess.PORT.DEFAULT try: @@ -226,9 +236,9 @@ def connect(self, protocol=None): # Read the current mode and save it. 
actualMode = self._link.get_swj_mode() - self._protocol = self.PORT_MAP[actualMode] + self._protocol = self._PORT_TO_PROTOCOL[actualMode] - def swj_sequence(self, length, bits): + def swj_sequence(self, length: int, bits: int) -> None: TRACE.debug("trace: swj_sequence(length=%i, bits=%x)", length, bits) try: @@ -236,15 +246,15 @@ def swj_sequence(self, length, bits): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def swd_sequence(self, sequences): + def swd_sequence(self, sequences: Sequence[Union[Tuple[int], Tuple[int, int]]]) -> Tuple[int, Sequence[bytes]]: TRACE.debug("trace: swd_sequence(sequences=%r)", sequences) try: - self._link.swd_sequence(sequences) + return self._link.swd_sequence(sequences) except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def jtag_sequence(self, cycles, tms, read_tdo, tdi): + def jtag_sequence(self, cycles: int, tms: int, read_tdo: bool, tdi: int) -> Optional[int]: TRACE.debug("trace: jtag_sequence(cycles=%i, tms=%x, read_tdo=%s, tdi=%x)", cycles, tms, read_tdo, tdi) try: @@ -252,7 +262,7 @@ def jtag_sequence(self, cycles, tms, read_tdo, tdi): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def disconnect(self): + def disconnect(self) -> None: TRACE.debug("trace: disconnect") try: @@ -261,7 +271,7 @@ def disconnect(self): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def set_clock(self, frequency): + def set_clock(self, frequency: float) -> None: TRACE.debug("trace: set_clock(freq=%i)", frequency) try: @@ -269,7 +279,8 @@ def set_clock(self, frequency): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def reset(self): + def reset(self) -> None: + assert self.session TRACE.debug("trace: reset") try: @@ -280,7 +291,7 @@ def reset(self): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def assert_reset(self, asserted): + def assert_reset(self, asserted: bool) -> None: 
TRACE.debug("trace: assert_reset(%s)", asserted) try: @@ -288,7 +299,7 @@ def assert_reset(self, asserted): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def is_reset_asserted(self): + def is_reset_asserted(self) -> bool: try: result = self._link.is_reset_asserted() TRACE.debug("trace: is_reset_asserted -> %s", result) @@ -296,7 +307,7 @@ def is_reset_asserted(self): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def flush(self): + def flush(self) -> None: TRACE.debug("trace: flush") try: @@ -309,7 +320,23 @@ def flush(self): # DAP Access functions # ------------------------------------------- # - def read_dp(self, addr, now=True): + @overload + def read_dp(self, addr: int) -> int: + ... + + @overload + def read_dp(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read_dp(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_dp(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: + ... + + def read_dp(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: reg_id = self.REG_ADDR_TO_ID_MAP[self.DP, addr] try: @@ -336,7 +363,7 @@ def read_dp_result_callback(): else: return read_dp_result_callback - def write_dp(self, addr, data): + def write_dp(self, addr: int, data: int) -> None: reg_id = self.REG_ADDR_TO_ID_MAP[self.DP, addr] # Write the DP register. @@ -347,9 +374,23 @@ def write_dp(self, addr, data): TRACE.debug("trace: write_dp(addr=%#010x, data=%#010x) -> error(%s)", addr, data, error) raise self._convert_exception(error) from error - return True + @overload + def read_ap(self, addr: int) -> int: + ... + + @overload + def read_ap(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read_ap(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_ap(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: + ... 
- def read_ap(self, addr, now=True): + def read_ap(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: assert isinstance(addr, int) ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)] @@ -376,7 +417,7 @@ def read_ap_result_callback(): else: return read_ap_result_callback - def write_ap(self, addr, data): + def write_ap(self, addr: int, data) -> None: assert isinstance(addr, int) ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)] @@ -388,9 +429,24 @@ def write_ap(self, addr, data): TRACE.debug("trace: write_ap(addr=%#010x, data=%#010x) -> error(%s)", addr, data, error) raise self._convert_exception(error) from error - return True + @overload + def read_ap_multiple(self, addr: int, count: int = 1) -> Sequence[int]: + ... + + @overload + def read_ap_multiple(self, addr: int, count: int, now: Literal[True] = True) -> Sequence[int]: + ... + + @overload + def read_ap_multiple(self, addr: int, count: int, now: Literal[False]) -> Callable[[], Sequence[int]]: + ... + + @overload + def read_ap_multiple(self, addr: int, count: int, now: bool) -> Union[Sequence[int], Callable[[], Sequence[int]]]: + ... 
- def read_ap_multiple(self, addr, count=1, now=True): + def read_ap_multiple(self, addr: int, count: int = 1, now: bool = True) \ + -> Union[Sequence[int], Callable[[], Sequence[int]]]: assert isinstance(addr, int) ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)] @@ -420,7 +476,7 @@ def read_ap_repeat_callback(): else: return read_ap_repeat_callback - def write_ap_multiple(self, addr, values): + def write_ap_multiple(self, addr: int, values) -> None: assert isinstance(addr, int) ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)] @@ -437,7 +493,7 @@ def write_ap_multiple(self, addr, values): # SWO functions # ------------------------------------------- # - def swo_start(self, baudrate): + def swo_start(self, baudrate: float) -> None: TRACE.debug("trace: swo_start(baud=%i)", baudrate) try: @@ -446,7 +502,7 @@ def swo_start(self, baudrate): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def swo_stop(self): + def swo_stop(self) -> None: TRACE.debug("trace: swo_stop") try: @@ -454,7 +510,7 @@ def swo_stop(self): except DAPAccess.Error as exc: raise self._convert_exception(exc) from exc - def swo_read(self): + def swo_read(self) -> bytearray: try: data = self._link.swo_read() TRACE.debug("trace: swo_read -> %i bytes", len(data)) @@ -463,7 +519,7 @@ def swo_read(self): raise self._convert_exception(exc) from exc @staticmethod - def _convert_exception(exc): + def _convert_exception(exc: Exception) -> Exception: if isinstance(exc, DAPAccess.TransferFaultError): return exceptions.TransferFaultError(*exc.args) elif isinstance(exc, DAPAccess.TransferTimeoutError): diff --git a/pyocd/probe/debug_probe.py b/pyocd/probe/debug_probe.py index 8009c3470..c477fe45d 100644 --- a/pyocd/probe/debug_probe.py +++ b/pyocd/probe/debug_probe.py @@ -17,8 +17,16 @@ from enum import Enum import threading +from typing import (Callable, Collection, Optional, overload, Sequence, Set, TYPE_CHECKING, Tuple, Union) +from typing_extensions import 
Literal -class DebugProbe(object): +if TYPE_CHECKING: + from ..core.session import Session + from ..core.memory_interface import MemoryInterface + from ..board.board import Board + from ..coresight.ap import APAddressBase + +class DebugProbe: """! @brief Abstract debug probe class. Subclasses of this abstract class are drivers for different debug probe interfaces, either hardware such as a @@ -107,7 +115,7 @@ class Capability(Enum): JTAG_SEQUENCE = 7 @classmethod - def get_all_connected_probes(cls, unique_id=None, is_explicit=False): + def get_all_connected_probes(cls, unique_id: str = None, is_explicit: bool = False) -> Sequence["DebugProbe"]: """! @brief Returns a list of DebugProbe instances. To filter the list of returned probes, the `unique_id` parameter may be set to a string with a full or @@ -126,7 +134,7 @@ def get_all_connected_probes(cls, unique_id=None, is_explicit=False): raise NotImplementedError() @classmethod - def get_probe_with_id(cls, unique_id, is_explicit=False): + def get_probe_with_id(cls, unique_id: str, is_explicit: bool = False) -> Optional["DebugProbe"]: """! @brief Returns a DebugProbe instance for a probe with the given unique ID. If no probe is connected with a fully matching unique ID, then None will be returned. @@ -138,37 +146,37 @@ def get_probe_with_id(cls, unique_id, is_explicit=False): """ raise NotImplementedError() - def __init__(self): + def __init__(self) -> None: """! @brief Constructor.""" - self._session = None + self._session: Optional["Session"] = None self._lock = threading.RLock() @property - def session(self): + def session(self) -> Optional["Session"]: """! @brief Session associated with this probe.""" return self._session @session.setter - def session(self, the_session): + def session(self, the_session: "Session") -> None: self._session = the_session @property - def description(self): + def description(self) -> str: """! 
@brief Combined description of the debug probe and/or associated board.""" return self.vendor_name + " " + self.product_name @property - def vendor_name(self): + def vendor_name(self) -> str: """! @brief Name of the debug probe's manufacturer.""" raise NotImplementedError() @property - def product_name(self): + def product_name(self) -> str: """! @brief Name of the debug probe.""" raise NotImplementedError() @property - def supported_wire_protocols(self): + def supported_wire_protocols(self) -> Collection[Protocol]: """! @brief List of DebugProbe.Protocol supported by the probe. Only one of the values returned from this property may be passed to connect(). @@ -176,7 +184,7 @@ def supported_wire_protocols(self): raise NotImplementedError() @property - def unique_id(self): + def unique_id(self) -> str: """! @brief The unique ID of this device. This property will be valid before open() is called. This value can be passed to @@ -185,7 +193,7 @@ def unique_id(self): raise NotImplementedError() @property - def wire_protocol(self): + def wire_protocol(self) -> Optional[Protocol]: """! @brief Currently selected wire protocol. If the probe is not open and connected, i.e., open() and connect() have not been called, @@ -195,7 +203,7 @@ def wire_protocol(self): raise NotImplementedError() @property - def is_open(self): + def is_open(self) -> bool: """! @brief Whether the probe is currently open. To open the probe, call the open() method. @@ -203,14 +211,14 @@ def is_open(self): raise NotImplementedError() @property - def capabilities(self): + def capabilities(self) -> Set[Capability]: """! @brief A set of DebugProbe.Capability enums indicating the probe's features. This value should not be trusted until after the probe is opened. """ raise NotImplementedError() - def create_associated_board(self): + def create_associated_board(self) -> Optional["Board"]: """! @brief Create a board instance representing the board of which the probe is a component. 
If the probe is part of a board, then this method will create a Board instance that @@ -223,15 +231,15 @@ def create_associated_board(self): """ return None - def open(self): + def open(self) -> None: """! @brief Open the USB interface to the probe for sending commands.""" raise NotImplementedError() - def close(self): + def close(self) -> None: """! @brief Close the probe's USB interface.""" raise NotImplementedError() - def lock(self): + def lock(self) -> None: """! @brief Lock the probe from access by other threads. This lock is recursive, so locking multiple times from a single thread is acceptable as long @@ -241,7 +249,7 @@ def lock(self): """ self._lock.acquire() - def unlock(self): + def unlock(self) -> None: """! @brief Unlock the probe. Only when the thread unlocks the probe the same number of times it has called lock() will @@ -252,15 +260,15 @@ def unlock(self): ## @name Target control ##@{ - def connect(self, protocol=None): + def connect(self, protocol: Optional[Protocol] = None) -> None: """! @brief Initialize DAP IO pins for JTAG or SWD""" raise NotImplementedError() - def disconnect(self): + def disconnect(self) -> None: """! @brief Deinitialize the DAP I/O pins""" raise NotImplementedError() - def swj_sequence(self, length, bits): + def swj_sequence(self, length: int, bits: int) -> None: """! @brief Transfer some number of bits on SWDIO/TMS. @param self @@ -269,7 +277,7 @@ def swj_sequence(self, length, bits): """ pass - def swd_sequence(self, sequences): + def swd_sequence(self, sequences: Sequence[Union[Tuple[int], Tuple[int, int]]]) -> Tuple[int, Sequence[bytes]]: """! @brief Send a sequences of bits on the SWDIO signal. Each sequence in the _sequences_ parameter is a tuple with 1 or 2 members in this order: @@ -286,7 +294,7 @@ def swd_sequence(self, sequences): """ raise NotImplementedError() - def jtag_sequence(self, cycles, tms, read_tdo, tdi): + def jtag_sequence(self, cycles: int, tms: int, read_tdo: bool, tdi: int) -> Optional[int]: """! 
@brief Send JTAG sequence. @param self @@ -300,18 +308,18 @@ def jtag_sequence(self, cycles, tms, read_tdo, tdi): """ raise NotImplementedError() - def set_clock(self, frequency): + def set_clock(self, frequency: float) -> None: """! @brief Set the frequency for JTAG and SWD in Hz. This function is safe to call before connect is called. """ raise NotImplementedError() - def reset(self): + def reset(self) -> None: """! @brief Perform a hardware reset of the target.""" raise NotImplementedError() - def assert_reset(self, asserted): + def assert_reset(self, asserted: bool) -> None: """! @brief Assert or de-assert target's nRESET signal. Because nRESET is negative logic and usually open drain, passing True will drive it low, and @@ -319,7 +327,7 @@ def assert_reset(self, asserted): """ raise NotImplementedError() - def is_reset_asserted(self): + def is_reset_asserted(self) -> bool: """! @brief Returns True if nRESET is asserted or False if de-asserted. If the debug probe cannot actively read the reset signal, the value returned will be the @@ -327,7 +335,7 @@ def is_reset_asserted(self): """ raise NotImplementedError() - def flush(self): + def flush(self) -> None: """! @brief Write out all unsent commands. This API may be a no-op for certain debug probe types. @@ -339,7 +347,23 @@ def flush(self): ## @name DAP access ##@{ - def read_dp(self, addr, now=True): + @overload + def read_dp(self, addr: int) -> int: + ... + + @overload + def read_dp(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read_dp(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_dp(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: + ... + + def read_dp(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Read a DP register. 
@param self @@ -350,7 +374,7 @@ def read_dp(self, addr, now=True): """ raise NotImplementedError() - def write_dp(self, addr, data): + def write_dp(self, addr: int, data: int) -> None: """! @brief Write a DP register. @param self @@ -359,23 +383,56 @@ def write_dp(self, addr, data): """ raise NotImplementedError() - def read_ap(self, addr, now=True): + @overload + def read_ap(self, addr: int) -> int: + ... + + @overload + def read_ap(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read_ap(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_ap(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: + ... + + def read_ap(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Read an AP register.""" raise NotImplementedError() - def write_ap(self, addr, data): + def write_ap(self, addr: int, data) -> None: """! @brief Write an AP register.""" raise NotImplementedError() - def read_ap_multiple(self, addr, count=1, now=True): + @overload + def read_ap_multiple(self, addr: int, count: int = 1) -> Sequence[int]: + ... + + @overload + def read_ap_multiple(self, addr: int, count: int, now: Literal[True] = True) -> Sequence[int]: + ... + + @overload + def read_ap_multiple(self, addr: int, count: int, now: Literal[False]) -> Callable[[], Sequence[int]]: + ... + + @overload + def read_ap_multiple(self, addr: int, count: int, now: bool) -> Union[Sequence[int], Callable[[], Sequence[int]]]: + ... + + def read_ap_multiple(self, addr: int, count: int = 1, now: bool = True) \ + -> Union[Sequence[int], Callable[[], Sequence[int]]]: """! @brief Read one AP register multiple times.""" raise NotImplementedError() - def write_ap_multiple(self, addr, values): + def write_ap_multiple(self, addr: int, values) -> None: """! 
@brief Write one AP register multiple times.""" raise NotImplementedError() - def get_memory_interface_for_ap(self, ap_address): + def get_memory_interface_for_ap(self, ap_address: "APAddressBase") -> Optional["MemoryInterface"]: """! @brief Returns a @ref pyocd.core.memory_interface.MemoryInterface "MemoryInterface" for the specified AP. @@ -385,7 +442,7 @@ def get_memory_interface_for_ap(self, ap_address): does not provide an accelerated memory interface, None will be returned. @param self The debug probe. - @param ap_address An instance of @ref pyocd.coresight.ap.APAddress "APAddress". + @param ap_address An instance of @ref pyocd.coresight.ap.APAddressBase "APAddressBase". """ return None @@ -394,7 +451,7 @@ def get_memory_interface_for_ap(self, ap_address): ## @name SWO ##@{ - def swo_start(self, baudrate): + def swo_start(self, baudrate: float) -> None: """! @brief Start receiving SWO data at the given baudrate. Once SWO reception has started, the swo_read() method must be called at regular intervals @@ -403,11 +460,11 @@ def swo_start(self, baudrate): """ raise NotImplementedError() - def swo_stop(self): + def swo_stop(self) -> None: """! @brief Stop receiving SWO data.""" raise NotImplementedError() - def swo_read(self): + def swo_read(self) -> bytearray: """! @brief Read buffered SWO data from the target. @eturn Bytearray of the received data. 
May be 0 bytes in length if no SWO data is buffered diff --git a/pyocd/probe/stlink/stlink.py b/pyocd/probe/stlink/stlink.py index 36e8dec82..7390bb41b 100644 --- a/pyocd/probe/stlink/stlink.py +++ b/pyocd/probe/stlink/stlink.py @@ -279,7 +279,7 @@ def get_com_frequencies(self, protocol): response = self._device.transfer(cmd, readSize=52) self._check_status(response[0:2]) - freqs = conversion.byte_list_to_u32le_list(response[4:52]) + freqs = list(conversion.byte_list_to_u32le_list(response[4:52])) currentFreq = freqs.pop(0) freqCount = freqs.pop(0) return currentFreq, freqs[:freqCount] From ec074d7627851accbf4195dd072b935372b5a127 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 26 Nov 2021 14:30:54 -0600 Subject: [PATCH 070/123] types: annotate DebugPort , AccessPort, CoreSightTarget, CortexM. --- pyocd/coresight/ap.py | 305 ++++++++++++++++++---------- pyocd/coresight/coresight_target.py | 67 +++--- pyocd/coresight/cortex_m.py | 161 ++++++++++----- pyocd/coresight/dap.py | 255 +++++++++++++++-------- 4 files changed, 503 insertions(+), 285 deletions(-) diff --git a/pyocd/coresight/ap.py b/pyocd/coresight/ap.py index a0d7aae4f..67dd7ac93 100644 --- a/pyocd/coresight/ap.py +++ b/pyocd/coresight/ap.py @@ -19,11 +19,20 @@ from contextlib import contextmanager from functools import total_ordering from enum import Enum +from typing import (Any, Callable, Dict, Generator, Optional, TYPE_CHECKING, Sequence, Set, Tuple, Type, Union, overload) +from typing_extensions import Literal from ..core import (exceptions, memory_interface) from ..core.target import Target from ..utility.concurrency import locked +if TYPE_CHECKING: + from types import TracebackType + from ..core.core_target import CoreTarget + from .dap import DebugPort + from .rom_table import CoreSightComponentID + from ..utility.notification import Notification + LOG = logging.getLogger(__name__) TRACE = LOG.getChild("trace") @@ -171,7 +180,7 @@ class APVersion(Enum): APv2 = 2 @total_ordering -class 
APAddressBase(object): +class APAddressBase: """! @brief Base class for AP addresses. An instance of this class has a "nominal address", which is an integer address in terms of how @@ -191,24 +200,24 @@ class APAddressBase(object): address format. """ - def __init__(self, address): + def __init__(self, address: int) -> None: """! @brief Constructor accepting the nominal address.""" self._nominal_address = address @property - def ap_version(self): + def ap_version(self) -> APVersion: """! @brief Version of the AP, as an APVersion enum.""" raise NotImplementedError() @property - def nominal_address(self): + def nominal_address(self) -> int: """! @brief Integer AP address in the form in which one speaks about it. This value is used for comparisons and hashing.""" return self._nominal_address @property - def address(self): + def address(self) -> int: """! @brief Integer AP address used as a base for register accesses. This value can be passed to the DebugPort's read_ap() or write_ap() methods. Offsets of @@ -216,22 +225,25 @@ def address(self): raise NotImplementedError() @property - def idr_address(self): + def idr_address(self) -> int: """! 
@brief Address of the IDR register.""" raise NotImplementedError() - def __hash__(self): + def __hash__(self) -> int: return hash(self.nominal_address) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return (self.nominal_address == other.nominal_address) \ if isinstance(other, APAddressBase) else (self.nominal_address == other) - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: return (self.nominal_address < other.nominal_address) \ if isinstance(other, APAddressBase) else (self.nominal_address < other) - def __repr__(self): + def __str__(self) -> str: + raise NotImplementedError() + + def __repr__(self) -> str: return "<{}@{:#x} {}>".format(self.__class__.__name__, id(self), str(self)) class APv1Address(APAddressBase): @@ -242,25 +254,25 @@ class APv1Address(APAddressBase): """ @property - def ap_version(self): + def ap_version(self) -> APVersion: """! @brief APVersion.APv1.""" return APVersion.APv1 @property - def apsel(self): + def apsel(self) -> int: """! @brief Alias for the _nominal_address_ property.""" return self._nominal_address @property - def address(self): + def address(self) -> int: return self.apsel << APSEL_SHIFT @property - def idr_address(self): + def idr_address(self) -> int: """! @brief Address of the IDR register.""" return AP_IDR - def __str__(self): + def __str__(self) -> str: return "#%d" % self.apsel class APv2Address(APAddressBase): @@ -273,27 +285,27 @@ class APv2Address(APAddressBase): """ @property - def ap_version(self): + def ap_version(self) -> APVersion: """! @brief Returns APVersion.APv2.""" return APVersion.APv2 @property - def address(self): + def address(self) -> int: return self._nominal_address @property - def idr_address(self): + def idr_address(self) -> int: """! @brief Address of the IDR register.""" return APv2_IDR - def __str__(self): + def __str__(self) -> str: return "@0x%x" % self.address -class AccessPort(object): +class AccessPort: """! 
@brief Base class for a CoreSight Access Port (AP) instance.""" @staticmethod - def probe(dp, ap_num): + def probe(dp: "DebugPort", ap_num: int) -> bool: """! @brief Determine if an AP exists with the given AP number. Only applicable for ADIv5. @@ -306,7 +318,11 @@ def probe(dp, ap_num): return idr != 0 @staticmethod - def create(dp, ap_address, cmpid=None): + def create( + dp: "DebugPort", + ap_address: APAddressBase, + cmpid: Optional["CoreSightComponentID"] = None + ) -> "AccessPort": """! @brief Create a new AP object. Determines the type of the AP by examining the IDR value and creates a new @@ -350,7 +366,15 @@ def create(dp, ap_address, cmpid=None): ap.init() return ap - def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): + def __init__( + self, + dp: "DebugPort", + ap_address: APAddressBase, + idr: Optional[int] = None, + name: Optional[str] = None, + flags: int = 0, + cmpid: Optional["CoreSightComponentID"] = None + ) -> None: """! @brief AP constructor. @param self @param dp The DebugPort object. @@ -368,16 +392,16 @@ def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): self.rom_addr = 0 self.has_rom_table = False self.rom_table = None - self.core = None + self.core: Optional["CoreTarget"] = None self._flags = flags self._cmpid = cmpid @property - def short_description(self): + def short_description(self) -> str: return self.type_name + str(self.address) @property - def ap_version(self): + def ap_version(self) -> APVersion: """! @brief The AP's major version determined by ADI version. @retval APVersion.APv1 @retval APVersion.APv2 @@ -385,7 +409,7 @@ def ap_version(self): return self._ap_version @locked - def init(self): + def init(self) -> None: # Read IDR if it wasn't given to us in the ctor. 
if self.idr is None: self.idr = self.read_reg(self.address.idr_address) @@ -403,28 +427,44 @@ def init(self): LOG.info("%s IDR = 0x%08x (%s)", self.short_description, self.idr, desc) - def find_components(self): + def find_components(self) -> None: """! @brief Find CoreSight components attached to this AP.""" pass + @overload + def read_reg(self, addr: int) -> int: + ... + + @overload + def read_reg(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read_reg(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_reg(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: + ... + @locked - def read_reg(self, addr, now=True): + def read_reg(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: return self.dp.read_ap(self.address.address + addr, now) @locked - def write_reg(self, addr, data): + def write_reg(self, addr: int, data: int) -> None: self.dp.write_ap(self.address.address + addr, data) - def lock(self): + def lock(self) -> None: """! @brief Lock the AP from access by other threads.""" self.dp.probe.lock() - def unlock(self): + def unlock(self) -> None: """! @brief Unlock the AP.""" self.dp.probe.unlock() @contextmanager - def locked(self): + def locked(self) -> Generator[None, None, None]: """! @brief Context manager for locking the AP using a with statement. 
All public methods of AccessPort and its subclasses are automatically locked, so manual @@ -435,7 +475,7 @@ def locked(self): yield self.unlock() - def __repr__(self): + def __repr__(self) -> str: return "<{}@{:x} {} idr={:08x} rom={:08x}>".format( self.__class__.__name__, id(self), self.short_description, self.idr, self.rom_addr) @@ -461,8 +501,16 @@ class MEM_AP(AccessPort, memory_interface.MemoryInterface): - Barrier Operation Extension """ - def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): - super(MEM_AP, self).__init__(dp, ap_address, idr, name, flags, cmpid) + def __init__( + self, + dp: "DebugPort", + ap_address: APAddressBase, + idr: Optional[int] = None, + name: Optional[str] = None, + flags: int = 0, + cmpid: Optional["CoreSightComponentID"] = None + ) -> None: + super().__init__(dp, ap_address, idr, name, flags, cmpid) # Check AP version and set the offset to the control and status registers. if self.ap_version == APVersion.APv1: @@ -472,39 +520,39 @@ def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): else: assert False, "Unrecognized AP version %s" % self.ap_version - self._impl_hprot = 0 - self._impl_hnonsec = 0 + self._impl_hprot: int = 0 + self._impl_hnonsec: int = 0 ## Default HPROT value for CSW. - self._hprot = HPROT_DATA | HPROT_PRIVILEGED + self._hprot: int = HPROT_DATA | HPROT_PRIVILEGED ## Default HNONSEC value for CSW. - self._hnonsec = SECURE + self._hnonsec: int = SECURE ## Base CSW value to use. - self._csw = DEFAULT_CSW_VALUE + self._csw: int = DEFAULT_CSW_VALUE ## Cached current CSW value. - self._cached_csw = -1 + self._cached_csw: int = -1 ## Supported transfer sizes. - self._transfer_sizes = (32,) + self._transfer_sizes: Set[int] = {32} ## Auto-increment wrap modulus. # # The AP_4K_WRAP flag indicates a 4 kB wrap size. Otherwise it defaults to the smallest # size supported by all targets. 
A size smaller than the supported size will decrease # performance due to the extra address writes, but will not create any read/write errors. - self.auto_increment_page_size = 0x1000 if (self._flags & AP_4K_WRAP) else 0x400 + self.auto_increment_page_size: int = 0x1000 if (self._flags & AP_4K_WRAP) else 0x400 ## Number of DAR registers. - self._dar_count = 0 + self._dar_count: int = 0 ## Mask of addresses. This indicates whether 32-bit or 64-bit addresses are supported. - self._address_mask = 0xffffffff + self._address_mask: int = 0xffffffff ## Whether the Large Data extension is supported. - self._has_large_data = False + self._has_large_data: bool = False # Ask the probe for an accelerated memory interface for this AP. If it provides one, # then bind our memory interface APIs to its methods. Otherwise use our standard @@ -526,12 +574,12 @@ def __init__(self, dp, ap_address, idr=None, name="", flags=0, cmpid=None): self.dp.session.subscribe(self._reset_did_occur, (Target.Event.PRE_RESET, Target.Event.POST_RESET)) @property - def supported_transfer_sizes(self): + def supported_transfer_sizes(self) -> Set[int]: """! @brief Tuple of transfer sizes supported by this AP.""" return self._transfer_sizes @property - def is_enabled(self): + def is_enabled(self) -> bool: """! @brief Whether any memory transfers are allowed by this AP. Memory transfers may be disabled by an input signal to the AP. This is often done when debug security @@ -539,7 +587,7 @@ def is_enabled(self): """ return self.is_enabled_for(Target.SecurityState.NONSECURE) - def is_enabled_for(self, security_state): + def is_enabled_for(self, security_state: Target.SecurityState) -> bool: """! @brief Checks whether memory transfers are allowed by this AP for the given security state. Memory transfers may be disabled by an input signal to the AP. 
This is often done when debug security @@ -564,7 +612,7 @@ def is_enabled_for(self, security_state): assert False, "unsupported security state" @locked - def init(self): + def init(self) -> None: """! @brief Initialize the MEM-AP. This method interrogates the MEM-AP to determine its capabilities, and performs any initial setup @@ -583,12 +631,12 @@ def init(self): These controls are configured. - (v2 only) Configure the error mode. """ - super(MEM_AP, self).init() + super().init() # Read initial CSW. Superclass register access methods are used to avoid the CSW cache. original_csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW) - def _init_cfg(): + def _init_cfg() -> None: """! @brief Read MEM-AP CFG register.""" cfg = self.read_reg(self._reg_offset + MEM_AP_CFG) @@ -622,7 +670,7 @@ def _init_cfg(): darsize = (cfg & MEM_AP_CFG_DARSIZE_MASK) >> MEM_AP_CFG_DARSIZE_SHIFT self._dar_count = (1 << darsize) // 4 - def _init_transfer_sizes(): + def _init_transfer_sizes() -> None: """! @brief Determine supported transfer sizes. If the #AP_ALL_TX_SZ flag is set, then we know a priori that this AP implementation @@ -641,7 +689,7 @@ def _init_transfer_sizes(): # If AP_ALL_TX_SZ is set, we can skip the test. Double check this by ensuring that LD is not # enabled. 
if (self._flags & AP_ALL_TX_SZ) and not self._has_large_data: - self._transfer_sizes = (8, 16, 32) + self._transfer_sizes = {8, 16, 32} return def _test_transfer_size(sz): @@ -667,13 +715,13 @@ def _test_transfer_size(sz): SIZES_TO_TEST = (CSW_SIZE8, CSW_SIZE16, CSW_SIZE64, CSW_SIZE128, CSW_SIZE256) sz_result_cbs = ((sz, _test_transfer_size(sz)) for sz in SIZES_TO_TEST) - self._transfer_sizes = ([32] + [(8 * (1 << sz)) for sz, cb in sz_result_cbs if cb()]) - self._transfer_sizes.sort() + self._transfer_sizes = {32} | {(8 * (1 << sz)) for sz, cb in sz_result_cbs if cb()} + # self._transfer_sizes.sort() elif _test_transfer_size(CSW_SIZE16)(): - self._transfer_sizes = (8, 16, 32) + self._transfer_sizes = {8, 16, 32} - def _init_hprot(): + def _init_hprot() -> None: """! @brief Init HPROT HNONSEC. Determines the implemented bits of HPROT and HNONSEC in this MEM-AP. The defaults for these @@ -697,7 +745,7 @@ def _init_hprot(): self.hprot = self._hprot & self._impl_hprot self.hnonsec = self._hnonsec & self._impl_hnonsec - def _init_rom_table_base(): + def _init_rom_table_base() -> None: """! @brief Read ROM table base address.""" base = self.read_reg(self._reg_offset + MEM_AP_BASE) is_adiv5_base = (base & AP_BASE_FORMAT_MASK) != 0 @@ -725,7 +773,7 @@ def _init_rom_table_base(): AccessPort.write_reg(self, self._reg_offset + MEM_AP_CSW, original_csw) @locked - def find_components(self): + def find_components(self) -> None: try: if self.has_rom_table: if not self.is_enabled: @@ -748,20 +796,20 @@ def find_components(self): exc_info=self.dp.session.log_tracebacks) @property - def implemented_hprot_mask(self): + def implemented_hprot_mask(self) -> int: return self._impl_hprot @property - def implemented_hnonsec_mask(self): + def implemented_hnonsec_mask(self) -> int: return self._impl_hnonsec @property - def hprot(self): + def hprot(self) -> int: return self._hprot @hprot.setter @locked - def hprot(self, value): + def hprot(self, value: int) -> None: """! 
@brief Setter for current HPROT value used for memory transactions. The bits of HPROT have the following meaning. Not all bits are implemented in all @@ -781,12 +829,12 @@ def hprot(self, value): | (self._hprot << CSW_HPROT_SHIFT)) @property - def hnonsec(self): + def hnonsec(self) -> int: return self._hnonsec @hnonsec.setter @locked - def hnonsec(self, value): + def hnonsec(self, value: int) -> None: """! @brief Setter for current HNONSEC value used for memory transactions. Not all MEM-APs support control of HNONSEC. In particular, only the AHB5-AP used for @@ -799,20 +847,20 @@ def hnonsec(self, value): self._csw = ((self._csw & ~CSW_HNONSEC_MASK) | (self._hnonsec << CSW_HNONSEC_SHIFT)) - class _MemAttrContext(object): + class _MemAttrContext: """! @brief Context manager for temporarily setting HPROT and/or HNONSEC. The AP is locked during the lifetime of the context manager. This means that only the calling thread can perform memory transactions. """ - def __init__(self, ap, hprot=None, hnonsec=None): + def __init__(self, ap: "MEM_AP", hprot: Optional[int] = None, hnonsec: Optional[int] = None): self._ap = ap self._hprot = hprot self._saved_hprot = None self._hnonsec = hnonsec self._saved_hnonsec = None - def __enter__(self): + def __enter__(self) -> "MEM_AP._MemAttrContext": self._ap.lock() if self._hprot is not None: self._saved_hprot = self._ap.hprot @@ -822,42 +870,57 @@ def __enter__(self): self._ap.hnonsec = self._hnonsec return self - def __exit__(self, type, value, traceback): + def __exit__(self, exc_type: type, value: Any, traceback: "TracebackType") -> None: if self._saved_hprot is not None: self._ap.hprot = self._saved_hprot if self._saved_hnonsec is not None: self._ap.hnonsec = self._saved_hnonsec self._ap.unlock() - return False - def hprot_lock(self, hprot): + def hprot_lock(self, hprot: int) -> _MemAttrContext: """! 
@brief Context manager to temporarily change HPROT.""" return self._MemAttrContext(self, hprot=hprot) - def hnonsec_lock(self, hnonsec): + def hnonsec_lock(self, hnonsec: int) -> _MemAttrContext: """! @brief Context manager to temporarily change HNONSEC. @see secure_lock(), nonsecure_lock() """ return self._MemAttrContext(self, hnonsec=hnonsec) - def secure_lock(self): + def secure_lock(self) -> _MemAttrContext: """! @brief Context manager to temporarily set the AP to use secure memory transfers.""" return self.hnonsec_lock(SECURE) - def nonsecure_lock(self): + def nonsecure_lock(self) -> _MemAttrContext: """! @brief Context manager to temporarily set AP to use non-secure memory transfers.""" return self.hnonsec_lock(NONSECURE) + @overload + def read_reg(self, addr: int) -> int: + ... + + @overload + def read_reg(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read_reg(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_reg(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: + ... + @locked - def read_reg(self, addr, now=True): + def read_reg(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: ap_regaddr = addr & APREG_MASK if ap_regaddr == self._reg_offset + MEM_AP_CSW and self._cached_csw != -1 and now: return self._cached_csw return self.dp.read_ap(self.address.address + addr, now) @locked - def write_reg(self, addr, data): + def write_reg(self, addr: int, data: int) -> None: ap_regaddr = addr & APREG_MASK # Don't need to write CSW if it's not changing value. @@ -878,17 +941,17 @@ def write_reg(self, addr, data): self._invalidate_cache() raise - def _invalidate_cache(self): + def _invalidate_cache(self) -> None: """! @brief Invalidate cached registers associated with this AP.""" self._cached_csw = -1 - def _reset_did_occur(self, notification): + def _reset_did_occur(self, notification: "Notification") -> None: """! 
@brief Handles reset notifications to invalidate CSW cache.""" # We clear the cache on all resets just to be safe. self._invalidate_cache() @locked - def _write_memory(self, addr, data, transfer_size=32): + def _write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None: """! @brief Write a single memory location. By default the transfer size is a word @@ -904,22 +967,23 @@ def _write_memory(self, addr, data, transfer_size=32): TRACE.debug("write_mem:%06d (ap=0x%x; addr=0x%08x, size=%d) = 0x%08x {", num, self.address.nominal_address, addr, transfer_size, data) self.write_reg(self._reg_offset + MEM_AP_CSW, self._csw | TRANSFER_SIZE[transfer_size]) - if transfer_size == 8: - data = data << ((addr & 0x03) << 3) - elif transfer_size == 16: - data = data << ((addr & 0x02) << 3) - elif transfer_size > 32: - # Split the value into a tuple of 32-bit words, least-significant first. - data = (((data >> (32 * i)) & 0xffffffff) for i in range(transfer_size // 32)) try: self.write_reg(self._reg_offset + MEM_AP_TAR, addr) if transfer_size <= 32: + if transfer_size == 8: + data = data << ((addr & 0x03) << 3) + elif transfer_size == 16: + data = data << ((addr & 0x02) << 3) + self.write_reg(self._reg_offset + MEM_AP_DRW, data) else: + # Split the value into a tuple of 32-bit words, least-significant first. + data_words = list(((data >> (32 * i)) & 0xffffffff) for i in range(transfer_size // 32)) + # Multi-word transfer. - self.dp.write_ap_multiple(self.address.address + self._reg_offset + MEM_AP_DRW, data) + self.dp.write_ap_multiple(self.address.address + self._reg_offset + MEM_AP_DRW, data_words) except exceptions.TransferFaultError as error: # Annotate error with target address. self._handle_error(error, num) @@ -931,8 +995,24 @@ def _write_memory(self, addr, data, transfer_size=32): raise TRACE.debug("write_mem:%06d }", num) + @overload + def _read_memory(self, addr: int, transfer_size: int = 32) -> int: + ... 
+ + @overload + def _read_memory(self, addr: int, transfer_size: int = 32, now: Literal[True] = True) -> int: + ... + + @overload + def _read_memory(self, addr: int, transfer_size: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def _read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Callable[[], int]]: + ... + @locked - def _read_memory(self, addr, transfer_size=32, now=True): + def _read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Read a memory location. By default, a word will be read. @@ -955,7 +1035,7 @@ def _read_memory(self, addr, transfer_size=32, now=True): result_cb = self.read_reg(self._reg_offset + MEM_AP_DRW, now=False) else: # Multi-word transfer. - result_cb = self.dp.read_ap_multiple(self.address.address + self._reg_offset + MEM_AP_DRW, + result_cb_mw = self.dp.read_ap_multiple(self.address.address + self._reg_offset + MEM_AP_DRW, transfer_size // 32, now=False) except exceptions.TransferFaultError as error: # Annotate error with target address. 
@@ -967,18 +1047,20 @@ def _read_memory(self, addr, transfer_size=32, now=True): self._handle_error(error, num) raise - def read_mem_cb(): - res = None + def read_mem_cb() -> int: try: - res = result_cb() - if transfer_size == 8: - res = (res >> ((addr & 0x03) << 3) & 0xff) - elif transfer_size == 16: - res = (res >> ((addr & 0x02) << 3) & 0xffff) - elif transfer_size > 32: - res = sum((w << (32 * i)) for i, w in enumerate(res)) + if transfer_size <= 32: + res = result_cb() + if transfer_size == 8: + res = (res >> ((addr & 0x03) << 3) & 0xff) + elif transfer_size == 16: + res = (res >> ((addr & 0x02) << 3) & 0xffff) + else: + res_mw = result_cb_mw() + res = sum((w << (32 * i)) for i, w in enumerate(res_mw)) TRACE.debug("read_mem:%06d %s(ap=0x%x; addr=0x%08x, size=%d) -> 0x%08x }", num, "" if now else "...", self.address.nominal_address, addr, transfer_size, res) + return res except exceptions.TransferFaultError as error: # Annotate error with target address. self._handle_error(error, num) @@ -988,7 +1070,6 @@ def read_mem_cb(): except exceptions.Error as error: self._handle_error(error, num) raise - return res if now: result = read_mem_cb() @@ -996,7 +1077,7 @@ def read_mem_cb(): else: return read_mem_cb - def _write_block32_page(self, addr, data): + def _write_block32_page(self, addr: int, data: Sequence[int]) -> None: """! @brief Write a single transaction's worth of aligned words. The transaction must not cross the MEM-AP's auto-increment boundary. @@ -1023,7 +1104,7 @@ def _write_block32_page(self, addr, data): raise TRACE.debug("_write_block32:%06d }", num) - def _read_block32_page(self, addr, size): + def _read_block32_page(self, addr: int, size: int) -> Sequence[int]: """! @brief Read a single transaction's worth of aligned words. The transaction must not cross the MEM-AP's auto-increment boundary. 
@@ -1052,7 +1133,7 @@ def _read_block32_page(self, addr, size): return resp @locked - def _write_memory_block32(self, addr, data): + def _write_memory_block32(self, addr: int, data: Sequence[int]) -> None: """! @brief Write a block of aligned words in memory.""" assert (addr & 0x3) == 0 addr &= self._address_mask @@ -1068,7 +1149,7 @@ def _write_memory_block32(self, addr, data): return @locked - def _read_memory_block32(self, addr, size): + def _read_memory_block32(self, addr: int, size: int) -> Sequence[int]: """! @brief Read a block of aligned words in memory. @return A list of word values. @@ -1085,7 +1166,7 @@ def _read_memory_block32(self, addr, size): addr += n return resp - def _handle_error(self, error, num): + def _handle_error(self, error: Exception, num: int) -> None: self.dp._handle_error(error, num) self._invalidate_cache() @@ -1101,14 +1182,14 @@ class AHB_AP(MEM_AP): """ @locked - def init(self): - super(AHB_AP, self).init() + def init(self) -> None: + super().init() # Check for and enable the Master Type bit on AHB-APs where it might be implemented. if self._flags & AP_MSTRTYPE: self._init_mstrtype() - def _init_mstrtype(self): + def _init_mstrtype(self) -> None: """! @brief Set master type control in CSW. Only the v1 AHB-AP from Cortex-M3 and Cortex-M4 implements the MSTRTYPE flag to control @@ -1117,7 +1198,7 @@ def _init_mstrtype(self): # Set the master type to "debugger" for AP's that support this field. self._csw |= CSW_MSTRDBG - def find_components(self): + def find_components(self) -> None: # Turn on DEMCR.TRCENA before reading the ROM table. Some ROM table entries can # come back as garbage if TRCENA is not set. try: @@ -1129,7 +1210,7 @@ def find_components(self): pass # Invoke superclass. - super(AHB_AP, self).find_components() + super().find_components() ## @brief Arm JEP106 code # @@ -1175,7 +1256,7 @@ def find_components(self): # 0x14770005 AHB5-AP Used on M33. Note that M33 r0p0 incorrect fails to report this IDR. 
# 0x04770025 AHB5-AP Used on M23. # 0x54770002 APB-AP used on M33. -AP_TYPE_MAP = { +AP_TYPE_MAP: Dict[Tuple[int, int, int, int], Tuple[str, Type[AccessPort], int]] = { # |JEP106 |Class |Var|Type |Name |Class (AP_JEP106_ARM, AP_CLASS_JTAG_AP, 0, 0): ("JTAG-AP", AccessPort, 0 ), (AP_JEP106_ARM, AP_CLASS_COM_AP, 0, 0): ("SDC-600", AccessPort, 0 ), diff --git a/pyocd/coresight/coresight_target.py b/pyocd/coresight/coresight_target.py index e9ae95e1d..ba28ba62d 100644 --- a/pyocd/coresight/coresight_target.py +++ b/pyocd/coresight/coresight_target.py @@ -18,9 +18,10 @@ import logging from inspect import getfullargspec from pathlib import PurePath +from typing import (Callable, Dict, Optional, TYPE_CHECKING, cast) from ..core.target import Target -from ..core.memory_map import (MemoryType, RamRegion, DeviceRegion, MemoryMap) +from ..core.memory_map import (FlashRegion, MemoryType, RamRegion, DeviceRegion, MemoryMap) from ..core.soc_target import SoCTarget from ..core import exceptions from . import (dap, discovery) @@ -28,6 +29,12 @@ from ..utility.sequencer import CallSequence from ..target.pack.flash_algo import PackFlashAlgo +if TYPE_CHECKING: + from ..core.session import Session + from ..core.memory_map import MemoryMap + from .ap import (APAddressBase, AccessPort) + from ..debug.svd.model import SVDDevice + LOG = logging.getLogger(__name__) class CoreSightTarget(SoCTarget): @@ -36,31 +43,32 @@ class CoreSightTarget(SoCTarget): This class adds Arm CoreSight-specific discovery and initialization code to SoCTarget. """ - def __init__(self, session, memory_map=None): + def __init__(self, session: "Session", memory_map: Optional["MemoryMap"] = None) -> None: # Supply a default memory map. 
if (memory_map is None) or (memory_map.region_count == 0): memory_map = self._create_default_cortex_m_memory_map() LOG.debug("Using default Cortex-M memory map (no memory map supplied)") - super(CoreSightTarget, self).__init__(session, memory_map) + super().__init__(session, memory_map) + assert session.probe self.dp = dap.DebugPort(session.probe, self) - self._svd_load_thread = None - self._irq_table = None - self._discoverer = None + self._svd_load_thread: Optional[SVDLoader] = None + self._irq_table: Optional[Dict[int, str]] = None + self._discoverer: Optional[Callable] = None @property - def aps(self): + def aps(self) -> Dict["APAddressBase", "AccessPort"]: return self.dp.aps @property - def svd_device(self): + def svd_device(self) -> Optional["SVDDevice"]: """! @brief Waits for SVD file to complete loading before returning.""" if not self._svd_device and self._svd_load_thread: LOG.debug("Waiting for SVD load to complete") self._svd_device = self._svd_load_thread.device return self._svd_device - def _create_default_cortex_m_memory_map(self): + def _create_default_cortex_m_memory_map(self) -> MemoryMap: """! 
@brief Create a MemoryMap for the Cortex-M system address map.""" return MemoryMap( RamRegion(name="Code", start=0x00000000, length=0x20000000, access='rwx'), @@ -73,7 +81,7 @@ def _create_default_cortex_m_memory_map(self): DeviceRegion(name="PPB", start=0xE0000000, length=0x20000000, access='rw'), ) - def load_svd(self): + def load_svd(self) -> None: def svd_load_completed_cb(svdDevice): self._svd_device = svdDevice self._svd_load_thread = None @@ -83,13 +91,13 @@ def svd_load_completed_cb(svdDevice): self._svd_load_thread = SVDLoader(self._svd_location, svd_load_completed_cb) self._svd_load_thread.load() - def create_init_sequence(self): + def create_init_sequence(self) -> CallSequence: seq = CallSequence( ('load_svd', self.load_svd), ('pre_connect', self.pre_connect), ('dp_init', self.dp.create_connect_sequence), ('create_discoverer', self.create_discoverer), - ('discovery', lambda : self._discoverer.discover()), + ('discovery', lambda : self._discoverer.discover() if self._discoverer else None), ('check_for_cores', self.check_for_cores), ('halt_on_connect', self.perform_halt_on_connect), ('post_connect', self.post_connect), @@ -100,7 +108,7 @@ def create_init_sequence(self): return seq - def disconnect(self, resume=True): + def disconnect(self, resume: bool = True) -> None: """! @brief Disconnect from the target. Same as SoCTarget.disconnect(), except that it asks the DebugPort to power down. @@ -112,7 +120,7 @@ def disconnect(self, resume=True): self.dp.disconnect() self.call_delegate('did_disconnect', target=self, resume=resume) - def create_discoverer(self): + def create_discoverer(self) -> None: """! @brief Init task to create the discovery object. Instantiates the appropriate @ref pyocd.coresight.discovery.CoreSightDiscovery @@ -120,7 +128,7 @@ def create_discoverer(self): """ self._discoverer = discovery.ADI_DISCOVERY_CLASS_MAP[self.dp.adi_version](self) - def pre_connect(self): + def pre_connect(self) -> None: """! 
@brief Handle some of the connect modes. This init task performs a connect pre-reset or asserts reset if the connect mode is @@ -134,7 +142,7 @@ def pre_connect(self): LOG.info("Asserting reset prior to connect") self.dp.assert_reset(True) - def perform_halt_on_connect(self): + def perform_halt_on_connect(self) -> None: """! @brief Halt cores. This init task performs a connect pre-reset or asserts reset if the connect mode is @@ -148,14 +156,14 @@ def perform_halt_on_connect(self): for core in self.cores.values(): try: if mode == 'under-reset': - core.set_reset_catch() + core.set_reset_catch(Target.ResetType.HW) else: core.halt() except exceptions.Error as err: LOG.warning("Could not halt core #%d: %s", core.core_number, err, exc_info=self.session.log_tracebacks) - def post_connect(self): + def post_connect(self) -> None: """! @brief Handle cleaning up some of the connect modes. This init task de-asserts reset if the connect mode is under-reset. @@ -169,12 +177,12 @@ def post_connect(self): # Apply to all cores. for core in self.cores.values(): try: - core.clear_reset_catch() + core.clear_reset_catch(Target.ResetType.HW) except exceptions.Error as err: LOG.warning("Could not halt core #%d: %s", core.core_number, err, exc_info=self.session.log_tracebacks) - def create_flash(self): + def create_flash(self) -> None: """! @brief Instantiates flash objects for memory regions. This init task iterates over flash memory regions and for each one creates the Flash @@ -182,11 +190,12 @@ def create_flash(self): to construct the flash object. """ for region in self.memory_map.iter_matching_regions(type=MemoryType.FLASH): + region = cast(FlashRegion, region) # If the region doesn't have an algo dict but does have an FLM file, try to load # the FLM and create the algo dict. 
if (region.algo is None) and (region.flm is not None): if isinstance(region.flm, (str, PurePath)): - flm_path = self.session.find_user_file(None, [region.flm]) + flm_path = self.session.find_user_file(None, [str(region.flm)]) if flm_path is not None: LOG.info("creating flash algo from: %s", flm_path) pack_algo = PackFlashAlgo(flm_path) @@ -226,7 +235,7 @@ def create_flash(self): LOG.warning("flash region '%s' has no flash algo" % region.name) continue else: - obj = klass(self) + obj = klass(self) # type:ignore # Set the region in the flash instance. obj.region = region @@ -234,7 +243,7 @@ def create_flash(self): # Store the flash object back into the memory region. region.flash = obj - def check_for_cores(self): + def check_for_cores(self) -> None: """! @brief Init task: verify that at least one core was discovered.""" if not len(self.cores): # Allow the user to override the exception to enable uses like chip bringup. @@ -243,18 +252,8 @@ def check_for_cores(self): else: raise exceptions.DebugError("No cores were discovered!") - def disconnect(self, resume: bool = True) -> None: - # Override this from SoCTarget so we can call power_down_debug() on the DP, which is - # created in this class and thus not (safely) accessible to SoCTarget. 
- self.session.notify(Target.Event.PRE_DISCONNECT, self) - self.call_delegate('will_disconnect', target=self, resume=resume) - for core in self.cores.values(): - core.disconnect(resume) - self.dp.power_down_debug() - self.call_delegate('did_disconnect', target=self, resume=resume) - @property - def irq_table(self): + def irq_table(self) -> Dict[int, str]: if (self._irq_table is None): if (self.svd_device is not None) and (self.svd_device.peripherals is not None): peripherals = [ diff --git a/pyocd/coresight/cortex_m.py b/pyocd/coresight/cortex_m.py index 3d64068f4..f824509a4 100644 --- a/pyocd/coresight/cortex_m.py +++ b/pyocd/coresight/cortex_m.py @@ -17,13 +17,15 @@ import logging from time import sleep +from typing import (Any, Callable, List, Optional, overload, Sequence, TYPE_CHECKING, Union, cast) +from typing_extensions import Literal from ..core.target import Target from ..core.core_target import CoreTarget from ..core import exceptions from ..core.core_registers import CoreRegistersIndex from ..utility import (cmdline, timeout) -from .component import CoreSightCoreComponent +from .component import (CoreSightComponent, CoreSightCoreComponent) from .fpb import FPB from .dwt import DWT from .core_ids import (CORE_TYPE_NAME, CoreArchitecture, CortexMExtension) @@ -33,10 +35,21 @@ ) from ..debug.breakpoints.manager import BreakpointManager from ..debug.breakpoints.software import SoftwareBreakpointProvider +from .ap import MEM_AP + +if TYPE_CHECKING: + from .coresight_target import CoreSightTarget + from .rom_table import CoreSightComponentID + from ..core.session import Session + from ..core.memory_interface import MemoryInterface + from ..core.memory_map import MemoryMap + from ..core.target_delegate import DelegateResult + from ..debug.context import DebugContext + from ..debug.elf.elf import ELFBinaryFile LOG = logging.getLogger(__name__) -class CortexM(CoreTarget, CoreSightCoreComponent): +class CortexM(CoreTarget, CoreSightCoreComponent): # 
lgtm[py/multiple-calls-to-init] """! @brief CoreSight component for a v6-M or v7-M Cortex-M core. This class has basic functions to access a Cortex-M core: @@ -172,14 +185,16 @@ class CortexM(CoreTarget, CoreSightCoreComponent): _RESET_RECOVERY_SLEEP_INTERVAL = 0.01 # 10 ms @classmethod - def factory(cls, ap, cmpid, address): + def factory(cls, ap: "MemoryInterface", cmpid: "CoreSightComponentID", address: int) -> Any: + assert isinstance(ap, MEM_AP) + # Create a new core instance. - root = ap.dp.target + root = cast("CoreSightTarget", ap.dp.target) core = cls(root.session, ap, root.memory_map, root._new_core_num, cmpid, address) # Associate this core with the AP. if ap.core is not None: - raise exceptions.TargetError("AP#%d has multiple cores associated with it" % ap.ap_num) + raise exceptions.TargetError(f"{ap.short_description} has multiple cores associated with it") ap.core = core # Add the new core to the root target. @@ -189,25 +204,32 @@ def factory(cls, ap, cmpid, address): return core - def __init__(self, session, ap, memory_map=None, core_num=0, cmpid=None, address=None): - Target.__init__(self, session, memory_map) + def __init__(self, + session: "Session", + ap: MEM_AP, + memory_map: Optional["MemoryMap"] = None, + core_num: int = 0, + cmpid: Optional["CoreSightComponentID"] = None, + address: Optional[int] = None + ) -> None: + CoreTarget.__init__(self, session, memory_map) CoreSightCoreComponent.__init__(self, ap, cmpid, address) - self._architecture = None - self._extensions = [] + self._architecture: CoreArchitecture = CoreArchitecture.ARMv6M + self._extensions: List[CortexMExtension] = [] self.core_type = 0 - self.has_fpu = False - self.core_number = core_num - self._run_token = 0 - self._target_context = None + self.has_fpu: bool = False + self._core_number: int = core_num + self._run_token: int = 0 + self._target_context: Optional["DebugContext"] = None self._elf = None self.target_xml = None self._core_registers = CoreRegistersIndex() - 
self._supports_vectreset = False - self._reset_catch_delegate_result = False - self._reset_catch_saved_demcr = 0 - self.fpb = None - self.dwt = None + self._supports_vectreset: bool = False + self._reset_catch_delegate_result: DelegateResult = False + self._reset_catch_saved_demcr: int = 0 + self.fpb: Optional[FPB] = None + self.dwt: Optional[DWT] = None # Default to software reset using the default software reset method. self._default_reset_type = Target.ResetType.SW @@ -223,9 +245,9 @@ def __init__(self, session, ap, memory_map=None, core_num=0, cmpid=None, address self.bp_manager = BreakpointManager(self) self.bp_manager.add_provider(self.sw_bp) - def add_child(self, cmp): + def add_child(self, cmp: "CoreSightComponent") -> None: """! @brief Connect related CoreSight components.""" - super(CortexM, self).add_child(cmp) + super().add_child(cmp) if isinstance(cmp, FPB): self.fpb = cmp @@ -234,45 +256,49 @@ def add_child(self, cmp): self.dwt = cmp @property - def architecture(self): + def core_number(self) -> int: + return self._core_number + + @property + def architecture(self) -> CoreArchitecture: """! @brief @ref pyocd.coresight.core_ids.CoreArchitecture "CoreArchitecture" for this core.""" return self._architecture @property - def extensions(self): + def extensions(self) -> List[CortexMExtension]: """! @brief List of extensions supported by this core.""" return self._extensions @property - def core_registers(self): + def core_registers(self) -> CoreRegistersIndex: """! @brief Instance of @ref pyocd.core.core_registers.CoreRegistersIndex "CoreRegistersIndex" describing available core registers. 
""" return self._core_registers @property - def elf(self): + def elf(self) -> Optional["ELFBinaryFile"]: return self._elf @elf.setter - def elf(self, elffile): + def elf(self, elffile: "ELFBinaryFile") -> None: self._elf = elffile @property - def default_reset_type(self): + def default_reset_type(self) -> Target.ResetType: return self._default_reset_type @default_reset_type.setter - def default_reset_type(self, reset_type): + def default_reset_type(self, reset_type: Target.ResetType) -> None: assert isinstance(reset_type, Target.ResetType) self._default_reset_type = reset_type @property - def default_software_reset_type(self): + def default_software_reset_type(self) -> Target.ResetType: return self._default_software_reset_type @default_software_reset_type.setter - def default_software_reset_type(self, reset_type): + def default_software_reset_type(self, reset_type: Target.ResetType) -> None: """! @brief Modify the default software reset method. @param self @param reset_type Must be one of the software reset types: Target.ResetType.SW_SYSRESETREQ, @@ -284,7 +310,7 @@ def default_software_reset_type(self, reset_type): self._default_software_reset_type = reset_type @property - def supported_security_states(self): + def supported_security_states(self) -> Sequence[Target.SecurityState]: """! @brief Tuple of security states supported by the processor. @return Tuple of @ref pyocd.core.target.Target.SecurityState "Target.SecurityState". For @@ -292,7 +318,7 @@ def supported_security_states(self): """ return (Target.SecurityState.NONSECURE,) - def init(self): + def init(self) -> None: """! @brief Cortex M initialization. The bus must be accessible when this method is called. @@ -305,7 +331,7 @@ def init(self): self.call_delegate('did_start_debug_core', core=self) - def disconnect(self, resume=True): + def disconnect(self, resume: bool = True) -> None: if not self.call_delegate('will_stop_debug_core', core=self): # Remove breakpoints and watchpoints. 
self.bp_manager.remove_all_breakpoints() @@ -322,7 +348,7 @@ def disconnect(self, resume=True): self.call_delegate('did_stop_debug_core', core=self) - def _build_registers(self): + def _build_registers(self) -> None: """! @brief Build set of core registers available on this code. This method builds the list of core registers for this particular core. This includes all @@ -338,7 +364,7 @@ def _build_registers(self): if self.has_fpu: self._core_registers.add_group(CoreRegisterGroups.VFP_V5) - def _read_core_type(self): + def _read_core_type(self) -> None: """! @brief Read the CPUID register and determine core type and architecture.""" # Read CPUID register cpuid = self.read32(CortexM.CPUID) @@ -365,7 +391,7 @@ def _read_core_type(self): else: LOG.warning("CPU core #%d type is unrecognized", self.core_number) - def _check_for_fpu(self): + def _check_for_fpu(self) -> None: """! @brief Determine if a core has an FPU. The core architecture must have been identified prior to calling this function. @@ -406,13 +432,29 @@ def _check_for_fpu(self): fpu_type = "FPv4-SP-D16-M" LOG.info("FPU present: " + fpu_type) - def write_memory(self, addr, value, transfer_size=32): + def write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None: """! @brief Write a single memory location. By default the transfer size is a word.""" - self.ap.write_memory(addr, value, transfer_size) + self.ap.write_memory(addr, data, transfer_size) + + @overload + def read_memory(self, addr: int, transfer_size: int = 32) -> int: + ... + + @overload + def read_memory(self, addr: int, transfer_size: int = 32, now: Literal[True] = True) -> int: + ... - def read_memory(self, addr, transfer_size=32, now=True): + @overload + def read_memory(self, addr: int, transfer_size: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Callable[[], int]]: + ... 
+ + def read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Read a memory location. By default, a word will be read.""" @@ -427,27 +469,27 @@ def read_memory_cb(): else: return read_memory_cb - def read_memory_block8(self, addr, size): + def read_memory_block8(self, addr: int, size: int) -> Sequence[int]: """! @brief Read a block of unaligned bytes in memory. @return an array of byte values """ data = self.ap.read_memory_block8(addr, size) return self.bp_manager.filter_memory_unaligned_8(addr, size, data) - def write_memory_block8(self, addr, data): + def write_memory_block8(self, addr: int, data: Sequence[int]) -> None: """! @brief Write a block of unaligned bytes in memory.""" self.ap.write_memory_block8(addr, data) - def write_memory_block32(self, addr, data): + def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: """! @brief Write an aligned block of 32-bit words.""" self.ap.write_memory_block32(addr, data) - def read_memory_block32(self, addr, size): + def read_memory_block32(self, addr: int, size) -> Sequence[int]: """! @brief Read an aligned block of 32-bit words.""" data = self.ap.read_memory_block32(addr, size) return self.bp_manager.filter_memory_aligned_32(addr, size, data) - def halt(self): + def halt(self) -> None: """! @brief Halt the core """ LOG.debug("halting core %d", self.core_number) @@ -457,7 +499,8 @@ def halt(self): self.flush() self.session.notify(Target.Event.POST_HALT, self, Target.HaltReason.USER) - def step(self, disable_interrupts=True, start=0, end=0, hook_cb=None): + def step(self, disable_interrupts: bool = True, start: int = 0, end: int = 0, + hook_cb: Optional[Callable[[], bool]] = None) -> None: """! @brief Perform an instruction level step. This API will execute one or more individual instructions on the core. 
With default parameters, it @@ -690,7 +733,7 @@ def _perform_reset(self, reset_type): assert isinstance(reset_type, Target.ResetType) if reset_type is Target.ResetType.HW: # Tell DP to not send reset notifications because we are doing it. - self.session.target.dp.reset(send_notifications=False) + cast("CoreSightTarget", self.session.target).dp.reset(send_notifications=False) elif reset_type is Target.ResetType.SW_EMULATED: self._perform_emulated_reset() else: @@ -786,7 +829,7 @@ def reset(self, reset_type=None): self.session.notify(Target.Event.POST_RESET, self) - def set_reset_catch(self, reset_type=None): + def set_reset_catch(self, reset_type): """! @brief Prepare to halt core on reset.""" LOG.debug("set reset catch, core %d", self.core_number) @@ -804,7 +847,7 @@ def set_reset_catch(self, reset_type=None): if (self._reset_catch_saved_demcr & CortexM.DEMCR_VC_CORERESET) == 0: self.write_memory(CortexM.DEMCR, self._reset_catch_saved_demcr | CortexM.DEMCR_VC_CORERESET) - def clear_reset_catch(self, reset_type=None): + def clear_reset_catch(self, reset_type): """! @brief Disable halt on reset.""" LOG.debug("clear reset catch, core %d", self.core_number) @@ -816,6 +859,8 @@ def clear_reset_catch(self, reset_type=None): def reset_and_halt(self, reset_type=None): """! @brief Perform a reset and stop the core on the reset handler.""" + reset_type = self._get_actual_reset_type(reset_type) + # Set up reset catch. self.set_reset_catch(reset_type) @@ -984,6 +1029,8 @@ def _base_read_core_registers_raw(self, reg_list): # Handle doubles. doubles = [reg for reg in reg_list if CortexMCoreRegisterInfo.get(reg).is_double_float_register] hasDoubles = len(doubles) > 0 + originalRegList = [] + singleValues = [] if hasDoubles: originalRegList = reg_list @@ -1012,8 +1059,8 @@ def _base_read_core_registers_raw(self, reg_list): # we're running so slow compared to the target that it's not necessary. # Read it and check that S_REGRDY is set. 
- dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False) - reg_cb = self.read_memory(CortexM.DCRDR, now=False) + dhcsr_cb = self.read32(CortexM.DHCSR, now=False) + reg_cb = self.read32(CortexM.DCRDR, now=False) dhcsr_cb_list.append(dhcsr_cb) reg_cb_list.append(reg_cb) @@ -1161,6 +1208,7 @@ def _base_write_core_registers_raw(self, reg_list, data_list): reg = CortexMCoreRegisterInfo.get('cfbp').index elif CortexMCoreRegisterInfo.get(reg).is_psr_subregister: mask = CortexMCoreRegisterInfo.get(reg).psr_mask + assert xpsrValue is not None data = (xpsrValue & (0xffffffff ^ mask)) | (data & mask) xpsrValue = data reg = CortexMCoreRegisterInfo.get('xpsr').index @@ -1174,7 +1222,7 @@ def _base_write_core_registers_raw(self, reg_list, data_list): # Technically, we need to poll S_REGRDY in DHCSR here to ensure the # register write has completed. # Read it and assert that S_REGRDY is set - dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False) + dhcsr_cb = self.read32(CortexM.DHCSR, now=False) dhcsr_cb_list.append(dhcsr_cb) # Make sure S_REGRDY was set for all register writes. 
@@ -1206,7 +1254,7 @@ def get_breakpoint_type(self, addr): @property def available_breakpoint_count(self): - return self.fpb.available_breakpoints + return self.fpb.available_breakpoints if self.fpb else 0 def find_watchpoint(self, addr, size, type): if self.dwt is not None: @@ -1334,7 +1382,7 @@ def set_target_context(self, context): "SysTick", ] - def exception_number_to_name(self, exc_num, name_thread=False): + def exception_number_to_name(self, exc_num: int, name_thread: bool = False) -> Optional[str]: if exc_num < len(self.CORE_EXCEPTION): if exc_num == 0 and not name_thread: return None @@ -1343,13 +1391,16 @@ def exception_number_to_name(self, exc_num, name_thread=False): else: irq_num = exc_num - len(self.CORE_EXCEPTION) name = None - if self.session.target.irq_table: - name = self.session.target.irq_table.get(irq_num) + cstarget = cast("CoreSightTarget", self.session.target) + if cstarget.irq_table: + name = cstarget.irq_table.get(irq_num) if name is not None: return "Interrupt[%s]" % name else: return "Interrupt %d" % irq_num - def in_thread_mode_on_main_stack(self): + def in_thread_mode_on_main_stack(self) -> bool: + if not self._target_context: + return False return (self._target_context.read_core_register('ipsr') == 0 and (self._target_context.read_core_register('control') & CortexM.CONTROL_SPSEL) == 0) diff --git a/pyocd/coresight/dap.py b/pyocd/coresight/dap.py index de4fbbb4e..9e1ba6eb7 100644 --- a/pyocd/coresight/dap.py +++ b/pyocd/coresight/dap.py @@ -17,7 +17,8 @@ import logging from enum import Enum -from typing import NamedTuple +from typing import (Callable, Dict, List, NamedTuple, Optional, Sequence, Tuple, TYPE_CHECKING, Union, overload) +from typing_extensions import Literal from ..core import (exceptions, memory_interface) from ..core.target import Target @@ -27,6 +28,11 @@ from ..utility.sequencer import CallSequence from ..utility.timeout import Timeout +if TYPE_CHECKING: + from .ap import (APAddressBase, AccessPort) + from 
..core.session import Session + from ..utility.notification import Notification + LOG = logging.getLogger(__name__) TRACE = LOG.getChild("trace") @@ -117,21 +123,21 @@ class DPConnector: attempts at sending the SWJ sequence to select the wire protocol and read the DP IDR register. """ - def __init__(self, probe): + def __init__(self, probe: DebugProbe) -> None: self._probe = probe - self._session = probe.session - self._idr = None + self._idr = DPIDR(0, 0, 0, 0, 0) # Make sure we have a session, since we get the session from the probe and probes have their session set # after creation. - assert self._session is not None, "DPConnector requires the probe to have a session" + assert probe.session is not None, "DPConnector requires the probe to have a session" + self._session = probe.session @property - def idr(self): + def idr(self) -> DPIDR: """! @brief DPIDR instance containing values read from the DP IDR register.""" return self._idr - def _get_protocol(self, protocol): + def _get_protocol(self, protocol: Optional[DebugProbe.Protocol]) -> DebugProbe.Protocol: # Convert protocol from setting if not passed as parameter. if protocol is None: protocol_name = self._session.options.get('dap_protocol').strip().lower() @@ -140,7 +146,7 @@ def _get_protocol(self, protocol): raise exceptions.DebugError("requested wire protocol %s not supported by the debug probe" % protocol.name) return protocol - def connect(self, protocol=None): + def connect(self, protocol: Optional[DebugProbe.Protocol] = None) -> None: """! @brief Establish a connection to the DP. This method causes the debug probe to connect using the wire protocol. 
@@ -163,23 +169,25 @@ def connect(self, protocol=None): already_connected = current_wire_protocol is not None if already_connected: + assert current_wire_protocol self._check_protocol(current_wire_protocol, protocol) else: self._connect_probe(protocol) protocol = self._probe.wire_protocol + assert protocol self._connect_dp(protocol) finally: self._probe.unlock() - def _check_protocol(self, current_wire_protocol, protocol): + def _check_protocol(self, current_wire_protocol: DebugProbe.Protocol, protocol: DebugProbe.Protocol) -> None: # Warn about mismatched current and requested wire protocols. if (protocol is not current_wire_protocol) and (protocol is not DebugProbe.Protocol.DEFAULT): LOG.warning("Cannot use %s; already connected with %s", protocol.name, current_wire_protocol.name) else: LOG.debug("Already connected with %s", current_wire_protocol.name) - def _connect_probe(self, protocol): + def _connect_probe(self, protocol: DebugProbe.Protocol) -> None: # Debug log with the selected protocol. if protocol is not DebugProbe.Protocol.DEFAULT: LOG.debug("Using %s wire protocol", protocol.name) @@ -189,10 +197,11 @@ def _connect_probe(self, protocol): # Log the actual protocol if selected was default. if protocol is DebugProbe.Protocol.DEFAULT: - protocol = self._probe.wire_protocol - LOG.debug("Default wire protocol selected; using %s", protocol.name) + actual_protocol = self._probe.wire_protocol + assert actual_protocol + LOG.debug("Default wire protocol selected; using %s", actual_protocol.name) - def _connect_dp(self, protocol): + def _connect_dp(self, protocol: DebugProbe.Protocol) -> None: # Get SWJ settings. use_dormant = self._session.options.get('dap_swj_use_dormant') send_swj = self._session.options.get('dap_swj_enable') \ @@ -251,7 +260,7 @@ class DebugPort: ## Number of times to try to read DP registers after hw reset before attempting reconnect. 
_RESET_RECOVERY_ATTEMPTS_BEFORE_RECONNECT = 1 - def __init__(self, probe, target): + def __init__(self, probe: DebugProbe, target: Target) -> None: """! @brief Constructor. @param self The DebugPort object. @param probe The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" object. The probe is assumed to not @@ -261,70 +270,71 @@ def __init__(self, probe, target): """ self._probe = probe self.target = target + assert target.session self._session = target.session - self.valid_aps = None - self.dpidr = None - self.aps = {} - self._access_number = 0 - self._cached_dp_select = None - self._protocol = None - self._probe_managed_ap_select = False - self._probe_managed_dpbanksel = False - self._probe_supports_dpbanksel = False - self._probe_supports_apv2_addresses = False - self._have_probe_capabilities = False - self._did_check_version = False - self._log_dp_info = True + self.valid_aps: Optional[List["APAddressBase"]] = None + self.dpidr = DPIDR(0, 0, 0, 0, 0) + self.aps: Dict["APAddressBase", "AccessPort"] = {} + self._access_number: int = 0 + self._cached_dp_select: Optional[int] = None + self._protocol: Optional[DebugProbe.Protocol] = None + self._probe_managed_ap_select: bool = False + self._probe_managed_dpbanksel: bool = False + self._probe_supports_dpbanksel: bool = False + self._probe_supports_apv2_addresses: bool = False + self._have_probe_capabilities: bool = False + self._did_check_version: bool = False + self._log_dp_info: bool = True # DPv3 attributes - self._is_dpv3 = False - self._addr_size = None - self._addr_mask = None - self._errmode = None - self._base_addr = None - self._apacc_mem_interface = None + self._is_dpv3: bool = False + self._addr_size: int = -1 + self._addr_mask: int = -1 + self._errmode: int = -1 + self._base_addr: int = -1 + self._apacc_mem_interface: Optional[APAccessMemoryInterface] = None # Subscribe to reset events. 
self._session.subscribe(self._reset_did_occur, (Target.Event.PRE_RESET, Target.Event.POST_RESET)) @property - def probe(self): + def probe(self) -> DebugProbe: return self._probe @property - def session(self): + def session(self) -> "Session": return self._session @property - def adi_version(self): + def adi_version(self) -> ADIVersion: return ADIVersion.ADIv6 if self._is_dpv3 else ADIVersion.ADIv5 @property - def base_address(self): + def base_address(self) -> int: """! @brief Base address of the first component for an ADIv6 system.""" return self._base_addr @property - def apacc_memory_interface(self): + def apacc_memory_interface(self) -> "APAccessMemoryInterface": """! @brief Memory interface for performing APACC transactions.""" if self._apacc_mem_interface is None: self._apacc_mem_interface = APAccessMemoryInterface(self) return self._apacc_mem_interface @property - def next_access_number(self): + def next_access_number(self) -> int: self._access_number += 1 return self._access_number - def lock(self): + def lock(self) -> None: """! @brief Lock the DP from access by other threads.""" self.probe.lock() - def unlock(self): + def unlock(self) -> None: """! @brief Unlock the DP.""" self.probe.unlock() - def connect(self, protocol=None): + def connect(self, protocol: Optional[DebugProbe.Protocol] = None) -> None: """! @brief Connect to the target. This method causes the debug probe to connect using the selected wire protocol. The probe @@ -340,14 +350,14 @@ def connect(self, protocol=None): self._protocol = protocol self.create_connect_sequence().invoke() - def disconnect(self): + def disconnect(self) -> None: """! @brief Disconnect from target. DP debug is powered down. See power_down_debug(). """ self.power_down_debug() - def create_connect_sequence(self): + def create_connect_sequence(self) -> CallSequence: """! @brief Returns call sequence to connect to the target. 
Returns a @ref pyocd.utility.sequence.CallSequence CallSequence that will connect to the @@ -359,7 +369,7 @@ def create_connect_sequence(self): @param self @return @ref pyocd.utility.sequence.CallSequence CallSequence """ - seq = [ + seq: List[Tuple[str, Callable]] = [ ('lock_probe', self.probe.lock), ] if not self._have_probe_capabilities: @@ -380,7 +390,7 @@ def create_connect_sequence(self): ] return CallSequence(*seq) - def _get_probe_capabilities(self): + def _get_probe_capabilities(self) -> None: """! @brief Examine the probe's capabilities.""" caps = self._probe.capabilities self._probe_managed_ap_select = (DebugProbe.Capability.MANAGED_AP_SELECTION in caps) @@ -389,7 +399,7 @@ def _get_probe_capabilities(self): self._probe_supports_apv2_addresses = (DebugProbe.Capability.APv2_ADDRESSES in caps) self._have_probe_capabilities = True - def _connect(self): + def _connect(self) -> None: # Attempt to connect. connector = DPConnector(self.probe) connector.connect(self._protocol) @@ -400,7 +410,7 @@ def _connect(self): "DP IDR = 0x%08x (v%d%s rev%d)", self.dpidr.idr, self.dpidr.version, " MINDP" if self.dpidr.mindp else "", self.dpidr.revision) - def _check_version(self): + def _check_version(self) -> None: self._is_dpv3 = (self.dpidr.version == 3) if self._is_dpv3: # Check that the probe will be able to access ADIv6 APs. @@ -440,13 +450,29 @@ def flush(self): self._handle_error(error, self.next_access_number) raise - def read_reg(self, addr, now=True): + @overload + def read_reg(self, addr: int) -> int: + ... + + @overload + def read_reg(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read_reg(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_reg(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: + ... 
+ + def read_reg(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: return self.read_dp(addr, now) - def write_reg(self, addr, data): + def write_reg(self, addr: int, data: int) -> None: self.write_dp(addr, data) - def power_up_debug(self): + def power_up_debug(self) -> bool: """! @brief Assert DP power requests. Request both debug and system power be enabled, and wait until the request is acked. @@ -467,7 +493,7 @@ def power_up_debug(self): return True - def power_down_debug(self): + def power_down_debug(self) -> bool: """! @brief Deassert DP power requests. ADIv6 says that we must not clear CSYSPWRUPREQ and CDBGPWRUPREQ at the same time. @@ -501,11 +527,11 @@ def power_down_debug(self): return True - def _invalidate_cache(self): + def _invalidate_cache(self) -> None: """! @brief Invalidate cached DP registers.""" self._cached_dp_select = None - def _reset_did_occur(self, notification): + def _reset_did_occur(self, notification: "Notification") -> None: """! @brief Handles reset notifications to invalidate register cache. The cache is cleared on all resets just to be safe. On most devices, warm resets do not reset @@ -513,7 +539,7 @@ def _reset_did_occur(self, notification): """ self._invalidate_cache() - def post_reset_recovery(self): + def post_reset_recovery(self) -> None: """! @brief Wait for the target to recover from reset, with auto-reconnect if needed.""" # Check if we can access DP registers. If this times out, then reconnect the DP and retry. with Timeout(self.session.options.get('reset.dap_recover.timeout'), @@ -547,7 +573,7 @@ def post_reset_recovery(self): else: LOG.error("DAP is not accessible after reset followed by attempted reconnect") - def reset(self, *, send_notifications=True): + def reset(self, *, send_notifications: bool = True) -> None: """! @brief Hardware reset. Pre- and post-reset notifications are sent. 
@@ -570,7 +596,7 @@ def reset(self, *, send_notifications=True): if send_notifications: self.session.notify(Target.Event.POST_RESET, self) - def assert_reset(self, asserted, *, send_notifications=True): + def assert_reset(self, asserted: bool, *, send_notifications: bool = True) -> None: """! @brief Assert or deassert the hardware reset signal. A pre-reset notification is sent before asserting reset, whereas a post-reset notification is sent @@ -594,7 +620,7 @@ def assert_reset(self, asserted, *, send_notifications=True): if send_notifications and not asserted and is_asserted: self.session.notify(Target.Event.POST_RESET, self) - def is_reset_asserted(self): + def is_reset_asserted(self) -> bool: """! @brief Returns the current state of the nRESET signal. This method can be called before the DebugPort is initalized. @@ -604,14 +630,14 @@ def is_reset_asserted(self): """ return self.probe.is_reset_asserted() - def set_clock(self, frequency): + def set_clock(self, frequency: float) -> None: """! @brief Change the wire protocol's clock frequency. @param self This object. @param frequency New wire protocol frequency in Hertz. """ self.probe.set_clock(frequency) - def _write_dp_select(self, mask, value): + def _write_dp_select(self, mask: int, value: int) -> None: """! @brief Modify part of the DP SELECT register and write if cache is stale. The DP lock must already be acquired before calling this method. @@ -628,7 +654,7 @@ def _write_dp_select(self, mask, value): self.write_dp(DP_SELECT, select) self._cached_dp_select = select - def _set_dpbanksel(self, addr, is_write): + def _set_dpbanksel(self, addr: int, is_write: bool) -> bool: """! @brief Updates the DPBANKSEL field of the SELECT register as required. Several DP registers (most, actually) ignore DPBANKSEL. If one of those is being @@ -671,7 +697,23 @@ def _set_dpbanksel(self, addr, is_write): else: return False - def read_dp(self, addr, now=True): + @overload + def read_dp(self, addr: int) -> int: + ... 
+ + @overload + def read_dp(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read_dp(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_dp(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: + ... + + def read_dp(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: if (addr & DPADDR_MASK) % 4 != 0: raise ValueError("DP address must be word aligned") num = self.next_access_number @@ -692,7 +734,7 @@ def read_dp(self, addr, now=True): raise # Read callback returned for async reads. - def read_dp_cb(): + def read_dp_cb() -> int: try: result = result_cb() TRACE.debug("read_dp:%06d %s(addr=0x%08x) -> 0x%08x", num, "" if now else "...", addr, result) @@ -711,7 +753,7 @@ def read_dp_cb(): TRACE.debug("read_dp:%06d (addr=0x%08x) -> ...", num, addr) return read_dp_cb - def write_dp(self, addr, data): + def write_dp(self, addr: int, data: int) -> None: if (addr & DPADDR_MASK) % 4 != 0: raise ValueError("DP address must be word aligned") num = self.next_access_number @@ -730,9 +772,7 @@ def write_dp(self, addr, data): if did_lock: self.unlock() - return True - - def _select_ap(self, addr): + def _select_ap(self, addr: int) -> bool: """! @brief Write DP_SELECT to choose the given AP. Handles the case where the debug probe manages selecting an AP itself, in which case we @@ -755,7 +795,7 @@ def _select_ap(self, addr): assert False, "invalid ADI version" return True - def write_ap(self, addr, data): + def write_ap(self, addr: int, data: int) -> None: assert isinstance(addr, int) num = self.next_access_number did_lock = False @@ -771,9 +811,23 @@ def write_ap(self, addr, data): if did_lock: self.unlock() - return True + @overload + def read_ap(self, addr: int) -> int: + ... + + @overload + def read_ap(self, addr: int, now: Literal[True] = True) -> int: + ... + + @overload + def read_ap(self, addr: int, now: Literal[False]) -> Callable[[], int]: + ... 
- def read_ap(self, addr, now=True): + @overload + def read_ap(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: + ... + + def read_ap(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: assert isinstance(addr, int) num = self.next_access_number did_lock = False @@ -792,7 +846,7 @@ def read_ap(self, addr, now=True): raise # Read callback returned for async reads. - def read_ap_cb(): + def read_ap_cb() -> int: try: result = result_cb() TRACE.debug("read_ap:%06d %s(addr=0x%08x) -> 0x%08x", num, "" if now else "...", addr, result) @@ -811,7 +865,7 @@ def read_ap_cb(): TRACE.debug("read_ap:%06d (addr=0x%08x) -> ...", num, addr) return read_ap_cb - def write_ap_multiple(self, addr, values): + def write_ap_multiple(self, addr: int, values: Sequence[int]) -> None: assert isinstance(addr, int) num = self.next_access_number did_lock = False @@ -827,7 +881,24 @@ def write_ap_multiple(self, addr, values): if did_lock: self.unlock() - def read_ap_multiple(self, addr, count=1, now=True): + @overload + def read_ap_multiple(self, addr: int, count: int = 1) -> Sequence[int]: + ... + + @overload + def read_ap_multiple(self, addr: int, count: int, now: Literal[True] = True) -> Sequence[int]: + ... + + @overload + def read_ap_multiple(self, addr: int, count: int, now: Literal[False]) -> Callable[[], Sequence[int]]: + ... + + @overload + def read_ap_multiple(self, addr: int, count: int, now: bool) -> Union[Sequence[int], Callable[[], Sequence[int]]]: + ... + + def read_ap_multiple(self, addr: int, count: int = 1, now: bool = True) \ + -> Union[Sequence[int], Callable[[], Sequence[int]]]: assert isinstance(addr, int) num = self.next_access_number did_lock = False @@ -845,7 +916,7 @@ def read_ap_multiple(self, addr, count=1, now=True): raise # Need to wrap the deferred callback to convert exceptions. 
- def read_ap_multiple_cb(): + def read_ap_multiple_cb() -> Sequence[int]: try: return result_cb() except exceptions.TargetError as error: @@ -861,7 +932,7 @@ def read_ap_multiple_cb(): else: return read_ap_multiple_cb - def _handle_error(self, error, num): + def _handle_error(self, error: Exception, num: int) -> None: TRACE.debug("error:%06d %s", num, error) # Clear sticky error for fault errors. if isinstance(error, exceptions.TransferFaultError): @@ -872,7 +943,7 @@ def _handle_error(self, error, num): # attempting to reset debug logic. self.write_reg(DP_ABORT, ABORT_DAPABORT) - def clear_sticky_err(self): + def clear_sticky_err(self) -> None: self._invalidate_cache() mode = self.probe.wire_protocol if mode == DebugProbe.Protocol.SWD: @@ -896,7 +967,7 @@ class APAccessMemoryInterface(memory_interface.MemoryInterface): Only 32-bit transfers are supported. """ - def __init__(self, dp, ap_address=None): + def __init__(self, dp: DebugPort, ap_address: Optional["APAddressBase"] = None) -> None: """! @brief Constructor. @param self @@ -912,17 +983,17 @@ def __init__(self, dp, ap_address=None): self._offset = 0 @property - def dp(self): + def dp(self) -> DebugPort: return self._dp @property - def short_description(self): + def short_description(self) -> str: if self._ap_address is None: return "Root Component" else: return "Root Component ({})".format(self._ap_address) - def write_memory(self, addr, data, transfer_size=32): + def write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None: """! @brief Write a single memory location. By default the transfer size is a word.""" @@ -931,7 +1002,23 @@ def write_memory(self, addr, data, transfer_size=32): return self._dp.write_ap(self._offset + addr, data) - def read_memory(self, addr, transfer_size=32, now=True): + @overload + def read_memory(self, addr: int, transfer_size: int) -> int: + ... + + @overload + def read_memory(self, addr: int, transfer_size: int, now: Literal[True] = True) -> int: + ... 
+ + @overload + def read_memory(self, addr: int, transfer_size: int, now: Literal[False]) -> Callable[[], int]: + ... + + @overload + def read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Callable[[], int]]: + ... + + def read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]: """! @brief Read a memory location. By default, a word will be read.""" @@ -940,14 +1027,14 @@ def read_memory(self, addr, transfer_size=32, now=True): return self._dp.read_ap(self._offset + addr, now) - def write_memory_block32(self, addr, data): + def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: """! @brief Write an aligned block of 32-bit words.""" addr += self._offset for word in data: - self._dp.write_ap(addr, data) + self._dp.write_ap(addr, word) addr += 4 - def read_memory_block32(self, addr, size): + def read_memory_block32(self, addr: int, size) -> Sequence[int]: """! @brief Read an aligned block of 32-bit words.""" addr += self._offset result_cbs = [self._dp.read_ap(addr + i * 4, now=False) for i in range(size)] From fe20832fb4e4b10ba5df38d25c57df7c59064a90 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 13 Jul 2021 15:54:02 -0500 Subject: [PATCH 071/123] types: annotate CMSIS pack classes; fix a few type errors. --- pyocd/target/family/__init__.py | 9 +- pyocd/target/pack/cmsis_pack.py | 161 ++++++++++++++++++++----------- pyocd/target/pack/pack_target.py | 59 +++++++---- 3 files changed, 151 insertions(+), 78 deletions(-) diff --git a/pyocd/target/family/__init__.py b/pyocd/target/family/__init__.py index 8faf8d7af..2e5a747e1 100644 --- a/pyocd/target/family/__init__.py +++ b/pyocd/target/family/__init__.py @@ -16,15 +16,18 @@ # limitations under the License. import re -from collections import namedtuple +from typing import (Any, NamedTuple, Pattern) from . import target_imxrt from . import target_kinetis from . import target_lpc5500 from . 
import target_nRF52 -## @brief Container for family matching information. -FamilyInfo = namedtuple("FamilyInfo", "vendor matches klass") +class FamilyInfo(NamedTuple): + """@brief Container for family matching information.""" + vendor: str + matches: Pattern[str] + klass: Any ## @brief Lookup table to convert from CMSIS-Pack family names to a family class. # diff --git a/pyocd/target/pack/cmsis_pack.py b/pyocd/target/pack/cmsis_pack.py index e5ddd6f50..115a874cb 100644 --- a/pyocd/target/pack/cmsis_pack.py +++ b/pyocd/target/pack/cmsis_pack.py @@ -22,7 +22,7 @@ import zipfile import logging import io -from typing import (Any, Dict, Iterator, Optional) +from typing import (Any, Callable, Dict, List, IO, Iterator, Optional, Tuple, TypeVar, Union) from .flash_algo import PackFlashAlgo from ...core import exceptions @@ -35,14 +35,14 @@ class MalformedCmsisPackError(exceptions.TargetSupportError): """! @brief Exception raised for errors parsing a CMSIS-Pack.""" pass -class _DeviceInfo(object): +class _DeviceInfo: """! @brief Simple container class to hold XML elements describing a device.""" - def __init__(self, **kwargs): - self.element = kwargs.get('element', None) - self.families = kwargs.get('families', []) - self.memories = kwargs.get('memories', []) - self.algos = kwargs.get('algos', []) - self.debugs = kwargs.get('debugs', []) + def __init__(self, element: Element, **kwargs): + self.element: Element = element + self.families: List[str] = kwargs.get('families', []) + self.memories: List[Element] = kwargs.get('memories', []) + self.algos: List[Element] = kwargs.get('algos', []) + self.debugs: List[Element] = kwargs.get('debugs', []) def _get_part_number_from_element(element: Element) -> str: """! @brief Extract the part number from a device or variant XML element.""" @@ -54,7 +54,7 @@ def _get_part_number_from_element(element: Element) -> str: else: raise ValueError("element is neither device nor variant") -class CmsisPack(object): +class CmsisPack: """! 
@brief Wraps a CMSIS Device Family Pack. This class provides a top-level interface for extracting device information from CMSIS-Packs. @@ -71,7 +71,7 @@ class CmsisPack(object): defined device and passes those to CmsisPackDevice. It is then CmsisPackDevice that performs the parsing of each element type into pyOCD-compatible data. """ - def __init__(self, file_or_path): + def __init__(self, file_or_path: Union[str, zipfile.ZipFile, IO[bytes]]) -> None: """! @brief Constructor. Opens the CMSIS-Pack and builds instances of CmsisPackDevice for all the devices @@ -104,21 +104,21 @@ def __init__(self, file_or_path): self._pdsc = CmsisPackDescription(self, pdscFile) @property - def filename(self): + def filename(self) -> Optional[str]: """! @brief Accessor for the filename or path of the .pack file.""" return self._pack_file.filename @property - def pdsc(self): + def pdsc(self) -> "CmsisPackDescription": """! @brief Accessor for the CmsisPackDescription instance for the pack's PDSC file.""" return self._pdsc @property - def devices(self): + def devices(self) -> List["CmsisPackDevice"]: """! @brief A list of CmsisPackDevice objects for every part number defined in the pack.""" return self._pdsc.devices - def get_file(self, filename): + def get_file(self, filename) -> IO[bytes]: """! @brief Return file-like object for a file within the pack. @param self @@ -130,8 +130,11 @@ def get_file(self, filename): filename = filename.replace('\\', '/') return io.BytesIO(self._pack_file.read(filename)) -class CmsisPackDescription(object): - def __init__(self, pack, pdsc_file): +class CmsisPackDescription: + """@brief Parser for the PDSC XML file describing a CMSIS-Pack. + """ + + def __init__(self, pack: CmsisPack, pdsc_file: IO) -> None: """! @brief Constructor. @param self This object. @@ -143,8 +146,8 @@ def __init__(self, pack, pdsc_file): # Convert PDSC into an ElementTree. 
self._pdsc = ElementTree(file=pdsc_file) - self._state_stack = [] - self._devices = [] + self._state_stack: List[_DeviceInfo] = [] + self._devices: List["CmsisPackDevice"] = [] # Remember if we have already warned about overlapping memory regions # so we can limit these to one warning per DFP @@ -155,19 +158,19 @@ def __init__(self, pack, pdsc_file): self._parse_devices(family) @property - def pack(self): + def pack(self) -> CmsisPack: """! @brief Reference to the containing CmsisPack object.""" return self._pack @property - def devices(self): + def devices(self) -> List["CmsisPackDevice"]: """! @brief A list of CmsisPackDevice objects for every part number defined in the pack.""" return self._devices - def _parse_devices(self, parent): + def _parse_devices(self, parent: Element) -> None: # Extract device description elements we care about. newState = _DeviceInfo(element=parent) - children = [] + children: List[Element] = [] for elem in parent: if elem.tag == 'memory': newState.memories.append(elem) @@ -201,7 +204,8 @@ def _parse_devices(self, parent): self._state_stack.pop() - def _extract_families(self): + def _extract_families(self) -> List[str]: + """! @brief Generate list of family names for a device.""" families = [] for state in self._state_stack: elem = state.element @@ -211,7 +215,24 @@ def _extract_families(self): families += [elem.attrib['DsubFamily']] return families - def _extract_items(self, state_info_name, filter): + ## Typevar used for _extract_items(). + V = TypeVar('V') + + def _extract_items(self, state_info_name: str, filter: Callable[[Dict[Any, V], Element], None]) -> List[V]: + """! @brief Generic extractor utility. + + Iterates over saved elements for the specified device state info for each level of the + device state stack, from outer to inner, calling the provided filter callback each + iteration. A dictionary object is created and repeatedly passed to the filter callback, so + state can be stored across calls to the filter. 
+ + The general idea is that the filter callback extracts some identifying information from the + element it is given and uses that as a key in the dictionary. When the filter is called for + more deeply nested elements, those elements will override the any previously examined + elements with the same identifier. + + @return All values from the dictionary. + """ map = {} for state in self._state_stack: for elem in getattr(state, state_info_name): @@ -221,8 +242,16 @@ def _extract_items(self, state_info_name, filter): LOG.debug("error parsing CMSIS-Pack: " + str(err)) return list(map.values()) - def _extract_memories(self): - def get_start_and_size(elem): + def _extract_memories(self) -> List[Element]: + """! @brief Extract memory elements. + + The unique identifier is a bi-tuple of the memory's name, which is either the 'name' or 'id' attribute, + in that order, plus the pname. If neither attribute exists, the region base and size are turned into + a string. + + In addition to the name based filtering, memory regions are checked to prevent overlaps. + """ + def get_start_and_size(elem: Element) -> Tuple[int, int]: try: start = int(elem.attrib['start'], base=0) size = int(elem.attrib['size'], base=0) @@ -230,7 +259,8 @@ def get_start_and_size(elem): LOG.warning("memory region missing address") raise return (start, size) - def filter(map, elem): + + def filter(map: Dict, elem: Element) -> None: # Inner memory regions are allowed to override outer memory # regions. If this is not done properly via name/id, we must make # sure not to report overlapping memory regions to gdb since it @@ -276,12 +306,19 @@ def filter(map, elem): return self._extract_items('memories', filter) - def _extract_algos(self): - def filter(map, elem): + def _extract_algos(self) -> List[Element]: + """! @brief Extract algorithm elements. + + The unique identifier is the algorithm's memory address range. 
+ + Any algorithm elements with a 'style' attribuet not set to 'Keil' (case-insensitive) are + skipped. + """ + def filter(map: Dict, elem: Element) -> None: # We only support Keil FLM style flash algorithms (for now). - if ('style' in elem.attrib) and (elem.attrib['style'] != 'Keil'): + if ('style' in elem.attrib) and (elem.attrib['style'].lower() != 'keil'): LOG.debug("skipping non-Keil flash algorithm") - return None, None + return # Both start and size are required. start = int(elem.attrib['start'], base=0) @@ -293,8 +330,17 @@ def filter(map, elem): return self._extract_items('algos', filter) - def _extract_debugs(self): - def filter(map, elem): + def _extract_debugs(self) -> List[Element]: + """! @brief Extract debug elements. + + If the debug element does not have a 'Pname' element, its identifier is set to "*" to + represent that it applies to all processors. + + Otherwise, the identifier is the element's 'Pname' attribute combined with 'Punit' if + present. When 'Pname' is detected and a "*" key is in the map, the map is cleared before + adding the current element. + """ + def filter(map: Dict, elem: Element) -> None: if 'Pname' in elem.attrib: name = elem.attrib['Pname'] unit = elem.attrib.get('Punit', 0) @@ -311,7 +357,7 @@ def filter(map, elem): return self._extract_items('debugs', filter) -def _get_bool_attribute(elem, name, default=False): +def _get_bool_attribute(elem: Element, name: str, default: bool = False) -> bool: """! @brief Extract an XML attribute with a boolean value. Supports "true"/"false" or "1"/"0" as the attribute values. Leading and trailing whitespace @@ -333,7 +379,7 @@ def _get_bool_attribute(elem, name, default=False): else: return default -class CmsisPackDevice(object): +class CmsisPackDevice: """! @brief Wraps a device defined in a CMSIS Device Family Pack. Responsible for converting the XML elements that describe the device into objects @@ -343,21 +389,21 @@ class CmsisPackDevice(object): the PDSC. 
""" - def __init__(self, pack, device_info): + def __init__(self, pack: CmsisPack, device_info: _DeviceInfo): """! @brief Constructor. @param self @param pack The CmsisPack object that contains this device. @param device_info A _DeviceInfo object with the XML elements that describe this device. """ - self._pack = pack - self._info = device_info - self._part = _get_part_number_from_element(device_info.element) - self._regions = [] - self._saw_startup = False - self._default_ram = None - self._memory_map = None - - def _build_memory_regions(self): + self._pack: CmsisPack = pack + self._info: _DeviceInfo = device_info + self._part: str = _get_part_number_from_element(device_info.element) + self._regions: List[MemoryRegion] = [] + self._saw_startup: bool = False + self._default_ram: Optional[MemoryRegion] = None + self._memory_map: Optional[MemoryMap] = None + + def _build_memory_regions(self) -> None: """! @brief Creates memory region instances for the device. For each `` element in the device info, a memory region object is created and @@ -428,7 +474,7 @@ def _get_containing_region(self, addr: int) -> Optional[MemoryRegion]: return region return None - def _build_flash_regions(self): + def _build_flash_regions(self) -> None: """! @brief Converts ROM memory regions to flash regions. Each ROM region in the `_regions` attribute is converted to a flash region if a matching @@ -454,8 +500,9 @@ def _build_flash_regions(self): continue # Look for matching flash algo. - algo_element = self._find_matching_algo(region) - if algo_element is None: + try: + algo_element = self._find_matching_algo(region) + except KeyError: # Must be a mask ROM or non-programmable flash. continue @@ -577,7 +624,7 @@ def _split_flash_region_by_sector_size(self, is_testable=region.is_testable, alias=region.alias) - def _find_matching_algo(self, region): + def _find_matching_algo(self, region: MemoryRegion) -> Element: """! 
@brief Searches for a flash algo covering the regions's address range.'""" for algo in self._info.algos: # Both start and size are required attributes. @@ -588,7 +635,7 @@ def _find_matching_algo(self, region): # Check if the region indicated by start..size fits within the algo. if (algoStart <= region.start <= algoEnd) and (algoStart <= region.end <= algoEnd): return algo - return None + raise KeyError("no matching flash algorithm") def _load_flash_algo(self, filename: str) -> Optional[PackFlashAlgo]: """! @brief Return the PackFlashAlgo instance for the given flash algo filename.""" @@ -602,12 +649,12 @@ def _load_flash_algo(self, filename: str) -> Optional[PackFlashAlgo]: return None @property - def pack(self): + def pack(self) -> CmsisPack: """! @brief The CmsisPack object that defines this device.""" return self._pack @property - def part_number(self): + def part_number(self) -> str: """! @brief Part number for this device. This value comes from either the `Dname` or `Dvariant` attribute, depending on whether the @@ -616,17 +663,17 @@ def part_number(self): return self._part @property - def vendor(self): + def vendor(self) -> str: """! @brief Vendor or manufacturer name.""" return self._info.families[0].split(':')[0] @property - def families(self): + def families(self) -> List[str]: """! @brief List of families the device belongs to, ordered most generic to least.""" return [f for f in self._info.families[1:]] @property - def memory_map(self): + def memory_map(self) -> MemoryMap: """! @brief MemoryMap object.""" # Lazily construct the memory map. if self._memory_map is None: @@ -642,7 +689,7 @@ def memory_map(self): return self._memory_map @property - def svd(self): + def svd(self) -> Optional[IO[bytes]]: """! @brief File-like object for the device's SVD file. @todo Support multiple cores. """ @@ -653,7 +700,7 @@ def svd(self): return None @property - def default_reset_type(self): + def default_reset_type(self) -> Target.ResetType: """! 
@brief One of the Target.ResetType enums. @todo Support multiple cores. """ diff --git a/pyocd/target/pack/pack_target.py b/pyocd/target/pack/pack_target.py index c1ac9a4f0..3bb6cd232 100644 --- a/pyocd/target/pack/pack_target.py +++ b/pyocd/target/pack/pack_target.py @@ -17,13 +17,20 @@ import logging import os +from typing import (IO, TYPE_CHECKING, List, Optional, Tuple, Type, Union) -from .cmsis_pack import (CmsisPack, MalformedCmsisPackError) +from .cmsis_pack import (CmsisPack, CmsisPackDevice, MalformedCmsisPackError) from ..family import FAMILIES from .. import TARGET from ...coresight.coresight_target import CoreSightTarget from ...debug.svd.loader import SVDFile +if TYPE_CHECKING: + from zipfile import ZipFile + from cmsis_pack_manager import CmsisPackRef + from ...core.session import Session + from ...utility.sequencer import CallSequence + try: import cmsis_pack_manager CPM_AVAILABLE = True @@ -32,7 +39,20 @@ LOG = logging.getLogger(__name__) -class ManagedPacks(object): +class ManagedPacksStub: + @staticmethod + def get_installed_packs(cache: Optional[object] = None) -> List: + return [] + + @staticmethod + def get_installed_targets(cache: Optional[object] = None) -> List: + return [] + + @staticmethod + def populate_target(device_name: str) -> None: + pass + +class ManagedPacksImpl: """! @brief Namespace for managed CMSIS-Pack utilities. By managed, we mean managed by the cmsis-pack-manager package. All the methods on this class @@ -41,10 +61,8 @@ class ManagedPacks(object): """ @staticmethod - def get_installed_packs(cache=None): + def get_installed_packs(cache: Optional[cmsis_pack_manager.Cache] = None) -> List["CmsisPackRef"]: # type:ignore """! 
@brief Return a list containing CmsisPackRef objects for all installed packs.""" - if not CPM_AVAILABLE: - return [] if cache is None: cache = cmsis_pack_manager.Cache(True, True) results = [] @@ -59,10 +77,8 @@ def get_installed_packs(cache=None): return results @staticmethod - def get_installed_targets(cache=None): + def get_installed_targets(cache: Optional[cmsis_pack_manager.Cache] = None) -> List[CmsisPackDevice]: # type:ignore """! @brief Return a list of CmsisPackDevice objects for installed pack targets.""" - if not CPM_AVAILABLE: - return [] if cache is None: cache = cmsis_pack_manager.Cache(True, True) results = [] @@ -73,7 +89,7 @@ def get_installed_targets(cache=None): return sorted(results, key=lambda dev:dev.part_number) @staticmethod - def populate_target(device_name): + def populate_target(device_name: str) -> None: """! @brief Add targets from cmsis-pack-manager matching the given name. Targets are added to the `#TARGET` list. A case-insensitive comparison against the @@ -86,11 +102,16 @@ def populate_target(device_name): if device_name == dev.part_number.lower(): PackTargets.populate_device(dev) -class _PackTargetMethods(object): +if CPM_AVAILABLE: + ManagedPacks = ManagedPacksImpl +else: + ManagedPacks = ManagedPacksStub + +class _PackTargetMethods: """! @brief Container for methods added to the dynamically generated pack target subclass.""" @staticmethod - def _pack_target__init__(self, session): + def _pack_target__init__(self, session: "Session") -> None: # type:ignore """! @brief Constructor for dynamically created target class.""" super(self.__class__, self).__init__(session, self._pack_device.memory_map) @@ -101,7 +122,7 @@ def _pack_target__init__(self, session): self._svd_location = SVDFile(filename=self._pack_device.svd) @staticmethod - def _pack_target_create_init_sequence(self): + def _pack_target_create_init_sequence(self) -> "CallSequence": # type:ignore """! 
@brief Creates an init task to set the default reset type.""" seq = super(self.__class__, self).create_init_sequence() seq.wrap_task('discovery', @@ -112,16 +133,16 @@ def _pack_target_create_init_sequence(self): return seq @staticmethod - def _pack_target_set_default_reset_type(self): + def _pack_target_set_default_reset_type(self) -> None: # type:ignore """! @brief Set's the first core's default reset type to the one specified in the pack.""" if 0 in self.cores: self.cores[0].default_reset_type = self._pack_device.default_reset_type -class PackTargets(object): +class PackTargets: """! @brief Namespace for CMSIS-Pack target generation utilities. """ @staticmethod - def _find_family_class(dev): + def _find_family_class(dev: CmsisPackDevice) -> Type[CoreSightTarget]: """! @brief Search the families list for matching entry.""" for familyInfo in FAMILIES: # Skip if wrong vendor. @@ -139,7 +160,7 @@ def _find_family_class(dev): return CoreSightTarget @staticmethod - def _generate_pack_target_class(dev): + def _generate_pack_target_class(dev: CmsisPackDevice) -> Optional[type]: """! @brief Generates a new target class from a CmsisPackDevice. @param dev A CmsisPackDevice object. @@ -165,7 +186,7 @@ def _generate_pack_target_class(dev): return None @staticmethod - def populate_device(dev): + def populate_device(dev: CmsisPackDevice) -> None: """! @brief Generates and populates the target defined by a CmsisPackDevice. The new target class is added to the `#TARGET` list. @@ -186,8 +207,10 @@ def populate_device(dev): except (MalformedCmsisPackError, FileNotFoundError) as err: LOG.warning(err) + PackReferenceType = Union[CmsisPack, str, "ZipFile", IO[bytes]] + @staticmethod - def populate_targets_from_pack(pack_list): + def populate_targets_from_pack(pack_list: Union[PackReferenceType, List[PackReferenceType], Tuple[PackReferenceType]]) -> None: """! @brief Adds targets defined in the provided CMSIS-Pack. Targets are added to the `#TARGET` list. 
From 20559b1e3501527a843a5960655c384b883dbe42 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 23 Nov 2021 13:31:56 -0600 Subject: [PATCH 072/123] types: annotate breakpoint classes. --- pyocd/coresight/fpb.py | 55 +++++++++++++++++----------- pyocd/debug/breakpoints/manager.py | 56 ++++++++++++++++------------- pyocd/debug/breakpoints/provider.py | 45 +++++++++++++---------- pyocd/debug/breakpoints/software.py | 36 ++++++++++++------- 4 files changed, 115 insertions(+), 77 deletions(-) diff --git a/pyocd/coresight/fpb.py b/pyocd/coresight/fpb.py index c83a27555..091f370c6 100644 --- a/pyocd/coresight/fpb.py +++ b/pyocd/coresight/fpb.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2015-2019 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,15 +15,21 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging +from typing import (List, Optional, TYPE_CHECKING) + from ..core.target import Target from .component import CoreSightComponent from ..debug.breakpoints.provider import (Breakpoint, BreakpointProvider) -import logging + +if TYPE_CHECKING: + from ..core.memory_interface import MemoryInterface + from .rom_table import CoreSightComponentID LOG = logging.getLogger(__name__) class HardwareBreakpoint(Breakpoint): - def __init__(self, comp_register_addr, provider): + def __init__(self, comp_register_addr: int, provider: BreakpointProvider) -> None: super(HardwareBreakpoint, self).__init__(provider) self.comp_register_addr = comp_register_addr self.type = Target.BreakpointType.HW @@ -39,21 +46,25 @@ class FPB(BreakpointProvider, CoreSightComponent): FP_CTRL_REV_SHIFT = 28 FP_COMP0 = 0x00000008 - def __init__(self, ap, cmpid=None, addr=None): + def __init__(self, + ap: "MemoryInterface", + cmpid: Optional["CoreSightComponentID"] = None, + addr: Optional[int] = None + ) -> None: 
CoreSightComponent.__init__(self, ap, cmpid, addr) BreakpointProvider.__init__(self) - self.hw_breakpoints = [] - self.nb_code = 0 - self.nb_lit = 0 - self.num_hw_breakpoint_used = 0 - self.enabled = False - self.fpb_rev = 1 + self.hw_breakpoints: List[HardwareBreakpoint] = [] + self.nb_code: int = 0 + self.nb_lit: int = 0 + self.num_hw_breakpoint_used: int = 0 + self.enabled: bool = False + self.fpb_rev: int = 1 @property - def revision(self): + def revision(self) -> int: return self.fpb_rev - def init(self): + def init(self) -> None: """! @brief Inits the FPB. Reads the number of hardware breakpoints available on the core and disable the FPB @@ -76,26 +87,24 @@ def init(self): self.ap.write_memory(bp.comp_register_addr, 0) @property - def bp_type(self): + def bp_type(self) -> Target.BreakpointType: return Target.BreakpointType.HW - def enable(self): + def enable(self) -> None: self.ap.write_memory(self.address + FPB.FP_CTRL, FPB.FP_CTRL_KEY | 1) self.enabled = True LOG.debug('fpb has been enabled') - return - def disable(self): + def disable(self) -> None: self.ap.write_memory(self.address + FPB.FP_CTRL, FPB.FP_CTRL_KEY | 0) self.enabled = False LOG.debug('fpb has been disabled') - return @property - def available_breakpoints(self): + def available_breakpoints(self) -> int: return len(self.hw_breakpoints) - self.num_hw_breakpoint_used - def can_support_address(self, addr): + def can_support_address(self, addr: int) -> bool: """! @brief Test whether an address is supported by the FPB. For FPBv1, hardware breakpoints are only supported in the range 0x00000000 - 0x1fffffff. @@ -103,7 +112,13 @@ def can_support_address(self, addr): """ return (self.fpb_rev == 2) or (addr < 0x20000000) - def set_breakpoint(self, addr): + def find_breakpoint(self, addr: int) -> Optional[Breakpoint]: + for hwbp in self.hw_breakpoints: + if hwbp.enabled and hwbp.addr == addr: + return hwbp + return None + + def set_breakpoint(self, addr: int) -> Optional[Breakpoint]: """! 
@brief Set a hardware breakpoint at a specific location in flash.""" if not self.enabled: self.enable() @@ -134,7 +149,7 @@ def set_breakpoint(self, addr): return bp return None - def remove_breakpoint(self, bp): + def remove_breakpoint(self, bp: Breakpoint) -> None: """! @brief Remove a hardware breakpoint at a specific location in flash.""" for hwbp in self.hw_breakpoints: if hwbp.enabled and hwbp.addr == bp.addr: diff --git a/pyocd/debug/breakpoints/manager.py b/pyocd/debug/breakpoints/manager.py index 20cbc9b91..1881d9320 100644 --- a/pyocd/debug/breakpoints/manager.py +++ b/pyocd/debug/breakpoints/manager.py @@ -17,18 +17,24 @@ import logging from copy import copy +from typing import (Dict, List, TYPE_CHECKING, Iterable, MutableSequence, Optional, Sequence, Tuple) from .provider import Breakpoint from ...core.target import Target +if TYPE_CHECKING: + from .provider import BreakpointProvider + from ...core.core_target import CoreTarget + from ...utility.notification import Notification + LOG = logging.getLogger(__name__) class UnrealizedBreakpoint(Breakpoint): - """! @brief Breakpoint class used until a breakpoint's type is decided.""" + """@brief Breakpoint class used until a breakpoint's type is decided.""" pass -class BreakpointManager(object): - """! @brief Manages all breakpoints for one core. +class BreakpointManager: + """@brief Manages all breakpoints for one core. The most important function of the breakpoint manager is to decide which breakpoint provider to use when a breakpoint is added. The caller can request a particular breakpoint type, but @@ -45,29 +51,29 @@ class BreakpointManager(object): ## Number of hardware breakpoints to try to keep available. 
MIN_HW_BREAKPOINTS = 0 - def __init__(self, core): - self._breakpoints = {} - self._updated_breakpoints = {} + def __init__(self, core: "CoreTarget") -> None: + self._breakpoints: Dict[int, Breakpoint] = {} + self._updated_breakpoints: Dict[int, Breakpoint] = {} self._session = core.session self._core = core - self._fpb = None - self._providers = {} - self._ignore_notifications = False + self._fpb: Optional["BreakpointProvider"] = None + self._providers: Dict[Target.BreakpointType, "BreakpointProvider"] = {} + self._ignore_notifications: bool = False # Subscribe to some notifications. self._session.subscribe(self._pre_run_handler, Target.Event.PRE_RUN) self._session.subscribe(self._pre_disconnect_handler, Target.Event.PRE_DISCONNECT) - def add_provider(self, provider): + def add_provider(self, provider: "BreakpointProvider") -> None: self._providers[provider.bp_type] = provider if provider.bp_type == Target.BreakpointType.HW: self._fpb = provider - def get_breakpoints(self): + def get_breakpoints(self) -> Iterable[int]: """! @brief Return a list of all breakpoint addresses.""" return self._breakpoints.keys() - def find_breakpoint(self, addr): + def find_breakpoint(self, addr: int) -> Optional[Breakpoint]: return self._updated_breakpoints.get(addr, None) def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): @@ -103,7 +109,7 @@ def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): self._updated_breakpoints[addr] = bp return True - def _check_added_breakpoint(self, bp): + def _check_added_breakpoint(self, bp: Breakpoint) -> bool: """! @brief Check whether a new breakpoint is likely to actually be added when we flush. First, software breakpoints are assumed to always be addable. For hardware breakpoints, @@ -133,7 +139,7 @@ def _check_added_breakpoint(self, bp): return free_hw_bp_count > self.MIN_HW_BREAKPOINTS - def remove_breakpoint(self, addr): + def remove_breakpoint(self, addr: int) -> None: """! 
@brief Remove a breakpoint at a specific location.""" try: LOG.debug("remove bkpt at 0x%x", addr) @@ -146,7 +152,7 @@ def remove_breakpoint(self, addr): except KeyError: LOG.debug("Tried to remove breakpoint 0x%08x that wasn't set" % addr) - def _get_updated_breakpoints(self): + def _get_updated_breakpoints(self) -> Tuple[List[Breakpoint], List[Breakpoint]]: """! @brief Compute added and removed breakpoints since last flush. @return Bi-tuple of (added breakpoint list, removed breakpoint list). """ @@ -166,7 +172,7 @@ def _get_updated_breakpoints(self): # Return the list of pages to update. return added, removed - def _select_breakpoint_type(self, bp, allow_all_hw_bps): + def _select_breakpoint_type(self, bp: Breakpoint, allow_all_hw_bps: bool) -> Optional[Target.BreakpointType]: type = bp.type # Look up the memory type for the requested address. @@ -223,7 +229,7 @@ def _select_breakpoint_type(self, bp, allow_all_hw_bps): LOG.debug("selected bkpt type %s for addr 0x%x", type.name, bp.addr) return type - def flush(self, is_step=False): + def flush(self, is_step: bool = False) -> None: try: # Ignore any notifications while we modify breakpoints. 
self._ignore_notifications = True @@ -267,45 +273,45 @@ def flush(self, is_step=False): finally: self._ignore_notifications = False - def get_breakpoint_type(self, addr): + def get_breakpoint_type(self, addr: int) -> Optional[Target.BreakpointType]: bp = self.find_breakpoint(addr) return bp.type if (bp is not None) else None - def filter_memory(self, addr, size, data): + def filter_memory(self, addr: int, size: int, data: int) -> int: for provider in [p for p in self._providers.values() if p.do_filter_memory]: data = provider.filter_memory(addr, size, data) return data - def filter_memory_unaligned_8(self, addr, size, data): + def filter_memory_unaligned_8(self, addr: int, size: int, data: MutableSequence[int]) -> Sequence[int]: for provider in [p for p in self._providers.values() if p.do_filter_memory]: for i, d in enumerate(data): data[i] = provider.filter_memory(addr + i, 8, d) return data - def filter_memory_aligned_32(self, addr, size, data): + def filter_memory_aligned_32(self, addr: int, size: int, data: MutableSequence[int]) -> Sequence[int]: for provider in [p for p in self._providers.values() if p.do_filter_memory]: for i, d in enumerate(data): data[i] = provider.filter_memory(addr + i, 32, d) return data - def remove_all_breakpoints(self): + def remove_all_breakpoints(self) -> None: """! @brief Remove all breakpoints immediately.""" for bp in self._breakpoints.values(): bp.provider.remove_breakpoint(bp) self._breakpoints = {} self._flush_all() - def _flush_all(self): + def _flush_all(self) -> None: # Flush all providers. 
for provider in self._providers.values(): provider.flush() - def _pre_run_handler(self, notification): + def _pre_run_handler(self, notification: "Notification") -> None: if not self._ignore_notifications: is_step = notification.data == Target.RunType.STEP self.flush(is_step) - def _pre_disconnect_handler(self, notification): + def _pre_disconnect_handler(self, notification: "Notification") -> None: pass diff --git a/pyocd/debug/breakpoints/provider.py b/pyocd/debug/breakpoints/provider.py index d0c735be7..0d2f9794f 100644 --- a/pyocd/debug/breakpoints/provider.py +++ b/pyocd/debug/breakpoints/provider.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2015-2017 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,48 +15,54 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional + from ...core.target import Target -class Breakpoint(object): +class Breakpoint: def __init__(self, provider): - self.type = Target.BreakpointType.HW - self.enabled = False - self.addr = 0 - self.original_instr = 0 - self.provider = provider + self.type: Target.BreakpointType = Target.BreakpointType.HW + self.enabled: bool = False + self.addr: int = 0 + self.original_instr: int = 0 + self.provider: BreakpointProvider = provider - def __repr__(self): + def __repr__(self) -> str: return "<%s@0x%08x type=%s addr=0x%08x>" % (self.__class__.__name__, id(self), self.type.name, self.addr) -class BreakpointProvider(object): - """! 
@brief Abstract base class for breakpoint providers.""" - def init(self): +class BreakpointProvider: + """@brief Abstract base class for breakpoint providers.""" + def init(self) -> None: raise NotImplementedError() - def bp_type(self): - return 0 + @property + def bp_type(self) -> Target.BreakpointType: + raise NotImplementedError() @property - def do_filter_memory(self): + def do_filter_memory(self) -> bool: return False @property - def available_breakpoints(self): + def available_breakpoints(self) -> int: + raise NotImplementedError() + + def can_support_address(self, addr: int) -> bool: raise NotImplementedError() - def find_breakpoint(self, addr): + def find_breakpoint(self, addr: int) -> Optional[Breakpoint]: raise NotImplementedError() - def set_breakpoint(self, addr): + def set_breakpoint(self, addr: int) -> Optional[Breakpoint]: raise NotImplementedError() - def remove_breakpoint(self, bp): + def remove_breakpoint(self, bp: Breakpoint) -> None: raise NotImplementedError() - def filter_memory(self, addr, size, data): + def filter_memory(self, addr: int, size: int, data: int) -> int: return data - def flush(self): + def flush(self) -> None: pass diff --git a/pyocd/debug/breakpoints/software.py b/pyocd/debug/breakpoints/software.py index 663a11117..8a8ad9966 100644 --- a/pyocd/debug/breakpoints/software.py +++ b/pyocd/debug/breakpoints/software.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2015-2019 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,15 +15,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging +from typing import (Dict, Optional, TYPE_CHECKING) + from .provider import (Breakpoint, BreakpointProvider) from ...core import exceptions from ...core.target import Target -import logging + +if TYPE_CHECKING: + from ...core.core_target import CoreTarget LOG = logging.getLogger(__name__) class SoftwareBreakpoint(Breakpoint): - def __init__(self, provider): + def __init__(self, provider: BreakpointProvider) -> None: super(SoftwareBreakpoint, self).__init__(provider) self.type = Target.BreakpointType.SW @@ -30,31 +36,35 @@ class SoftwareBreakpointProvider(BreakpointProvider): ## BKPT #0 instruction. BKPT_INSTR = 0xbe00 - def __init__(self, core): + def __init__(self, core: "CoreTarget") -> None: super(SoftwareBreakpointProvider, self).__init__() self._core = core - self._breakpoints = {} + self._breakpoints: Dict[int, SoftwareBreakpoint] = {} - def init(self): + def init(self) -> None: pass @property - def bp_type(self): + def bp_type(self) -> Target.BreakpointType: return Target.BreakpointType.SW @property - def do_filter_memory(self): + def do_filter_memory(self) -> bool: return True @property - def available_breakpoints(self): + def available_breakpoints(self) -> int: return -1 - def find_breakpoint(self, addr): + def can_support_address(self, addr: int) -> bool: + region = self._core.memory_map.get_region_for_address(addr) + return (region is not None) and region.is_writable + + def find_breakpoint(self, addr: int) -> Optional[Breakpoint]: return self._breakpoints.get(addr, None) - def set_breakpoint(self, addr): - assert self._core.memory_map.get_region_for_address(addr).is_ram + def set_breakpoint(self, addr: int) -> Optional[Breakpoint]: + assert self.can_support_address(addr) assert (addr & 1) == 0 try: @@ -77,7 +87,7 @@ def set_breakpoint(self, addr): LOG.debug("Failed to set sw bp at 0x%x" % addr) return None - def remove_breakpoint(self, bp): + def remove_breakpoint(self, bp: Breakpoint) -> None: assert bp is not None and isinstance(bp, 
Breakpoint) try: @@ -89,7 +99,7 @@ def remove_breakpoint(self, bp): except exceptions.TransferError: LOG.debug("Failed to remove sw bp at 0x%x" % bp.addr) - def filter_memory(self, addr, size, data): + def filter_memory(self, addr: int, size: int, data: int) -> int: for bp in self._breakpoints.values(): if size == 8: if bp.addr == addr: From ffbfa734a5e1ff087cc089140eddd1ec87a6d0ef Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 12 Dec 2021 14:40:15 -0600 Subject: [PATCH 073/123] cleanup: misc type fixes. - Type fix in RegisterCommandBase. - Fix type of SubcommandBase.SUBCOMMANDS. - FileProgrammer: fix invalid types:ignore comments preventing mypy from running. --- pyocd/commands/commands.py | 2 ++ pyocd/flash/file_programmer.py | 4 ++-- pyocd/subcommands/base.py | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pyocd/commands/commands.py b/pyocd/commands/commands.py index 21f101e90..84d4b360c 100755 --- a/pyocd/commands/commands.py +++ b/pyocd/commands/commands.py @@ -161,6 +161,7 @@ def _dump_peripheral_register(self, periph, reg, show_fields): lsb = f.bit_offset f_value = bfx(value, msb, lsb) v_enum = None + v = None if f.enumerated_values: for v in f.enumerated_values: if v.value == f_value: @@ -176,6 +177,7 @@ def _dump_peripheral_register(self, periph, reg, show_fields): f_value_bin_str = bin(f_value)[2:] f_value_bin_str = "0" * (f.bit_width - len(f_value_bin_str)) + f_value_bin_str if v_enum: + assert v f_value_enum_str = " %s: %s" % (v.name, v_enum.description) else: f_value_enum_str = "" diff --git a/pyocd/flash/file_programmer.py b/pyocd/flash/file_programmer.py index ca5f65d1a..238a3fa3f 100755 --- a/pyocd/flash/file_programmer.py +++ b/pyocd/flash/file_programmer.py @@ -125,14 +125,14 @@ def program(self, file_or_path: Union[str, IO[bytes]], file_format: Optional[str is_path = isinstance(file_or_path, str) # Check for valid path first. 
- if is_path and not os.path.isfile(file_or_path): # type: ignore (type checker doesn't use is_path) + if is_path and not os.path.isfile(file_or_path): # type: ignore # (type checker doesn't use is_path) raise FileNotFoundError(errno.ENOENT, "No such file: '{}'".format(file_or_path)) # If no format provided, use the file's extension. if not file_format: if is_path: # Extract the extension from the path. - file_format = os.path.splitext(file_or_path)[1][1:] # type: ignore (type checker doesn't use is_path) + file_format = os.path.splitext(file_or_path)[1][1:] # type: ignore # (type checker doesn't use is_path) # Explicitly check for no extension. if file_format == '': diff --git a/pyocd/subcommands/base.py b/pyocd/subcommands/base.py index d9f4c54af..275a2e5ad 100644 --- a/pyocd/subcommands/base.py +++ b/pyocd/subcommands/base.py @@ -17,7 +17,7 @@ import argparse import logging import prettytable -from typing import (List, Optional) +from typing import (List, Optional, Type) from ..utility.cmdline import convert_frequency @@ -29,7 +29,7 @@ class SubcommandBase: HELP: str = "" EPILOG: Optional[str] = None DEFAULT_LOG_LEVEL = logging.INFO - SUBCOMMANDS: List["SubcommandBase"] = [] + SUBCOMMANDS: List[Type["SubcommandBase"]] = [] ## Class attribute to store the built subcommand argument parser. parser: Optional[argparse.ArgumentParser] = None From 9d5aca122f96e9d6bd685fd24d6b84a2ca4abeed Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 11 Dec 2021 17:46:05 -0600 Subject: [PATCH 074/123] pyproject.toml: add mypy config. 
--- pyproject.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 37e024f9d..f3f3dea65 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,3 +32,7 @@ passenv = CI_JOBS commands = python automated_test.py -j4 -q """ + +[tool.mypy] +files = "pyocd" +ignore_missing_imports = true From 0cb392bbfde5f2e1f5528d750fb8a47ae74abaad Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Thu, 16 Dec 2021 13:07:58 -0600 Subject: [PATCH 075/123] gdbserver: rewrite escape() and unescape() for Python 3 without using six. - Simplify and add type annotations. - Remove iter_single_bytes() utility that was not used elsewhere. - Add unit test. --- pyocd/gdbserver/gdbserver.py | 37 +++++++++--------- pyocd/utility/compatibility.py | 7 ---- test/unit/test_compatibility.py | 11 ------ test/unit/test_gdbserver.py | 67 +++++++++++++++++++++++++++++++++ 4 files changed, 86 insertions(+), 36 deletions(-) create mode 100644 test/unit/test_gdbserver.py diff --git a/pyocd/gdbserver/gdbserver.py b/pyocd/gdbserver/gdbserver.py index edb818d52..24612071d 100644 --- a/pyocd/gdbserver/gdbserver.py +++ b/pyocd/gdbserver/gdbserver.py @@ -17,20 +17,19 @@ import logging import threading -from struct import unpack from time import sleep import sys import six import io from xml.etree.ElementTree import (Element, SubElement, tostring) -from typing import (Dict, Optional) +from typing import (Dict, List, Optional) from ..core import exceptions from ..core.target import Target from ..flash.loader import FlashLoader from ..utility.cmdline import convert_vector_catch from ..utility.conversion import (hex_to_byte_list, hex_encode, hex_decode, hex8_to_u32le) -from ..utility.compatibility import (iter_single_bytes, to_bytes_safe, to_str_safe) +from ..utility.compatibility import (to_bytes_safe, to_str_safe) from ..utility.server import StreamServer from ..utility.timeout import Timeout from ..trace.swv import SWVReader @@ -57,7 +56,7 @@ TRACE_MEM = 
LOG.getChild("trace.mem") TRACE_MEM.setLevel(logging.CRITICAL) -def unescape(data): +def unescape(data: bytes) -> List[int]: """! @brief De-escapes binary data from Gdb. @param data Bytes-like object with possibly escaped values. @@ -66,18 +65,19 @@ def unescape(data): data_idx = 0 # unpack the data into binary array - str_unpack = str(len(data)) + 'B' - data = unpack(str_unpack, data) - data = list(data) + result = list(data) # check for escaped characters - while data_idx < len(data): - if data[data_idx] == 0x7d: - data.pop(data_idx) - data[data_idx] = data[data_idx] ^ 0x20 + while data_idx < len(result): + if result[data_idx] == 0x7d: + result.pop(data_idx) + result[data_idx] = result[data_idx] ^ 0x20 data_idx += 1 - return data + return result + +## Tuple of int values of characters that must be escaped. +_GDB_ESCAPED_CHARS = tuple(b'#$}*') def escape(data): """! @brief Escape binary data to be sent to Gdb. @@ -85,13 +85,14 @@ def escape(data): @param data Bytes-like object containing raw binary. @return Bytes object with the characters in '#$}*' escaped as required by Gdb. """ - result = b'' - for c in iter_single_bytes(data): - if c in b'#$}*': - result += b'}' + six.int2byte(six.byte2int(c) ^ 0x20) + result: List[int] = [] + for c in data: + if c in _GDB_ESCAPED_CHARS: + # Escape by prefixing with '}' and xor'ing the char with 0x20. + result += [0x7d, c ^ 0x20] else: - result += c - return result + result.append(c) + return bytes(result) class GDBError(exceptions.Error): """! @brief Error communicating with GDB.""" diff --git a/pyocd/utility/compatibility.py b/pyocd/utility/compatibility.py index 0b141e8b4..4da5852fe 100644 --- a/pyocd/utility/compatibility.py +++ b/pyocd/utility/compatibility.py @@ -15,15 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import functools from typing import Union -# iter_single_bytes() returns an iterator over a bytes object that produces -# single-byte bytes objects for each byte in the passed in value. Normally on -# py3 iterating over a bytes will give you ints for each byte, while on py3 -# you'll get single-char strs. -iter_single_bytes = functools.partial(map, lambda v: bytes((v,))) # pylint: disable=invalid-name - # to_bytes_safe() converts a unicode string to a bytes object by encoding as # latin-1. It will also accept a value that is already a bytes object and # return it unmodified. diff --git a/test/unit/test_compatibility.py b/test/unit/test_compatibility.py index 9622aa9a4..0edea4071 100644 --- a/test/unit/test_compatibility.py +++ b/test/unit/test_compatibility.py @@ -14,23 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest -import six - from pyocd.utility.compatibility import ( - iter_single_bytes, to_bytes_safe, to_str_safe, ) class TestCompatibility(object): - def test_iter_single_bytes_bytes(self): - i = iter_single_bytes(b"1234") - assert next(i) == b'1' - assert next(i) == b'2' - assert next(i) == b'3' - assert next(i) == b'4' - def test_to_bytes_safe(self): assert to_bytes_safe(b"hello") == b"hello" assert to_bytes_safe("string") == b"string" diff --git a/test/unit/test_gdbserver.py b/test/unit/test_gdbserver.py new file mode 100644 index 000000000..9ca1dd7d9 --- /dev/null +++ b/test/unit/test_gdbserver.py @@ -0,0 +1,67 @@ +# pyOCD debugger +# Copyright (c) 2021 Chris Reed +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pyocd.gdbserver.gdbserver import ( + escape, + unescape, +) + +# escaped chars: '#$}*' +# escaped by prefixing with '}' and xor'ing the char with 0x20 +# +# '#' (0x23) -> '}\x03' +# '$' (0x24) -> '}\x04' +# '}' (0x7d) -> '}]' +# '*' (0x2a) -> '}\x0a' + +class TestGdbServerEscaping: + def test_escape_transparent(self): + assert escape(b"hello") == b"hello" + + def test_escape_individual(self): + assert escape(b"hello#foo") == b"hello}\x03foo" + assert escape(b"hello$foo") == b"hello}\x04foo" + assert escape(b"hello}foo") == b"hello}]foo" + assert escape(b"hello*foo") == b"hello}\x0afoo" + + def test_escape_single(self): + assert escape(b"#") == b"}\x03" + assert escape(b"$") == b"}\x04" + assert escape(b"}") == b"}]" + assert escape(b"*") == b"}\x0a" + + def test_escape_combined(self): + assert escape(b"#$}*") == b"}\x03}\x04}]}\x0a" + assert escape(b'}}}') == b"}]}]}]" + + def test_unescape_transparent(self): + assert unescape(b"bytes") == list(b"bytes") + + def test_unescape_individual(self): + assert unescape(b"hello}\x03foo") == list(b"hello#foo") + assert unescape(b"hello}\x04foo") == list(b"hello$foo") + assert unescape(b"hello}]foo") == list(b"hello}foo") + assert unescape(b"hello}\x0afoo") == list(b"hello*foo") + + def test_unescape_single(self): + assert unescape(b"}\x03") == [b'#'[0]] + assert unescape(b"}\x04") == [b'$'[0]] + assert unescape(b"}]") == [b'}'[0]] + assert unescape(b"}\x0a") == [b'*'[0]] + + def test_unescape_combined(self): + assert unescape(b"}\x03}\x04}]}\x0a") == list(b"#$}*") + assert unescape(b"}]}]}]") == list(b"}}}") 
From daac6d96cc19df1bf451363222c8af48169a3540 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Thu, 16 Dec 2021 13:11:56 -0600 Subject: [PATCH 076/123] cleanup: replace many uses of six with simpler Python 3 code. Remove usage of six.b/u, six.iterbytes, six.add_metaclass --- pyocd/gdbserver/context_facade.py | 11 +++++------ pyocd/gdbserver/gdbserver.py | 3 +-- pyocd/gdbserver/packet_io.py | 5 ++--- pyocd/probe/stlink/detect/base.py | 4 +--- pyocd/probe/stlink/stlink.py | 11 +++++------ pyocd/utility/conversion.py | 3 +-- 6 files changed, 15 insertions(+), 22 deletions(-) diff --git a/pyocd/gdbserver/context_facade.py b/pyocd/gdbserver/context_facade.py index 6a089cd3a..80ac84ac6 100644 --- a/pyocd/gdbserver/context_facade.py +++ b/pyocd/gdbserver/context_facade.py @@ -16,7 +16,6 @@ # limitations under the License. import logging -import six from xml.etree import ElementTree from itertools import groupby @@ -107,7 +106,7 @@ def get_register_context(self): if reg_value is None: r = b"xx" * round_up_div(reg.bitsize, 8) else: - r = six.b(conversion.uint_to_hex_le(reg_value, reg.bitsize)) + r = conversion.uint_to_hex_le(reg_value, reg.bitsize).encode() resp += r LOG.debug("GDB get_reg_context: %s = %s -> %s", reg.name, "None" if (reg_value is None) else ("0x%08X" % reg_value), r) @@ -167,7 +166,7 @@ def gdb_get_register(self, gdb_regnum): try: reg_value = self._context.read_core_register_raw(reg.name) - resp = six.b(conversion.uint_to_hex_le(reg_value, reg.bitsize)) + resp = conversion.uint_to_hex_le(reg_value, reg.bitsize).encode() LOG.debug("GDB reg: %s = 0x%X", reg.name, reg_value) except exceptions.CoreRegisterAccessError: # Return x's if the register read failed. @@ -183,9 +182,9 @@ def get_t_response(self, force_signal=None): - The current value of the important registers (sp, lr, pc). 
""" if force_signal is not None: - response = six.b('T' + conversion.byte_to_hex2(force_signal)) + response = ('T' + conversion.byte_to_hex2(force_signal)).encode() else: - response = six.b('T' + conversion.byte_to_hex2(self.get_signal_value())) + response = ('T' + conversion.byte_to_hex2(self.get_signal_value())).encode() # Append fp(r7), sp(r13), lr(r14), pc(r15) response += self._get_reg_index_value_pairs(['r7', 'sp', 'lr', 'pc']) @@ -230,7 +229,7 @@ def _get_reg_index_value_pairs(self, reg_list): encoded_reg = "xx" * round_up_div(reg.bitsize, 8) else: encoded_reg = conversion.uint_to_hex_le(reg_value, reg.bitsize) - result += six.b(conversion.byte_to_hex2(reg.gdb_regnum) + ':' + encoded_reg + ';') + result += (conversion.byte_to_hex2(reg.gdb_regnum) + ':' + encoded_reg + ';').encode() return result def get_memory_map_xml(self): diff --git a/pyocd/gdbserver/gdbserver.py b/pyocd/gdbserver/gdbserver.py index 24612071d..778763d8c 100644 --- a/pyocd/gdbserver/gdbserver.py +++ b/pyocd/gdbserver/gdbserver.py @@ -19,7 +19,6 @@ import threading from time import sleep import sys -import six import io from xml.etree.ElementTree import (Element, SubElement, tostring) from typing import (Dict, List, Optional) @@ -934,7 +933,7 @@ def handle_query(self, msg): # Build our list of features. 
features = [b'qXfer:features:read+', b'QStartNoAckMode+', b'qXfer:threads:read+', b'QNonStop+'] - features.append(b'PacketSize=' + six.b(hex(self.packet_size))[2:]) + features.append(b'PacketSize=' + (hex(self.packet_size).encode())[2:]) if self.target_facade.get_memory_map_xml() is not None: features.append(b'qXfer:memory-map:read+') resp = b';'.join(features) diff --git a/pyocd/gdbserver/packet_io.py b/pyocd/gdbserver/packet_io.py index 3bb782926..9c47670a2 100644 --- a/pyocd/gdbserver/packet_io.py +++ b/pyocd/gdbserver/packet_io.py @@ -18,7 +18,6 @@ import logging import threading import socket -import six import queue CTRL_C = b'\x03' @@ -31,8 +30,8 @@ TRACE_PACKETS = LOG.getChild("trace.packet") TRACE_PACKETS.setLevel(logging.CRITICAL) -def checksum(data): - return ("%02x" % (sum(six.iterbytes(data)) % 256)).encode() +def checksum(data: bytes) -> bytes: + return ("%02x" % (sum(data) % 256)).encode() class ConnectionClosedException(Exception): """! @brief Exception used to signal the GDB server connection closed.""" diff --git a/pyocd/probe/stlink/detect/base.py b/pyocd/probe/stlink/detect/base.py index ab965e009..54b1596ec 100644 --- a/pyocd/probe/stlink/detect/base.py +++ b/pyocd/probe/stlink/detect/base.py @@ -20,12 +20,10 @@ from os import listdir from os.path import join, exists, isdir import logging -import six LOG = logging.getLogger(__name__) -@six.add_metaclass(ABCMeta) -class StlinkDetectBase(object): +class StlinkDetectBase(object, metaclass=ABCMeta): """ Base class for stlink detection, defines public interface for mbed-enabled stlink devices detection for various hosts """ diff --git a/pyocd/probe/stlink/stlink.py b/pyocd/probe/stlink/stlink.py index 7390bb41b..309384be2 100644 --- a/pyocd/probe/stlink/stlink.py +++ b/pyocd/probe/stlink/stlink.py @@ -17,7 +17,6 @@ import logging import struct -import six import threading from enum import Enum from typing import Optional @@ -363,7 +362,7 @@ def _read_mem(self, addr, size, memcmd, max, apsel): 
thisTransferSize = min(size, max) cmd = [Commands.JTAG_COMMAND, memcmd] - cmd.extend(six.iterbytes(struct.pack(' str: def hex_to_byte_list(data: str) -> ByteList: """! @brief Convert string of hex bytes to list of integers""" - return list(six.iterbytes(binascii.unhexlify(data))) + return list(binascii.unhexlify(data)) def hex_decode(cmd: str) -> bytes: """! @brief Return the binary data represented by the hexadecimal string.""" From 60e9fa530302ef62e920739d415f56822f99c1b4 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Thu, 16 Dec 2021 14:59:37 -0600 Subject: [PATCH 077/123] CMSIS-DAP: core: fix swd_sequence() not returning result. --- pyocd/probe/pydapaccess/dap_access_api.py | 5 +++-- pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pyocd/probe/pydapaccess/dap_access_api.py b/pyocd/probe/pydapaccess/dap_access_api.py index 9e6d97a3c..d51a8ff03 100644 --- a/pyocd/probe/pydapaccess/dap_access_api.py +++ b/pyocd/probe/pydapaccess/dap_access_api.py @@ -16,7 +16,7 @@ from enum import Enum - +from typing import (Tuple, Sequence) class DAPAccessIntf(object): @@ -188,7 +188,7 @@ def swj_sequence(self, length, bits): """ raise NotImplementedError() - def swd_sequence(self, sequences): + def swd_sequence(self, sequences) -> Tuple[int, Sequence[bytes]]: """! @brief Send a sequences of bits on the SWDIO signal. This method sends the DAP_SWD_Sequence CMSIS-DAP command. @@ -205,6 +205,7 @@ def swd_sequence(self, sequences): @return A 2-tuple of the response status, and a sequence of bytes objects, one for each input sequence. The length of the bytes object is ( + 7) / 8. Bits are in LSB first order. """ + raise NotImplementedError() def jtag_sequence(self, cycles, tms, read_tdo, tdi): """! @brief Send JTAG sequence. 
diff --git a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py index 0c2907426..cdd2d486d 100644 --- a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py +++ b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py @@ -839,7 +839,7 @@ def swj_sequence(self, length, bits): @locked def swd_sequence(self, sequences): self.flush() - self._protocol.swd_sequence(sequences) + return self._protocol.swd_sequence(sequences) @locked def jtag_sequence(self, cycles, tms, read_tdo, tdi): From e84486394e1a6cfc9f192ba70dc5c88bd1bef06e Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 15 Dec 2021 15:27:42 -0600 Subject: [PATCH 078/123] CMSIS-DAP: backend: annotate common; py3 and doc comment cleanup. - Includes fix for type annotation of generate_device_unique_id() parameter. --- pyocd/probe/pydapaccess/interface/common.py | 55 ++++++++++--------- .../pydapaccess/interface/hidapi_backend.py | 15 ++--- .../probe/pydapaccess/interface/interface.py | 18 ++++-- .../pydapaccess/interface/pyusb_backend.py | 34 +++++------- .../pydapaccess/interface/pyusb_v2_backend.py | 26 ++++----- .../pydapaccess/interface/pywinusb_backend.py | 17 ++---- 6 files changed, 81 insertions(+), 84 deletions(-) diff --git a/pyocd/probe/pydapaccess/interface/common.py b/pyocd/probe/pydapaccess/interface/common.py index df801b717..b121eac2d 100644 --- a/pyocd/probe/pydapaccess/interface/common.py +++ b/pyocd/probe/pydapaccess/interface/common.py @@ -19,7 +19,10 @@ import usb.util from hashlib import sha1 from base64 import b32encode -from typing import (List, Union) +from typing import (List, Tuple, Union, TYPE_CHECKING) + +if TYPE_CHECKING: + from usb.core import Interface # USB class codes. USB_CLASS_COMPOSITE = 0x00 @@ -35,21 +38,23 @@ CMSIS_DAP_HID_USAGE_PAGE = 0xff00 +VidPidPair = Tuple[int, int] + # Known USB VID/PID pairs. 
-ARM_DAPLINK_ID = (0x0d28, 0x0204) # Arm DAPLink firmware -ATMEL_ICE_ID = (0x03eb, 0x2141) # Atmel-ICE -CYPRESS_KITPROG1_2_ID = (0x04b4, 0xf138) # Cypress KitProg1, KitProg2 in CMSIS-DAP mode -CYPRESS_MINIPROG4_BULK_ID = (0x04b4, 0xf151) # Cypress MiniProg4 bulk -CYPRESS_MINIPROG4_HID_ID = (0x04b4, 0xf152) # Cypress MiniProg4 HID -CYPRESS_KITPROG3_HID_ID = (0x04b4, 0xf154) # Cypress KitProg3 HID -CYPRESS_KITPROG3_BULKD_ID = (0x04b4, 0xf155) # Cypress KitProg3 bulk -CYPRESS_KITPROG3_BULK_2_UART_ID = (0x04b4, 0xf166) # Cypress KitProg3 bulk with 2x UART -KEIL_ULINKPLUS_ID = (0xc251, 0x2750) # Keil ULINKplus -NXP_LPCLINK2_ID = (0x1fc9, 0x0090) # NXP LPC-LinkII -NXP_MCULINK_ID = (0x1fc9, 0x0143) # NXP MCU-Link +ARM_DAPLINK_ID: VidPidPair = (0x0d28, 0x0204) # Arm DAPLink firmware +ATMEL_ICE_ID: VidPidPair = (0x03eb, 0x2141) # Atmel-ICE +CYPRESS_KITPROG1_2_ID: VidPidPair = (0x04b4, 0xf138) # Cypress KitProg1, KitProg2 in CMSIS-DAP mode +CYPRESS_MINIPROG4_BULK_ID: VidPidPair = (0x04b4, 0xf151) # Cypress MiniProg4 bulk +CYPRESS_MINIPROG4_HID_ID: VidPidPair = (0x04b4, 0xf152) # Cypress MiniProg4 HID +CYPRESS_KITPROG3_HID_ID: VidPidPair = (0x04b4, 0xf154) # Cypress KitProg3 HID +CYPRESS_KITPROG3_BULKD_ID: VidPidPair = (0x04b4, 0xf155) # Cypress KitProg3 bulk +CYPRESS_KITPROG3_BULK_2_UART_ID: VidPidPair = (0x04b4, 0xf166) # Cypress KitProg3 bulk with 2x UART +KEIL_ULINKPLUS_ID: VidPidPair = (0xc251, 0x2750) # Keil ULINKplus +NXP_LPCLINK2_ID: VidPidPair = (0x1fc9, 0x0090) # NXP LPC-LinkII +NXP_MCULINK_ID: VidPidPair = (0x1fc9, 0x0143) # NXP MCU-Link ## List of VID/PID pairs for known CMSIS-DAP USB devices. -KNOWN_CMSIS_DAP_IDS = [ +KNOWN_CMSIS_DAP_IDS: List[VidPidPair] = [ ARM_DAPLINK_ID, ATMEL_ICE_ID, CYPRESS_KITPROG1_2_ID, @@ -65,17 +70,17 @@ ## List of VID/PID pairs for CMSIS-DAP probes that have multiple HID interfaces that must be # filtered by usage page. Currently these are only NXP probes. 
-CMSIS_DAP_IDS_TO_FILTER_BY_USAGE_PAGE = [ +CMSIS_DAP_IDS_TO_FILTER_BY_USAGE_PAGE: List[VidPidPair] = [ NXP_LPCLINK2_ID, NXP_MCULINK_ID, ] -def is_known_cmsis_dap_vid_pid(vid, pid): - """! @brief Test whether a VID/PID pair belong to a known CMSIS-DAP device.""" +def is_known_cmsis_dap_vid_pid(vid: int, pid: int) -> bool: + """@brief Test whether a VID/PID pair belong to a known CMSIS-DAP device.""" return (vid, pid) in KNOWN_CMSIS_DAP_IDS -def filter_device_by_class(vid, pid, device_class): - """! @brief Test whether the device should be ignored by comparing bDeviceClass. +def filter_device_by_class(vid: int, pid: int, device_class: int) -> bool: + """@brief Test whether the device should be ignored by comparing bDeviceClass. This function checks the device's bDeviceClass to determine whether it is likely to be a CMSIS-DAP device. It uses the vid and pid for device-specific quirks. @@ -92,8 +97,8 @@ def filter_device_by_class(vid, pid, device_class): # Any other class indicates the device is not CMSIS-DAP. return True -def filter_device_by_usage_page(vid, pid, usage_page): - """! @brief Test whether the device should be ignored by comparing the HID usage page. +def filter_device_by_usage_page(vid: int, pid: int, usage_page: int) -> bool: + """@brief Test whether the device should be ignored by comparing the HID usage page. This function performs device-specific tests to determine whether the device is a CMSIS-DAP interface. The only current test is for the NXP LPC-Link2, which has extra HID interfaces with @@ -106,14 +111,14 @@ def filter_device_by_usage_page(vid, pid, usage_page): return ((vid, pid) in CMSIS_DAP_IDS_TO_FILTER_BY_USAGE_PAGE) \ and (usage_page != CMSIS_DAP_HID_USAGE_PAGE) -def check_ep(interface, ep_index, ep_dir, ep_type): - """! 
@brief Tests an endpoint type and direction.""" +def check_ep(interface: "Interface", ep_index: int, ep_dir: int, ep_type: int) -> bool: + """@brief Tests an endpoint type and direction.""" ep = interface[ep_index] - return (usb.util.endpoint_direction(ep.bEndpointAddress) == ep_dir) \ - and (usb.util.endpoint_type(ep.bmAttributes) == ep_type) + return ((usb.util.endpoint_direction(ep.bEndpointAddress) == ep_dir) # type:ignore + and (usb.util.endpoint_type(ep.bmAttributes) == ep_type)) # type:ignore def generate_device_unique_id(vid: int, pid: int, *locations: List[Union[int, str]]) -> str: - """! @brief Generate a semi-stable unique ID from USB device properties. + """@brief Generate a semi-stable unique ID from USB device properties. This function is intended to be used in cases where a device does not provide a serial number string. pyocd still needs a valid unique ID so the device can be selected from amongst multiple diff --git a/pyocd/probe/pydapaccess/interface/hidapi_backend.py b/pyocd/probe/pydapaccess/interface/hidapi_backend.py index f0fd9abe5..5a4f651a3 100644 --- a/pyocd/probe/pydapaccess/interface/hidapi_backend.py +++ b/pyocd/probe/pydapaccess/interface/hidapi_backend.py @@ -46,8 +46,7 @@ _IS_WINDOWS = (platform.system() == 'Windows') class HidApiUSB(Interface): - """! @brief CMSIS-DAP USB interface class using hidapi backend. - """ + """@brief CMSIS-DAP USB interface class using hidapi backend.""" isAvailable = IS_AVAILABLE @@ -105,7 +104,7 @@ def rx_task(self): @staticmethod def get_all_connected_interfaces(): - """! @brief Returns all the connected devices with CMSIS-DAP in the name. + """@brief Returns all the connected devices with CMSIS-DAP in the name. returns an array of HidApiUSB (Interface) objects """ @@ -152,8 +151,7 @@ def get_all_connected_interfaces(): return boards def write(self, data): - """! 
@brief Write data on the OUT endpoint associated to the HID interface - """ + """@brief Write data on the OUT endpoint associated to the HID interface""" if TRACE.isEnabledFor(logging.DEBUG): TRACE.debug(" USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data])) data.extend([0] * (self.packet_size - len(data))) @@ -162,8 +160,7 @@ def write(self, data): self.device.write([0] + data) def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): - """! @brief Read data on the IN endpoint associated to the HID interface - """ + """@brief Read data on the IN endpoint associated to the HID interface""" # Windows doesn't use the read thread, so read directly. if _IS_WINDOWS: read_data = self.device.read(self.packet_size) @@ -194,10 +191,8 @@ def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): return self.received_data.popleft() - def close(self): - """! @brief Close the interface - """ + """@brief Close the interface""" assert not self.closed_event.is_set() LOG.debug("closing interface") diff --git a/pyocd/probe/pydapaccess/interface/interface.py b/pyocd/probe/pydapaccess/interface/interface.py index 8a95afd1b..c4bae4889 100644 --- a/pyocd/probe/pydapaccess/interface/interface.py +++ b/pyocd/probe/pydapaccess/interface/interface.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2006-2013,2018 Arm Limited +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +16,11 @@ # limitations under the License. 
-class Interface(object): +class Interface: + + @staticmethod + def get_all_connected_interfaces(): + raise NotImplementedError() DEFAULT_READ_TIMEOUT = 20 @@ -38,16 +43,19 @@ def is_bulk(self): return False def open(self): - return + raise NotImplementedError() def close(self): - return + raise NotImplementedError() def write(self, data): - return + raise NotImplementedError() def read(self, timeout=DEFAULT_READ_TIMEOUT): - return + raise NotImplementedError() + + def read_swo(self): + raise NotImplementedError() def get_info(self): return self.vendor_name + " " + \ diff --git a/pyocd/probe/pydapaccess/interface/pyusb_backend.py b/pyocd/probe/pydapaccess/interface/pyusb_backend.py index 82aa562b0..cf240043e 100644 --- a/pyocd/probe/pydapaccess/interface/pyusb_backend.py +++ b/pyocd/probe/pydapaccess/interface/pyusb_backend.py @@ -2,7 +2,7 @@ # Copyright (c) 2006-2021 Arm Limited # Copyright (c) 2020 Patrick Huesmann # Copyright (c) 2021 mentha -# Copyright (c) Chris Reed +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -46,15 +46,14 @@ IS_AVAILABLE = True class PyUSB(Interface): - """! @brief CMSIS-DAP USB interface class using pyusb for the backend. - """ + """@brief CMSIS-DAP USB interface class using pyusb for the backend.""" isAvailable = IS_AVAILABLE did_show_no_libusb_warning = False def __init__(self): - super(PyUSB, self).__init__() + super().__init__() self.ep_out = None self.ep_in = None self.dev = None @@ -156,7 +155,7 @@ def rx_task(self): @staticmethod def get_all_connected_interfaces(): - """! @brief Returns all the connected CMSIS-DAP devices. + """@brief Returns all the connected CMSIS-DAP devices. returns an array of PyUSB (Interface) objects """ @@ -184,8 +183,7 @@ def get_all_connected_interfaces(): return boards def write(self, data): - """! 
@brief Write data on the OUT endpoint associated to the HID interface - """ + """@brief Write data on the OUT endpoint associated to the HID interface""" report_size = self.packet_size if self.ep_out: @@ -211,8 +209,7 @@ def write(self, data): self.ep_out.write(data) def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): - """! @brief Read data on the IN endpoint associated to the HID interface - """ + """@brief Read data on the IN endpoint associated to the HID interface""" # Spin for a while if there's not data available yet. 100 µs sleep between checks. with Timeout(timeout, sleeptime=0.0001) as t_o: while t_o.check(): @@ -234,8 +231,7 @@ def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): return self.rcv_data.pop(0) def close(self): - """! @brief Close the interface - """ + """@brief Close the interface""" assert self.closed is False LOG.debug("closing interface") @@ -259,8 +255,8 @@ def close(self): self.kernel_driver_was_attached = False self.thread = None -class MatchCmsisDapv1Interface(object): - """! @brief Match class for finding CMSIS-DAPv1 interface. +class MatchCmsisDapv1Interface: + """@brief Match class for finding CMSIS-DAPv1 interface. This match class performs several tests on the provided USB interface descriptor, to determine whether it is a CMSIS-DAPv1 interface. These requirements must be met by the @@ -274,11 +270,11 @@ class MatchCmsisDapv1Interface(object): """ def __init__(self, hid_interface_count): - """! @brief Constructor.""" + """@brief Constructor.""" self._hid_count = hid_interface_count def __call__(self, interface): - """! @brief Return True if this is a CMSIS-DAPv1 interface.""" + """@brief Return True if this is a CMSIS-DAPv1 interface.""" try: if self._hid_count > 1: interface_name = usb.util.get_string(interface.device, interface.iInterface) @@ -327,15 +323,15 @@ def __call__(self, interface): # IndexError can be raised if an endpoint is missing. return False -class FindDap(object): - """! 
@brief CMSIS-DAP match class to be used with usb.core.find""" +class FindDap: + """@brief CMSIS-DAP match class to be used with usb.core.find""" def __init__(self, serial=None): - """! @brief Create a new FindDap object with an optional serial number""" + """@brief Create a new FindDap object with an optional serial number""" self._serial = serial def __call__(self, dev): - """! @brief Return True if this is a DAP device, False otherwise""" + """@brief Return True if this is a DAP device, False otherwise""" # Check if the device class is a valid one for CMSIS-DAP. if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass): return False diff --git a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py index 5245c438f..a9b0a1a0c 100644 --- a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py +++ b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py @@ -1,7 +1,7 @@ # pyOCD debugger # Copyright (c) 2019-2021 Arm Limited # Copyright (c) 2021 mentha -# Copyright (c) Chris Reed +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -47,14 +47,12 @@ IS_AVAILABLE = True class PyUSBv2(Interface): - """! - @brief CMSIS-DAPv2 interface using pyUSB. - """ + """@brief CMSIS-DAPv2 interface using pyUSB.""" isAvailable = IS_AVAILABLE def __init__(self): - super(PyUSBv2, self).__init__() + super().__init__() self.ep_out = None self.ep_in = None self.ep_swo = None @@ -186,7 +184,7 @@ def swo_rx_task(self): @staticmethod def get_all_connected_interfaces(): - """! 
@brief Returns all the connected devices with a CMSIS-DAPv2 interface.""" + """@brief Returns all the connected devices with a CMSIS-DAPv2 interface.""" # find all cmsis-dap devices try: all_devices = libusb_package.find(find_all=True, custom_match=HasCmsisDapv2Interface()) @@ -209,7 +207,7 @@ def get_all_connected_interfaces(): return boards def write(self, data): - """! @brief Write data on the OUT endpoint.""" + """@brief Write data on the OUT endpoint.""" if self.ep_out: if (len(data) > 0) and (len(data) < self.packet_size) and (len(data) % self.ep_out.wMaxPacketSize == 0): @@ -223,7 +221,7 @@ def write(self, data): self.ep_out.write(data) def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): - """! @brief Read data on the IN endpoint.""" + """@brief Read data on the IN endpoint.""" # Spin for a while if there's not data available yet. 100 µs sleep between checks. with Timeout(timeout, sleeptime=0.0001) as t_o: while t_o.check(): @@ -252,7 +250,7 @@ def read_swo(self): return data def close(self): - """! @brief Close the USB interface.""" + """@brief Close the USB interface.""" assert self.closed is False if self.is_swo_running: @@ -274,7 +272,7 @@ def close(self): self.thread = None def _match_cmsis_dap_v2_interface(interface): - """! @brief Returns true for a CMSIS-DAP v2 interface. + """@brief Returns true for a CMSIS-DAP v2 interface. This match function performs several tests on the provided USB interface descriptor, to determine whether it is a CMSIS-DAPv2 interface. These requirements must be met by the @@ -326,15 +324,15 @@ def _match_cmsis_dap_v2_interface(interface): # IndexError can be raised if an endpoint is missing. return False -class HasCmsisDapv2Interface(object): - """! @brief CMSIS-DAPv2 match class to be used with usb.core.find""" +class HasCmsisDapv2Interface: + """@brief CMSIS-DAPv2 match class to be used with usb.core.find""" def __init__(self, serial=None): - """! 
@brief Create a new FindDap object with an optional serial number""" + """@brief Create a new FindDap object with an optional serial number""" self._serial = serial def __call__(self, dev): - """! @brief Return True if this is a CMSIS-DAPv2 device, False otherwise""" + """@brief Return True if this is a CMSIS-DAPv2 device, False otherwise""" # Check if the device class is a valid one for CMSIS-DAP. if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass): return False diff --git a/pyocd/probe/pydapaccess/interface/pywinusb_backend.py b/pyocd/probe/pydapaccess/interface/pywinusb_backend.py index 76f44bb17..651cd8121 100644 --- a/pyocd/probe/pydapaccess/interface/pywinusb_backend.py +++ b/pyocd/probe/pydapaccess/interface/pywinusb_backend.py @@ -40,13 +40,12 @@ IS_AVAILABLE = True class PyWinUSB(Interface): - """! @brief CMSIS-DAP USB interface class using pyWinUSB for the backend. - """ + """@brief CMSIS-DAP USB interface class using pyWinUSB for the backend.""" isAvailable = IS_AVAILABLE def __init__(self): - super(PyWinUSB, self).__init__() + super().__init__() # Vendor page and usage_id = 2 self.report = None # deque used here instead of synchronized Queue @@ -98,8 +97,7 @@ def open(self): @staticmethod def get_all_connected_interfaces(): - """! @brief Returns all the connected CMSIS-DAP devices - """ + """@brief Returns all the connected CMSIS-DAP devices""" all_devices = hid.find_all_hid_devices() # find devices with good vid/pid @@ -142,8 +140,7 @@ def get_all_connected_interfaces(): return boards def write(self, data): - """! @brief Write data on the OUT endpoint associated to the HID interface - """ + """@brief Write data on the OUT endpoint associated to the HID interface""" if TRACE.isEnabledFor(logging.DEBUG): TRACE.debug(" USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data])) @@ -151,8 +148,7 @@ def write(self, data): self.report.send([0] + data) def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): - """! 
@brief Read data on the IN endpoint associated to the HID interface - """ + """@brief Read data on the IN endpoint associated to the HID interface""" # Spin for a while if there's not data available yet. 100 µs sleep between checks. with Timeout(timeout, sleeptime=0.0001) as t_o: while t_o.check(): @@ -177,7 +173,6 @@ def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT): return self.rcv_data.popleft() def close(self): - """! @brief Close the interface - """ + """@brief Close the interface""" LOG.debug("closing interface") self.device.close() From ea97c08a8271bcb7554f7e816e749874c8335b2c Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 22 Dec 2021 14:17:50 -0600 Subject: [PATCH 079/123] lists: StubProbe class for creating target type class instances. SoCTarget now requires a non-None session.probe when it is constructed. ListGenerator.list_targets() needs to instantiate (but otherwise not use) the target type classes to extract data such as vendor, SVD file location, etc, but doesn't have a probe to use. This is solved by defining a StubProbe(DebugProbe) class that implements only the unique_id property accessed by Session's ctor. This is just enough to allow target instantiation. Added a note to SoCTarget's doc comment about the necessity to not try to use session.probe for much of anything in the ctor because of this use case. 
--- pyocd/core/soc_target.py | 10 ++++++++-- pyocd/tools/lists.py | 11 ++++++++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/pyocd/core/soc_target.py b/pyocd/core/soc_target.py index 1815c2782..1a6a95f78 100644 --- a/pyocd/core/soc_target.py +++ b/pyocd/core/soc_target.py @@ -1,6 +1,6 @@ # pyOCD debugger # Copyright (c) 2020 Arm Limited -# Copyright (c) Chris Reed +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,9 +44,15 @@ class SoCTarget(TargetGraphNode): to control the device, access memory, adjust breakpoints, and so on. For single core devices, the SoCTarget has mostly equivalent functionality to - the Target object for the core. Multicore devices work differently. This class tracks + the CoreTarget object for the core. Multicore devices work differently. This class tracks a "selected core", to which all actions are directed. The selected core can be changed at any time. You may also directly access specific cores and perform operations on them. + + SoCTarget subclasses must restrict usage of the DebugProbe instance in their constructor, ideally not + using it at all. This is required in order to be able to gather information about targets for commands + such as `pyocd json` and `pyocd list`. These commands create a session with the probe set to an instance + of StubProbe, which is a subclass of DebugProbe with the minimal implementation necessary to support + session creation but not opening. 
""" VENDOR = "Generic" diff --git a/pyocd/tools/lists.py b/pyocd/tools/lists.py index 9e851f729..7dd7e5d40 100644 --- a/pyocd/tools/lists.py +++ b/pyocd/tools/lists.py @@ -27,6 +27,13 @@ from ..board.board_ids import BOARD_ID_TO_INFO from ..target.pack import pack_target +from ..probe.debug_probe import DebugProbe + +class StubProbe(DebugProbe): + @property + def unique_id(self) -> str: + return "0" + class ListGenerator(object): @staticmethod def list_probes(): @@ -137,7 +144,9 @@ def list_targets(name_filter=None, vendor_filter=None, source_filter=None): if name_filter and name_filter not in name.lower(): continue - s = Session(None) # Create empty session + # Create session with a stub probe that allows us to instantiate the target. This will create + # Board and Target instances of its own, so set some options to control that. + s = Session(StubProbe(), no_config=True, target_override='cortex_m') t = TARGET[name](s) # Filter by vendor. From 5a1341f27a02cf78ad59292b1b3c923012592b26 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 22 Dec 2021 14:27:34 -0600 Subject: [PATCH 080/123] subcommand: json: catch and output exception info as JSON. --- pyocd/subcommands/json_cmd.py | 89 ++++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 39 deletions(-) diff --git a/pyocd/subcommands/json_cmd.py b/pyocd/subcommands/json_cmd.py index cefb9a17d..c78a082b7 100644 --- a/pyocd/subcommands/json_cmd.py +++ b/pyocd/subcommands/json_cmd.py @@ -18,6 +18,7 @@ from typing import List import logging import json +import traceback from .base import SubcommandBase from ..core.session import Session @@ -65,50 +66,60 @@ def __init__(self, args: argparse.Namespace): def invoke(self) -> int: """! @brief Handle 'json' subcommand.""" - all_outputs = (self._args.probes, self._args.targets, self._args.boards, self._args.features) - - # Default to listing probes. - if not any(all_outputs): - self._args.probes = True - - # Check for more than one output option being selected. 
- if sum(int(x) for x in all_outputs) > 1: - # Because we're outputting JSON we can't just log the error, but must report the error - # via the JSON format. + exit_status = 0 + try: + all_outputs = (self._args.probes, self._args.targets, self._args.boards, self._args.features) + + # Default to listing probes. + if not any(all_outputs): + self._args.probes = True + + # Check for more than one output option being selected. + if sum(int(x) for x in all_outputs) > 1: + # Because we're outputting JSON we can't just log the error, but must report the error + # via the JSON format. + obj = { + 'pyocd_version' : __version__, + 'version' : { 'major' : 1, 'minor' : 0 }, + 'status' : 1, + 'error' : "More than one output data selected.", + } + exit_status = 1 + else: + # Create a session with no device so we load any config. + session = Session(None, + project_dir=self._args.project_dir, + config_file=self._args.config, + no_config=self._args.no_config, + pack=self._args.pack, + **convert_session_options(self._args.options) + ) + + if self._args.targets or self._args.boards: + # Create targets from provided CMSIS pack. + if session.options['pack'] is not None: + pack_target.PackTargets.populate_targets_from_pack(session.options['pack']) + + if self._args.probes: + obj = ListGenerator.list_probes() + elif self._args.targets: + obj = ListGenerator.list_targets() + elif self._args.boards: + obj = ListGenerator.list_boards() + elif self._args.features: + obj = ListGenerator.list_features() + else: + assert False + except Exception as e: + # Report exceptions via JSON output. obj = { 'pyocd_version' : __version__, 'version' : { 'major' : 1, 'minor' : 0 }, 'status' : 1, - 'error' : "More than one output data selected.", + 'error' : f"Error occurred during processing.\n" + traceback.format_exc(), } + exit_status = 1 - print(json.dumps(obj, indent=4)) - return 0 - - # Create a session with no device so we load any config. 
- session = Session(None, - project_dir=self._args.project_dir, - config_file=self._args.config, - no_config=self._args.no_config, - pack=self._args.pack, - **convert_session_options(self._args.options) - ) - - if self._args.targets or self._args.boards: - # Create targets from provided CMSIS pack. - if session.options['pack'] is not None: - pack_target.PackTargets.populate_targets_from_pack(session.options['pack']) - - if self._args.probes: - obj = ListGenerator.list_probes() - elif self._args.targets: - obj = ListGenerator.list_targets() - elif self._args.boards: - obj = ListGenerator.list_boards() - elif self._args.features: - obj = ListGenerator.list_features() - else: - assert False print(json.dumps(obj, indent=4)) - return 0 + return exit_status From d7494a6f8d5628d458e79f738a392ce6bc64543a Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 22 Dec 2021 15:42:20 -0600 Subject: [PATCH 081/123] coresight: GenericMemAPTarget: derive from CoreSightComponent instead of CoreSightCoreComponent. --- pyocd/coresight/generic_mem_ap.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pyocd/coresight/generic_mem_ap.py b/pyocd/coresight/generic_mem_ap.py index e12c45eb0..3bbbd10f1 100644 --- a/pyocd/coresight/generic_mem_ap.py +++ b/pyocd/coresight/generic_mem_ap.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2020 Cypress Semiconductor Corporation +# Copyright (c) 2021 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +17,7 @@ import logging -from .component import CoreSightCoreComponent +from .component import CoreSightComponent from ..core.target import Target from ..core.core_registers import CoreRegistersIndex @@ -25,7 +26,7 @@ DEAD_VALUE = 0 -class GenericMemAPTarget(Target, CoreSightCoreComponent): +class GenericMemAPTarget(Target, CoreSightComponent): """! 
@brief This target represents ARM debug Access Port without a CPU It may be used to access the address space of the target via Access Ports @@ -42,7 +43,7 @@ class GenericMemAPTarget(Target, CoreSightCoreComponent): def __init__(self, session, ap, memory_map=None, core_num=0, cmpid=None, address=None): Target.__init__(self, session, memory_map) - CoreSightCoreComponent.__init__(self, ap, cmpid, address) + CoreSightComponent.__init__(self, ap, cmpid, address) self.core_number = core_num self.core_type = DEAD_VALUE self._core_registers = CoreRegistersIndex() From 7965b655f9cda26e4ad2e1d9cbda68b7421c57e0 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 22 Dec 2021 15:43:32 -0600 Subject: [PATCH 082/123] family: imxrt: fix reset catch calls not passing reset type to super. --- pyocd/target/family/target_imxrt.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyocd/target/family/target_imxrt.py b/pyocd/target/family/target_imxrt.py index fa6144de8..99f5ae731 100644 --- a/pyocd/target/family/target_imxrt.py +++ b/pyocd/target/family/target_imxrt.py @@ -28,7 +28,7 @@ class IMXRT(CoreSightTarget): VENDOR = "NXP" def create_init_sequence(self): - seq = super(IMXRT, self).create_init_sequence() + seq = super().create_init_sequence() seq.wrap_task('discovery', lambda seq: seq.replace_task('create_cores', self.create_cores) ) @@ -63,7 +63,7 @@ class CortexM7_IMXRT(CortexM): } def __init__(self, *args, **kwargs): - super(CortexM7_IMXRT, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.get_boot_mode() def get_boot_mode(self): @@ -134,11 +134,11 @@ def set_reset_catch(self, reset_type=None): # normal reset catch LOG.debug("normal_set_reset_catch") self.did_normal_reset_catch = True - super(CortexM7_IMXRT, self).set_reset_catch() + super().set_reset_catch(reset_type) def clear_reset_catch(self, reset_type=None): if self.did_normal_reset_catch: - super(CortexM7_IMXRT, self).clear_reset_catch() + 
super().clear_reset_catch(reset_type) else: # Disable Reset Vector Catch in DEMCR value = self.read_memory(CortexM.DEMCR) From 36d6fbca4fa2831eccbca6ae4e8580d970cb8bb9 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 25 Dec 2021 19:58:05 +0000 Subject: [PATCH 083/123] stlink: usb: fix typo in USB PID map for 0x3752. (#1279) --- pyocd/probe/stlink/usb.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pyocd/probe/stlink/usb.py b/pyocd/probe/stlink/usb.py index 12ab5776d..84e5680f4 100644 --- a/pyocd/probe/stlink/usb.py +++ b/pyocd/probe/stlink/usb.py @@ -49,14 +49,18 @@ class STLinkUSBInterface: USB_VID = 0x0483 ## Map of USB PID to firmware version name and device endpoints. + # + # Other PIDs: + # - 0x3744: STLink V1 + # - 0x374d: STLink V3 DFU USB_PID_EP_MAP = { # PID Version OUT IN SWV 0x3748: STLinkInfo('V2', 0x02, 0x81, 0x83), - 0x374b: STLinkInfo('V2-1', 0x01, 0x81, 0x82), 0x374a: STLinkInfo('V2-1', 0x01, 0x81, 0x82), # Audio - 0x3742: STLinkInfo('V2-1', 0x01, 0x81, 0x82), # No MSD + 0x374b: STLinkInfo('V2-1', 0x01, 0x81, 0x82), 0x374e: STLinkInfo('V3', 0x01, 0x81, 0x82), 0x374f: STLinkInfo('V3', 0x01, 0x81, 0x82), # Bridge + 0x3752: STLinkInfo('V2-1', 0x01, 0x81, 0x82), # No MSD 0x3753: STLinkInfo('V3', 0x01, 0x81, 0x82), # 2VCP, No MSD 0x3754: STLinkInfo('V3', 0x01, 0x81, 0x82), # No MSD 0x3755: STLinkInfo('V3', 0x01, 0x81, 0x82), From c11c1a3fb55ebfd7760caa5c725b361f1428f4eb Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 26 Dec 2021 19:23:21 +0000 Subject: [PATCH 084/123] coresight: coresight_target: disconnect DP only if resuming on disconnect. 
(#1281) --- pyocd/coresight/coresight_target.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyocd/coresight/coresight_target.py b/pyocd/coresight/coresight_target.py index ba28ba62d..6024a9b68 100644 --- a/pyocd/coresight/coresight_target.py +++ b/pyocd/coresight/coresight_target.py @@ -117,7 +117,10 @@ def disconnect(self, resume: bool = True) -> None: self.call_delegate('will_disconnect', target=self, resume=resume) for core in self.cores.values(): core.disconnect(resume) - self.dp.disconnect() + # Only disconnect the DP if resuming; otherwise it will power down debug and potentially + # let the core continue running. + if resume: + self.dp.disconnect() self.call_delegate('did_disconnect', target=self, resume=resume) def create_discoverer(self) -> None: From 57820bba0bcede025166472f713470a342097774 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 26 Dec 2021 22:01:00 +0000 Subject: [PATCH 085/123] gdbserver: context_facade: return no regs in T response in reading regs fails. (#1282) If reading registers for the T response fails, don't include any register values in the T response. Unlike other gdb register read responses, we shouldn't set the value of failed register reads to x's since gdb dislikes x's in a T response. --- pyocd/gdbserver/context_facade.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/pyocd/gdbserver/context_facade.py b/pyocd/gdbserver/context_facade.py index 80ac84ac6..027d11f2f 100644 --- a/pyocd/gdbserver/context_facade.py +++ b/pyocd/gdbserver/context_facade.py @@ -210,7 +210,7 @@ def get_signal_value(self): return signal def _get_reg_index_value_pairs(self, reg_list): - """! @brief Return register values as pairs. + """! @brief Return register values as pairs for the T response. Returns a string like NN:MMMMMMMM;NN:MMMMMMMM;... for the T response string. 
NN is the index of the @@ -220,15 +220,14 @@ def _get_reg_index_value_pairs(self, reg_list): try: reg_values = self._context.read_core_registers_raw(reg_list) except exceptions.CoreRegisterAccessError: - reg_values = [None] * len(reg_list) + # If we cannot read registers, return an empty string. We mustn't return 'x's like the other + # register read methods do, because gdb terribly dislikes 'x's in a T response. + return result for reg_name, reg_value in zip(reg_list, reg_values): reg = self._context.core.core_registers.by_name[reg_name] - # Return x's if the register read failed. - if reg_value is None: - encoded_reg = "xx" * round_up_div(reg.bitsize, 8) - else: - encoded_reg = conversion.uint_to_hex_le(reg_value, reg.bitsize) + assert reg_value is not None + encoded_reg = conversion.uint_to_hex_le(reg_value, reg.bitsize) result += (conversion.byte_to_hex2(reg.gdb_regnum) + ':' + encoded_reg + ';').encode() return result From 347b5482485ca9d571383425c76ba0936cf6e966 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 26 Dec 2021 23:09:30 +0000 Subject: [PATCH 086/123] stlink: improve/fix setting swd frequency for STLinkV3. (#1283) - Use the SWITCH_STLINK_FREQ command to change the STLinkV3 HCLK prescaler on open. By default it's set to 1 (high performance). - Add 'stlink.v3_prescaler' session option to control the prescaler. - Fix bug in setting the SWD frequency for V3, in that it was always setting JTAG frequency instead. - Fix a param name shadowing the builtin max(). --- docs/options.md | 18 ++++++++++++++++++ pyocd/probe/stlink/constants.py | 10 +++++++--- pyocd/probe/stlink/stlink.py | 24 +++++++++++++++++++----- pyocd/probe/stlink_probe.py | 17 +++++++++++++++++ 4 files changed, 61 insertions(+), 8 deletions(-) diff --git a/docs/options.md b/docs/options.md index f6eeddac3..437247811 100644 --- a/docs/options.md +++ b/docs/options.md @@ -598,3 +598,21 @@ Default is False, so possible WAIT or FAULT SWD acknowldeges and protocol errors
### News +- A new CI pipeline for functional tests is now running on a new test farm. Full results are [publicly + accessible](https://dev.azure.com/pyocd/pyocd/_build?definitionId=1&_a=summary) on Azure Pipelines. - pyOCD has several new community resources: the [pyocd.io](https://pyocd.io/) website, a [Slack workspace](https://join.slack.com/t/pyocd/shared_invite/zt-zqjv6zr5-ZfGAXl_mFCGGmFlB_8riHA), and a [mailing list](https://groups.google.com/g/pyocd) for announcements. diff --git a/docs/automated_tests.md b/docs/automated_tests.md index e1eb7311c..ad0df70b7 100644 --- a/docs/automated_tests.md +++ b/docs/automated_tests.md @@ -37,17 +37,36 @@ Functional tests: - `basic_test.py`: a simple test that checks a range of basic functionality, from flash programming to accessing memory and core registers. - `blank_test.py`: tests ability to connect to devices with with blank flash. (Not run by `automated_test.py`.) +- `commander_test.py`: tests the `pyocd commander` functionality. +- `commands_test.py`: tests commands supported by commander and gdb monitor commands. +- `concurrency_test.py`: verify multiple threads can simultaneously access a debug probe, specifically for memory + transfers. - `connect_test.py`: tests all combinations of the halt on connect and disconnect resume options. - `cortex_test.py`: validates CPU control operations and memory accesses. - `debug_context_test.py`: tests some `DebugContext` classes. -- `flash_test.py`: comprehensive test of flash programming. - `flash_loader_test.py`: test the classes in the `pyocd.flash.loader` module. -- `gdb_server_json_test.py`: validates the JSON output from pyocd-gdbserver used by tools like the GNU MCU Eclipse pyOCD plugin. +- `flash_test.py`: comprehensive test of flash programming. +- `import_all.py`: imports all pyocd modules. (Not run by `automated_test.py`.) - `gdb_test.py`: tests the gdbserver by running a script in a gdb process. 
Note that on Windows, the 32-bit Python 2.7 must be installed for the Python-enabled gdb to work properly and for this test to pass. +- `json_lists_test.py`: validates the JSON output from `pyocd json`. - `parallel_test.py`: checks for issues with accessing debug probes from multiple processes and threads simultaneously. (Not run by `automated_test.py`.) +- `probeserver_test.py`: verify remote probe server and client. - `speed_test.py`: performance test for memory reads and writes. +- `user_script_test.py`: verify loading of user scripts. + +## Azure Pipelines + +PyOCD uses Azure Pipelines to run the CI tests for commits and pull requests. The pipeline runs the functional tests on +a set of test machines, called self-hosted test agents in Azure Pipelines parlance. There is one each of Mac, Linux, and +Windows test agents. + +The complete results from pipeline runs are [publicly +accessible](https://dev.azure.com/pyocd/pyocd/_build?definitionId=1&_a=summary). + +For pull requests, a pyOCD team member or collaborator must manually initiate the pipeline run by entering a special +comment of the form "/azp run" or "/AzurePipelines run". ## Testing with tox From 75f47b9c84d2e41fe2f5231e5667b2b02fdb62bf Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 14 Dec 2021 16:47:11 -0600 Subject: [PATCH 056/123] session: ensure get_current() always returns a valid Session object. --- pyocd/core/session.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pyocd/core/session.py b/pyocd/core/session.py index c9e4531e6..f0d41e784 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -96,9 +96,11 @@ def get_current(cls): or for debug or other purposes. 
""" if cls._current_session is not None: - return cls._current_session() - else: - return Session(None) + session = cls._current_session() + if session is not None: + return session + + return Session(None) def __init__(self, probe, auto_open=True, options=None, option_defaults=None, **kwargs): """! @brief Session constructor. From 18ffd1cd9cb779160399319ed95cd086bdee826f Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 14 Dec 2021 16:48:08 -0600 Subject: [PATCH 057/123] commands: add 'flushprobe' command. --- pyocd/commands/commands.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pyocd/commands/commands.py b/pyocd/commands/commands.py index 50e3d4a59..21f101e90 100755 --- a/pyocd/commands/commands.py +++ b/pyocd/commands/commands.py @@ -1279,6 +1279,19 @@ def execute(self): self.context.target.dp.aps[self.ap_addr] = ap # Same mutable dict as target.aps self.context.writef("AP#{:d} IDR = {:#010x}", self.apsel, ap.idr) +class FlushProbeCommand(CommandBase): + INFO = { + 'names': ['flushprobe'], + 'group': 'commander', + 'category': 'probe', + 'nargs': 0, + 'usage': "", + 'help': "Ensure all debug probe requests have been completed.", + } + + def execute(self): + self.context.probe.flush() + class ReinitCommand(CommandBase): INFO = { 'names': ['reinit'], From ef36e7719ba69dc171343458f53f84342b0f8cde Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 8 Dec 2021 16:28:31 -0600 Subject: [PATCH 058/123] CMSIS-DAP: pydapaccess: better handling of protocol versions. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Log v1/v2 based on use of bulk endpoints, so it’s easy to see in a debug log. - Explicit check for beta versions. - Only compare against major protocol versions. - Reintroduce explicit handling of DAPLink 0254 and 0255 so they are recognised as v2.x. - Output debug log if an unknown major version is found or there is an error parsing. - Set fallback protocol version based on bulk endpoint usage. 
--- pyocd/probe/pydapaccess/cmsis_dap_core.py | 8 ++ .../probe/pydapaccess/dap_access_cmsis_dap.py | 100 +++++++++++------- .../probe/pydapaccess/interface/interface.py | 8 ++ .../pydapaccess/interface/pyusb_v2_backend.py | 5 + 4 files changed, 80 insertions(+), 41 deletions(-) diff --git a/pyocd/probe/pydapaccess/cmsis_dap_core.py b/pyocd/probe/pydapaccess/cmsis_dap_core.py index e84cf05b8..04013fe1c 100644 --- a/pyocd/probe/pydapaccess/cmsis_dap_core.py +++ b/pyocd/probe/pydapaccess/cmsis_dap_core.py @@ -89,6 +89,14 @@ class CMSISDAPVersion: V2_0_0 = (2, 0, 0) V2_1_0 = (2, 1, 0) + @classmethod + def major_versions(cls) -> Set[int]: + """@brief Returns a set of major versions.""" + return { + v[0] for k, v in cls.__dict__.items() + if k.startswith('V') + } + @classmethod def minor_versions(cls) -> Set[Tuple[int, int]]: """@brief Returns a set of minor version tuples.""" diff --git a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py index c455da928..0f06c7cfb 100644 --- a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py +++ b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py @@ -26,6 +26,7 @@ from .dap_access_api import DAPAccessIntf from .cmsis_dap_core import CMSISDAPProtocol from .interface import (INTERFACE, USB_BACKEND, USB_BACKEND_V2) +from .interface.common import ARM_DAPLINK_ID from .cmsis_dap_core import ( Command, Pin, @@ -608,47 +609,60 @@ def unlock(self): def _read_protocol_version(self): """! Determine the CMSIS-DAP protocol version.""" + # The fallback version to use when version parsing fails depends on whether v2 bulk endpoints are used + # (unfortunately conflating transport with protocol). + fallback_protocol_version = (CMSISDAPVersion.V1_0_0, CMSISDAPVersion.V2_0_0)[self._interface.is_bulk] + protocol_version_str = self._protocol.dap_info(self.ID.CMSIS_DAP_PROTOCOL_VERSION) # Just in case we don't get a valid response, default to the lowest version (not including betas). 
if not protocol_version_str: - self._cmsis_dap_version = CMSISDAPVersion.V1_0_0 - return - - # Convert the version to a 3-tuple for easy comparison. - # 1.2.3 will be converted to (1,2,3), 1.10 to (1,1,0), and so on. - # - # There are two version formats returned from the reference CMSIS-DAP code: 2-field and 3-field. - # The older versions return versions like "1.07" and "1.10", while recent versions return "1.2.0" - # or "2.0.0". - # - # Some CMSIS-DAP compatible debug probes from various vendors return the probe's firmware version - # rather than protocol version (like DAPLink versions 0254 and 0255 do) due to a misunderstanding - # based on unclear documentation. These cases are handled by the additional error checking below. - # - # Note that the exact version identified here is not that important, as it's not used much in - # this code (so far at least). There are also DAP_Info Capability bits for availability of certain - # commands that should be used instead of checking the version. - try: - fw_version = protocol_version_str.split('.') - major = int(fw_version[0]) - # Handle version of the form "1.10" by treating the two digits after the dot as minor and patch. - if (len(fw_version) == 2) and len(fw_version[1]) == 2: - minor = int(fw_version[1][0]) - patch = int(fw_version[1][1]) - # All other forms. - else: - minor = int(fw_version[1] if len(fw_version) > 1 else 0) - patch = int(fw_version[2] if len(fw_version) > 2 else 0) - self._cmsis_dap_version = (major, minor, patch) - except ValueError: - # One of the protocol version fields had a non-numeric character, indicating it is not a valid - # CMSIS-DAP version number. Default to the lowest version. - self._cmsis_dap_version = CMSISDAPVersion.V1_0_0 - - # Validate the version against known CMSIS-DAP minor versions. This will also catch the beta release - # versions of CMSIS-DAP, 0.01 and 0.02, and raise them to 1.0.0. 
- if self._cmsis_dap_version[:2] not in CMSISDAPVersion.minor_versions(): - self._cmsis_dap_version = CMSISDAPVersion.V1_0_0 + self._cmsis_dap_version = fallback_protocol_version + # Deal with DAPLink broken version number, where these versions of the firmware reported the DAPLink + # version number for DAP_INFO_FW_VER instead of the CMSIS-DAP version, due to a misunderstanding + # based on unclear documentation. + elif (self._vidpid == ARM_DAPLINK_ID) and (protocol_version_str in ("0254", "0255")): + self._cmsis_dap_version = CMSISDAPVersion.V2_0_0 + else: + # Convert the version to a 3-tuple for easy comparison. + # 1.2.3 will be converted to (1,2,3), 1.10 to (1,1,0), and so on. + # + # There are two version formats returned from the reference CMSIS-DAP code: 2-field and 3-field. + # The older versions return versions like "1.07" and "1.10", while recent versions return "1.2.0" + # or "2.0.0". + # + # Some CMSIS-DAP compatible debug probes from various vendors return the probe's firmware version + # rather than protocol version (like DAPLink versions 0254 and 0255 do) due to a misunderstanding + # based on unclear documentation. These cases are handled by the additional error checking below. + # + # Note that the exact version identified here is not that important, as it's not used much in + # this code (so far at least). There are also DAP_Info Capability bits for availability of certain + # commands that should be used instead of checking the version. + try: + fw_version = protocol_version_str.split('.') + major = int(fw_version[0]) + # Handle version of the form "1.10" by treating the two digits after the dot as minor and patch. + if (len(fw_version) == 2) and len(fw_version[1]) == 2: + minor = int(fw_version[1][0]) + patch = int(fw_version[1][1]) + # All other forms. 
+ else: + minor = int(fw_version[1] if len(fw_version) > 1 else 0) + patch = int(fw_version[2] if len(fw_version) > 2 else 0) + self._cmsis_dap_version = (major, minor, patch) + except ValueError: + # One of the protocol version fields had a non-numeric character, indicating it is not a valid + # CMSIS-DAP version number. Default to the lowest version. + LOG.debug("Error parsing CMSIS-DAP protocol version '%s'", protocol_version_str) + self._cmsis_dap_version = fallback_protocol_version + + # Catch the beta release versions of CMSIS-DAP, 0.01 and 0.02, and raise them to 1.0.0. + if self._cmsis_dap_version[:2] == (0, 0): + self._cmsis_dap_version = CMSISDAPVersion.V1_0_0 + # Validate the version against known CMSIS-DAP major versions. + elif self._cmsis_dap_version[0] not in CMSISDAPVersion.major_versions(): + LOG.debug("Unrecognised major version of CMSIS-DAP: protocol version %i.%i.%i", + *self._cmsis_dap_version) + self._cmsis_dap_version = fallback_protocol_version @locked def open(self): @@ -673,12 +687,16 @@ def open(self): and self._cmsis_dap_version < CMSISDAPVersion.V2_0_0): self._fw_version = self._protocol.dap_info(self.ID.PRODUCT_FW_VERSION) + # Major protocol version based on use of bulk endpoints. + proto_major = (2 if self._interface.is_bulk else 1) + # Log probe's firmware version. 
if self._fw_version: - LOG.debug("CMSIS-DAP probe %s: firmware version %s, protocol version %i.%i.%i", - self._unique_id, self._fw_version, *self._cmsis_dap_version) + LOG.debug("CMSIS-DAP v%d probe %s: firmware version %s, protocol version %i.%i.%i", + proto_major, self._unique_id, self._fw_version, *self._cmsis_dap_version) else: - LOG.debug("CMSIS-DAP probe %s: protocol version %i.%i.%i", self._unique_id, *self._cmsis_dap_version) + LOG.debug("CMSIS-DAP v%d probe %s: protocol version %i.%i.%i", + proto_major, self._unique_id, *self._cmsis_dap_version) self._interface.set_packet_count(self._packet_count) self._packet_size = self._protocol.dap_info(self.ID.MAX_PACKET_SIZE) diff --git a/pyocd/probe/pydapaccess/interface/interface.py b/pyocd/probe/pydapaccess/interface/interface.py index e67f80d9e..56c078ce9 100644 --- a/pyocd/probe/pydapaccess/interface/interface.py +++ b/pyocd/probe/pydapaccess/interface/interface.py @@ -30,6 +30,11 @@ def __init__(self): def has_swo_ep(self): return False + @property + def is_bulk(self): + """@brief Whether the interface uses CMSIS-DAP v2 bulk endpoints.""" + return False + def open(self): return @@ -63,3 +68,6 @@ def get_packet_size(self): def get_serial_number(self): return self.serial_number + + def __repr__(self): + return f"<{type(self).__name__}@{id(self):x} {self.get_info()} {self.serial_number}>" diff --git a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py index 8167787a6..312a17cbf 100644 --- a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py +++ b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py @@ -75,6 +75,11 @@ def __init__(self): def has_swo_ep(self): return self.ep_swo is not None + @property + def is_bulk(self): + """@brief Whether the interface uses CMSIS-DAP v2 bulk endpoints.""" + return True + def open(self): assert self.closed is True From bec8f3ece53f3fe5fa5c0ae31cbb1b7db8cf5a63 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Thu, 9 Dec 
2021 17:04:36 -0600 Subject: [PATCH 059/123] CMSIS-DAP: pydapaccess: backend: add USB packet trace logging. --- docs/configuring_logging.md | 4 ++++ .../pydapaccess/interface/hidapi_backend.py | 9 ++++++-- .../pydapaccess/interface/pyusb_backend.py | 21 ++++++++++++++++++- .../pydapaccess/interface/pyusb_v2_backend.py | 17 ++++++++++++++- .../pydapaccess/interface/pywinusb_backend.py | 18 ++++++++++++++-- pyocd/probe/stlink/usb.py | 12 +++++++---- 6 files changed, 71 insertions(+), 10 deletions(-) diff --git a/docs/configuring_logging.md b/docs/configuring_logging.md index a890e2bfe..832fcef24 100644 --- a/docs/configuring_logging.md +++ b/docs/configuring_logging.md @@ -89,6 +89,10 @@ Trace logger | Trace output `pyocd.probe.cmsis_dap_probe.trace` | CMSIS-DAP probe API calls `pyocd.probe.jlink_probe.trace` | Log output from JLink library `pyocd.probe.pydapaccess.dap_access_cmsis_dap.trace` | CMSIS-DAP packet building +`pyocd.probe.pydapaccess.interface.hidapi_backend.trace` | CMSIS-DAP v1 hidapi backend USB transfers +`pyocd.probe.pydapaccess.interface.pyusb_backend.trace` | CMSIS-DAP v1 pyusb backend USB transfers +`pyocd.probe.pydapaccess.interface.pyusb_v2_backend.trace` | CMSIS-DAP v2 pyusb backend USB transfers +`pyocd.probe.pydapaccess.interface.pywinusb_backend.trace` | CMSIS-DAP v1 pywinusb backend USB transfers `pyocd.probe.stlink.usb.trace` | STLink USB transfers `pyocd.probe.tcp_client_probe.trace` | Remote probe client requests and responses `pyocd.probe.tcp_probe_server.trace` | Remote probe server requests and responses diff --git a/pyocd/probe/pydapaccess/interface/hidapi_backend.py b/pyocd/probe/pydapaccess/interface/hidapi_backend.py index b5c249ceb..e2639ffed 100644 --- a/pyocd/probe/pydapaccess/interface/hidapi_backend.py +++ b/pyocd/probe/pydapaccess/interface/hidapi_backend.py @@ -27,6 +27,8 @@ from ....utility.compatibility import to_str_safe LOG = logging.getLogger(__name__) +TRACE = LOG.getChild("trace") +TRACE.setLevel(logging.CRITICAL) 
try: import hid @@ -104,15 +106,18 @@ def get_all_connected_interfaces(): def write(self, data): """! @brief Write data on the OUT endpoint associated to the HID interface """ + if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data])) data.extend([0] * (self.packet_size - len(data))) -# LOG.debug("snd>(%d) %s" % (len(data), ' '.join(['%02x' % i for i in data]))) self.device.write([0] + data) def read(self, timeout=-1): """! @brief Read data on the IN endpoint associated to the HID interface """ data = self.device.read(self.packet_size) -# LOG.debug("rcv<(%d) %s" % (len(data), ' '.join(['%02x' % i for i in data]))) + if TRACE.isEnabledFor(logging.DEBUG): + # Strip off trailing zero bytes to reduce clutter. + TRACE.debug(" USB IN < (%d) %s", len(data), ' '.join([f'{i:02x}' for i in bytes(data).rstrip(b'\x00')])) return data def close(self): diff --git a/pyocd/probe/pydapaccess/interface/pyusb_backend.py b/pyocd/probe/pydapaccess/interface/pyusb_backend.py index dea50add0..dd1c4bc7a 100644 --- a/pyocd/probe/pydapaccess/interface/pyusb_backend.py +++ b/pyocd/probe/pydapaccess/interface/pyusb_backend.py @@ -33,6 +33,8 @@ from ..dap_access_api import DAPAccessIntf LOG = logging.getLogger(__name__) +TRACE = LOG.getChild("trace") +TRACE.setLevel(logging.CRITICAL) try: import libusb_package @@ -141,7 +143,13 @@ def rx_task(self): while not self.closed: self.read_sem.acquire() if not self.closed: - self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000)) + read_data = self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000) + + if TRACE.isEnabledFor(logging.DEBUG): + # Strip off trailing zero bytes to reduce clutter. 
+ TRACE.debug(" USB IN < (%d) %s", len(read_data), ' '.join([f'{i:02x}' for i in bytes(read_data).rstrip(b'\x00')])) + + self.rcv_data.append(read_data) finally: # Set last element of rcv_data to None on exit self.rcv_data.append(None) @@ -183,6 +191,10 @@ def write(self, data): if self.ep_out: report_size = self.ep_out.wMaxPacketSize + # Trace output data before padding. + if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data])) + for _ in range(report_size - len(data)): data.append(0) @@ -207,6 +219,13 @@ def read(self): if self.rcv_data[0] is None: raise DAPAccessIntf.DeviceError("Device %s read thread exited" % self.serial_number) + + # Trace when the higher layer actually gets a packet previously read. + if TRACE.isEnabledFor(logging.DEBUG): + # Strip off trailing zero bytes to reduce clutter. + TRACE.debug(" USB RD < (%d) %s", len(self.rcv_data[0]), + ' '.join([f'{i:02x}' for i in bytes(self.rcv_data[0]).rstrip(b'\x00')])) + return self.rcv_data.pop(0) def close(self): diff --git a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py index 312a17cbf..22a041fdd 100644 --- a/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py +++ b/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py @@ -34,6 +34,8 @@ from ... 
import common LOG = logging.getLogger(__name__) +TRACE = LOG.getChild("trace") +TRACE.setLevel(logging.CRITICAL) try: import libusb_package @@ -161,7 +163,12 @@ def rx_task(self): while not self.rx_stop_event.is_set(): self.read_sem.acquire() if not self.rx_stop_event.is_set(): - self.rcv_data.append(self.ep_in.read(self.packet_size, 10 * 1000)) + read_data = self.ep_in.read(self.packet_size, 10 * 1000) + + if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB IN < (%d) %s", len(read_data), ' '.join([f'{i:02x}' for i in read_data])) + + self.rcv_data.append(read_data) finally: # Set last element of rcv_data to None on exit self.rcv_data.append(None) @@ -208,6 +215,9 @@ def write(self, data): if (len(data) > 0) and (len(data) < self.packet_size) and (len(data) % self.ep_out.wMaxPacketSize == 0): data.append(0) + if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data])) + self.read_sem.release() self.ep_out.write(data) @@ -220,6 +230,11 @@ def read(self): if self.rcv_data[0] is None: raise DAPAccessIntf.DeviceError("Device %s read thread exited unexpectedly" % self.serial_number) + + # Trace when the higher layer actually gets a packet previously read. 
+ if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB RD < (%d) %s", len(self.rcv_data[0]), ' '.join([f'{i:02x}' for i in self.rcv_data[0]])) + return self.rcv_data.pop(0) def read_swo(self): diff --git a/pyocd/probe/pydapaccess/interface/pywinusb_backend.py b/pyocd/probe/pydapaccess/interface/pywinusb_backend.py index d0e69c3d3..1549309f8 100644 --- a/pyocd/probe/pydapaccess/interface/pywinusb_backend.py +++ b/pyocd/probe/pydapaccess/interface/pywinusb_backend.py @@ -30,6 +30,8 @@ OPEN_TIMEOUT_S = 60.0 LOG = logging.getLogger(__name__) +TRACE = LOG.getChild("trace") +TRACE.setLevel(logging.CRITICAL) try: import pywinusb.hid as hid @@ -56,7 +58,10 @@ def __init__(self): # handler called when a report is received def rx_handler(self, data): -# LOG.debug("rcv<(%d) %s" % (len(data), ' '.join(['%02x' % i for i in data]))) + if TRACE.isEnabledFor(logging.DEBUG): + # Strip off trailing zero bytes to reduce clutter. + TRACE.debug(" USB IN < (%d) %s", len(data), ' '.join([f'{i:02x}' for i in bytes(data).rstrip(b'\x00')])) + self.rcv_data.append(data[1:]) def open(self): @@ -140,8 +145,10 @@ def get_all_connected_interfaces(): def write(self, data): """! @brief Write data on the OUT endpoint associated to the HID interface """ + if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data])) + data.extend([0] * (self.packet_size - len(data))) -# LOG.debug("snd>(%d) %s" % (len(data), ' '.join(['%02x' % i for i in data]))) self.report.send([0] + data) def read(self, timeout=20.0): @@ -161,6 +168,13 @@ def read(self, timeout=20.0): # 3. CMSIS-DAP is performing a long operation or is being # halted in a debugger raise DAPAccessIntf.DeviceError("Read timed out") + + # Trace when the higher layer actually gets a packet previously read. + if TRACE.isEnabledFor(logging.DEBUG): + # Strip off trailing zero bytes to reduce clutter. 
+ TRACE.debug(" USB RD < (%d) %s", len(self.rcv_data[0]), + ' '.join([f'{i:02x}' for i in bytes(self.rcv_data[0]).rstrip(b'\x00')])) + return self.rcv_data.popleft() def close(self): diff --git a/pyocd/probe/stlink/usb.py b/pyocd/probe/stlink/usb.py index f16a420d6..12ab5776d 100644 --- a/pyocd/probe/stlink/usb.py +++ b/pyocd/probe/stlink/usb.py @@ -233,21 +233,25 @@ def transfer(self, cmd, writeData=None, readSize=None, timeout=1000): try: # Command phase. - TRACE.debug(" USB CMD> %s" % ' '.join(['%02x' % i for i in paddedCmd])) + if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB CMD> (%d) %s", len(paddedCmd), ' '.join([f'{i:02x}' for i in paddedCmd])) count = self._ep_out.write(paddedCmd, timeout) assert count == len(paddedCmd) # Optional data out phase. if writeData is not None: - TRACE.debug(" USB OUT> %s" % ' '.join(['%02x' % i for i in writeData])) + if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB OUT> (%d) %s", len(writeData), ' '.join([f'{i:02x}' for i in writeData])) count = self._ep_out.write(writeData, timeout) assert count == len(writeData) # Optional data in phase. if readSize is not None: - TRACE.debug(" USB IN < (%d bytes)" % readSize) + if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB IN < (req %d bytes)", readSize) data = self._read(readSize) - TRACE.debug(" USB IN < %s" % ' '.join(['%02x' % i for i in data])) + if TRACE.isEnabledFor(logging.DEBUG): + TRACE.debug(" USB IN < (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data])) return data except usb.core.USBError as exc: raise exceptions.ProbeError("USB Error: %s" % exc) from exc From 9bd268db8eeb2ea054dc043a03bf2af190dd88eb Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 10 Dec 2021 15:18:43 -0600 Subject: [PATCH 060/123] CMSIS-DAP: pydapaccess: additional trace logging; command ids. - _Command objects have unique IDs, output in trace logs. - Trace logging of _read_packet(), _send_packet(), and aborts. 
- Renamed _Command._get_free_words() to _get_free_transfers() to reflect actual return value. --- .../probe/pydapaccess/dap_access_cmsis_dap.py | 48 +++++++++++++------ 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py index 0f06c7cfb..bee56a335 100644 --- a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py +++ b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py @@ -168,7 +168,11 @@ class _Command(object): encode_data. The response to the command is decoded with decode_data. """ + _command_counter = 0 + def __init__(self, size): + self._id = _Command._command_counter + _Command._command_counter += 1 self._size = size self._read_count = 0 self._write_count = 0 @@ -177,10 +181,14 @@ def __init__(self, size): self._data = [] self._dap_index = None self._data_encoded = False - TRACE.debug("New _Command") + TRACE.debug("[cmd:%d] New _Command", self._id) + + @property + def uid(self) -> int: + return self._id - def _get_free_words(self, blockAllowed, isRead): - """! @brief Return the number of words free in the transmit packet + def _get_free_transfers(self, blockAllowed, isRead): + """! @brief Return the number of available read or write transfers. """ if blockAllowed: # DAP_TransferBlock request packet: @@ -233,7 +241,7 @@ def get_request_space(self, count, request, dap_index): # Compute the portion of the request that will fit in this packet. is_read = request & READ - free = self._get_free_words(blockAllowed, is_read) + free = self._get_free_transfers(blockAllowed, is_read) size = min(count, free) # Non-block transfers only have 1 byte for request count. 
@@ -241,11 +249,13 @@ def get_request_space(self, count, request, dap_index): max_count = self._write_count + self._read_count + size delta = max_count - 255 size = min(size - delta, size) - TRACE.debug("get_request_space(%d, %02x:%s)[wc=%d, rc=%d, ba=%d->%d] -> (sz=%d, free=%d, delta=%d)" % - (count, request, 'r' if is_read else 'w', self._write_count, self._read_count, self._block_allowed, blockAllowed, size, free, delta)) + TRACE.debug("[cmd:%d] get_request_space(%d, %02x:%s)[wc=%d, rc=%d, ba=%d->%d] -> (sz=%d, free=%d, delta=%d)", + self.uid, count, request, 'r' if is_read else 'w', self._write_count, self._read_count, + self._block_allowed, blockAllowed, size, free, delta) else: - TRACE.debug("get_request_space(%d, %02x:%s)[wc=%d, rc=%d, ba=%d->%d] -> (sz=%d, free=%d)" % - (count, request, 'r' if is_read else 'w', self._write_count, self._read_count, self._block_allowed, blockAllowed, size, free)) + TRACE.debug("[cmd:%d] get_request_space(%d, %02x:%s)[wc=%d, rc=%d, ba=%d->%d] -> (sz=%d, free=%d)", + self.uid, count, request, 'r' if is_read else 'w', self._write_count, self._read_count, + self._block_allowed, blockAllowed, size, free) # We can get a negative free count if the packet already contains more data than can be # sent by a DAP_Transfer command, but the new request forces DAP_Transfer. In this case, @@ -253,8 +263,8 @@ def get_request_space(self, count, request, dap_index): return max(size, 0) def get_full(self): - return (self._get_free_words(self._block_allowed, True) == 0) or \ - (self._get_free_words(self._block_allowed, False) == 0) + return (self._get_free_transfers(self._block_allowed, True) == 0) or \ + (self._get_free_transfers(self._block_allowed, False) == 0) def get_empty(self): """! 
@brief Return True if no transfers have been added to this packet @@ -281,8 +291,9 @@ def add(self, count, request, data, dap_index): self._write_count += count self._data.append((count, request, data)) - TRACE.debug("add(%d, %02x:%s) -> [wc=%d, rc=%d, ba=%d]" % - (count, request, 'r' if (request & READ) else 'w', self._write_count, self._read_count, self._block_allowed)) + TRACE.debug("[cmd:%d] add(%d, %02x:%s) -> [wc=%d, rc=%d, ba=%d]", + self.uid, count, request, 'r' if (request & READ) else 'w', self._write_count, self._read_count, + self._block_allowed) def _encode_transfer_data(self): """! @brief Encode this command into a byte array that can be sent @@ -356,7 +367,8 @@ def _decode_transfer_data(self, data): """ assert self.get_empty() is False if data[0] != Command.DAP_TRANSFER: - raise ValueError('DAP_TRANSFER response error') + TRACE.debug("[cmd:%d] response not DAP_TRANSFER", self.uid) + raise DAPAccessIntf.TransferError(f'DAP_TRANSFER response error: response is for command {data[0]:02x}') # Check response and raise an exception on errors. self._check_response(data[2]) @@ -416,7 +428,8 @@ def _decode_transfer_block_data(self, data): """ assert self.get_empty() is False if data[0] != Command.DAP_TRANSFER_BLOCK: - raise ValueError('DAP_TRANSFER_BLOCK response error') + TRACE.debug("[cmd:%d] response not DAP_TRANSFER_BLOCK", self.uid) + raise DAPAccessIntf.TransferError(f'DAP_TRANSFER_BLOCK response error: response is for command {data[0]:02x}') # Check response and raise an exception on errors. 
self._check_response(data[3]) @@ -762,6 +775,7 @@ def set_deferred_transfer(self, enable): @locked def flush(self): + TRACE.debug("flush: sending cmd:%d; reading %d outstanding", self._crnt_cmd.uid, len(self._commands_to_read)) # Send current packet self._send_packet() # Read all backlogged @@ -1028,11 +1042,13 @@ def _read_packet(self): """ # Grab command, send it and decode response cmd = self._commands_to_read.popleft() + TRACE.debug("[cmd:%d] _read_packet: reading", cmd.uid) try: raw_data = self._interface.read() raw_data = bytearray(raw_data) decoded_data = cmd.decode_data(raw_data) except Exception as exception: + TRACE.debug("[cmd:%d] _read_packet: got exception %r; aborting all transfers!", cmd.uid, exception) self._abort_all_transfers(exception) raise @@ -1076,7 +1092,10 @@ def _send_packet(self): max_packets = self._interface.get_packet_count() if len(self._commands_to_read) >= max_packets: + TRACE.debug("[cmd:%d] _send_packet: reading packet; outstanding=%d >= max=%d", + cmd.uid, len(self._commands_to_read), max_packets) self._read_packet() + TRACE.debug("[cmd:%d] _send_packet: sending", cmd.uid) data = cmd.encode_data() try: self._interface.write(list(data)) @@ -1143,6 +1162,7 @@ def _abort_all_transfers(self, exception): """! 
@brief Abort any ongoing transfers and clear all buffers """ pending_reads = len(self._commands_to_read) + TRACE.debug("aborting %d pending reads after exception %r", pending_reads, exception) # invalidate _transfer_list for transfer in self._transfer_list: transfer.add_error(exception) From b89bacf2b466b34c4090d71e6e21ad493a89f738 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 14 Dec 2021 16:46:29 -0600 Subject: [PATCH 061/123] =?UTF-8?q?CMSIS-DAP:=20pydapaccess:=20=E2=80=99cm?= =?UTF-8?q?sis=5Fdap.prefer=5Fv1'=20test=20option.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/options.md | 8 ++++++++ pyocd/core/options.py | 3 +++ pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | 17 ++++++++++++----- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/docs/options.md b/docs/options.md index 2cc66c320..f6eeddac3 100644 --- a/docs/options.md +++ b/docs/options.md @@ -70,6 +70,14 @@ Restrict CMSIS-DAP backend to using a single in-flight command at a time. This i where USB is problematic, in particular virtual machines.
cmsis_dap.prefer_v1boolFalse +If a device provides both CMSIS-DAP v1 and v2 interfaces, use the v1 interface in preference of v2. +Normal behaviour is to prefer the v2 interface. This option is primarily intended for testing. +
commander.history_length int 1000
+ +## STLink options + +These session options are available when the STLink debug probe plugin is active. + + + + + + + + + + +
Option NameTypeDefaultDescription
stlink.v3_prescalerint1 +Sets the HCLK prescaler of an STLinkV3, changing performance versus power tradeoff. +The value must be one of 1=high performance (default), 2=normal, or 4=low power. +
diff --git a/pyocd/probe/stlink/constants.py b/pyocd/probe/stlink/constants.py index 9d75d48b7..90b1daa56 100644 --- a/pyocd/probe/stlink/constants.py +++ b/pyocd/probe/stlink/constants.py @@ -16,7 +16,7 @@ # limitations under the License. class Commands: - """! + """ @brief STLink V2 and V3 commands. """ @@ -90,7 +90,7 @@ class Commands: JTAG_STLINK_JTAG_COM = 0x01 class Status: - """! + """ @brief STLink status codes and messages. """ # Status codes. @@ -120,7 +120,9 @@ class Status: SWD_AP_WDATA_ERROR = 0x18 SWD_AP_STICKY_ERROR = 0x19 SWD_AP_STICKYORUN_ERROR = 0x1a + BAD_AP = 0x1d SWV_NOT_AVAILABLE = 0x20 + JTAG_CONF_CHANGED = 0x40 JTAG_FREQ_NOT_SUPPORTED = 0x41 JTAG_UNKNOWN_CMD = 0x42 @@ -151,13 +153,15 @@ class Status: SWD_AP_WDATA_ERROR : "AP WDATA error", SWD_AP_STICKY_ERROR : "AP sticky error", SWD_AP_STICKYORUN_ERROR : "AP sticky overrun error", + BAD_AP : "Bad AP", SWV_NOT_AVAILABLE : "SWV not available", + JTAG_CONF_CHANGED : "Configuration changed", JTAG_FREQ_NOT_SUPPORTED : "Frequency not supported", JTAG_UNKNOWN_CMD : "Unknown command", } @staticmethod - def get_error_message(status): + def get_error_message(status: int) -> str: return "STLink error ({}): {}".format(status, Status.MESSAGES.get(status, "Unknown error")) ## Map from SWD frequency in Hertz to delay loop count. diff --git a/pyocd/probe/stlink/stlink.py b/pyocd/probe/stlink/stlink.py index 309384be2..40cad9b31 100644 --- a/pyocd/probe/stlink/stlink.py +++ b/pyocd/probe/stlink/stlink.py @@ -246,10 +246,23 @@ def enter_idle(self): self._device.transfer([Commands.SWIM_COMMAND, Commands.SWIM_EXIT]) self._protocol = None + def set_prescaler(self, prescaler: int) -> None: + assert prescaler in (1, 2, 4) + + # The SWITCH_STLINK_FREQ command is only supported on V3. + if self._hw_version < 3: + return + with self._lock: + cmd = [Commands.JTAG_COMMAND, Commands.SWITCH_STLINK_FREQ, prescaler] + response = self._device.transfer(cmd, readSize=8) + # The JTAG_CONF_CHANGED status is ok and expected. 
+ if response[0] != Status.JTAG_CONF_CHANGED: + self._check_status(response[0:2]) + def set_swd_frequency(self, freq=1800000): with self._lock: if self._hw_version >= 3: - self.set_com_frequency(self.Protocol.JTAG, freq) + self.set_com_frequency(self.Protocol.SWD, freq) else: for f, d in SWD_FREQ_MAP.items(): if freq >= f: @@ -293,6 +306,7 @@ def set_com_frequency(self, protocol, freq): self._check_status(response[0:2]) freqs = conversion.byte_list_to_u32le_list(response[4:8]) + LOG.debug("actual %s frequency is %d kHz", protocol.name, freqs[0]) return freqs[0] def enter_debug(self, protocol): @@ -355,11 +369,11 @@ def _clear_sticky_error(self): self.write_dap_register(self.DP_PORT, dap.DP_CTRL_STAT, dap.CTRLSTAT_STICKYERR | dap.CTRLSTAT_STICKYCMP | dap.CTRLSTAT_STICKYORUN) - def _read_mem(self, addr, size, memcmd, max, apsel): + def _read_mem(self, addr, size, memcmd, maxrx, apsel): with self._lock: result = [] while size: - thisTransferSize = min(size, max) + thisTransferSize = min(size, maxrx) cmd = [Commands.JTAG_COMMAND, memcmd] cmd.extend(struct.pack(' List[OptionInfo]: + return [ + OptionInfo('stlink.v3_prescaler', int, 1, + "Sets the HCLK prescaler of an STLinkV3, changing performance versus power tradeoff. " + "The value must be one of 1=high performance (default), 2=normal, or 4=low power.") + ] From 51f7a7ec89f060db93937e72cf18a7d57dd25b32 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 27 Dec 2021 17:30:51 +0000 Subject: [PATCH 087/123] core: memory_interface: add missing type annotation on read_memory_block32() param. 
(#1284) --- pyocd/core/memory_interface.py | 2 +- pyocd/core/soc_target.py | 2 +- pyocd/coresight/cortex_m.py | 2 +- pyocd/coresight/dap.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyocd/core/memory_interface.py b/pyocd/core/memory_interface.py index 702124751..cc34746d9 100644 --- a/pyocd/core/memory_interface.py +++ b/pyocd/core/memory_interface.py @@ -55,7 +55,7 @@ def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: """! @brief Write an aligned block of 32-bit words.""" raise NotImplementedError() - def read_memory_block32(self, addr: int, size) -> Sequence[int]: + def read_memory_block32(self, addr: int, size: int) -> Sequence[int]: """! @brief Read an aligned block of 32-bit words.""" raise NotImplementedError() diff --git a/pyocd/core/soc_target.py b/pyocd/core/soc_target.py index 1a6a95f78..823fdec54 100644 --- a/pyocd/core/soc_target.py +++ b/pyocd/core/soc_target.py @@ -211,7 +211,7 @@ def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: def read_memory_block8(self, addr: int, size: int) -> Sequence[int]: return self.selected_core_or_raise.read_memory_block8(addr, size) - def read_memory_block32(self, addr: int, size) -> Sequence[int]: + def read_memory_block32(self, addr: int, size: int) -> Sequence[int]: return self.selected_core_or_raise.read_memory_block32(addr, size) def read_core_register(self, id: "CoreRegisterNameOrNumberType") -> "CoreRegisterValueType": diff --git a/pyocd/coresight/cortex_m.py b/pyocd/coresight/cortex_m.py index f824509a4..676beaac6 100644 --- a/pyocd/coresight/cortex_m.py +++ b/pyocd/coresight/cortex_m.py @@ -484,7 +484,7 @@ def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: """! @brief Write an aligned block of 32-bit words.""" self.ap.write_memory_block32(addr, data) - def read_memory_block32(self, addr: int, size) -> Sequence[int]: + def read_memory_block32(self, addr: int, size: int) -> Sequence[int]: """! 
@brief Read an aligned block of 32-bit words.""" data = self.ap.read_memory_block32(addr, size) return self.bp_manager.filter_memory_aligned_32(addr, size, data) diff --git a/pyocd/coresight/dap.py b/pyocd/coresight/dap.py index 9e1ba6eb7..f8024b091 100644 --- a/pyocd/coresight/dap.py +++ b/pyocd/coresight/dap.py @@ -1034,7 +1034,7 @@ def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: self._dp.write_ap(addr, word) addr += 4 - def read_memory_block32(self, addr: int, size) -> Sequence[int]: + def read_memory_block32(self, addr: int, size: int) -> Sequence[int]: """! @brief Read an aligned block of 32-bit words.""" addr += self._offset result_cbs = [self._dp.read_ap(addr + i * 4, now=False) for i in range(size)] From 5a30f5b82d89785fd1b279e7baed2f21792ff6ef Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 27 Dec 2021 17:31:12 +0000 Subject: [PATCH 088/123] session: change working dir to project dir. (#1285) --- pyocd/core/session.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyocd/core/session.py b/pyocd/core/session.py index c87efd049..f51586bcf 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -170,6 +170,9 @@ def __init__( self._project_dir: str = os.path.abspath(os.path.expanduser(self.options.get('project_dir'))) LOG.debug("Project directory: %s", self.project_dir) + # Switch the working dir to the project dir. + os.chdir(self.project_dir) + # Load options from the config file. config = self._get_config() probes_config = config.pop('probes', None) From 81a4baf0d85d1714a730b94680f0074a95596966 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 28 Dec 2021 16:28:12 -0600 Subject: [PATCH 089/123] core: session: UserScriptDelegateProxy returns function proxy only for callables. 
--- pyocd/core/session.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pyocd/core/session.py b/pyocd/core/session.py index f51586bcf..44e893977 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -531,11 +531,12 @@ class UserScriptFunctionProxy: This proxy makes arguments to user script functions optional. """ - def __init__(self, fn) -> None: + def __init__(self, fn: Callable) -> None: + assert isinstance(fn, Callable) self._fn = fn self._spec = getfullargspec(fn) - def __call__(self, **kwargs) -> Any: + def __call__(self, **kwargs: Any) -> Any: args = {} for arg in self._spec.args: if arg in kwargs: @@ -551,7 +552,11 @@ def __init__(self, script_namespace: Dict) -> None: def __getattr__(self, name: str) -> Any: if name in self._script: - fn = self._script[name] - return UserScriptFunctionProxy(fn) + obj = self._script[name] + # Only return the function proxy if the object is indeed callable. + if isinstance(obj, Callable): + return UserScriptFunctionProxy(obj) + else: + return obj else: raise AttributeError(name) From fbc3a871b9018f9d7ba4dbb07a6adfaf3dc0454e Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 27 Dec 2021 17:00:25 -0600 Subject: [PATCH 090/123] user scripts: add 'command' decorator to create user-defined commands. Using the 'command' decorator in a user script is a way to define custom commands for use in commander or gdb monitor commands. The command function's signature and type annotations are used to automatically convert arguments. Add user defined function test to user_script_test.py. 
--- pyocd/commands/execution_context.py | 3 + pyocd/core/session.py | 115 +++++++++++++++++++++++++++- test/test_user_script.py | 14 +++- test/user_script_test.py | 33 ++++++++ 4 files changed, 162 insertions(+), 3 deletions(-) diff --git a/pyocd/commands/execution_context.py b/pyocd/commands/execution_context.py index c43c7e7b2..dd8da79af 100755 --- a/pyocd/commands/execution_context.py +++ b/pyocd/commands/execution_context.py @@ -231,6 +231,9 @@ def attach_session(self, session): self.selected_ap_address = ap_num break + # Add user-defined commands once we know we have a session created. + self.command_set.add_command_group('user') + return True @property diff --git a/pyocd/core/session.py b/pyocd/core/session.py index 44e893977..ec449f685 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -20,8 +20,8 @@ import yaml import os import weakref -from inspect import getfullargspec -from typing import (Any, cast, Dict, List, Mapping, Optional, TYPE_CHECKING) +from inspect import (getfullargspec, signature) +from typing import (Any, Callable, Sequence, Union, cast, Dict, List, Mapping, Optional, TYPE_CHECKING) from . import exceptions from .options_manager import OptionsManager @@ -427,6 +427,7 @@ def _init_user_script_namespace(self, user_script_path: str) -> None: 'session': self, 'options': self.options, 'LOG': logging.getLogger('pyocd.user_script'), + 'command': new_command_decorator, } def _update_user_script_namespace(self) -> None: @@ -560,3 +561,113 @@ def __getattr__(self, name: str) -> Any: return obj else: raise AttributeError(name) + +def new_command_decorator(name: Optional[Union[str, Sequence[str]]] = None, help: str = ""): + """@brief User script decorator for creating new commands. + + Supported parameter types: + - `str` + - `int` + - `float` + - Extra args, e.g. `*args`. + + Keyword parameters and extra keyword args (**args) are not allowed. 
+ + The decorated function remains accessible as a regular function in the namespace in which it was defined. + This is true even if the function definition is not compatible with the command decorator, for instance + if it has invalid parameter types. + + This is an example of defining a command with this decorator. + ```py + @command('cmdname', help='Optional help') + def mycommand(s: str, i: int, f: float, *args): + print("Hello") + ``` + """ + import types + from ..commands.base import CommandBase + def _command_decorator(fn: Callable): + if name is None: + names_list: Sequence[str] = [getattr(fn, '__name__')] + else: + names_list: Sequence[str] = [name] if isinstance(name, str) else name + classname = names_list[0].capitalize() + "Command" + + # Examine the command function's signature to extract arguments and their types. + sig = signature(fn) + arg_converters = [] + has_var_args = False + usage_fields: List[str] = [] + for parm in sig.parameters.values(): + typ = parm.annotation + + # Check if this is a *args kind of argument. + if parm.kind == parm.VAR_POSITIONAL: + has_var_args = True + usage_fields.append("*") + continue + # Disallow keyword params. + elif parm.kind in (parm.KEYWORD_ONLY, parm.VAR_KEYWORD): + LOG.error("user command function '%s' uses unsupported keyword parameters", fn.__name__) + return fn + + # Require type annotations. + if typ is parm.empty: + LOG.error("user command function '%s' is missing type annotation for parameter '%s'", + fn.__name__, parm.name) + return fn + + # Otherwise add to param converter list. 
+ if issubclass(typ, str): + arg_converters.append(lambda _, x: x) + elif issubclass(typ, float): + arg_converters.append(lambda _, x: float(x)) + elif issubclass(typ, int): + arg_converters.append(CommandBase._convert_value) + else: + LOG.error("parameter '%s' of user command function '%s' has an unsupported type", + parm.name, fn.__name__) + return fn + usage_fields.append(parm.name.upper()) + + # parse() method of the new command class. + def parse(self, args: List[str]): + arg_values: List[Any] = [] + + if len(args) > len(arg_converters): + assert has_var_args + extra_args = args[len(arg_converters):] + args = args[:len(arg_converters)] + else: + extra_args = [] + + for arg, converter in zip(args, arg_converters): + arg_values.append(converter(self, arg)) + if has_var_args: + arg_values += extra_args + + self._args = arg_values + + # execute() method of the new command class. + def execute(self): + fn(*self._args) + + # Callback to populate the new command class' namespace dict. + def populate_command_class(ns: Dict[str, Any]) -> None: + ns['INFO'] = { + 'names': names_list, + 'group': 'user', + 'category': 'user', + 'nargs': "*" if has_var_args else len(sig.parameters), + 'usage': " ".join(usage_fields), + 'help': help, + } + ns['parse'] = parse + ns['execute'] = execute + + types.new_class(classname, bases=(CommandBase,), exec_body=populate_command_class) + + # Return original function. This makes it accessible from the rest of the user script + # and Python expression commands. + return fn + return _command_decorator diff --git a/test/test_user_script.py b/test/test_user_script.py index fa28c5d56..bcad53b9b 100644 --- a/test/test_user_script.py +++ b/test/test_user_script.py @@ -1,5 +1,17 @@ # Test user script. 
-# + +@command(help="test command") +def testcmd(f: float, i: int, s: str): + assert isinstance(f, float) + assert isinstance(i, int) + assert isinstance(s, str) + +@command("anothertestcmd", help="second test command") +def testcmd2(*args): + assert isinstance(args, tuple) + assert all(isinstance(s, str) for s in args) + + # Provides stub implementations of all hooks. def will_connect(board): diff --git a/test/user_script_test.py b/test/user_script_test.py index dc2e42097..1c101c6ff 100644 --- a/test/user_script_test.py +++ b/test/user_script_test.py @@ -24,6 +24,7 @@ from pyocd.core.helpers import ConnectHelper from pyocd.probe.pydapaccess import DAPAccess from pyocd.core.memory_map import MemoryType +from pyocd.commands.execution_context import CommandExecutionContext from test_util import ( Test, @@ -75,6 +76,10 @@ def user_script_test(board_id): test_count = 0 result = UserScriptTestResult() + # TEST basic functionality + print("\n------ Testing delegates ------") + + # TODO verify user script delegates were called target.reset_and_halt() target.resume() target.halt() @@ -82,6 +87,34 @@ def user_script_test(board_id): test_count += 1 test_pass_count += 1 + print("TEST PASSED") + + # TEST user defined commands + print("\n------ Testing user defined commands ------") + context = CommandExecutionContext() + context.attach_session(session) + + def test_command(cmd): + try: + print("\nTEST: %s" % cmd) + context.process_command_line(cmd) + except: + print("TEST FAILED") + traceback.print_exc(file=sys.stdout) + return False + else: + print("TEST PASSED") + return True + + # Verify command with float, int, str args. 
+ if test_command("testcmd 3.14 0xbeef foobar"): + test_pass_count += 1 + test_count += 1 + + # Verify varargs: all should be strings in the cmd's args + if test_command("anothertestcmd a b 1 2 fee fie foe"): + test_pass_count += 1 + test_count += 1 print("\nTest Summary:") print("Pass count %i of %i tests" % (test_pass_count, test_count)) From 27770deab1e2e9b983ea2082910998fc14e88343 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 28 Dec 2021 16:26:06 -0600 Subject: [PATCH 091/123] user scripts: python '$' command shares user script namespace. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - CommandExecutionContext uses the user script's namespace, with a couple additions, instead of building its own from scratch. This allows definitions from the user script to be used in Python expression commands. - Add 'debug', 'info', 'warning', 'error' functions to user script namespace to call the relevant methods of the user script logger. - Session.user_script_proxy property creates a user script namespace if there isn’t already one, to handle $-commands when there is a user script file hasn’t been loaded. - Clean up variable names in Session._load_user_script(). --- pyocd/commands/execution_context.py | 15 +++------ pyocd/core/session.py | 47 ++++++++++++++++++++--------- 2 files changed, 38 insertions(+), 24 deletions(-) diff --git a/pyocd/commands/execution_context.py b/pyocd/commands/execution_context.py index dd8da79af..306c8e381 100755 --- a/pyocd/commands/execution_context.py +++ b/pyocd/commands/execution_context.py @@ -414,19 +414,14 @@ def execute_command(self, invocation: CommandInvocation) -> None: def _build_python_namespace(self) -> None: """! 
@brief Construct the dictionary used as the namespace for python commands.""" - import pyocd + assert self.session assert self.target - self._python_namespace = { - 'session': self.session, - 'board': self.board, - 'target': self.target, - 'probe': self.probe, - 'dp': self.target.dp, - 'aps': self.target.dp.aps, + ns = self.session.user_script_proxy.namespace + ns.update({ 'elf': self.elf, 'map': self.target.memory_map, - 'pyocd': pyocd, - } + }) + self._python_namespace = ns def handle_python(self, invocation: CommandInvocation) -> None: """! @brief Evaluate a python expression.""" diff --git a/pyocd/core/session.py b/pyocd/core/session.py index ec449f685..98ab44cea 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -19,6 +19,7 @@ import logging.config import yaml import os +from pathlib import Path import weakref from inspect import (getfullargspec, signature) from typing import (Any, Callable, Sequence, Union, cast, Dict, List, Mapping, Optional, TYPE_CHECKING) @@ -342,8 +343,14 @@ def delegate(self, new_delegate: Any) -> None: self._delegate = new_delegate @property - def user_script_proxy(self) -> Optional["UserScriptDelegateProxy"]: + def user_script_proxy(self) -> "UserScriptDelegateProxy": """! @brief The UserScriptDelegateProxy object for a loaded user script.""" + # Create a proxy if there isn't already one. This is a fallback in case there isn't a user script, + # yet a Python $-command is executed and needs the user script namespace in which to run. 
+ if not self._user_script_proxy: + self._init_user_script_namespace('__script__', '') + self._update_user_script_namespace() + self._user_script_proxy = UserScriptDelegateProxy(self._user_script_namespace) return self._user_script_proxy @property @@ -380,7 +387,7 @@ def __exit__(self, exc_type: type, value: Any, traceback: "TracebackType") -> bo self.close() return False - def _init_user_script_namespace(self, user_script_path: str) -> None: + def _init_user_script_namespace(self, script_name: str, script_path: str) -> None: """! @brief Create the namespace dict used for user scripts. This initial namespace has only those objects that are available very early in the @@ -394,6 +401,9 @@ def _init_user_script_namespace(self, user_script_path: str) -> None: from ..flash import file_programmer from ..flash import eraser from ..flash import loader + + user_script_logger = logging.getLogger('pyocd.user_script') + self._user_script_namespace = { # Modules and classes 'pyocd': pyocd, @@ -421,13 +431,18 @@ def _init_user_script_namespace(self, user_script_path: str) -> None: 'FlashEraser': eraser.FlashEraser, 'FlashLoader': loader.FlashLoader, # User script info - '__name__': os.path.splitext(os.path.basename(user_script_path))[0], - '__file__': user_script_path, + '__name__': script_name, + '__file__': script_path, # Objects 'session': self, 'options': self.options, - 'LOG': logging.getLogger('pyocd.user_script'), + 'LOG': user_script_logger, + # Functions 'command': new_command_decorator, + 'debug': user_script_logger.debug, + 'info': user_script_logger.info, + 'warning': user_script_logger.warning, + 'error': user_script_logger.error, } def _update_user_script_namespace(self) -> None: @@ -442,23 +457,23 @@ def _update_user_script_namespace(self) -> None: }) def _load_user_script(self) -> None: - scriptPath = self.find_user_file('user_script', _USER_SCRIPT_NAMES) + script_path = self.find_user_file('user_script', _USER_SCRIPT_NAMES) - if scriptPath is not None: + if 
script_path is not None: try: # Read the script source. - with open(scriptPath, 'r') as scriptFile: - LOG.debug("Loading user script: %s", scriptPath) - scriptSource = scriptFile.read() + with open(script_path, 'r') as script_file: + LOG.debug("Loading user script: %s", script_path) + script_source = script_file.read() - self._init_user_script_namespace(scriptPath) + self._init_user_script_namespace(Path(script_path).stem, script_path) - scriptCode = compile(scriptSource, scriptPath, 'exec') + script_code = compile(script_source, script_path, 'exec') # Executing the code will create definitions in the namespace for any # functions or classes. A single namespace is shared for both globals and # locals so that script-level definitions are available within the # script functions. - exec(scriptCode, self._user_script_namespace, self._user_script_namespace) + exec(script_code, self._user_script_namespace) # Create the proxy for the user script. It becomes the delegate unless # another delegate was already set. @@ -466,7 +481,7 @@ def _load_user_script(self) -> None: if self._delegate is None: self._delegate = self._user_script_proxy except IOError as err: - LOG.warning("Error attempting to load user script '%s': %s", scriptPath, err) + LOG.warning("Error attempting to load user script '%s': %s", script_path, err) def open(self, init_board: bool = True) -> None: """! @brief Open the session. @@ -551,6 +566,10 @@ def __init__(self, script_namespace: Dict) -> None: super().__init__() self._script = script_namespace + @property + def namespace(self) -> Dict: + return self._script + def __getattr__(self, name: str) -> Any: if name in self._script: obj = self._script[name] From ac4395590207a415f284409d4ed05be2883b075e Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 29 Dec 2021 17:31:05 -0600 Subject: [PATCH 092/123] user scripts: print proxy. 
The print() builtin is overridden in the user script namespace with an instance of the new PrintProxy class, primarily to allow redirecting of print() calls to a CommandExecutionContext's output stream. This ensures that prints from functions and commands in user scripts will be sent over gdbserver monitor command output. The print proxy is available via the user_script_print_proxy property on Session. --- pyocd/commands/execution_context.py | 26 +++++++++++++------ pyocd/core/session.py | 44 ++++++++++++++++++++++++++++- 2 files changed, 61 insertions(+), 9 deletions(-) diff --git a/pyocd/commands/execution_context.py b/pyocd/commands/execution_context.py index 306c8e381..425ddb31a 100755 --- a/pyocd/commands/execution_context.py +++ b/pyocd/commands/execution_context.py @@ -410,7 +410,13 @@ def execute_command(self, invocation: CommandInvocation) -> None: cmd_object = cmd_class(self) cmd_object.check_arg_count(invocation.args) cmd_object.parse(invocation.args) - cmd_object.execute() + + if self.session: + # Reroute print() in user-defined functions so it will come out our output stream. + with self.session.user_script_print_proxy.push_target(self.write): + cmd_object.execute() + else: + cmd_object.execute() def _build_python_namespace(self) -> None: """! @brief Construct the dictionary used as the namespace for python commands.""" @@ -425,18 +431,22 @@ def _build_python_namespace(self) -> None: def handle_python(self, invocation: CommandInvocation) -> None: """! @brief Evaluate a python expression.""" + assert self.session try: # Lazily build the python environment. 
if not self._python_namespace: self._build_python_namespace() - result = eval(invocation.cmd, globals(), self._python_namespace) - if result is not None: - if isinstance(result, int): - self.writei("0x%08x (%d)", result, result) - else: - w, h = get_terminal_size() - self.write(pprint.pformat(result, indent=2, width=w, depth=10)) + # Reroute print() in user-defined functions so it will come out our output stream. Not that + # we expect much use of print() from expressions... + with self.session.user_script_print_proxy.push_target(self.write): + result = eval(invocation.cmd, self._python_namespace) + if result is not None: + if isinstance(result, int): + self.writei("0x%08x (%d)", result, result) + else: + w, h = get_terminal_size() + self.write(pprint.pformat(result, indent=2, width=w, depth=10)) except Exception as e: # Log the traceback before raising the exception. if self.session and self.session.log_tracebacks: diff --git a/pyocd/core/session.py b/pyocd/core/session.py index 98ab44cea..d1cd457c4 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from contextlib import contextmanager import logging import logging.config import yaml @@ -22,7 +23,7 @@ from pathlib import Path import weakref from inspect import (getfullargspec, signature) -from typing import (Any, Callable, Sequence, Union, cast, Dict, List, Mapping, Optional, TYPE_CHECKING) +from typing import (Any, Callable, Generator, Sequence, Union, cast, Dict, List, Mapping, Optional, TYPE_CHECKING) from . 
 import exceptions from .options_manager import OptionsManager @@ -150,6 +151,7 @@ def __init__( self._inited: bool = False self._user_script_namespace: Dict[str, Any] = {} self._user_script_proxy: Optional[UserScriptDelegateProxy] = None + self._user_script_print_proxy = PrintProxy() self._delegate: Optional[Any] = None self._auto_open = auto_open self._options = OptionsManager() @@ -353,6 +355,10 @@ def user_script_proxy(self) -> "UserScriptDelegateProxy": self._user_script_proxy = UserScriptDelegateProxy(self._user_script_namespace) return self._user_script_proxy + @property + def user_script_print_proxy(self) -> "PrintProxy": + return self._user_script_print_proxy + @property def gdbservers(self) -> Dict[int, "GDBServer"]: """! @brief Dictionary of core numbers to @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances.""" @@ -402,9 +408,15 @@ def _init_user_script_namespace(self, script_name: str, script_path: str) -> Non from ..flash import eraser from ..flash import loader + + # Duplicate builtins and override print() with our proxy. + import builtins + bi = builtins.__dict__.copy() + bi['print'] = self._user_script_print_proxy + user_script_logger = logging.getLogger('pyocd.user_script') self._user_script_namespace = { + '__builtins__': bi, # Modules and classes 'pyocd': pyocd, 'exceptions': exceptions, @@ -690,3 +702,33 @@ def populate_command_class(ns: Dict[str, Any]) -> None: # Return original function. This makes it accessible from the rest of the user script # and Python expression commands. return fn return _command_decorator + +class PrintProxy: + """@brief Proxy for print() that can be retargeted to different functions. + + When the object is created, the target function is initially the real print(). This can be changed by calling + `set_target()`. + + To simplify requirements of the target function when it isn't the real print(), all positional parameters are + converted to strings and joined with spaces. The target function is then called with a single string argument + plus any keyword arguments. 
+ """ + _target: Callable = print + + def set_target(self, new_target: Callable) -> None: + self._target = new_target + + def __call__(self, *args: Any, **kwds: Any) -> None: + # Convert all args to strings and concatenate, to simplify requirements of the target function + # when it isn't the real print(). + combined_args = " ".join(str(a) for a in args) + self._target(combined_args, **kwds) + + @contextmanager + def push_target(self, new_target: Callable) -> Generator: + save_target = self._target + try: + self._target = new_target + yield + finally: + self._target = save_target From 1e7e6275bd4140f1f84e8882696d15e31871bebd Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 28 Dec 2021 16:29:29 -0600 Subject: [PATCH 093/123] commands: help: fix addendum regarding register name commands. --- pyocd/commands/commands.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyocd/commands/commands.py b/pyocd/commands/commands.py index 84d4b360c..a52cb644a 100755 --- a/pyocd/commands/commands.py +++ b/pyocd/commands/commands.py @@ -1561,8 +1561,7 @@ class HelpCommand(CommandBase): } HELP_ADDENDUM = """ -All register names are also available as commands that print the register's value. -Any ADDR or LEN argument will accept a register name. +Any integer argument will accept a register name. Prefix line with $ to execute a Python expression. Prefix line with ! to execute a shell command.""" From 565788e0528cf7d6dc863e00aa69254f2417a084 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 29 Dec 2021 12:12:49 -0600 Subject: [PATCH 094/123] docs: user_scripts: cleanup; reformat script functions; add user defined functions. 
--- docs/user_scripts.md | 353 +++++++++++++++++++++++++++++++------------ 1 file changed, 253 insertions(+), 100 deletions(-) diff --git a/docs/user_scripts.md b/docs/user_scripts.md index d89e4789e..10cd4ffcf 100644 --- a/docs/user_scripts.md +++ b/docs/user_scripts.md @@ -15,12 +15,6 @@ automatically detect it and load it as a user script. If you prefer another name line argument. If a relative path is set either with the option or command line, it will be searched for in the project directory. -The arguments for hook functions defined in user scripts are the same arguments accepted by delegate -methods. However, all arguments to user script functions are optional. If provided, the argument -names must match the specification. But you can specify arguments in any order, and exclude any or -all arguments if they are not needed. In fact, most arguments are not required because the same -objects are available as script globals, for instance `board` and `target`. - ## Examples @@ -31,11 +25,11 @@ This example user script shows how to add a new memory region. def will_connect(board): # Create the new ROM region for the FICR. - ficr = pyocd.core.memory_map.RomRegion( - name="ficr", - start=0x10000000, - length=0x460 - ) + ficr = RomRegion( + name="ficr", + start=0x10000000, + length=0x460 + ) # Add the FICR region to the memory map. target.memory_map.add_region(ficr) @@ -55,20 +49,65 @@ def will_connect(): extFlash.flm = "MIMXRT105x_QuadSPI_4KB_SEC.FLM" ``` -This example demonstrates setting the DBGMCU_CR register on reset for STM32 devices. +This example demonstrates setting the DBGMCU_CR register after connecting for STM32 devices. ```py # This example applies to the ST STM32L0x1 devicess. DBG_CR = 0x40015804 -def did_reset(): +def did_connect(): # Set STANDBY, STOP, and SLEEP bits all to 1. target.write32(DBG_CR, 0x7) ``` Another common use for a script is to initialize external memory such as SDRAM. 
+ +## User-defined commands + +New commands accessible from the commander subcommand or gdbserver monitor commands can be easily created in a user +script. + +User defined commands are created by using the `@command()` decorator on a function. The name of the new command can +either be the same as the name of the decorated function, or can be set explicitly with a `name` (or first positional) +argument to the decorator. For instance, either `@command('mycmd')` or `@command(name='anothercmd')`. Note that the +decorator requires parentheses (it must be called as a function) even if there are no parameters. + +Parameters for the new command are automatically determined using introspection and type annotations. Arguments for +parameters of these types are converted to the appropriate type before the function is called as a command. + +Supported parameter types: +- `int` +- `float` +- `str` +- Variable arguments, e.g. `*args`. + +Keyword parameters are not allowed. + +An `int` parameter is converted using the same method as for other pyOCD commands. Hexadecimal and binary numbers are +allowed, digits can be separated by underscores, and so on. For variable arguments, type annotations are ignored and the +tuple passed to the function will contain strings as entered in the command invocation. + +The decorated function remains accessible as a regular function in the user script namespace, and is therefore callable +from other functions within the user script. This is true even if the function definition is not compatible with the +command decorator, for instance if it has invalid parameter types. + +Help for the new command can be specified by passing a `help` argument to the `@command` decorator. 
+ +Example: + +```py +@command(help="Decode and print the first few vectors") +def vectable(base: int): + vecs = target.read_memory_block32(base, 4) + print(f"Initial SP: {vecs[0]:#010x}") + print(f"ResetHandler: {vecs[1]:#010x}") + print(f"NMI: {vecs[2]:#010x}") + print(f"HardFault: {vecs[3]:#010x}") +``` + + ## Script globals A number of useful symbols are made available in the global namespace of user scripts. These include @@ -81,11 +120,15 @@ both target related objects, as well as parts of the pyOCD Python API. | `aps` | Dictionary of CoreSight Access Port (AP) objects. The keys are the APSEL value. | | `board` | The `Board` object. | | `BreakpointType` | Enumeration of breakpoint types. | +| `command` | Decorator for defining new commands. See [user-defined commands](#user_defined_commands) for details. | | `DeviceRegion` | Device-type memory region class. | +| `debug` | Log a debug message. | | `dp` | The CoreSight Debug Port (DP) object. | | `Error` | The base class for all pyOCD exceptions. | | `Event` | Enumeration of notification event types. | | `exceptions` | Module containing the exception classes. | +| `error` | Output an error log. | +| `info` | Output an info-level log message. | | `FileProgrammer` | Utility class to program files to target flash. | | `FlashEraser` | Utility class to erase target flash. | | `FlashLoader` | Utility class to program raw binary data to target flash. | @@ -109,128 +152,238 @@ both target related objects, as well as parts of the pyOCD Python API. | `TransferError` | Exception class for all transfer errors. | | `TransferFaultError` | Exception subclass of `TransferError` for bus faults. | | `VectorCatch` | Namespace class containing bit mask constants for vector catch options. | +| `warning` | Log a warning. | | `WatchpointType` | Enumeration of watchpoint types. | -## Script functions +## Delegate functions + +This section documents all functions that user scripts can provide to modify pyOCD's behaviour. 
Some are simply +notifications, while others allow for overriding of default behaviour. Collectively, these are called delegate functions. + +All parameters of script delegate functions are optional. Parameters can be declared in any order, and that are not +needed can be excluded. In fact, most parameters are not necessary because the same objects are available as script +globals, for instance `session` and `target`. + +Those parameters that are present must have names matching the specification below, and there must not be unspecified, +required parameters (those without a default value). (Extra optional parameters are allowed but will never be passed any +value other than the default, unless you call the function yourself from within the script.) + + +### will_connect + +Pre-init notification for the board. +``` +will_connect(board: Board) -> None +``` + +**Parameters** \ +*board* - A `Board` instance that is about to be initialized. \ +**Result** \ +Ignored. + +### did_connect + +Post-initialization notification for the board. +``` +did_connect(board: Board) -> None +``` + +**Parameters** \ +*board* - A `Board` instance. \ +**Result** \ +Ignored. + +### will_init_target + +Hook to review and modify init call sequence prior to execution. +``` +will_init_target(target: SoCTarget, init_sequence: CallSequence) -> None +``` + +**Parameters** \ +*target* - An `SoCTarget` object about to be initialized. \ +*init_sequence* - The `CallSequence` that will be invoked. Because call sequences are + mutable, this parameter can be modified before return to change the init calls. \ +**Result** \ +Ignored. + +### did_init_target -This section documents all functions that user scripts can provide to modify pyOCD's behaviour. +Post-initialization notification. +``` +did_init_target(target: SoCTarget) -> None +``` -- `will_connect(board)`
- Pre-init hook for the board. +**Parameters** \ +*target* - An `SoCTarget` object. \ +**Result** \ +Ignored. - *board* - A `Board` instance that is about to be initialized.
- **Result** - Ignored. +### will_start_debug_core -- `did_connect(board)`
- Post-initialization hook for the board. +Hook to enable debug for the given core. +``` +will_start_debug_core(core: CoreTarget) -> Optional[bool] +``` - *board* - A `Board` instance.
- **Result** - Ignored. +**Parameters** \ +*core* - A `CoreTarget` object about to be initialized. \ +**Result** \ +*True* Do not perform the normal procedure to start core debug. \ +*False/None* Continue with normal behaviour. -- `will_init_target(target, init_sequence)`
- Hook to review and modify init call sequence prior to execution. +### did_start_debug_core - *target* - A `CoreSightTarget` object about to be initialized.
- *init_sequence* - The `CallSequence` that will be invoked. Because call sequences are - mutable, this parameter can be modified before return to change the init calls.
- **Result** - Ignored. +Post-initialization hook. +``` +did_start_debug_core(core: CoreTarget) -> None +``` -- `did_init_target(target)`
- Post-initialization hook. +**Parameters** \ +*core* - A `CoreTarget` object. \ +**Result** \ +Ignored. - *target* - Either a `CoreSightTarget` or `CortexM` object.
- **Result** - Ignored. +### will_stop_debug_core -- `will_start_debug_core(core)`
- Hook to enable debug for the given core. +Pre-cleanup hook for the core. +``` +will_stop_debug_core(core: CoreTarget) -> Optional[bool] +``` - *core* - A `CortexM` object about to be initialized.
- **Result** - *True* Do not perform the normal procedure to start core debug. \ - *False/None* Continue with normal behaviour. +**Parameters** \ +*core* - A `CoreTarget` object. \ +**Result** \ +*True* Do not perform the normal procedure to disable core debug. \ +*False/None* Continue with normal behaviour. -- `did_start_debug_core(core)`
- Post-initialization hook. +### did_stop_debug_core - *core* - A `CortexM` object.
- **Result** - Ignored. +Post-cleanup notification for the core. +``` +did_stop_debug_core(core: CoreTarget) -> None +``` -- `will_stop_debug_core(core)`
- Pre-cleanup hook for the core. +**Parameters** \ +*core* - A `CoreTarget` object. \ +**Result** \ +Ignored. - *core* - A `CortexM` object.
- **Result** - *True* Do not perform the normal procedure to disable core debug. \ - *False/None* Continue with normal behaviour. +### will_disconnect -- `did_stop_debug_core(core)`
- Post-cleanup hook for the core. +Pre-disconnect notification. +``` +will_disconnect(target: SoCTarget, resume: bool) -> None +``` - *core* - A `CortexM` object.
- **Result** - Ignored. +**Parameters** \ +*target* - An `SoCTarget` object. \ +*resume* - The value of the `disconnect_on_resume` option. \ +**Result** \ +Ignored. -- `will_disconnect(target, resume)`
- Pre-disconnect hook. +### did_disconnect - *target* - Either a `CoreSightTarget` or `CortexM` object.
- *resume* - The value of the `disconnect_on_resume` option.
- **Result** - Ignored. +Post-disconnect notification. +``` +did_disconnect(target: SoCTarget, resume: bool) -> None +``` -- `did_disconnect(target, resume)`
- Post-disconnect hook. +**Parameters** \ +*target* - An `SoCTarget` object. \ +*resume* - The value of the `disconnect_on_resume` option. \ +**Result** \ +Ignored. - *target* - Either a `CoreSightTarget` or `CortexM` object.
- *resume* - The value of the `disconnect_on_resume` option.
- **Result** - Ignored. +### will_reset -- `will_reset(core, reset_type)`
- Pre-reset hook.

+Pre-reset hook.
+```
+will_reset(core: CoreTarget, reset_type: Target.ResetType) -> Optional[bool]
+```

 - *core* - A CortexM instance.
- *reset_type* - One of the `Target.ResetType` enumerations.
- **Result** - *True* The hook performed the reset. \ - *False/None* Caller should perform the normal - reset procedure. +**Parameters** \ +*core* - A `CoreTarget` instance. \ +*reset_type* - One of the `Target.ResetType` enumerations. \ +**Result** \ +*True* The hook performed the reset. \ +*False/None* Caller should perform the normal reset procedure. -- `did_reset(core, reset_type)`
- Post-reset hook. +### did_reset - *core* - A CortexM instance.
- *reset_type* - One of the `Target.ResetType` enumerations.
- **Result** - Ignored. +Post-reset notification. +``` +did_reset(core: CoreTarget, reset_type: Target.ResetType) -> None +``` -- `set_reset_catch(core, reset_type)`
- Hook to prepare target for halting on reset. +**Parameters** \ +*core* - A `CoreTarget` instance. \ +*reset_type* - One of the `Target.ResetType` enumerations. \ +**Result** \ +Ignored. - *core* - A CortexM instance.
- *reset_type* - One of the `Target.ResetType` enumerations.
- **Result** - *True* This hook handled setting up reset catch, caller should do nothing. \ - *False/None* Perform the default reset catch set using vector catch. +### set_reset_catch -- `clear_reset_catch(core, reset_type)`
- Hook to clean up target after a reset and halt. +Hook to prepare target for halting on reset. +``` +set_reset_catch(core: CoreTarget, reset_type: Target.ResetType) -> Optional[bool] +``` - *core* - A `CortexM` instance.
- *reset_type* - One of the `Target.ResetType` enumerations.
- **Result** - Ignored. +**Parameters** \ +*core* - A `CoreTarget` instance. \ +*reset_type* - One of the `Target.ResetType` enumerations. \ +**Result** \ +*True* This hook handled setting up reset catch, caller should do nothing. \ +*False/None* Perform the default reset catch set using vector catch. -- `mass_erase(target)`
- Hook to override mass erase. +### clear_reset_catch - *target* - A `CoreSightTarget` object.
- **Result** - *True* Indicate that mass erase was performed by the hook. \ - *False/None* Mass erase was not overridden and the caller should proceed with the - standard mass erase procedure. +Hook to clean up target after a reset and halt. +``` +clear_reset_catch(core: CoreTarget, reset_type: Target.ResetType) -> None +``` -- `trace_start(self, target, mode)`
- Hook to prepare for tracing the target. +**Parameters** \ +*core* - A `CoreTarget` instance. \ +*reset_type* - One of the `Target.ResetType` enumerations. \ +**Result** \ +Ignored. + +### mass_erase + +Hook to override mass erase. +``` +mass_erase(target: SoCTarget) -> Optional[bool] +``` - *target* - A CoreSightTarget object.
- *mode* - The trace mode. Currently always 0 to indicate SWO.
- *Result* - Ignored.

+**Parameters** \
+*target* - A `CoreSightTarget` object. \
+*mode* - The trace mode. Currently always 0 to indicate SWO. \
+**Result** \
+Ignored.

-- `trace_stop(self, target, mode)`
- Hook to clean up after tracing the target. +### trace_start + +Notification to prepare for tracing the target. +``` +trace_start(target: SoCTarget, mode: int) -> None +``` + +**Parameters** \ +*target* - A `CoreSightTarget` object. \ +*mode* - The trace mode. Currently always 0 to indicate SWO. \ +*Result* - Ignored. + +### trace_stop + +Notification to clean up after tracing the target. +``` +trace_stop(target: SoCTarget, mode: int) -> None +``` - *target* - A CoreSightTarget object.
- *mode* - The trace mode. Currently always 0 to indicate SWO.
- *Result* - Ignored. +**Parameters** \ +*target* - A `CoreSightTarget` object. \ +*mode* - The trace mode. Currently always 0 to indicate SWO. \ +**Result** \ +Ignored. From cee177912b037ae78a1280b45890ddac32e6a321 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 29 Dec 2021 14:32:22 -0600 Subject: [PATCH 095/123] lgtm: exclude import and sensitive data logging errors. py/import-and-import-from: This query has been triggered falsely a few times recently. py/clear-text-logging-sensitive-data: Triggered by probe UID from command line appearing in log output. --- .lgtm.yml | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/.lgtm.yml b/.lgtm.yml index 7be4bf3fc..4893b0519 100644 --- a/.lgtm.yml +++ b/.lgtm.yml @@ -3,7 +3,7 @@ path_classifiers: - scripts/generate_command_help.py test: # Mark all code under the src/ directory as test related. - # Really, the crc analyzer is not for test, but LGTL can't build embedded. + # Really, the crc analyzer is not for test, but LGTM can't build embedded. - src - exclude: src/analyzer @@ -21,4 +21,32 @@ queries: # # In some cases the 'while' block will exit with only a 'return', so the 'else' isn't really # necessary, but retaining it maintains the expected pattern. - - exclude: py/redundant-else + - exclude: "py/redundant-else" + + # Exclude cases where there is an import of the same module via 'import foo' and 'from foo import ...'. + # The query for this is too general and catches a lot of cases that are not really errors, especially + # with includes of types for annotations. + # + # Example 1: + # + # import pyocd + # from pyocd import debug + # + # These two imports trigger this query, even though it's pretty clearly a reasonable usage. 
+ # + # Example 2: + # + # if TYPE_CHECKING: + # import types + # + # def foo(): + # import types + # + # In this case, types is only needed within foo() at runtime and is not imported globally to reduce + # namespace pollution and import times. But when type checking it's types is also imported. The query + # doesn't see the TYPE_CHECKING predicate and triggers. + - exclude: "py/import-and-import-from" + + # This query triggers on logging of the probe's UID as specified on the command line in cleartext. + # Clearly the UID is not sensitive data, at least in the traditional sense. + - exclude: "py/clear-text-logging-sensitive-data" From 24ee217fb0d43970a8a5eea1cec946d8f50116cf Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 29 Dec 2021 15:09:17 -0600 Subject: [PATCH 096/123] setup.cfg: add flake8 config to exclude functional test scripts. --- setup.cfg | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/setup.cfg b/setup.cfg index 2d4dff981..e927e448d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -98,3 +98,12 @@ pyocd.rtos = rtx5 = pyocd.rtos.rtx5:RTX5Plugin threadx = pyocd.rtos.threadx:ThreadXPlugin zephyr = pyocd.rtos.zephyr:ZephyrPlugin + +[flake8] +exclude = + # Ignore the test user script since it uses globals not available to flake8, and will thus generate + # many failures. + test_user_script.py, + # Ignore gdb test script for similar reasons. + gdb_test_script.py + From 980f7c4ba30fc53603f97dab7fef966b0b64c5f3 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 29 Dec 2021 14:35:36 -0600 Subject: [PATCH 097/123] probe: j-link: fix SWO methods. The SWO methods were completely broken due to using an incorrect attribute name. 
--- pyocd/probe/jlink_probe.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyocd/probe/jlink_probe.py b/pyocd/probe/jlink_probe.py index fec97bacf..b41bcd594 100644 --- a/pyocd/probe/jlink_probe.py +++ b/pyocd/probe/jlink_probe.py @@ -338,19 +338,19 @@ def write_ap_multiple(self, addr, values): def swo_start(self, baudrate): try: - self._jlink.swo_start(baudrate) + self._link.swo_start(baudrate) except JLinkException as exc: raise self._convert_exception(exc) from exc def swo_stop(self): try: - self._jlink.swo_stop() + self._link.swo_stop() except JLinkException as exc: raise self._convert_exception(exc) from exc def swo_read(self): try: - return self._jlink.swo_read(0, self._jlink.swo_num_bytes(), True) + return self._link.swo_read(0, self._link.swo_num_bytes(), True) except JLinkException as exc: raise self._convert_exception(exc) from exc From 31cd7d310982ab0fa45cce5790024aa01752cd25 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 1 Jan 2022 22:24:41 +0000 Subject: [PATCH 098/123] probe: j-link: fix is_reset_asserted(). (#1290) Incorrectly accessing the JLink.hardware_status property as a callable method. --- pyocd/probe/jlink_probe.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyocd/probe/jlink_probe.py b/pyocd/probe/jlink_probe.py index b41bcd594..cdcebb957 100644 --- a/pyocd/probe/jlink_probe.py +++ b/pyocd/probe/jlink_probe.py @@ -279,7 +279,7 @@ def assert_reset(self, asserted): def is_reset_asserted(self): try: - status = self._link.hardware_status() + status = self._link.hardware_status return status.tres == 0 except JLinkException as exc: raise self._convert_exception(exc) from exc From 8c998503e382336b2ee3af7cc941e28b6adbb895 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 1 Jan 2022 22:31:58 +0000 Subject: [PATCH 099/123] Cortex-M: fix regression with writing CFBP and xPSR subregisters. (#1289) * coresight: cortex-m: fix regression with writing cfbp and psr subregisters. 
* test: cortex_test: verify writing of almost all core registers. Only a few core register variants are not tested. --- pyocd/coresight/cortex_m.py | 2 ++ test/cortex_test.py | 71 +++++++++++++++++++++++++++++++++---- 2 files changed, 67 insertions(+), 6 deletions(-) diff --git a/pyocd/coresight/cortex_m.py b/pyocd/coresight/cortex_m.py index 676beaac6..3151337e1 100644 --- a/pyocd/coresight/cortex_m.py +++ b/pyocd/coresight/cortex_m.py @@ -1189,8 +1189,10 @@ def _base_write_core_registers_raw(self, reg_list, data_list): reg_data_list += [(-reg, singleLow), (-reg + 1, singleHigh)] elif CortexMCoreRegisterInfo.get(reg).is_cfbp_subregister and cfbpValue is None: cfbpValue = self._base_read_core_registers_raw([CortexMCoreRegisterInfo.get('cfbp').index])[0] + reg_data_list.append((reg, data)) elif CortexMCoreRegisterInfo.get(reg).is_psr_subregister and xpsrValue is None: xpsrValue = self._base_read_core_registers_raw([CortexMCoreRegisterInfo.get('xpsr').index])[0] + reg_data_list.append((reg, data)) else: # Other register, just copy directly. reg_data_list.append((reg, data)) diff --git a/test/cortex_test.py b/test/cortex_test.py index c9c9365ed..67fb6a00c 100644 --- a/test/cortex_test.py +++ b/test/cortex_test.py @@ -410,16 +410,75 @@ def reset_methods(fnc): target.write_core_registers_raw(['s0', 's1'], origRegs) print("Verify that all listed core registers can be accessed") + + def test_reg_rw(r, new_value: int, test_write: bool) -> bool: + did_pass = True + try: + # Read original value. + original_val = target.read_core_register_raw(r.name) + + if not test_write: + return did_pass + + # Make sure the new value changes. + if new_value == original_val: + new_value = 0 + + # Change the value. 
+ target.write_core_register_raw(r.name, new_value) + read_val = target.read_core_register_raw(r.name) + if read_val != new_value: + print(f"Failed to change value of register {r.name} to {new_value:#x}; read {read_val:#x}") + did_pass = False + + target.write_core_register_raw(r.name, original_val) + read_val = target.read_core_register_raw(r.name) + if read_val != original_val: + print(f"Failed to restore value of register {r.name} back to original {original_val:#x}; read {read_val:#x}") + did_pass = False + except exceptions.CoreRegisterAccessError: + did_pass = False + return did_pass + reg_count = 0 passed_reg_count = 0 for r in target.selected_core.core_registers.as_set: - try: - reg_count += 1 - val = target.read_core_register(r.name) - target.write_core_register(r.name, val) + test_write = True + + # Decide on a new value, ensuring it changes and taking into account register specifics. + r_mask = (1 << r.bitsize) - 1 + if 'sp' in r.name: + r_mask &= ~0x3 + elif r.name == 'pc': + r_mask &= ~0x1 + elif 'xpsr' in r.name: + r_mask = 0xd0000000 + elif 'control' == r.name: + # SPSEL is available on all cores. 
+ r_mask = 0x2 + elif r.name in ('primask', 'faultmask'): + r_mask = 0x1 + elif r.name == 'basepri': + r_mask = 0x80 + elif r.name == 'fpscr': + # v7-M bits + r_mask = 0xf7c0009f + new_value = 0xdeadbeef & r_mask + + # Skip write tests on some regs: + # - combined CFBP + # - PSR variants not including XPSR + # - all _NS and _S variants + if ((r.name in ('cfbp',)) + or (('psr' in r.name) and (r.name != 'xpsr')) + or ('_ns' in r.name) or ('_s' in r.name) + ): + test_write = False + + reg_count += 1 + if test_reg_rw(r, new_value, test_write): passed_reg_count += 1 - except exceptions.CoreRegisterAccessError: - pass + test_count += 1 if passed_reg_count == reg_count: test_pass_count += 1 From b64b7fcc3fadb720e618e81f3cc692e2fe2b5165 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 3 Jan 2022 00:41:54 +0000 Subject: [PATCH 100/123] targets: MAX32630: correct RAM size to 512 KiB. (#1292) --- pyocd/target/builtin/target_MAX32630.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyocd/target/builtin/target_MAX32630.py b/pyocd/target/builtin/target_MAX32630.py index 509710c03..f9016ed8d 100644 --- a/pyocd/target/builtin/target_MAX32630.py +++ b/pyocd/target/builtin/target_MAX32630.py @@ -63,7 +63,7 @@ class MAX32630(CoreSightTarget): MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x200000, blocksize=0x2000, is_boot_memory=True, algo=FLASH_ALGO), - RamRegion( start=0x20000000, length=0x40000), + RamRegion( start=0x20000000, length=0x80000), ) def __init__(self, session): From 25772f7a77a8ec8b416d9f457afba0b5bb70f153 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 3 Jan 2022 00:42:14 +0000 Subject: [PATCH 101/123] Test: gdb test script improvements (#1291) * test: gdb_test: handle halting in a frame without a function. * test: gdb_test: shorter timeout for gdbserver exiting. 
--- test/gdb_test.py | 5 ++--- test/gdb_test_script.py | 24 ++++++++++++++++++------ 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/test/gdb_test.py b/test/gdb_test.py index 3bb66be4b..ea1dbdf9b 100644 --- a/test/gdb_test.py +++ b/test/gdb_test.py @@ -30,7 +30,6 @@ Popen, STDOUT, PIPE, - check_output, ) import argparse import logging @@ -39,7 +38,6 @@ from pyocd.__main__ import PyOCDTool from pyocd.core.helpers import ConnectHelper -from pyocd.utility.compatibility import to_str_safe from pyocd.core.memory_map import MemoryType from pyocd.flash.file_programmer import FileProgrammer from test_util import ( @@ -63,6 +61,7 @@ PYTHON_GDB = "arm-none-eabi-gdb-py" TEST_TIMEOUT_SECONDS = 60.0 * 5 +SERVER_EXIT_TIMEOUT = 10.0 GDB_SCRIPT_PATH = os.path.join(TEST_DIR, "gdb_test_script.py") @@ -170,7 +169,7 @@ def test_gdb(board_id=None, n=0): LOG.info('Waiting for gdb to finish...') did_complete = wait_with_deadline(gdb_program, TEST_TIMEOUT_SECONDS) LOG.info('Waiting for server to finish...') - server_thread.join(timeout=TEST_TIMEOUT_SECONDS) + server_thread.join(timeout=SERVER_EXIT_TIMEOUT) if not did_complete: LOG.error("Test timed out!") if server_thread.is_alive(): diff --git a/test/gdb_test_script.py b/test/gdb_test_script.py index 19d9499f6..82019c66e 100644 --- a/test/gdb_test_script.py +++ b/test/gdb_test_script.py @@ -345,10 +345,16 @@ def run_test(): if not is_event_breakpoint(event, breakpoint): fail_count += 1 print("Error - breakpoint 1 test failed") - func_name = gdb.selected_frame().function().name - if rmt_func != func_name: + func = gdb.selected_frame().function() + if func is None: fail_count += 1 - print("ERROR - break occurred at wrong function %s" % func_name) + print("ERROR - selected frame has no function!?") + print(gdb.selected_frame()) + else: + func_name = func.name + if rmt_func != func_name: + fail_count += 1 + print("ERROR - break occurred at wrong function %s" % func_name) breakpoint.delete() gdb_execute("set var 
run_breakpoint_test = 0") @@ -371,10 +377,16 @@ def run_test(): # if not is_event_breakpoint(event): # fail_count += 1 # print("Error - breakpoint 2 test failed") - func_name = gdb.selected_frame().function().name - if rmt_func != func_name and not ignore_hw_bkpt_result: + func = gdb.selected_frame().function() + if func is None: fail_count += 1 - print("ERROR - break occurred at wrong function %s" % func_name) + print("ERROR - selected frame has no function!?") + print(gdb.selected_frame()) + else: + func_name = func.name + if rmt_func != func_name and not ignore_hw_bkpt_result: + fail_count += 1 + print("ERROR - break occurred at wrong function %s" % func_name) gdb_execute("clear %s" % rmt_func) gdb_execute("set var run_breakpoint_test = 0") From a5eb116bd7ea5248971ad7b3d8799291056f1653 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 3 Jan 2022 23:07:19 +0000 Subject: [PATCH 102/123] Test: automated test supports remote probes (#1293) * probe: tcp_probe_server: flush the probe on client disconnect. This is intended to prevent an immediate error on the first probe operation of a new connection, for those probes that have deferred operation support. * probe: tcp_client_probe: unique_id property is full probe address. * test: automated_test: allow remote probes to be specified by -b args. --- pyocd/probe/tcp_client_probe.py | 2 +- pyocd/probe/tcp_probe_server.py | 6 ++++++ test/automated_test.py | 3 +++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pyocd/probe/tcp_client_probe.py b/pyocd/probe/tcp_client_probe.py index c0d0f37b0..18aea67cb 100644 --- a/pyocd/probe/tcp_client_probe.py +++ b/pyocd/probe/tcp_client_probe.py @@ -79,8 +79,8 @@ def get_probe_with_id(cls, unique_id, is_explicit=False): def __init__(self, unique_id): """! 
@brief Constructor.""" super(TCPClientProbe, self).__init__() - self._uid = unique_id hostname, port = self._extract_address(unique_id) + self._uid = f"remote:{hostname}:{port}" self._socket = ClientSocket(hostname, port) self._is_open = False self._request_id = 0 diff --git a/pyocd/probe/tcp_probe_server.py b/pyocd/probe/tcp_probe_server.py index 62f84f774..ed07f053f 100644 --- a/pyocd/probe/tcp_probe_server.py +++ b/pyocd/probe/tcp_probe_server.py @@ -258,6 +258,12 @@ def setup(self): def finish(self): LOG.info("Remote probe client disconnected (%s from port %i)", self._client_domain, self.client_address[1]) + # Flush the probe and ignore any lingering errors. + try: + self._session.probe.flush() + except exceptions.Error as err: + LOG.debug("exception while flushing probe on disconnect: %s", err) + self._session = None StreamRequestHandler.finish(self) diff --git a/test/automated_test.py b/test/automated_test.py index bc442432c..9864e7d7c 100755 --- a/test/automated_test.py +++ b/test/automated_test.py @@ -380,7 +380,10 @@ def main(): # Filter boards. if args.board: + # Get the full unique ID of any matching probes. board_id_list = [b for b in board_id_list if any(c for c in args.board if c.lower() in b.lower())] + # Add in any requested remotes. + board_id_list += [a for a in args.board if a.startswith('remote:')] # Generate board test configs. test_configs = [ From cb1ef41f93c09e778fc8d3990f6b41725c3fe5f7 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 4 Jan 2022 18:07:57 +0000 Subject: [PATCH 103/123] test: test_semihosting: fix issues; restore telnet tests. (#1294) - Fix unbound name error if no probe is available. - Uncomment semihosting telnet server tests and fix them. - Add is_running property to StreamServer for use in the tests. 
--- pyocd/utility/server.py | 7 ++ test/unit/test_semihosting.py | 171 ++++++++++++++++++---------------- 2 files changed, 97 insertions(+), 81 deletions(-) diff --git a/pyocd/utility/server.py b/pyocd/utility/server.py index 695c90947..fc34a6663 100644 --- a/pyocd/utility/server.py +++ b/pyocd/utility/server.py @@ -66,6 +66,7 @@ def __init__(self, port, serve_local_only=True, name=None, is_read_only=True, ex self._buffer_lock = threading.Lock() self.connected = None self._shutdown_event = threading.Event() + self._is_running: bool = False self.daemon = True self.start() @@ -73,6 +74,10 @@ def __init__(self, port, serve_local_only=True, name=None, is_read_only=True, ex def port(self): return self._port + @property + def is_running(self) -> bool: + return self._is_running + def stop(self): self._shutdown_event.set() self.join() @@ -82,6 +87,7 @@ def run(self): (" (%s)" % self._extra_info) if self._extra_info else "") self.connected = None try: + self._is_running = True while not self._shutdown_event.is_set(): # Wait for a client to connect. 
# TODO support multiple client connections @@ -115,6 +121,7 @@ def run(self): except socket.timeout: pass finally: + self._is_running = False self._abstract_socket.cleanup() LOG.info("%sserver stopped", self._formatted_name) diff --git a/test/unit/test_semihosting.py b/test/unit/test_semihosting.py index d99658fcd..93d0f41da 100644 --- a/test/unit/test_semihosting.py +++ b/test/unit/test_semihosting.py @@ -26,10 +26,11 @@ from pyocd.core.helpers import ConnectHelper from pyocd.core.target import Target from pyocd.debug import semihost +from pyocd.utility.server import StreamServer @pytest.fixture(scope='module') def tgt(request): - board = None + session = None try: session = ConnectHelper.session_with_chosen_probe(blocking=False, return_first=True) except Exception as error: @@ -38,15 +39,15 @@ def tgt(request): pytest.skip("No probe present") return session.open() - board = session.board session.options['resume_on_disconnect'] = False - board.target.reset_and_halt() + assert session.target + session.target.reset_and_halt() - def cleanup(): - board.uninit() + def close_session(): + session.close() - request.addfinalizer(cleanup) - return board.target + request.addfinalizer(close_session) + return session.target @pytest.fixture(scope='module') def ctx(tgt): @@ -472,80 +473,88 @@ def test_istty_non_stdio(self, semihost_builder, delete_testfile): result = semihost_builder.do_close(fd) assert result == 0 -# @pytest.fixture(scope='function') -# def telnet(request): -# telnet = semihost.TelnetSemihostIOHandler(4444) -# def stopit(): -# telnet.stop() -# request.addfinalizer(stopit) -# return telnet -# -# @pytest.fixture(scope='function') -# def semihost_telnet_agent(ctx, telnet, request): -# agent = semihost.SemihostAgent(ctx, console=telnet) -# def cleanup(): -# agent.cleanup() -# request.addfinalizer(cleanup) -# return agent -# -# @pytest.fixture(scope='function') -# def semihost_telnet_builder(tgt, semihost_telnet_agent, ramrgn): -# return SemihostRequestBuilder(tgt, 
semihost_telnet_agent, ramrgn) -# -# @pytest.fixture(scope='function') -# def telnet_conn(request): -# from time import sleep -# # Sleep for a bit to ensure the semihost telnet server has started up in its own thread. -# sleep(0.25) -# telnet = telnetlib.Telnet('localhost', 4444, 10.0) -# def cleanup(): -# telnet.close() -# request.addfinalizer(cleanup) -# return telnet -# -# class TestSemihostingTelnet: -# def test_connect(self, semihost_telnet_builder, telnet_conn): -# result = semihost_telnet_builder.do_no_args_call(semihost.TARGET_SYS_ERRNO) -# assert result == 0 -# -# def test_write(self, semihost_telnet_builder, telnet_conn): -# result = semihost_telnet_builder.do_write(semihost.STDOUT_FD, 'hello world') -# assert result == 0 -# -# index, _, text = telnet_conn.expect(['hello world']) -# assert index != -1 -# assert text == 'hello world' -# -# def test_writec(self, semihost_telnet_builder, telnet_conn): -# for c in 'xyzzy': -# result = semihost_telnet_builder.do_writec(c) -# assert result == 0 -# -# index, _, text = telnet_conn.expect([c]) -# assert index != -1 -# assert text == c -# -# def test_write0(self, semihost_telnet_builder, telnet_conn): -# result = semihost_telnet_builder.do_write0('hello world') -# assert result == 0 -# -# index, _, text = telnet_conn.expect(['hello world']) -# assert index != -1 -# assert text == 'hello world' -# -# def test_read(self, semihost_telnet_builder, telnet_conn): -# telnet_conn.write('hello world') -# -# result, data = semihost_telnet_builder.do_read(semihost.STDIN_FD, 11) -# assert result == 0 -# assert data == 'hello world' -# -# def test_readc(self, semihost_telnet_builder, telnet_conn): -# telnet_conn.write('xyz') -# -# for c in 'xyz': -# rc = semihost_telnet_builder.do_no_args_call(semihost.TARGET_SYS_READC) -# assert chr(rc) == c +@pytest.fixture(scope='function') +def telnet_server(request): + telnet_server = StreamServer( + 0, # port 0 to automatically allocate a free port + True, # local only + "Semihost", # 
name + False, # is read only + extra_info="test" + ) + def stopit(): + telnet_server.stop() + request.addfinalizer(stopit) + return telnet_server + +@pytest.fixture(scope='function') +def semihost_telnet_agent(ctx, telnet_server, request): + semihost_console = semihost.ConsoleIOHandler(telnet_server) + agent = semihost.SemihostAgent(ctx, console=semihost_console) + def cleanup(): + agent.cleanup() + request.addfinalizer(cleanup) + return agent + +@pytest.fixture(scope='function') +def semihost_telnet_builder(tgt, semihost_telnet_agent, ramrgn): + return SemihostRequestBuilder(tgt, semihost_telnet_agent, ramrgn) + +@pytest.fixture(scope='function') +def telnet_conn(request, telnet_server): + from time import sleep + # Sleep for a bit to ensure the semihost telnet server has started up in its own thread. + while not telnet_server.is_running: + sleep(0.005) + telnet = telnetlib.Telnet('localhost', telnet_server.port, 10.0) + def cleanup(): + telnet.close() + request.addfinalizer(cleanup) + return telnet + +class TestSemihostingTelnet: + def test_connect(self, semihost_telnet_builder, telnet_conn): + result = semihost_telnet_builder.do_no_args_call(semihost.TARGET_SYS_ERRNO) + assert result == 0 + + def test_write(self, semihost_telnet_builder, telnet_conn): + result = semihost_telnet_builder.do_write(semihost.STDOUT_FD, b'hello world') + assert result == 0 + + index, _, text = telnet_conn.expect([b'hello world']) + assert index != -1 + assert text == b'hello world' + + def test_writec(self, semihost_telnet_builder, telnet_conn): + for c in (bytes([i]) for i in b'xyzzy'): + result = semihost_telnet_builder.do_writec(c) + assert result == 0 + + index, _, text = telnet_conn.expect([c]) + assert index != -1 + assert text == c + + def test_write0(self, semihost_telnet_builder, telnet_conn): + result = semihost_telnet_builder.do_write0(b'hello world') + assert result == 0 + + index, _, text = telnet_conn.expect([b'hello world']) + assert index != -1 + assert text == b'hello 
world' + + def test_read(self, semihost_telnet_builder, telnet_conn): + telnet_conn.write(b'hello world') + + result, data = semihost_telnet_builder.do_read(semihost.STDIN_FD, 11) + assert result == 0 + assert data == b'hello world' + + def test_readc(self, semihost_telnet_builder, telnet_conn): + telnet_conn.write(b'xyz') + + for c in 'xyz': + rc = semihost_telnet_builder.do_no_args_call(semihost.TARGET_SYS_READC) + assert chr(rc) == c class TestSemihostAgent: def test_no_io_handler(self, ctx): From e48592eaa927c233e7cb8558c55f00365c1c1d6f Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 4 Jan 2022 23:07:12 +0000 Subject: [PATCH 104/123] probe: ensure set_clock() works with float. (#1297) --- pyocd/probe/jlink_probe.py | 2 +- pyocd/probe/picoprobe.py | 2 +- pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | 2 +- pyocd/probe/stlink/stlink.py | 3 ++- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pyocd/probe/jlink_probe.py b/pyocd/probe/jlink_probe.py index cdcebb957..267720878 100644 --- a/pyocd/probe/jlink_probe.py +++ b/pyocd/probe/jlink_probe.py @@ -255,7 +255,7 @@ def disconnect(self): def set_clock(self, frequency): try: - self._link.set_speed(frequency // 1000) + self._link.set_speed(int(frequency) // 1000) except JLinkException as exc: raise self._convert_exception(exc) from exc diff --git a/pyocd/probe/picoprobe.py b/pyocd/probe/picoprobe.py index 1b729dea4..3f25fc90a 100644 --- a/pyocd/probe/picoprobe.py +++ b/pyocd/probe/picoprobe.py @@ -467,7 +467,7 @@ def disconnect(self): self._is_connected = False def set_clock(self, frequency): - self._link.set_swd_frequency(frequency // 1000) + self._link.set_swd_frequency(int(frequency) // 1000) def reset(self): self.assert_reset(True) diff --git a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py index cdd2d486d..99d5f3a8a 100644 --- a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py +++ b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py @@ -757,7 
+757,7 @@ def is_reset_asserted(self): @locked def set_clock(self, frequency): self.flush() - self._protocol.set_swj_clock(frequency) + self._protocol.set_swj_clock(int(frequency)) self._frequency = frequency def get_swj_mode(self): diff --git a/pyocd/probe/stlink/stlink.py b/pyocd/probe/stlink/stlink.py index 40cad9b31..b96c096fe 100644 --- a/pyocd/probe/stlink/stlink.py +++ b/pyocd/probe/stlink/stlink.py @@ -300,8 +300,9 @@ def set_com_frequency(self, protocol, freq): assert self._hw_version >= 3 with self._lock: + freq_khz = int(freq) // 1000 cmd = [Commands.JTAG_COMMAND, Commands.SET_COM_FREQ, protocol.value - 1, 0] - cmd.extend(conversion.u32le_list_to_byte_list([freq // 1000])) + cmd.extend(conversion.u32le_list_to_byte_list([freq_khz // 1000])) response = self._device.transfer(cmd, readSize=8) self._check_status(response[0:2]) From 47c6228e9a256ff041e809bf66d669fb76b908cb Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Wed, 5 Jan 2022 16:33:49 +0000 Subject: [PATCH 105/123] commands: 'reg' and 'wreg' improvements. (#1298) - '-p' option to force lookup as peripheral register. - Print any number of registers. - '-p' and '-f' options for wreg. - Add 'rr' alias for 'reg', 'wr' alias for 'wreg'. - Updated command reference. - Fix an issue in generate_command_help.py with the base classes having empty 'names' lists in their INFO attribute (which they have for documentation purposes). --- docs/command_reference.md | 91 +++++++++++------- pyocd/commands/commands.py | 160 +++++++++++++++++-------------- scripts/generate_command_help.py | 18 ++-- 3 files changed, 158 insertions(+), 111 deletions(-) diff --git a/docs/command_reference.md b/docs/command_reference.md index 9870b5912..1b495a9ab 100644 --- a/docs/command_reference.md +++ b/docs/command_reference.md @@ -366,20 +366,31 @@ init Ignored; for OpenOCD compatibility. +Probe + + +flushprobe + + +Ensure all debug probe requests have been completed. + + Registers -reg +reg, +rr -[-f] [REG] +[-p] [-f] [REG...] 
Print core or peripheral register(s). -wreg +wreg, +wr -[-r] REG VALUE +[-r] [-p] [-f] REG VALUE Set the value of a core or peripheral register. @@ -650,37 +661,37 @@ Commands ##### `break` **Usage**: ADDR \ -Set a breakpoint address. +Set a breakpoint address. ##### `lsbreak` **Usage**: \ -List breakpoints. +List breakpoints. ##### `lswatch` **Usage**: \ -List watchpoints. +List watchpoints. ##### `rmbreak` **Usage**: ADDR \ -Remove a breakpoint. +Remove a breakpoint. ##### `rmwatch` **Usage**: ADDR \ -Remove a watchpoint. +Remove a watchpoint. ##### `watch` **Usage**: ADDR [r|w|rw] [1|2|4] \ -Set a watchpoint address, and optional access type (default rw) and size (4). +Set a watchpoint address, and optional access type (default rw) and size (4). ### Bringup @@ -689,7 +700,7 @@ These commands are meant to be used when starting up Commander in no-init mode. ##### `initdp` **Usage**: \ -Init DP and power up debug. +Init DP and power up debug. ##### `makeap` @@ -701,7 +712,7 @@ Creates a new AP object for the given APSEL. The type of AP, MEM-AP or generic, ##### `reinit` **Usage**: \ -Reinitialize the target object. +Reinitialize the target object. ### Commander @@ -710,13 +721,13 @@ Reinitialize the target object. **Aliases**: `quit` \ **Usage**: \ -Quit pyocd commander. +Quit pyocd commander. ##### `list` **Usage**: \ -Show available targets. +Show available targets. ### Core @@ -731,21 +742,21 @@ Resume execution of the target. The target's state is read back after resuming. ##### `core` **Usage**: [NUM] \ -Select CPU core by number or print selected core. +Select CPU core by number or print selected core. ##### `halt` **Aliases**: `h` \ **Usage**: \ -Halt the target. +Halt the target. ##### `step` **Aliases**: `s` \ **Usage**: [COUNT] \ -Step one or more instructions. +Step one or more instructions. ### Dap @@ -754,28 +765,28 @@ Step one or more instructions. **Aliases**: `rap` \ **Usage**: [APSEL] ADDR \ -Read AP register. +Read AP register. 
##### `readdp` **Aliases**: `rdp` \ **Usage**: ADDR \ -Read DP register. +Read DP register. ##### `writeap` **Aliases**: `wap` \ **Usage**: [APSEL] ADDR DATA \ -Write AP register. +Write AP register. ##### `writedp` **Aliases**: `wdp` \ **Usage**: ADDR DATA \ -Write DP register. +Write DP register. ### Device @@ -789,7 +800,7 @@ Reset the target, optionally specifying the reset type. The reset type must be o ##### `unlock` **Usage**: \ -Unlock security on the target. +Unlock security on the target. ### General @@ -798,7 +809,7 @@ Unlock security on the target. **Aliases**: `?` \ **Usage**: [CMD] \ -Show help for commands. +Show help for commands. ### Memory @@ -820,7 +831,7 @@ Disassemble instructions at an address. Only available if the capstone library i ##### `erase` **Usage**: [ADDR] [COUNT] \ -Erase all internal flash or a range of sectors. +Erase all internal flash or a range of sectors. ##### `fill` @@ -838,7 +849,7 @@ Search for a value in memory within the given address range. A pattern of any nu ##### `load` **Usage**: FILENAME [ADDR] \ -Load a binary, hex, or elf file with optional base address. +Load a binary, hex, or elf file with optional base address. ##### `loadmem` @@ -878,7 +889,7 @@ Read 8-bit bytes. Optional length parameter is the number of bytes to read. If t ##### `savemem` **Usage**: ADDR LEN FILENAME \ -Save a range of memory to a binary file. +Save a range of memory to a binary file. ##### `write16` @@ -914,21 +925,31 @@ Write 8-bit bytes to memory. The data arguments are 8-bit bytes. Can write to bo ##### `init` **Usage**: init \ -Ignored; for OpenOCD compatibility. +Ignored; for OpenOCD compatibility. + + +### Probe + +##### `flushprobe` + +**Usage**: \ +Ensure all debug probe requests have been completed. ### Registers ##### `reg` -**Usage**: [-f] [REG] \ -Print core or peripheral register(s). If no arguments are provided, all core registers will be printed. 
Either a core register name, the name of a peripheral, or a peripheral.register can be provided. When a peripheral name is provided without a register, all registers in the peripheral will be printed. If the -f option is passed, then individual fields of peripheral registers will be printed in addition to the full value. +**Aliases**: `rr` \ +**Usage**: [-p] [-f] [REG...] \ +Print core or peripheral register(s). If no arguments are provided, the 'general' core register group will be printed. Either a core register name, the name of a peripheral, or a peripheral.register can be provided. When a peripheral name is provided without a register, all registers in the peripheral will be printed. The -p option forces evaluating the register name as a peripheral register name. If the -f option is passed, then individual fields of peripheral registers will be printed in addition to the full value. ##### `wreg` -**Usage**: [-r] REG VALUE \ -Set the value of a core or peripheral register. The REG parameter must be a core register name or a peripheral.register. When a peripheral register is written, if the -r option is passed then it is read back and the updated value printed. +**Aliases**: `wr` \ +**Usage**: [-r] [-p] [-f] REG VALUE \ +Set the value of a core or peripheral register. The REG parameter must be a core register name or a peripheral.register. When a peripheral register is written, if the -r option is passed then it is read back and the updated value printed. The -p option forces evaluating the register name as a peripheral register name. If the -f option is passed, then individual fields of peripheral registers will be printed in addition to the full value. ### Semihosting @@ -974,7 +995,7 @@ Show symbol, file, and line for address. The symbol name, source file path, and **Aliases**: `st` \ **Usage**: \ -Show the target's current state. +Show the target's current state. ### Threads @@ -982,7 +1003,7 @@ Show the target's current state. 
##### `threads` **Usage**: {flush,enable,disable,status} \ -Control thread awareness. +Control thread awareness. ### Values @@ -990,11 +1011,11 @@ Control thread awareness. ##### `set` **Usage**: NAME VALUE \ -Set a value. +Set a value. ##### `show` **Usage**: NAME \ -Display a value. +Display a value. diff --git a/pyocd/commands/commands.py b/pyocd/commands/commands.py index a52cb644a..fa7a81178 100755 --- a/pyocd/commands/commands.py +++ b/pyocd/commands/commands.py @@ -161,7 +161,6 @@ def _dump_peripheral_register(self, periph, reg, show_fields): lsb = f.bit_offset f_value = bfx(value, msb, lsb) v_enum = None - v = None if f.enumerated_values: for v in f.enumerated_values: if v.value == f_value: @@ -177,111 +176,132 @@ def _dump_peripheral_register(self, periph, reg, show_fields): f_value_bin_str = bin(f_value)[2:] f_value_bin_str = "0" * (f.bit_width - len(f_value_bin_str)) + f_value_bin_str if v_enum: - assert v - f_value_enum_str = " %s: %s" % (v.name, v_enum.description) + if v_enum.name and v_enum.description: + f_value_enum_str = f" {v_enum.name}: {v_enum.description}" + elif v_enum.name or v_enum.description: + f_value_enum_str = f" {v_enum.name or v_enum.description}" + else: + f_value_enum_str = "" else: f_value_enum_str = "" self.context.writei(" %s[%s] = %s (%s)%s", f.name, bits_str, f_value_str, f_value_bin_str, f_value_enum_str) class RegCommand(RegisterCommandBase): INFO = { - 'names': ['reg'], + 'names': ['reg', 'rr'], 'group': 'standard', 'category': 'registers', - 'nargs': [0, 1, 2], - 'usage': "[-f] [REG]", + 'nargs': '*', + 'usage': "[-p] [-f] [REG...]", 'help': "Print core or peripheral register(s).", - 'extra_help': "If no arguments are provided, all core registers will be printed. " - "Either a core register name, the name of a peripheral, or a " - "peripheral.register can be provided. When a peripheral name is " - "provided without a register, all registers in the peripheral will " - "be printed. 
If the -f option is passed, then individual fields of " - "peripheral registers will be printed in addition to the full value.", + 'extra_help': + "If no arguments are provided, the 'general' core register group will be printed. Either a core " + "register name, the name of a peripheral, or a peripheral.register can be provided. When a peripheral " + "name is provided without a register, all registers in the peripheral will be printed. The -p option " + "forces evaluating the register name as a peripheral register name. If the -f option is passed, then " + "individual fields of peripheral registers will be printed in addition to the full value.", } - def parse(self, args): - self.show_all = False - self.reg = None - self.show_fields = False + show_all = False + show_fields = False + show_peripheral = False + def parse(self, args): if len(args) == 0: - self.reg = "general" + self.regs = ["general"] else: - reg_idx = 0 - if len(args) == 2 and args[0] == '-f': - reg_idx = 1 - self.show_fields = True + while (len(args) >= 2) and args[0].startswith('-'): + opt = args.pop(0) + if opt == '-f': + self.show_fields = True + elif opt == '-p': + self.show_peripheral = True + else: + raise exceptions.CommandError(f"unrecognized option {opt}") - self.reg = args[reg_idx].lower() - self.show_all = (self.reg == "all") + self.regs = args + self.show_all = (not self.show_peripheral and self.regs[0].lower() == "all") def execute(self): if self.show_all: self.dump_registers(show_all=True) return - # Check register names first. 
- if self.reg in self.context.selected_core.core_registers.by_name: - if not self.context.selected_core.is_halted(): - self.context.write("Core is not halted; cannot read core registers") - return + matcher = UniquePrefixMatcher(self.context.selected_core.core_registers.groups) - info = self.context.selected_core.core_registers.by_name[self.reg] - value = self.context.selected_core.read_core_register(self.reg) - value_str = self._format_core_register(info, value) - self.context.writei("%s = %s", self.reg, value_str) - return + for reg in self.regs: + reg = reg.lower() + if not self.show_peripheral: + # Check register names first. + if reg in self.context.selected_core.core_registers.by_name: + if not self.context.selected_core.is_halted(): + self.context.write("Core is not halted; cannot read core registers") + return - # Now look for matching group name. - matcher = UniquePrefixMatcher(self.context.selected_core.core_registers.groups) - group_matches = matcher.find_all(self.reg) - if len(group_matches) == 1: - self.dump_registers(show_group=group_matches[0]) - return + info = self.context.selected_core.core_registers.by_name[reg] + value = self.context.selected_core.read_core_register(reg) + value_str = self._format_core_register(info, value) + self.context.writei("%s = %s", reg, value_str) + continue - # And finally check for peripherals. - subargs = self.reg.split('.') - if subargs[0] in self.context.peripherals: - p = self.context.peripherals[subargs[0]] - if len(subargs) > 1: - r = [x for x in p.registers if x.name.lower() == subargs[1]] - if len(r): - self._dump_peripheral_register(p, r[0], self.show_fields) + # Now look for matching group name. + group_matches = matcher.find_all(reg) + if len(group_matches) == 1: + self.dump_registers(show_group=group_matches[0]) + continue + + # And finally check for peripherals. 
+ subargs = reg.split('.') + if subargs[0] in self.context.peripherals: + p = self.context.peripherals[subargs[0]] + if len(subargs) > 1: + r = [x for x in p.registers if x.name.lower() == subargs[1]] + if len(r): + self._dump_peripheral_register(p, r[0], self.show_fields) + else: + raise exceptions.CommandError("invalid register '%s' for %s" % (subargs[1], p.name)) else: - raise exceptions.CommandError("invalid register '%s' for %s" % (subargs[1], p.name)) + for r in p.registers: + self._dump_peripheral_register(p, r, self.show_fields) else: - for r in p.registers: - self._dump_peripheral_register(p, r, self.show_fields) - else: - raise exceptions.CommandError("invalid peripheral '%s'" % (subargs[0])) + raise exceptions.CommandError("invalid peripheral '%s'" % (subargs[0])) class WriteRegCommand(RegisterCommandBase): INFO = { - 'names': ['wreg'], + 'names': ['wreg', 'wr'], 'group': 'standard', 'category': 'registers', - 'nargs': [2, 3], - 'usage': "[-r] REG VALUE", + 'nargs': '*', + 'usage': "[-r] [-p] [-f] REG VALUE", 'help': "Set the value of a core or peripheral register.", - 'extra_help': "The REG parameter must be a core register name or a peripheral.register. " - "When a peripheral register is written, if the -r option is passed then " - "it is read back and the updated value printed.", + 'extra_help': + "The REG parameter must be a core register name or a peripheral.register. When a peripheral register " + "is written, if the -r option is passed then it is read back and the updated value printed. The -p " + "option forces evaluating the register name as a peripheral register name. 
If the -f option is passed, " + "then individual fields of peripheral registers will be printed in addition to the full value.", } + select_peripheral = False + do_readback = False + show_fields = False + def parse(self, args): - idx = 0 - if len(args) == 3: - if args[0] != '-r': - raise exceptions.CommandError("invalid arguments") - idx = 1 - self.do_readback = True - else: - self.do_readback = False - self.reg = args[idx].lower() - self.value = args[idx + 1] + while (len(args) >= 2) and args[0].startswith('-'): + opt = args.pop(0) + if opt == '-r': + self.do_readback = True + elif opt == '-p': + self.select_peripheral = True + elif opt == '-f': + self.show_fields = True + else: + raise exceptions.CommandError(f"unrecognized option {opt}") + + self.reg = args[0].lower() + self.value = args[1] def execute(self): - if self.reg in self.context.selected_core.core_registers.by_name: + if not self.select_peripheral and self.reg in self.context.selected_core.core_registers.by_name: if not self.context.selected_core.is_halted(): self.context.write("Core is not halted; cannot write core registers") return @@ -321,7 +341,7 @@ def execute(self): raise exceptions.CommandError("too many dots") self.context.target.flush() if self.do_readback: - self._dump_peripheral_register(p, r, True) + self._dump_peripheral_register(p, r, self.show_fields) else: raise exceptions.CommandError("invalid register '%s' for %s" % (subargs[1], p.name)) else: diff --git a/scripts/generate_command_help.py b/scripts/generate_command_help.py index ca9ef0767..abf51fefe 100755 --- a/scripts/generate_command_help.py +++ b/scripts/generate_command_help.py @@ -78,20 +78,24 @@ def build_categories(commands): def gen_cmd_groups(commands): categories = build_categories(commands) - + for group in sorted(categories.keys()): print(f"""{group.capitalize()} """) - - group_cmds = sorted(categories[group], key=lambda c: c.INFO['names'][0]) + + # Filter out the base classes that have empty 'names'. 
+ filtered_cmds = [c for c in categories[group] if c.INFO['names']] + group_cmds = sorted(filtered_cmds, key=lambda c: c.INFO['names'][0]) for cmd in group_cmds: gen_command(cmd.INFO) def gen_value_groups(commands): for group in sorted(commands.keys()): # print(f"""{group.capitalize()}""") - - group_cmds = sorted(commands[group], key=lambda c: c.INFO['names'][0]) + + # Filter out the base classes that have empty 'names'. + filtered_cmds = [c for c in commands[group] if c.INFO['names']] + group_cmds = sorted(filtered_cmds, key=lambda c: c.INFO['names'][0]) for cmd in group_cmds: gen_value(cmd.INFO) @@ -105,7 +109,9 @@ def gen_command_docs(commands): if group_docs: print(group_docs) - group_cmds = sorted(categories[group], key=lambda c: c.INFO['names'][0]) + # Filter out the base classes that have empty 'names'. + filtered_cmds = [c for c in categories[group] if c.INFO['names']] + group_cmds = sorted(filtered_cmds, key=lambda c: c.INFO['names'][0]) for cmd in group_cmds: info = cmd.INFO print(f""" From 8c8bcde1580f63b3a38b06b9fadf0f891d38b44f Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Thu, 6 Jan 2022 14:01:16 +0000 Subject: [PATCH 106/123] commands: 'reg' writes to redirectable output stream. (#1299) This change allows the 'reg' command's output to be visible in the gdb console when executed as a monitor command. 
--- pyocd/commands/commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyocd/commands/commands.py b/pyocd/commands/commands.py index fa7a81178..db4b23cf7 100755 --- a/pyocd/commands/commands.py +++ b/pyocd/commands/commands.py @@ -125,7 +125,7 @@ def dump_register_group(self, group_name): value_str = self._format_core_register(info, value) col_printer.add_items([(info.name, value_str)]) - col_printer.write() + col_printer.write(self.context.output_stream) def dump_registers(self, show_all=False, show_group=None): if not self.context.selected_core.is_halted(): From 244e29182aed09f4bb1d54507425a60810ee1754 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Thu, 6 Jan 2022 14:01:59 +0000 Subject: [PATCH 107/123] gdbserver: better logging of RTOS discovery issues when rtos.name is set. (#1300) --- pyocd/gdbserver/gdbserver.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/pyocd/gdbserver/gdbserver.py b/pyocd/gdbserver/gdbserver.py index 778763d8c..3156ad00c 100644 --- a/pyocd/gdbserver/gdbserver.py +++ b/pyocd/gdbserver/gdbserver.py @@ -999,21 +999,27 @@ def init_thread_providers(self): return self.create_rsp_packet(b"OK") forced_rtos_name = self.session.options.get('rtos.name') + if forced_rtos_name and (forced_rtos_name not in RTOS.keys()): + LOG.error("%s was specified as the RTOS but no plugin with that name exists", forced_rtos_name) + return self.create_rsp_packet(b"OK") symbol_provider = GDBSymbolProvider(self) - for rtosName, rtosClass in RTOS.items(): - if (forced_rtos_name is not None) and (rtosName != forced_rtos_name): + LOG.info("Attempting to load RTOS plugins") + for rtos_name, rtos_class in RTOS.items(): + if (forced_rtos_name is not None) and (rtos_name != forced_rtos_name): continue try: - LOG.info("Attempting to load %s", rtosName) - rtos = rtosClass(self.target) + LOG.debug("Attempting to load %s", rtos_name) + rtos = rtos_class(self.target) if rtos.init(symbol_provider): - LOG.info("%s 
loaded successfully", rtosName) + LOG.info("%s loaded successfully", rtos_name) self.thread_provider = rtos break + elif forced_rtos_name is not None: + LOG.error("%s was specified as the RTOS but failed to load", rtos_name) except exceptions.Error as e: - LOG.error("Error during symbol lookup: " + str(e), exc_info=self.session.log_tracebacks) + LOG.error("Error during symbol lookup: %s", e, exc_info=self.session.log_tracebacks) self.did_init_thread_providers = True From 5f9b34178c9c09da7be211348814ed7f7bfdac0c Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 9 Jan 2022 22:14:18 +0000 Subject: [PATCH 108/123] probe: stlink: fix accidental double convert to kHz. (#1301) --- pyocd/probe/stlink/stlink.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyocd/probe/stlink/stlink.py b/pyocd/probe/stlink/stlink.py index b96c096fe..6129335a5 100644 --- a/pyocd/probe/stlink/stlink.py +++ b/pyocd/probe/stlink/stlink.py @@ -302,7 +302,7 @@ def set_com_frequency(self, protocol, freq): with self._lock: freq_khz = int(freq) // 1000 cmd = [Commands.JTAG_COMMAND, Commands.SET_COM_FREQ, protocol.value - 1, 0] - cmd.extend(conversion.u32le_list_to_byte_list([freq_khz // 1000])) + cmd.extend(conversion.u32le_list_to_byte_list([freq_khz])) response = self._device.transfer(cmd, readSize=8) self._check_status(response[0:2]) From 3b610869d0f11b2079dfa182d5712695b043596d Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 9 Jan 2022 22:55:13 +0000 Subject: [PATCH 109/123] Probe client/server: some fixes and small improvements. (#1302) * probe: client: raise errors from deferred callbacks. * probe: server: fix race causing server to terminaly immeditately. If the server thread was delayed in its startup for whatever reason, and the server subcommand entered its wait loop, it would see the server as not running and exit. Add another flag for the server having started to resolve this. * probe: server: fix incorrect access to probe in finish(). 
* probe: server: type annotations and Python 3 cleanup. * probe: server: improve connect/disconnect log messages. --- pyocd/probe/tcp_client_probe.py | 101 +++++++++++++++++++------------- pyocd/probe/tcp_probe_server.py | 92 ++++++++++++++++++----------- 2 files changed, 117 insertions(+), 76 deletions(-) diff --git a/pyocd/probe/tcp_client_probe.py b/pyocd/probe/tcp_client_probe.py index 18aea67cb..8427459e4 100644 --- a/pyocd/probe/tcp_client_probe.py +++ b/pyocd/probe/tcp_client_probe.py @@ -1,6 +1,6 @@ # pyOCD debugger # Copyright (c) 2020-2021 Arm Limited -# Copyright (c) 2021 Chris Reed +# Copyright (c) 2021-2022 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +18,7 @@ import logging import json import threading +from typing import (Any, Optional, Tuple) from .debug_probe import DebugProbe from ..core import exceptions @@ -31,7 +32,31 @@ TRACE.setLevel(logging.CRITICAL) class TCPClientProbe(DebugProbe): - """! @brief Probe class that connects to a debug probe server.""" + """! @brief Probe class that connects to a debug probe server. + + The protocol is a one-line JSON request and response form. + + Request structure: + + ````json + { + "id": , + "request": , + ["arguments": ] + } + ```` + + Response structure: + + ````json + { + "id": , + "status": , + ["error": ,] + ["result": ] + } + ```` + """ DEFAULT_PORT = 5555 @@ -122,29 +147,12 @@ def request_id(self): self._request_id += 1 return rid - def _perform_request(self, request, *args): + def _perform_request_without_raise(self, request: str, *args: Any) -> Tuple[Any, Optional[BaseException]]: """! Execute a request-reply transaction with the server. 
- Request: - - ```` - { - "id": , - "request": , - ["arguments": ] - } - ```` - - Response: - - ```` - { - "id": , - "status": , - ["error": ,] - ["result": ] - } - ```` + The return value is a 2-tuple consisting of the optional result from the request and an optional + exception object. The latter is only non-None if the request failed and a non-zero status code was + returned. """ # Protect requests with the local lock. with self._lock: @@ -170,6 +178,7 @@ def _perform_request(self, request, *args): raise exceptions.ProbeError("malformed response from server; missing required field") # Check response status. + exc = None status = decoded_response['status'] if status != 0: # Get the error message. @@ -178,20 +187,21 @@ def _perform_request(self, request, *args): request, status, error) # Create an appropriate local exception based on the status code. - exc = self._create_exception_from_status_code(status, + exc = self.STATUS_CODE_CLASS_MAP.get(status, exceptions.ProbeError)( "error received from server for command %s (status code %i): %s" % (request, status, error)) - raise exc # Get response value. If not present then there was no return value from the command result = decoded_response.get('result', None) - return result + return result, exc - def _create_exception_from_status_code(self, status, message): - """! @brief Convert a status code into an exception instance.""" - # Other status codes can use the map. 
- return self.STATUS_CODE_CLASS_MAP.get(status, exceptions.ProbeError)(message) + def _perform_request(self, request: str, *args: Any) -> Any: + """@brief Perform the request and immediately raise any errors.""" + result, exc = self._perform_request_without_raise(request, *args) + if exc is not None: + raise exc + return result _PROPERTY_CONVERTERS = { 'capabilities': lambda value: [DebugProbe.Capability[v] for v in value], @@ -278,37 +288,43 @@ def flush(self): ##@{ def read_dp(self, addr, now=True): - result = self._perform_request('read_dp', addr) + result, exc = self._perform_request_without_raise('read_dp', addr) def read_dp_cb(): - # TODO need to raise any exception from here + # Raise any exception here so the traceback includes the actual caller. + if exc is not None: + raise exc return result - return result if now else read_dp_cb + return read_dp_cb() if now else read_dp_cb def write_dp(self, addr, data): self._perform_request('write_dp', addr, data) def read_ap(self, addr, now=True): - result = self._perform_request('read_ap', addr) + result, exc = self._perform_request_without_raise('read_ap', addr) def read_ap_cb(): - # TODO need to raise any exception from here + # Raise any exception here so the traceback includes the actual caller. + if exc is not None: + raise exc return result - return result if now else read_ap_cb + return read_ap_cb() if now else read_ap_cb def write_ap(self, addr, data): self._perform_request('write_ap', addr, data) def read_ap_multiple(self, addr, count=1, now=True): - results = self._perform_request('read_ap_multiple', addr, count) + results, exc = self._perform_request_without_raise('read_ap_multiple', addr, count) def read_ap_multiple_cb(): - # TODO need to raise any exception from here + # Raise any exception here so the traceback includes the actual caller. 
+ if exc is not None: + raise exc return results - return results if now else read_ap_multiple_cb + return read_ap_multiple_cb() if now else read_ap_multiple_cb def write_ap_multiple(self, addr, values): self._perform_request('write_ap_multiple', addr, values) @@ -352,11 +368,14 @@ def write_memory(self, addr, data, transfer_size=32): def read_memory(self, addr, transfer_size=32, now=True): assert transfer_size in (8, 16, 32) - result = self._remote_probe._perform_request('read_mem', self._handle, addr, transfer_size) + result, exc = self._remote_probe._perform_request_without_raise('read_mem', self._handle, addr, transfer_size) def read_callback(): + # Raise any exception here so the traceback includes the actual caller. + if exc is not None: + raise exc return result - return result if now else read_callback + return read_callback() if now else read_callback def write_memory_block32(self, addr, data): self._remote_probe._perform_request('write_block32', self._handle, addr, data) diff --git a/pyocd/probe/tcp_probe_server.py b/pyocd/probe/tcp_probe_server.py index ed07f053f..887c8cc8f 100644 --- a/pyocd/probe/tcp_probe_server.py +++ b/pyocd/probe/tcp_probe_server.py @@ -1,6 +1,6 @@ # pyOCD debugger # Copyright (c) 2020-2021 Arm Limited -# Copyright (c) 2021 Chris Reed +# Copyright (c) 2021-2022 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,26 +20,38 @@ import json import socket from socketserver import (ThreadingTCPServer, StreamRequestHandler) +from time import sleep +from typing import (Callable, Dict, Optional, TYPE_CHECKING, Tuple, cast) from .shared_probe_proxy import SharedDebugProbeProxy from ..core import exceptions from .debug_probe import DebugProbe from ..coresight.ap import (APVersion, APv1Address, APv2Address) +if TYPE_CHECKING: + from ..core.session import Session + from ..core.memory_interface import MemoryInterface + LOG = logging.getLogger(__name__) TRACE = 
LOG.getChild("trace") TRACE.setLevel(logging.CRITICAL) class DebugProbeServer(threading.Thread): - """! @brief Shares a debug probe over a TCP server. + """@brief Shares a debug probe over a TCP server. When the start() method is called, a new daemon thread is created to run the server. The server can be terminated by calling the stop() method, which will also kill the server thread. """ - def __init__(self, session, probe, port=None, serve_local_only=None): - """! @brief Constructor. + def __init__( + self, + session: "Session", + probe: DebugProbe, + port: Optional[int] = None, + serve_local_only: Optional[bool] = None + ) -> None: + """@brief Constructor. @param self The object. @param session A @ref pyocd.core.session.Session "Session" object. Does not need to have a @@ -54,7 +66,7 @@ def __init__(self, session, probe, port=None, serve_local_only=None): @param serve_local_only Optional Boolean. Whether to restrict the server to be accessible only from localhost. If not specified (set to None), then the 'serve_local_only' session option is used. """ - super(DebugProbeServer, self).__init__() + super().__init__() # Configure the server thread. self.name = "debug probe %s server" % probe.unique_id @@ -63,7 +75,8 @@ def __init__(self, session, probe, port=None, serve_local_only=None): # Init instance variables. self._session = session self._probe = probe - self._is_running = False + self._did_start: bool = False + self._is_running: bool = False # Make sure we have a shared proxy for the probe. if isinstance(probe, SharedDebugProbeProxy): @@ -73,7 +86,7 @@ def __init__(self, session, probe, port=None, serve_local_only=None): # Get the port from options if not specified. 
if port is None: - self._port = session.options.get('probeserver.port') + self._port = cast(int, session.options.get('probeserver.port')) else: self._port = port @@ -85,15 +98,20 @@ def __init__(self, session, probe, port=None, serve_local_only=None): address = (host, self._port) # Create the server and bind to the address, but don't start running yet. - self._server = TCPProbeServer(address, session, self._proxy) + self._server = TCPProbeServer(address, session, cast(DebugProbe, self._proxy)) self._server.server_bind() - def start(self): - """! @brief Start the server thread and begin listening.""" + def start(self) -> None: + """! @brief Start the server thread and begin listening. + + Returns once the server thread has begun executing. + """ self._server.server_activate() - super(DebugProbeServer, self).start() + super().start() + while not self._did_start: + sleep(0.005) - def stop(self): + def stop(self) -> None: """! @brief Shut down the server. Any open connections will be forcibly closed. This function does not return until the @@ -103,12 +121,12 @@ def stop(self): self.join() @property - def is_running(self): + def is_running(self) -> bool: """! @brief Whether the server thread is running.""" return self._is_running @property - def port(self): + def port(self) -> int: """! @brief The server's port. If port 0 was specified in the constructor, then, after start() is called, this will reflect the actual port @@ -116,8 +134,9 @@ def port(self): """ return self._port - def run(self): + def run(self) -> None: """! @brief The server thread implementation.""" + self._did_start = True self._is_running = True # Read back the actual port if 0 was specified. @@ -135,18 +154,18 @@ class TCPProbeServer(ThreadingTCPServer): # Change the default SO_REUSEADDR setting. 
allow_reuse_address = True - def __init__(self, server_address, session, probe): + def __init__(self, server_address: Tuple[str, int], session: "Session", probe: DebugProbe): self._session = session self._probe = probe - ThreadingTCPServer.__init__(self, server_address, DebugProbeRequestHandler, + super().__init__(server_address, DebugProbeRequestHandler, bind_and_activate=False) @property - def session(self): + def session(self) -> "Session": return self._session @property - def probe(self): + def probe(self) -> DebugProbe: return self._probe def handle_error(self, request, client_address): @@ -199,11 +218,12 @@ def setup(self): except socket.herror: self._client_domain = self.client_address[0] - LOG.info("Remote probe client connected (%s from port %i)", self._client_domain, self.client_address[1]) - # Get the session and probe we're serving from the server. - self._session = self.server.session - self._probe = self.server.probe + self._session = cast(TCPProbeServer, self.server).session + self._probe = cast(TCPProbeServer, self.server).probe + + LOG.info("Client %s (port %i) connected to probe %s", + self._client_domain, self.client_address[1], self._probe.unique_id) # Give the probe a session if it doesn't have one, in case it needs to access settings. # TODO: create a session proxy so client-side options can be accessed @@ -211,11 +231,11 @@ def setup(self): self._probe.session = self._session # Dict to store handles for AP memory interfaces. - self._next_ap_memif_handle = 0 - self._ap_memif_handles = {} + self._next_ap_memif_handle: int = 0 + self._ap_memif_handles: Dict[int, "MemoryInterface"] = {} # Create the request handlers dict here so we can reference bound probe methods. 
- self._REQUEST_HANDLERS = { + self._REQUEST_HANDLERS: Dict[str, Tuple[Callable, int]] = { # Command Handler Arg count 'hello': (self._request__hello, 1 ), 'readprop': (self._request__read_property, 1 ), @@ -251,21 +271,20 @@ def setup(self): 'write_block8': (self._request__write_block8, 3 ), # 'write_block8', handle:int, addr:int, data:List[int] } - # Let superclass do its thing. (Can't use super() here because the superclass isn't derived - # from object in Py2.) - StreamRequestHandler.setup(self) + # Let superclass do its thing. + super().setup() def finish(self): - LOG.info("Remote probe client disconnected (%s from port %i)", self._client_domain, self.client_address[1]) + LOG.info("Client %s (port %i) disconnected from probe %s", + self._client_domain, self.client_address[1], self._probe.unique_id) # Flush the probe and ignore any lingering errors. try: - self._session.probe.flush() + self._probe.flush() except exceptions.Error as err: LOG.debug("exception while flushing probe on disconnect: %s", err) - self._session = None - StreamRequestHandler.finish(self) + super().finish() def _send_error_response(self, status=1, message=""): response_dict = { @@ -293,8 +312,9 @@ def _send_response(self, result): def handle(self): # Process requests until the connection is closed. while True: + request = None + request_type = "" try: - request = None request_dict = None self._current_request_id = -1 @@ -345,8 +365,10 @@ def handle(self): except Exception as err: # Only send an error response if we received an request. 
if request is not None: - LOG.error("Error while processing %s request from client: %s", request, err, + LOG.error("Error processing '%s' request (ID %i, client %s, probe %s): %s", + request_type, self._current_request_id, self._client_domain, self._probe.unique_id, err, exc_info=self._session.log_tracebacks) + LOG.debug("Full request from error: %s", request.decode('utf-8', 'replace')) self._send_error_response(status=self._get_exception_status_code(err), message=str(err)) else: From e1cc28ba993f7913055d8d1d10e0c107272d9171 Mon Sep 17 00:00:00 2001 From: Clay McClure Date: Wed, 12 Jan 2022 12:35:42 -0800 Subject: [PATCH 110/123] DAP: unlock probe on error (#1304) Fixes GDB server deadlock after a failed read_ap_multiple() access. --- pyocd/coresight/dap.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyocd/coresight/dap.py b/pyocd/coresight/dap.py index f8024b091..04caed38b 100644 --- a/pyocd/coresight/dap.py +++ b/pyocd/coresight/dap.py @@ -1,6 +1,7 @@ # pyOCD debugger # Copyright (c) 2015-2020 Arm Limited # Copyright (c) 2021 Chris Reed +# Copyright (c) 2022 Clay McClure # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -909,6 +910,8 @@ def read_ap_multiple(self, addr: int, count: int = 1, now: bool = True) \ result_cb = self.probe.read_ap_multiple(addr, count, now=False) except exceptions.TargetError as error: self._handle_error(error, num) + if did_lock: + self.unlock() raise except Exception: if did_lock: From 439c62dd76a38ab2ab0c6ff72235ff528b42d7a7 Mon Sep 17 00:00:00 2001 From: Clay McClure Date: Wed, 12 Jan 2022 12:58:30 -0800 Subject: [PATCH 111/123] Gdbserver: clear shutdown event after disconnect (#1305) Otherwise, the listening socket is closed, preventing reconnection. 
--- pyocd/gdbserver/gdbserver.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyocd/gdbserver/gdbserver.py b/pyocd/gdbserver/gdbserver.py index 3156ad00c..7d3b58cae 100644 --- a/pyocd/gdbserver/gdbserver.py +++ b/pyocd/gdbserver/gdbserver.py @@ -1,6 +1,7 @@ # pyOCD debugger # Copyright (c) 2006-2020 Arm Limited # Copyright (c) 2021 Chris Reed +# Copyright (c) 2022 Clay McClure # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -312,6 +313,7 @@ def run(self): while True: try: + self.shutdown_event.clear() self.detach_event.clear() # Notify listeners that the server is running after a short delay. From 368b66264491eb202a9ac5ea22eb905c4407a0d1 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 14 Jan 2022 12:59:31 -0600 Subject: [PATCH 112/123] readme: fix broken links; update some other links; update copyright year. --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 5c0e08ce9..3ec7c3dcf 100644 --- a/README.md +++ b/README.md @@ -32,10 +32,10 @@ Family Packs, [nearly every Cortex-M device](https://www.keil.com/dd2/pack/) on The `pyocd` command line tool gives you total control over your device with these subcommands: - `gdbserver`: GDB remote server allows you to debug using gdb via either the console or - [several GUI debugger options](#recommended-gdb-and-ide-setup). + [several GUI debugger options](https://pyocd.io/docs/gdb_setup). - `load`: Program files of various formats into flash or RAM. - `erase`: Erase part or all of an MCU's flash memory. -- `pack`: Manage [CMSIS Device Family Packs](http://arm-software.github.io/CMSIS_5/Pack/html/index.html) +- `pack`: Manage [CMSIS Device Family Packs](https://open-cmsis-pack.github.io/Open-CMSIS-Pack-Spec/main/html/index.html) that provide additional target device support. - `commander`: Interactive REPL control and inspection of the MCU. 
- `server`: Share a debug probe with a TCP/IP server. @@ -56,8 +56,8 @@ The API and tools provide these features: - SWO and SWV - and more! -Configuration and customization is supported through [config files](docs/configuration.md), -[user scripts](docs/user_scripts.md), and the Python API. +Configuration and customization is supported through [config files](https://pyocd.io/docs/configuration), +[user scripts](https://pyocd.io/docs/user_scripts), and the Python API. Requirements @@ -68,7 +68,7 @@ Requirements - A recent version of [libusb](https://libusb.info/). See [libusb installation](#libusb-installation) for details. - Microcontroller with an Arm Cortex-M CPU - Supported debug probe - - [CMSIS-DAP](http://www.keil.com/pack/doc/CMSIS/DAP/html/index.html) v1 (HID) or v2 (WinUSB), including: + - [CMSIS-DAP](https://arm-software.github.io/CMSIS_5/DAP/html/index.html) v1 (HID) or v2 (WinUSB), including: - Atmel EDBG/nEDBG - Atmel-ICE - Cypress KitProg3 or MiniProg4 @@ -106,7 +106,7 @@ Installing **The full installation guide is available [in the documentation](https://pyocd.io/docs/installing).** For notes about installing and using on non-x86 systems such as Raspberry Pi, see the -[relevant documentation](https://pyocd.io/docs/installing-on-non-x86). +[relevant documentation](https://pyocd.io/docs/installing_on_non_x86). The latest stable version of pyOCD may be installed via [pip](https://pip.pypa.io/en/stable/index.html) as follows: @@ -156,7 +156,7 @@ instructions. ### Target support -See the [target support documentation](https://pyocd.io/docs/target-support) for information on how to check if +See the [target support documentation](https://pyocd.io/docs/target_support) for information on how to check if the MCU(s) you are using have built-in support, and how to install support for additional MCUs via CMSIS-Packs. @@ -164,7 +164,7 @@ CMSIS-Packs. 
Using GDB --------- -See the [GDB setup](https://pyocd.io/docs/gdb-setup) documentation for a guide for setting up +See the [GDB setup](https://pyocd.io/docs/gdb_setup) documentation for a guide for setting up and using pyocd with gdb and IDEs. @@ -194,9 +194,9 @@ discussions](https://github.com/pyocd/pyOCD/discussions) and we'll be happy to h an open issue. Any work on major changes should be discussed with the maintainers to make everyone is aligned. Please see the [contribution guidelines](https://github.com/pyocd/pyOCD/tree/main/CONTRIBUTING.md) for detailed requirements. The [developers' -Guide](https://pyocd.io/docs/developers-guide) has instructions on how to set up a development environment for pyOCD. +Guide](https://pyocd.io/docs/developers_guide) has instructions on how to set up a development environment for pyOCD. -New pull requests should be [created](https://github.com/pyocd/pyOCD/pull/new/develop) against the `develop` branch. +New pull requests should be [created](https://github.com/pyocd/pyOCD/pull/new) against the `develop` branch. (You have to change the "base" to `develop`.) License @@ -206,4 +206,4 @@ PyOCD is licensed with the permissive Apache 2.0 license. See the [LICENSE](https://github.com/pyocd/pyOCD/tree/main/LICENSE) file for the full text of the license. All documentation and the website are licensed with [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/). 
-Copyright © 2006-2021 PyOCD Authors +Copyright © 2006-2022 PyOCD Authors From 8dddd3b7b665aed49c442e65016bd591a370bb69 Mon Sep 17 00:00:00 2001 From: Matthias Wauer Date: Fri, 14 Jan 2022 21:43:55 +0100 Subject: [PATCH 113/123] lpc5500 family: re-unlock debug access after reset (#1306) Co-authored-by: Matthias Wauer --- pyocd/target/family/target_lpc5500.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pyocd/target/family/target_lpc5500.py b/pyocd/target/family/target_lpc5500.py index e2edeae82..3fa98c349 100644 --- a/pyocd/target/family/target_lpc5500.py +++ b/pyocd/target/family/target_lpc5500.py @@ -2,6 +2,7 @@ # Copyright (c) 2019-2020 Arm Limited # Copyright (C) 2020 Ted Tawara # Copyright (c) 2021 Chris Reed +# Copyright (c) 2021 Matthias Wauer # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -131,6 +132,10 @@ def check_locked_state(self, seq): # Perform the unlock procedure using the debugger mailbox. self.unlock(self.aps[DM_AP]) + # Finished, if not called from init sequence + if seq is None: + return + # re-run discovery LOG.info("re-running discovery") new_seq = CallSequence() @@ -348,3 +353,9 @@ def reset_and_halt(self, reset_type=None): # restore vector catch setting self.write_memory(CortexM.DEMCR, demcr) + + def reset(self, reset_type): + # unlock debug access after reset + super(CortexM_LPC5500, self).reset(reset_type) + + self.session.target.check_locked_state(None) From ec0a4952c775a49b31085cc97d1d959ae5bd4449 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 15 Jan 2022 16:46:49 -0600 Subject: [PATCH 114/123] readme: add 'rtt' subcommand to list; some rewording. 
--- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 3ec7c3dcf..97b452276 100644 --- a/README.md +++ b/README.md @@ -18,12 +18,11 @@ See the [wiki news page](https://github.com/pyocd/pyOCD/wiki/News) for all recen -pyOCD is an open source Python package for programming and debugging Arm Cortex-M microcontrollers -using multiple supported types of USB debug probes. It is fully cross-platform, with support for -Linux, macOS, Windows, and FreeBSD. +pyOCD is an open source Python based tool and package for programming and debugging Arm Cortex-M microcontrollers +with a wide range of debug probes. It is fully cross-platform, with support for Linux, macOS, Windows, and FreeBSD. A command line tool is provided that covers most use cases, or you can make use of the Python -API to enable low-level target control. A common use for the Python API is to run and control CI +API to facilitate custom target control. A common use for the Python API is to run and control CI tests. Support for more than 70 popular MCUs is built-in. In addition, through the use of CMSIS Device @@ -40,6 +39,7 @@ The `pyocd` command line tool gives you total control over your device with thes - `commander`: Interactive REPL control and inspection of the MCU. - `server`: Share a debug probe with a TCP/IP server. - `reset`: Hardware or software reset of a device. +- `rtt`: Stream Segger RTT IO with _any_ debug probe. - `list`: Show connected devices. The API and tools provide these features: From 6c1aa25e182dad4526f8a72db4ba340a306edfd6 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sat, 11 Dec 2021 16:49:20 -0600 Subject: [PATCH 115/123] docs: target family notes; semihosting fixes; - Initial version of target_family_notes. - Fix some errors in semihosting. - security: make warning text to alert. - Update readme TOC. - Various updates to debug_probes and add STLinkV3 frequencies table. 
--- docs/README.md | 38 ++++++++++++------------- docs/debug_probes.md | 54 +++++++++++++++++++++++++++++++----- docs/security.md | 4 ++- docs/semihosting.md | 8 ++++-- docs/target_family_notes.md | 55 +++++++++++++++++++++++++++++++++++++ 5 files changed, 128 insertions(+), 31 deletions(-) create mode 100644 docs/target_family_notes.md diff --git a/docs/README.md b/docs/README.md index 3afbc6ca1..e0659a12d 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,49 +1,47 @@ ---- -title: Contents ---- ## Table of Contents -### User documentation +### Getting started + +- [Installing](installing.md) +- [Installing on non-x86](installing_on_non_x86.md) +- [GDB setup](gdb_setup.md) -#### Topics +### User documentation - [Terminology](terminology.md) - [Target support](target_support.md) +- [Debug probes](debug_probes.md) - [Configuration](configuration.md) - [User scripts](user_scripts.md) - [Remote probe access](remote_probe_access.md) - [Configuring logging](configuring_logging.md) -- [Debugging multicore devices](multicore_debug.md) - [Target security features](security.md) +- [Debugging multicore devices](multicore_debug.md) +- [Semihosting](semihosting.md) +- [SWO/SWV](swo_swv.md) +- [Target family usage notes](target_notes.md) -#### Reference +### Reference +- [Built-in targets](builtin-targets.md) - [Session options reference](options.md) - [Command reference](command_reference.md) +- [Environment variables](env_vars.md) -#### Python API +### Python API - [Introduction to the pyOCD Python API](python_api.md) - [Python API examples](api_examples.md) -#### Miscellaneous - -- [Installing on non-x86 platforms](installing_on_non_x86.md) - ### Developer documentation -#### How-tos - - [Developers’ guide](developers_guide.md) - [How to add new targets](adding_new_targets.md) -- [Building a standalone gdb server executable](how_to_build.md) - [Running the automated tests](automated_tests.md) - -#### Architecture and Design - -- [Architecture overview](architecture.md) -- 
[Remote probe protocol](remote_probe_protocol.md) +- Internal design + - [Architecture overview](architecture.md) + - [Remote probe protocol](remote_probe_protocol.md) diff --git a/docs/debug_probes.md b/docs/debug_probes.md index 8472ea71b..50c594bc8 100644 --- a/docs/debug_probes.md +++ b/docs/debug_probes.md @@ -103,14 +103,18 @@ This section contains notes on the use of different types of debug probes and th ### CMSIS-DAP +CMSIS-DAP is a debug probe protocol designed by Arm and released as open source as part of the CMSIS project. There are two major versions of CMSIS-DAP, which use different USB classes: -- v1: USB HID. This version is slower than v2. Still the most common version. -- v2: USB vendor-specific using bulk pipes. Higher performance than v1. WinUSB-enabled to allow driverless usage on Windows 8 and above. Can be used with Windows 7 only if a driver is installed with a tool such as Zadig. +- v1: USB HID. This version is slower than v2. Still the most commonly seen version, although it is now deprecated by + Arm. +- v2: USB vendor-specific using bulk pipes, permitting higher performance than v1. WinUSB-enabled to allow driverless + usage on Windows 8 and above. (Can be used with Windows 7 if device installation settings are set to automatically + download and install drivers for new devices from the Internet.) -These are several commercial probes using the CMSIS-DAP protocol: +These are some of the commercial probes by silicon vendors using the CMSIS-DAP protocol, both standalone and on-board: -- Microchip EDBG/nEDBG +- Microchip EDBG and variants - Microchip Atmel-ICE - Cypress KitProg3 - Cypress MiniProg4 @@ -120,11 +124,13 @@ These are several commercial probes using the CMSIS-DAP protocol: - NXP MCU-Link Pro - NXP OpenSDA -In addition, there are numerous other commercial and open source debug probes based on CMSIS-DAP. +In addition, there are numerous other commercial and open source debug probes utilising the CMSIS-DAP protocol. 
-PyOCD supports automatic target type identification for debug probes built with the +PyOCD supports automatic target type identification for debug probes built with the widely used [DAPLink](https://github.com/ARMmbed/DAPLink) firmware. +[DAPLink firmware updates](https://daplink.io/) + #### Session options - `cmsis_dap.deferred_transfers` (bool, default True) Whether to use deferred transfers in the CMSIS-DAP probe backend. @@ -132,6 +138,11 @@ PyOCD supports automatic target type identification for debug probes built with - `cmsis_dap.limit_packets` (bool, default False) Restrict CMSIS-DAP backend to using a single in-flight command at a time. This is useful on some systems where USB is problematic, in particular virtual machines. +#### Microchip EDBG + +The Microchip (previously Atmel) EDBG probe firmware, at the time of this writing, provides a CMSIS-DAP v1 interface. +On macOS, reading command responses always times out. The probe works on other OSes, however. + ### STLink @@ -140,7 +151,7 @@ Recent STLink firmware versions will only allow access to STM32 targets. If you from a silicon vendor other than ST Micro, please use a different debug probe. -No host resident drivers need to be installed to use STLink probes; only libusb is required. (This may not be true for Windows 7, but has not been verified.) +No host resident drivers need to be installed to use STLink probes; only libusb is required. The minimum supported STLink firmware version is V2J24, or any V3 version. However, upgrading to the latest version is strongly recommended. Numerous bugs have been fixed, and new commands added for feature and performance improvements. @@ -154,6 +165,30 @@ is strongly recommended. Numerous bugs have been fixed, and new commands added f PyOCD supports automatic target type identification for on-board STLink probes that report a board ID. 
+#### STLinkV3 SWD/JTAG frequencies + +The STLinkV3 has an internal clock frequency control for its HCLK prescaler that allows access to different SWD/JTAG +frequencies. The prescaler can be set from pyOCD with the `stlink.v3_prescaler` session option to 1, 2, or 4. In +addition to changing the available SWD/JTAG frequencies, modifying the prescaler also affects UART baud rates and +frequencies of the serial I/O bridge interfaces. + +These are the SWD/JTAG frequencies available with different values of `stlink.v3_prescaler`: + + prescaler=1 (default) | prescaler=2 | prescaler=4 +-----------------------|-------------|---------- + 24.0 MHz | 12.0 MHz | 6.0 MHz + 8.0 MHz | 4.0 MHz | 2.0 MHz + 3.3 MHz | 1.6 MHz | 850 kHz + 1.0 MHz | 1.0 MHz | 520 kHz + 200 kHz | 200 kHz | 200 kHz + 50 kHz | 50 kHz | 50 kHz + +#### Session options + +- `stlink.v3_prescaler` (int, must be 1, 2, or 4, default 1) + Configures the HCLK prescaler of an STLinkV3 to modify the range of available SWD/JTAG frequencies, as described + above. Affects available frequencies of other peripherals, such as UART, as well. + ### J-Link @@ -169,6 +204,11 @@ software directly (or compared to CMSIS-DAP). This is because pyOCD uses the low which are inherently slower than higher level commands (which are less flexible and more difficult and complex to integrate). +#### Serial numbers + +The USB serial number for J-Link probes will have leading zeroes. However, the J-Link driver and applications do not +use leading zeroes. PyOCD also does not use leading zeroes, as it interfaces with the J-Link through its driver. + #### Session options - `jlink.device` (str, no default) diff --git a/docs/security.md b/docs/security.md index c45fc8c87..914d34f44 100644 --- a/docs/security.md +++ b/docs/security.md @@ -47,7 +47,9 @@ Disabling the security features on supported targets is very straight-forward. I performing a mass erase of all device memory. 
For those pyOCD targets with support for security features, pyOCD can perform this unlock procedure for you. -***WARNING:** Unlocking a locked device will erase all data on the chip!* +
+Unlocking a locked device will erase all data on the chip! +
You can add the option `auto_unlock` to your [configuration]({% link _docs/configuration.md %}): diff --git a/docs/semihosting.md b/docs/semihosting.md index 3c97118cc..7e6db0613 100644 --- a/docs/semihosting.md +++ b/docs/semihosting.md @@ -78,12 +78,14 @@ Defaults are for console to be routed to telnet and syscalls handled by gdb. ### Building into firmware -The blog articles listed earlier have details of how to include semihosting support when linking firmware with gcc -and newlib. These are the steps in short: +These are the steps, in short, for how to include semihosting support when linking firmware with gcc +and newlib. - On the linker command line, add `--specs=rdimon.specs` and ensure `-nostartfiles` is not present. - Call `initialise_monitor_handles()` from your firmware before using semihosting. +The C runtime stdio will now be connected with semihosting. + This small example shows how to init and use semihosting from gcc. ```c @@ -100,7 +102,7 @@ void main(void) { } ``` -These blog articles are also very helpful for gcc: +These blog articles are also very helpful guides for using semihosting with gcc: - [Introduction to ARM Semihosting](https://interrupt.memfault.com/blog/arm-semihosting) is an excellent overall introduction, including how to link in semihosting support for gcc and newlib-based projects. diff --git a/docs/target_family_notes.md b/docs/target_family_notes.md new file mode 100644 index 000000000..711169fcc --- /dev/null +++ b/docs/target_family_notes.md @@ -0,0 +1,55 @@ +--- +title: Target family usage notes +--- + +This section documents usage notes for certain target types. + + +## Nordic Semiconductor + +### nRF51 family + +See [SoftDevice](#softdevice) for notes about handling firmware containing SoftDevice images. + +### nRF52 family + +See [SoftDevice](#softdevice) for notes about handling firmware containing SoftDevice images. + +Unlocking of flash security, also called APPROTECT, is supported. 
For more, see the +[security and protection]({% link _docs/security.md %}) documentation. + +### SoftDevice + +The nRF51 and nRF52 series have support for so-called “SoftDevice” firmware, which implements Nordic's Bluetooth LE or +other wireless protocol API. When firmware containing a SoftDevice is loaded, the SoftDevice region of flash is locked. +In order to reprogram the flash sectors containing the SoftDevice image, a mass erase must first be performed. This can +potentially cause issues with flash programming if one is not aware of this requirement. + +For a development workflow with firmware using a SoftDevice, no extra steps are required. + +PyOCD will by default scan flash sectors when programming flash in order to only erase and program sectors whose +contents are changing. Since normally the SoftDevice sectors do not change during development, pyOCD will skip over +these sectors. + +In addition, a chip erased performed with a SoftDevice in flash will erase only the non-SoftDevice sectors. For example, +running `pyocd erase --chip` on such a device will leave the SoftDevice intact and erase all other sectors. + +However, any case where the SoftDevice sectors are being erased requires a prior mass erase. This includes +changing the SoftDevice variant or version, as well as switching to firmware that doesn't include a SoftDevice. +Mass erase is a separate operation. It mostly functions like a chip erase, but can also be used to +[unlock]({% link _docs/security.md %}) devices that have APPROTECT enabled. + +To perform a mass erase: + +``` +pyocd erase --mass +``` + + +## NXP + +### Kinetis family + +Unlocking of flash security is supported for all Kinetis targets. For more, see the +[security and protection]({% link _docs/security.md %}) documentation. 
+ From 65fc968b0084210306011ccbde81117336703aa1 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 17 Jan 2022 18:45:08 +0000 Subject: [PATCH 116/123] cmsis-dap: update list of known CMSIS-DAP probes and udev rules. (#1309) Add all documented Microchip EDBG variants. Clean up definitions a bit. --- pyocd/probe/pydapaccess/interface/common.py | 48 ++++++++++++--------- udev/50-cmsis-dap.rules | 20 +++++++-- 2 files changed, 44 insertions(+), 24 deletions(-) diff --git a/pyocd/probe/pydapaccess/interface/common.py b/pyocd/probe/pydapaccess/interface/common.py index b121eac2d..f4da9ec84 100644 --- a/pyocd/probe/pydapaccess/interface/common.py +++ b/pyocd/probe/pydapaccess/interface/common.py @@ -40,30 +40,38 @@ VidPidPair = Tuple[int, int] -# Known USB VID/PID pairs. -ARM_DAPLINK_ID: VidPidPair = (0x0d28, 0x0204) # Arm DAPLink firmware -ATMEL_ICE_ID: VidPidPair = (0x03eb, 0x2141) # Atmel-ICE -CYPRESS_KITPROG1_2_ID: VidPidPair = (0x04b4, 0xf138) # Cypress KitProg1, KitProg2 in CMSIS-DAP mode -CYPRESS_MINIPROG4_BULK_ID: VidPidPair = (0x04b4, 0xf151) # Cypress MiniProg4 bulk -CYPRESS_MINIPROG4_HID_ID: VidPidPair = (0x04b4, 0xf152) # Cypress MiniProg4 HID -CYPRESS_KITPROG3_HID_ID: VidPidPair = (0x04b4, 0xf154) # Cypress KitProg3 HID -CYPRESS_KITPROG3_BULKD_ID: VidPidPair = (0x04b4, 0xf155) # Cypress KitProg3 bulk -CYPRESS_KITPROG3_BULK_2_UART_ID: VidPidPair = (0x04b4, 0xf166) # Cypress KitProg3 bulk with 2x UART -KEIL_ULINKPLUS_ID: VidPidPair = (0xc251, 0x2750) # Keil ULINKplus -NXP_LPCLINK2_ID: VidPidPair = (0x1fc9, 0x0090) # NXP LPC-LinkII -NXP_MCULINK_ID: VidPidPair = (0x1fc9, 0x0143) # NXP MCU-Link +# USB vendor IDs. +ARM_VID = 0x0d28 +ATMEL_VID = 0x03eb +CYPRESS_VID = 0x04b4 +KEIL_VID = 0xc251 +NXP_VID = 0x1fc9 + +# USB VID/PID pairs. 
+ARM_DAPLINK_ID: VidPidPair = (ARM_VID, 0x0204) # Arm DAPLink firmware +NXP_LPCLINK2_ID: VidPidPair = (NXP_VID, 0x0090) # NXP LPC-LinkII +NXP_MCULINK_ID: VidPidPair = (NXP_VID, 0x0143) # NXP MCU-Link ## List of VID/PID pairs for known CMSIS-DAP USB devices. +# +# Microchip IDs from https://ww1.microchip.com/downloads/en/DeviceDoc/50002630A.pdf. KNOWN_CMSIS_DAP_IDS: List[VidPidPair] = [ ARM_DAPLINK_ID, - ATMEL_ICE_ID, - CYPRESS_KITPROG1_2_ID, - CYPRESS_MINIPROG4_BULK_ID, - CYPRESS_MINIPROG4_HID_ID, - CYPRESS_KITPROG3_HID_ID, - CYPRESS_KITPROG3_BULKD_ID, - CYPRESS_KITPROG3_BULK_2_UART_ID, - KEIL_ULINKPLUS_ID, + (ATMEL_VID, 0x2111), # Microchip EDBG + (ATMEL_VID, 0x2140), # Microchip JTAGICE3 (firmware version 3 or later) + (ATMEL_VID, 0x2141), # Microchip Atmel-ICE + (ATMEL_VID, 0x2144), # Microchip Power Debugger + (ATMEL_VID, 0x2145), # Microchip mEDBG + (ATMEL_VID, 0x216c), # Microchip EDBGC + (ATMEL_VID, 0x2175), # Microchip nEDBG + (CYPRESS_VID, 0xf138), # Cypress KitProg1, KitProg2 in CMSIS-DAP mode + (CYPRESS_VID, 0xf148), # Cypress KitProg1, KitProg2 in CMSIS-DAP mode + (CYPRESS_VID, 0xf151), # Cypress MiniProg4 bulk + (CYPRESS_VID, 0xf152), # Cypress MiniProg4 HID + (CYPRESS_VID, 0xf154), # Cypress KitProg3 HID + (CYPRESS_VID, 0xf155), # Cypress KitProg3 bulk + (CYPRESS_VID, 0xf166), # Cypress KitProg3 bulk with 2x UART + (KEIL_VID, 0x2750), # Keil ULINKplus NXP_LPCLINK2_ID, NXP_MCULINK_ID, ] diff --git a/udev/50-cmsis-dap.rules b/udev/50-cmsis-dap.rules index 9e32cb7b9..74976fcd0 100644 --- a/udev/50-cmsis-dap.rules +++ b/udev/50-cmsis-dap.rules @@ -31,13 +31,25 @@ SUBSYSTEM=="usb", ATTR{idVendor}=="1fc9", ATTR{idProduct}=="0090", MODE:="666" # 1fc9:0143 NXP MCU-Link SUBSYSTEM=="usb", ATTR{idVendor}=="1fc9", ATTR{idProduct}=="0143", MODE:="666" -# 03eb:2141 Atmel-ICE CMSIS-DAP +# 03eb:2111 Microchip EDBG CMSIS-DAP +SUBSYSTEM=="usb", ATTR{idVendor}=="03eb", ATTR{idProduct}=="2111", MODE:="666" + +# 03eb:2140 Microchip JTAGICE3 CMSIS-DAP (with firmware 
version 3 or later) +SUBSYSTEM=="usb", ATTR{idVendor}=="03eb", ATTR{idProduct}=="2140", MODE:="666" + +# 03eb:2141 Microchip Atmel-ICE CMSIS-DAP SUBSYSTEM=="usb", ATTR{idVendor}=="03eb", ATTR{idProduct}=="2141", MODE:="666" -# 03eb:2111 Microchip/Atmel EDBG CMSIS-DAP -SUBSYSTEM=="usb", ATTR{idVendor}=="03eb", ATTR{idProduct}=="2111", MODE:="666" +# 03eb:2144 Microchip Power Debugger CMSIS-DAP +SUBSYSTEM=="usb", ATTR{idVendor}=="03eb", ATTR{idProduct}=="2144", MODE:="666" + +# 03eb:2145 Microchip mEDBG CMSIS-DAP +SUBSYSTEM=="usb", ATTR{idVendor}=="03eb", ATTR{idProduct}=="2145", MODE:="666" + +# 03eb:216c Microchip EDBGC CMSIS-DAP +SUBSYSTEM=="usb", ATTR{idVendor}=="03eb", ATTR{idProduct}=="216c", MODE:="666" -# 03eb:2175 Microchip/Atmel nEDBG CMSIS-DAP +# 03eb:2175 Microchip nEDBG CMSIS-DAP SUBSYSTEM=="usb", ATTR{idVendor}=="03eb", ATTR{idProduct}=="2175", MODE:="666" # If you share your linux system with other users, or just don't like the From 17ab710bab77bcdf9d87a11534c43bbf3f08fedd Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Mon, 17 Jan 2022 23:03:40 +0000 Subject: [PATCH 117/123] gdbserver: fix exit issues; cleanup related code (#1308) * gdbserver: cleanup and fix connection disconnect code. - Major cleanup of shutdown and detach related code in the listen and connection loops. The detach event is checked only by _run_connection(). The list of "detach commands" is replaced by detach() and kill() command handlers setting detach_event themselves. - Cleanup of a connection and the server is done at the end of the relevant method. Handling of server persisting is done at the end of _run_connection(). - The detach command is basically ignored if it is an extended-remote connection, per the RSP spec. Kill will still close the connection, though. - One changed behaviour is that server persisting is correctly honoured if gdb closes its client connection without sending a command. - A few other changes to clean up the code. 
* test: gdb_test: kill gdbserver if it fails to exit. * gdbserver: 'exit' monitor command. * gdbserver: packet_io: handle connection aborted and reset errors. Add exception handlers for ConnectionAbortedError and ConnectionResetError around the GDBServerPacketIOThread send and receive operations. These log a warning and close the connection. --- pyocd/gdbserver/gdbserver.py | 114 ++++++++++++-------------- pyocd/gdbserver/gdbserver_commands.py | 26 +++++- pyocd/gdbserver/packet_io.py | 27 +++--- test/gdb_test.py | 23 +++++- 4 files changed, 115 insertions(+), 75 deletions(-) diff --git a/pyocd/gdbserver/gdbserver.py b/pyocd/gdbserver/gdbserver.py index 7d3b58cae..500176e20 100644 --- a/pyocd/gdbserver/gdbserver.py +++ b/pyocd/gdbserver/gdbserver.py @@ -1,6 +1,6 @@ # pyOCD debugger # Copyright (c) 2006-2020 Arm Limited -# Copyright (c) 2021 Chris Reed +# Copyright (c) 2021-2022 Chris Reed # Copyright (c) 2022 Clay McClure # SPDX-License-Identifier: Apache-2.0 # @@ -112,7 +112,7 @@ class GDBServer(threading.Thread): START_LISTENING_NOTIFY_DELAY = 0.03 # 30 ms def __init__(self, session, core=None): - super(GDBServer, self).__init__() + super().__init__() self.session = session self.board = session.board if core is None: @@ -122,7 +122,6 @@ def __init__(self, session, core=None): self.core = core self.target = self.board.target.cores[core] self.name = "gdb-server-core%d" % self.core - self.abstract_socket = None self.port = session.options.get('gdbserver_port') if self.port != 0: @@ -259,9 +258,6 @@ def __init__(self, session, core=None): b'Z' : (self.breakpoint, 1 ), # Remove breakpoint/watchpoint. } - # Commands that kill the connection to gdb. - self.DETACH_COMMANDS = (b'D', b'k') - # pylint: enable=invalid-name self.setDaemon(True) @@ -279,12 +275,15 @@ def _init_remote_commands(self): # Add the gdbserver command group. 
self._command_context.command_set.add_command_group('gdbserver') - def stop(self): + def stop(self, wait=True): if self.is_alive(): self.shutdown_event.set() - while self.is_alive(): - pass - LOG.info("GDB server thread killed") + if wait: + LOG.debug("gdbserver shutdown event set; waiting for exit") + self.join() + else: + LOG.debug("gdbserver shutdown event set") + LOG.info("GDB server thread stopped") def _cleanup(self): LOG.debug("GDB server cleaning up") @@ -311,11 +310,8 @@ def _cleanup_for_next_connection(self): def run(self): LOG.info('GDB server started on port %d (core %d)', self.port, self.core) - while True: + while not self.shutdown_event.is_set(): try: - self.shutdown_event.clear() - self.detach_event.clear() - # Notify listeners that the server is running after a short delay. # # This timer prevents a race condition where the notification is sent before the server is @@ -324,18 +320,14 @@ def run(self): args=(self.GDBSERVER_START_LISTENING_EVENT, self)) notify_timer.start() - while not self.shutdown_event.is_set() and not self.detach_event.is_set(): + while not self.shutdown_event.is_set(): connected = self.abstract_socket.connect() if connected != None: self.packet_io = GDBServerPacketIOThread(self.abstract_socket) break if self.shutdown_event.is_set(): - self._cleanup() - return - - if self.detach_event.is_set(): - continue + break # Make sure the target is halted. Otherwise gdb gets easily confused. 
self.target.halt() @@ -343,28 +335,27 @@ def run(self): LOG.info("Client connected to port %d!", self.port) self._run_connection() LOG.info("Client disconnected from port %d!", self.port) - self._cleanup_for_next_connection() except Exception as e: LOG.error("Unexpected exception: %s", e, exc_info=self.session.log_tracebacks) + LOG.debug("gdbserver thread exiting") + self._cleanup() + def _run_connection(self): - while True: - try: - if self.shutdown_event.is_set(): - self._cleanup() - return + assert self.packet_io - if self.detach_event.is_set(): - break + self.detach_event.clear() + while not (self.detach_event.is_set() or self.shutdown_event.is_set()): + try: if self.packet_io.interrupt_event.is_set(): if self.non_stop: self.target.halt() self.is_target_running = False self.send_stop_notification() else: - LOG.error("Got unexpected ctrl-c, ignoring") + LOG.warning("Got unexpected ctrl-c, ignoring") self.packet_io.interrupt_event.clear() if self.non_stop and self.is_target_running: @@ -380,13 +371,10 @@ def _run_connection(self): try: packet = self.packet_io.receive(block=not self.non_stop) except ConnectionClosedException: + LOG.debug("gdbserver connection loop exiting; client closed connection") break if self.shutdown_event.is_set(): - self._cleanup() - return - - if self.detach_event.is_set(): break if self.non_stop and packet is None: @@ -395,26 +383,29 @@ def _run_connection(self): if packet is not None and len(packet) != 0: # decode and prepare resp - resp, detach = self.handle_message(packet) + resp = self.handle_message(packet) if resp is not None: # send resp self.packet_io.send(resp) - if detach: - self.abstract_socket.close() - self.packet_io.stop() - self.packet_io = None - if self.persist: - self._cleanup_for_next_connection() - break - else: - self.shutdown_event.set() - return - except Exception as e: LOG.error("Unexpected exception: %s", e, exc_info=self.session.log_tracebacks) + LOG.debug("gdbserver exiting connection loop") + + # Clean up the 
connection. + self.abstract_socket.close() + self.packet_io.stop() + self.packet_io = None + + # If persisting is not enabled, we exit on detach. Otherwise prepare for a new connection. + if self.persist: + LOG.debug("preparing for next connection") + self._cleanup_for_next_connection() + else: + self.shutdown_event.set() + def handle_message(self, msg): try: assert msg[0:1] == b'$', "invalid first char of message != $" @@ -425,36 +416,37 @@ def handle_message(self, msg): LOG.error("Unknown RSP packet: %s", msg) return self.create_rsp_packet(b""), 0 - self.lock.acquire() - if msgStart == 0: - reply = handler() - else: - reply = handler(msg[msgStart:]) - self.lock.release() + with self.lock: + if msgStart == 0: + reply = handler() + else: + reply = handler(msg[msgStart:]) - detach = msg[1:2] in self.DETACH_COMMANDS - return reply, detach + return reply except Exception as e: - self.lock.release() - LOG.error("Unhandled exception in handle_message: %s", e, exc_info=self.session.log_tracebacks) + LOG.error("Unhandled exception in handle_message (%s): %s", + msg[1:2], e, exc_info=self.session.log_tracebacks) return self.create_rsp_packet(b"E01"), 0 def extended_remote(self): + LOG.debug("extended remote enabled") self._is_extended_remote = True return self.create_rsp_packet(b"OK") def detach(self, data): LOG.info("Client detached") - resp = b"OK" - return self.create_rsp_packet(resp) + # In extended-remote mode, detach should detach from the program but not close the connection. gdb assumes + # the server connection is still valid. Detaching from the program doesn't really make sense for embedded + # targets, so just ignore the detach. + if not self._is_extended_remote: + self.detach_event.set() + return self.create_rsp_packet(b"OK") def kill(self): LOG.debug("GDB kill") - # Keep target halted and leave vector catches if in persistent mode. 
- if not self.persist: - self.board.target.set_vector_catch(Target.VectorCatch.NONE) - self.board.target.resume() + self.detach_event.set() + # No packet is returned from the 'k' command. def restart(self, data): self.target.reset_and_halt() diff --git a/pyocd/gdbserver/gdbserver_commands.py b/pyocd/gdbserver/gdbserver_commands.py index 636ef85e7..e5dde197b 100644 --- a/pyocd/gdbserver/gdbserver_commands.py +++ b/pyocd/gdbserver/gdbserver_commands.py @@ -95,9 +95,33 @@ class GdbserverMonitorInitCommand(CommandBase): 'group': 'gdbserver', 'category': 'openocd_compatibility', 'nargs': 2, - 'usage': "init", + 'usage': "", 'help': "Ignored; for OpenOCD compatibility.", } def execute(self): pass + +class GdbserverMonitorExitCommand(CommandBase): + """@brief 'exit' command to cleanly shut down the gdbserver from an IDE. + + This command is primarily intended to be used by an IDE to tell the pyocd process to exit when + the debug session is terminated. + """ + INFO = { + 'names': ['exit'], + 'group': 'gdbserver', + 'category': 'gdbserver', + 'nargs': 0, + 'usage': "", + 'help': "Terminate running gdbservers in this session.", + 'extra_help': + "For the pyocd gdbserver subcommand, terminating gdbservers will cause the process to exit. The " + "effect when the gdbserver(s) are running in a different environment depends on that program. " + "Note that gdb will still believe the connection to be valid after this command completes, so " + "executing the 'disconnect' command is a necessity." 
+ } + + def execute(self): + for server in self.context.session.gdbservers.values(): + server.stop(wait=False) diff --git a/pyocd/gdbserver/packet_io.py b/pyocd/gdbserver/packet_io.py index 9c47670a2..edbfab10f 100644 --- a/pyocd/gdbserver/packet_io.py +++ b/pyocd/gdbserver/packet_io.py @@ -1,6 +1,6 @@ # pyOCD debugger # Copyright (c) 2006-2019 Arm Limited -# Copyright (c) 2021 Chris Reed +# Copyright (c) 2021-2022 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,6 @@ import logging import threading -import socket import queue CTRL_C = b'\x03' @@ -116,8 +115,12 @@ def run(self): TRACE_PACKETS.debug('-->>>> GDB read %d bytes: %s', len(data), data) self._buffer += data - except socket.error: - pass + except (ConnectionAbortedError, ConnectionResetError) as err: + LOG.warning("GDB packet thread: connection unexpectedly closed during receive (%s)", err) + self._closed = True + break + except OSError as err: + LOG.debug("Error in packet IO thread: %s", err) if self._shutdown_event.is_set(): break @@ -130,12 +133,16 @@ def _write_packet(self, packet): TRACE_PACKETS.debug('--<<<< GDB send %d bytes: %s', len(packet), packet) # Make sure the entire packet is sent. 
- remaining = len(packet) - while remaining: - written = self._abstract_socket.write(packet) - remaining -= written - if remaining: - packet = packet[written:] + try: + remaining = len(packet) + while remaining: + written = self._abstract_socket.write(packet) + remaining -= written + if remaining: + packet = packet[written:] + except (ConnectionAbortedError, ConnectionResetError) as err: + LOG.warning("GDB packet thread: connection unexpectedly closed during send (%s)", err) + self._closed = True if self.send_acks: self._expecting_ack = True diff --git a/test/gdb_test.py b/test/gdb_test.py index ea1dbdf9b..19357e736 100644 --- a/test/gdb_test.py +++ b/test/gdb_test.py @@ -1,5 +1,6 @@ # pyOCD debugger # Copyright (c) 2015-2020 Arm Limited +# Copyright (c) 2022 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,6 +40,7 @@ from pyocd.__main__ import PyOCDTool from pyocd.core.helpers import ConnectHelper from pyocd.core.memory_map import MemoryType +from pyocd.core.session import Session from pyocd.flash.file_programmer import FileProgrammer from test_util import ( Test, @@ -166,14 +168,29 @@ def test_gdb(board_id=None, n=0): server_thread = threading.Thread(target=server.run, args=[server_args]) server_thread.daemon = True server_thread.start() + LOG.info('Waiting for gdb to finish...') did_complete = wait_with_deadline(gdb_program, TEST_TIMEOUT_SECONDS) - LOG.info('Waiting for server to finish...') - server_thread.join(timeout=SERVER_EXIT_TIMEOUT) if not did_complete: LOG.error("Test timed out!") + + LOG.info('Waiting for server to finish...') + server_thread.join(timeout=SERVER_EXIT_TIMEOUT) if server_thread.is_alive(): - LOG.error('Server is still running!') + LOG.error('Server is still running! Stopping now... 
and failing test') + did_complete = False + session = Session.get_current() + LOG.info(f"gdbserver session: {session}") + LOG.info(f"gdbservers: {session.gdbservers}") + for g in session.gdbservers.values(): + g.stop() + + # Wait again for server thread to complete now that the gdbservers are stopped. + server_thread.join(timeout=SERVER_EXIT_TIMEOUT) + if server_thread.is_alive(): + # The server thread is _still_ alive. Not much we can do at this point. Any tests run + # past this point will likely fail. + LOG.error("Server thread is still alive after stopping gdbservers!") try: with open(gdb_output_filename, 'r') as f: From 66d7d48a8e5db473b4f5bdc1fd1dad5025a40fe2 Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 18 Jan 2022 18:57:10 +0000 Subject: [PATCH 118/123] cleanup: remove leading "!" in doc comments. (#1310) This was for doxygen. Not needed anymore with the PYTHON_DOCSTRING config setting. --- pyocd/__main__.py | 14 +-- pyocd/board/board.py | 6 +- pyocd/board/mbed_board.py | 8 +- pyocd/cache/memory.py | 14 +-- pyocd/cache/metrics.py | 2 +- pyocd/cache/register.py | 4 +- pyocd/commands/base.py | 26 ++--- pyocd/commands/commander.py | 12 +-- pyocd/commands/execution_context.py | 42 ++++---- pyocd/commands/repl.py | 8 +- pyocd/core/core_registers.py | 50 ++++----- pyocd/core/exceptions.py | 32 +++--- pyocd/core/helpers.py | 12 +-- pyocd/core/memory_interface.py | 30 +++--- pyocd/core/memory_map.py | 68 ++++++------ pyocd/core/options.py | 2 +- pyocd/core/options_manager.py | 28 ++--- pyocd/core/plugin.py | 16 +-- pyocd/core/session.py | 48 ++++----- pyocd/core/soc_target.py | 4 +- pyocd/core/target.py | 18 ++-- pyocd/core/target_delegate.py | 38 +++---- pyocd/coresight/ap.py | 100 +++++++++--------- pyocd/coresight/component.py | 8 +- pyocd/coresight/component_ids.py | 2 +- pyocd/coresight/core_ids.py | 4 +- pyocd/coresight/coresight_target.py | 20 ++-- pyocd/coresight/cortex_m.py | 86 +++++++-------- pyocd/coresight/cortex_m_core_registers.py | 18 ++-- 
pyocd/coresight/cortex_m_v8m.py | 12 +-- pyocd/coresight/dap.py | 66 ++++++------ pyocd/coresight/discovery.py | 28 ++--- pyocd/coresight/dwt.py | 12 +-- pyocd/coresight/fpb.py | 10 +- pyocd/coresight/generic_mem_ap.py | 2 +- pyocd/coresight/gpr.py | 10 +- pyocd/coresight/itm.py | 2 +- pyocd/coresight/rom_table.py | 42 ++++---- pyocd/coresight/sdc600.py | 52 ++++----- pyocd/coresight/tpiu.py | 10 +- pyocd/debug/breakpoints/manager.py | 12 +-- pyocd/debug/cache.py | 2 +- pyocd/debug/context.py | 16 +-- pyocd/debug/elf/elf.py | 16 +-- pyocd/debug/elf/elf_reader.py | 2 +- pyocd/debug/elf/symbols.py | 2 +- pyocd/debug/semihost.py | 14 +-- pyocd/debug/svd/loader.py | 2 +- pyocd/debug/svd/model.py | 12 +-- pyocd/debug/svd/parser.py | 6 +- pyocd/debug/symbols.py | 2 +- pyocd/flash/builder.py | 48 ++++----- pyocd/flash/eraser.py | 6 +- pyocd/flash/file_programmer.py | 13 ++- pyocd/flash/flash.py | 47 +++----- pyocd/flash/loader.py | 12 +-- pyocd/gdbserver/context_facade.py | 18 ++-- pyocd/gdbserver/gdbserver.py | 14 +-- pyocd/gdbserver/packet_io.py | 6 +- pyocd/gdbserver/symbols.py | 2 +- pyocd/gdbserver/syscall.py | 2 +- pyocd/probe/aggregator.py | 4 +- pyocd/probe/cmsis_dap_probe.py | 8 +- pyocd/probe/common.py | 4 +- pyocd/probe/debug_probe.py | 80 +++++++------- pyocd/probe/jlink_probe.py | 14 +-- pyocd/probe/picoprobe.py | 36 +++---- pyocd/probe/pydapaccess/cmsis_dap_core.py | 10 +- pyocd/probe/pydapaccess/dap_access_api.py | 88 +++++++-------- .../probe/pydapaccess/dap_access_cmsis_dap.py | 64 +++++------ pyocd/probe/shared_probe_proxy.py | 6 +- pyocd/probe/stlink/detect/base.py | 8 +- pyocd/probe/stlink/detect/factory.py | 2 +- pyocd/probe/stlink/detect/linux.py | 8 +- pyocd/probe/stlink/detect/windows.py | 12 +-- pyocd/probe/stlink/stlink.py | 10 +- pyocd/probe/stlink/usb.py | 2 +- pyocd/probe/stlink_probe.py | 10 +- pyocd/probe/swj.py | 28 ++--- pyocd/probe/tcp_client_probe.py | 14 +-- pyocd/probe/tcp_probe_server.py | 19 ++-- pyocd/rtos/argon.py | 12 +-- 
pyocd/rtos/common.py | 4 +- pyocd/rtos/freertos.py | 8 +- pyocd/rtos/provider.py | 11 +- pyocd/rtos/rtx5.py | 8 +- pyocd/rtos/threadx.py | 8 +- pyocd/rtos/zephyr.py | 8 +- pyocd/subcommands/base.py | 20 ++-- pyocd/subcommands/commander_cmd.py | 6 +- pyocd/subcommands/erase_cmd.py | 6 +- pyocd/subcommands/gdbserver_cmd.py | 12 +-- pyocd/subcommands/json_cmd.py | 8 +- pyocd/subcommands/list_cmd.py | 6 +- pyocd/subcommands/load_cmd.py | 6 +- pyocd/subcommands/pack_cmd.py | 40 +++---- pyocd/subcommands/reset_cmd.py | 6 +- pyocd/subcommands/rtt_cmd.py | 10 +- pyocd/subcommands/server_cmd.py | 6 +- pyocd/target/builtin/target_MKL28Z512xxx7.py | 7 +- pyocd/target/builtin/target_RP2040.py | 10 +- pyocd/target/family/flash_kinetis.py | 4 +- pyocd/target/family/target_kinetis.py | 10 +- pyocd/target/family/target_lpc5500.py | 6 +- pyocd/target/family/target_nRF52.py | 2 +- pyocd/target/pack/cmsis_pack.py | 62 +++++------ pyocd/target/pack/flash_algo.py | 19 ++-- pyocd/target/pack/pack_target.py | 26 ++--- pyocd/tools/gdb_server.py | 2 +- pyocd/tools/lists.py | 10 +- pyocd/trace/events.py | 16 +-- pyocd/trace/sink.py | 18 ++-- pyocd/trace/swo.py | 16 +-- pyocd/trace/swv.py | 18 ++-- pyocd/utility/autoflush.py | 4 +- pyocd/utility/cmdline.py | 16 +-- pyocd/utility/columns.py | 10 +- pyocd/utility/concurrency.py | 2 +- pyocd/utility/conversion.py | 46 ++++---- pyocd/utility/graph.py | 28 ++--- pyocd/utility/hex.py | 6 +- pyocd/utility/mask.py | 30 +++--- pyocd/utility/notification.py | 10 +- pyocd/utility/progress.py | 12 +-- pyocd/utility/sequencer.py | 36 +++---- pyocd/utility/server.py | 12 +-- pyocd/utility/sockets.py | 4 +- pyocd/utility/strings.py | 10 +- pyocd/utility/timeout.py | 14 +-- test/automated_test.py | 4 +- test/test_user_script.py | 34 +++--- test/test_util.py | 2 +- test/unit/test_rom_table.py | 18 ++-- test/unit/test_semihosting.py | 6 +- 134 files changed, 1169 insertions(+), 1193 deletions(-) diff --git a/pyocd/__main__.py b/pyocd/__main__.py index 
f79e414dd..9857d4964 100644 --- a/pyocd/__main__.py +++ b/pyocd/__main__.py @@ -48,7 +48,7 @@ LOG = logging.getLogger("pyocd.tool") class PyOCDTool(SubcommandBase): - """! @brief Main class for the pyocd tool and subcommands. + """@brief Main class for the pyocd tool and subcommands. """ HELP = "PyOCD debug tools for Arm Cortex devices" @@ -82,7 +82,7 @@ def __init__(self): self._parser = self.build_parser() def build_parser(self) -> argparse.ArgumentParser: - """! @brief Construct the command line parser with all subcommands and options.""" + """@brief Construct the command line parser with all subcommands and options.""" # Create top level argument parser. parser = argparse.ArgumentParser(description=self.HELP) parser.set_defaults(command_class=self, quiet=0, verbose=0, log_level=[]) @@ -96,7 +96,7 @@ def build_parser(self) -> argparse.ArgumentParser: return parser def _setup_logging(self) -> None: - """! @brief Configure the logging module. + """@brief Configure the logging module. The color log formatter is set up, based on the --color argument and `PYOCD_COLOR` env variable. The --color argument overrides `PYOCD_COLOR`. @@ -143,7 +143,7 @@ def _setup_logging(self) -> None: break def invoke(self) -> int: - """! @brief Show help when pyocd is run with no subcommand.""" + """@brief Show help when pyocd is run with no subcommand.""" if self._args.help_options: self.show_options_help() else: @@ -151,11 +151,11 @@ def invoke(self) -> int: return 0 def __call__(self, *args: Any, **kwds: Any) -> "PyOCDTool": - """! @brief Hack to allow the root command object instance to be used as default command class.""" + """@brief Hack to allow the root command object instance to be used as default command class.""" return self def run(self, args: Optional[Sequence[str]] = None) -> int: - """! 
@brief Main entry point for command line processing.""" + """@brief Main entry point for command line processing.""" try: self._args = self._parser.parse_args(args) @@ -181,7 +181,7 @@ def run(self, args: Optional[Sequence[str]] = None) -> int: return 1 def show_options_help(self) -> None: - """! @brief Display help for session options.""" + """@brief Display help for session options.""" for info_name in sorted(options.OPTIONS_INFO.keys()): info = options.OPTIONS_INFO[info_name] if isinstance(info.type, tuple): diff --git a/pyocd/board/board.py b/pyocd/board/board.py index 12d601105..03892de9b 100644 --- a/pyocd/board/board.py +++ b/pyocd/board/board.py @@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__) class Board(GraphNode): - """!@brief Represents the board containing the target and associated components. + """@brief Represents the board containing the target and associated components. The board is the root of the runtime object graph. """ @@ -111,7 +111,7 @@ def __init__(self, self.add_child(self.target) def init(self) -> None: - """! @brief Initialize the board.""" + """@brief Initialize the board.""" # If we don't have a delegate set yet, see if there is a session delegate. if (self.delegate is None) and (self.session.delegate is not None): self.delegate = self.session.delegate @@ -129,7 +129,7 @@ def init(self) -> None: self.delegate.did_connect(board=self) def uninit(self) -> None: - """! @brief Uninitialize the board.""" + """@brief Uninitialize the board.""" if self._inited: LOG.debug("uninit board %s", self) try: diff --git a/pyocd/board/mbed_board.py b/pyocd/board/mbed_board.py index 6487ad7b4..ec2bd4c2b 100644 --- a/pyocd/board/mbed_board.py +++ b/pyocd/board/mbed_board.py @@ -21,7 +21,7 @@ LOG = logging.getLogger(__name__) class MbedBoard(Board): - """! @brief Mbed board class. + """@brief Mbed board class. This class inherits from Board and is specific to mbed boards. 
Particularly, this class will dynamically determine the type of connected board based on the board ID encoded in @@ -29,7 +29,7 @@ class MbedBoard(Board): firmware is generic and doesn't have an associated board. """ def __init__(self, session, target=None, board_id=None): - """! @brief Constructor. + """@brief Constructor. This constructor attempts to use the board ID from the serial number to determine the target type. See #BOARD_ID_TO_INFO. @@ -72,11 +72,11 @@ def __init__(self, session, target=None, board_id=None): @property def name(self): - """! @brief Return board name.""" + """@brief Return board name.""" return self._name @property def description(self): - """! @brief Return description of the board.""" + """@brief Return description of the board.""" return self.name + " [" + self.target_type + "]" diff --git a/pyocd/cache/memory.py b/pyocd/cache/memory.py index f9e68ee9b..bba76d5bb 100644 --- a/pyocd/cache/memory.py +++ b/pyocd/cache/memory.py @@ -25,7 +25,7 @@ LOG = logging.getLogger(__name__) class MemoryCache(object): - """! @brief Memory cache. + """@brief Memory cache. Maintains a cache of target memory. The constructor is passed a backing DebugContext object that will be used to fill the cache. @@ -50,7 +50,7 @@ def _reset_cache(self): self._metrics = CacheMetrics() def _check_cache(self): - """! @brief Invalidates the cache if appropriate.""" + """@brief Invalidates the cache if appropriate.""" if self._core.is_running(): LOG.debug("core is running; invalidating cache") self._reset_cache() @@ -61,7 +61,7 @@ def _check_cache(self): self._run_token = self._core.run_token def _get_ranges(self, addr, count): - """! @brief Splits a memory address range into cached and uncached subranges. + """@brief Splits a memory address range into cached and uncached subranges. @return Returns a 2-tuple with the first element being a set of Interval objects for each of the cached subranges. 
The second element is a set of Interval objects for each of the non-cached subranges. @@ -88,7 +88,7 @@ def _get_ranges(self, addr, count): return cached, uncached def _read_uncached(self, uncached): - """! "@brief Reads uncached memory ranges and updates the cache. + """"@brief Reads uncached memory ranges and updates the cache. @return A list of Interval objects is returned. Each Interval has its @a data attribute set to a bytearray of the data read from target memory. """ @@ -126,7 +126,7 @@ def _dump_metrics(self): LOG.debug("no reads") def _read(self, addr, size): - """! @brief Performs a cached read operation of an address range. + """@brief Performs a cached read operation of an address range. @return A list of Interval objects sorted by address. """ # Get the cached and uncached subranges of the requested read. @@ -142,7 +142,7 @@ def _read(self, addr, size): return combined def _merge_data(self, combined, addr, size): - """! @brief Extracts data from the intersection of an address range across a list of interval objects. + """@brief Extracts data from the intersection of an address range across a list of interval objects. The range represented by @a addr and @a size are assumed to overlap the intervals. The first and last interval in the list may have ragged edges not fully contained in the address range, in @@ -207,7 +207,7 @@ def _update_contiguous(self, cached, addr, value): self._cache.addi(leadBegin, trailEnd, data) def _check_regions(self, addr, count): - """! @return A bool indicating whether the given address range is fully contained within + """@return A bool indicating whether the given address range is fully contained within one known memory region, and that region is cacheable. @exception TransferFaultError Raised if the access is not entirely contained within a single region. 
""" diff --git a/pyocd/cache/metrics.py b/pyocd/cache/metrics.py index 293c4b769..8d2d50e4c 100644 --- a/pyocd/cache/metrics.py +++ b/pyocd/cache/metrics.py @@ -15,7 +15,7 @@ # limitations under the License. class CacheMetrics(object): - """! @brief Holds hit ratio metrics for the caches.""" + """@brief Holds hit ratio metrics for the caches.""" def __init__(self): self.hits = 0 self.misses = 0 diff --git a/pyocd/cache/register.py b/pyocd/cache/register.py index c8a489ef8..02f3e8274 100644 --- a/pyocd/cache/register.py +++ b/pyocd/cache/register.py @@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__) class RegisterCache(object): - """! @brief Cache of a core's register values. + """@brief Cache of a core's register values. The only interesting part of this cache is how it handles the special registers: CONTROL, FAULTMASK, BASEPRI, PRIMASK, and CFBP. The values of the first four registers are read and written @@ -73,7 +73,7 @@ def _dump_metrics(self): LOG.debug("no accesses") def _check_cache(self): - """! @brief Invalidates the cache if needed and returns whether the core is running.""" + """@brief Invalidates the cache if needed and returns whether the core is running.""" if self._core.is_running(): LOG.debug("core is running; invalidating cache") self._reset_cache() diff --git a/pyocd/commands/base.py b/pyocd/commands/base.py index eaa2479c9..500e8f869 100755 --- a/pyocd/commands/base.py +++ b/pyocd/commands/base.py @@ -29,7 +29,7 @@ ALL_COMMANDS: Dict[str, Set[Union["CommandBase", "ValueBase"]]] = {} class CommandMeta(type): - """! @brief Metaclass for commands. + """@brief Metaclass for commands. Examines the `INFO` attribute of the command class and builds the @ref pyocd.commands.commands.ALL_COMMANDS "ALL_COMMANDS" table. @@ -56,7 +56,7 @@ def __new__(mcs: Type, name: str, bases: Tuple[type, ...], objdict: Dict[str, An return new_type class CommandBase(metaclass=CommandMeta): - """! @brief Base class for a command. + """@brief Base class for a command. 
Each command class must have an `INFO` attribute with the following keys: - `names`: List of names for the info. The first element is the primary name. @@ -81,16 +81,16 @@ class CommandBase(metaclass=CommandMeta): } def __init__(self, context): - """! @brief Constructor.""" + """@brief Constructor.""" self._context = context @property def context(self): - """! @brief The command execution context.""" + """@brief The command execution context.""" return self._context def check_arg_count(self, args): - """! @brief Verify the number of command arguments.""" + """@brief Verify the number of command arguments.""" nargs = self.INFO['nargs'] if nargs == '*': pass @@ -106,11 +106,11 @@ def check_arg_count(self, args): raise exceptions.CommandError("too many arguments") def parse(self, args): - """! @brief Extract command arguments.""" + """@brief Extract command arguments.""" pass def execute(self): - """! @brief Perform the command.""" + """@brief Perform the command.""" raise NotImplementedError() def _format_core_register(self, info, value): @@ -126,7 +126,7 @@ def _format_core_register(self, info, value): return value_str def _convert_value(self, arg): - """! @brief Convert an argument to a 32-bit integer. + """@brief Convert an argument to a 32-bit integer. Handles the usual decimal, binary, and hex numbers with the appropriate prefix. Also recognizes register names and address dereferencing. Dereferencing using the @@ -180,7 +180,7 @@ def _convert_value(self, arg): @classmethod def format_help(cls, context, max_width=72): - """! @brief Return a string with the help text for this command.""" + """@brief Return a string with the help text for this command.""" text = "Usage: {cmd} {usage}\n".format(cmd=cls.INFO['names'][0], usage=cls.INFO['usage']) if len(cls.INFO['names']) > 1: text += "Aliases: {0}\n".format(", ".join(cls.INFO['names'][1:])) @@ -190,7 +190,7 @@ def format_help(cls, context, max_width=72): return text class ValueBase(CommandBase): - """! 
@brief Base class for value commands. + """@brief Base class for value commands. Value commands are special commands representing a value that can be read and/or written. They are used through the `show` and `set` commands. A value command has an associated access mode of read-only, @@ -209,16 +209,16 @@ class ValueBase(CommandBase): """ def display(self, args): - """! @brief Output the value of the info.""" + """@brief Output the value of the info.""" raise NotImplementedError() def modify(self, args): - """! @brief Change the info to a new value.""" + """@brief Change the info to a new value.""" raise NotImplementedError() @classmethod def format_help(cls, context, max_width=72): - """! @brief Return a string with the help text for this command.""" + """@brief Return a string with the help text for this command.""" first_name = cls.INFO['names'][0] text = "Usage: " did_print_on_usage_line = False diff --git a/pyocd/commands/commander.py b/pyocd/commands/commander.py index eadb3ef3c..3fb72f58a 100755 --- a/pyocd/commands/commander.py +++ b/pyocd/commands/commander.py @@ -38,7 +38,7 @@ DEFAULT_CLOCK_FREQ_HZ = 1000000 class PyOCDCommander: - """! @brief Manages the commander interface. + """@brief Manages the commander interface. Responsible for connecting the execution context, REPL, and commands, and handles connection. @@ -62,7 +62,7 @@ def __init__( args: "argparse.Namespace", cmds: Optional[CommandsListType] = None ) -> None: - """! @brief Constructor.""" + """@brief Constructor.""" # Read command-line arguments. self.args = args self.cmds: PyOCDCommander.CommandsListType = cmds or [] @@ -73,7 +73,7 @@ def __init__( self.exit_code: int = 0 def run(self) -> int: - """! @brief Main entry point.""" + """@brief Main entry point.""" try: # If no commands, enter interactive mode. If there are commands, use the --interactive arg. 
enter_interactive = (not self.cmds) or self.args.interactive @@ -155,7 +155,7 @@ def _commands_require_connect(self) -> bool: return False def run_commands(self) -> None: - """! @brief Run commands specified on the command line.""" + """@brief Run commands specified on the command line.""" for args in self.cmds: # Open file containing commands. if isinstance(args, io.IOBase) and not isinstance(args, str): @@ -173,7 +173,7 @@ def run_commands(self) -> None: self.context.process_command_line(args) def connect(self) -> bool: - """! @brief Connect to the probe.""" + """@brief Connect to the probe.""" if (self.args.frequency is not None) and (self.args.frequency != DEFAULT_CLOCK_FREQ_HZ): self.context.writei("Setting SWD clock to %d kHz", self.args.frequency // 1000) @@ -230,7 +230,7 @@ def connect(self) -> bool: return result def _post_connect(self) -> bool: - """! @brief Finish the connect process. + """@brief Finish the connect process. The session is opened. The `no_init` parameter passed to the constructor determines whether the board and target are initialized. diff --git a/pyocd/commands/execution_context.py b/pyocd/commands/execution_context.py index 425ddb31a..90a2a11f6 100755 --- a/pyocd/commands/execution_context.py +++ b/pyocd/commands/execution_context.py @@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__) class CommandSet: - """! @brief Holds a set of command classes.""" + """@brief Holds a set of command classes.""" ## Whether command and infos modules have been loaded yet. DID_LOAD_COMMAND_MODULES = False @@ -85,7 +85,7 @@ def value_matcher(self): return self._value_matcher def add_command_group(self, group_name): - """! @brief Add commands belonging to a group to the command set. + """@brief Add commands belonging to a group to the command set. @param self The command set. @param group_name String with the name of the group to add. 
""" @@ -93,7 +93,7 @@ def add_command_group(self, group_name): self.add_commands(ALL_COMMANDS.get(group_name, set())) def add_commands(self, commands): - """! @brief Add some commands to the command set. + """@brief Add some commands to the command set. @param self The command set. @param commands List of command classes. """ @@ -111,7 +111,7 @@ def add_commands(self, commands): self._value_matcher.add_items(value_names.keys()) class CommandInvocation(NamedTuple): - """! @brief Groups the command name with an iterable of args and a handler function. + """@brief Groups the command name with an iterable of args and a handler function. The handler is a callable that will evaluate the command. It accepts a single argument of the CommandInvocation instance. @@ -121,14 +121,14 @@ class CommandInvocation(NamedTuple): handler: Callable[["CommandInvocation"], None] # type:ignore # mypy doesn't support recursive types yet! class CommandExecutionContext: - """! @brief Manages command execution. + """@brief Manages command execution. This class holds persistent state for command execution, and provides the interface for executing commands and command lines. """ def __init__(self, no_init: bool = False, output_stream: Optional[IO[str]] = None): - """! @brief Constructor. + """@brief Constructor. @param self This object. @param no_init Whether the board and target will be initialized when attach_session() is called. Defaults to False. @@ -152,7 +152,7 @@ def __init__(self, no_init: bool = False, output_stream: Optional[IO[str]] = Non self._command_set.add_command_group('standard') def write(self, message='', **kwargs): - """! @brief Write a fixed message to the output stream. + """@brief Write a fixed message to the output stream. The message is written to the output stream passed to the constructor, terminated with a newline by default. The `end` keyword argument can be passed to change the terminator. 
No @@ -170,7 +170,7 @@ def write(self, message='', **kwargs): self._output.write(message + end) def writei(self, fmt, *args, **kwargs): - """! @brief Write an interpolated string to the output stream. + """@brief Write an interpolated string to the output stream. The formatted string is written to the output stream passed to the constructor, terminated with a newline by default. The `end` keyword argument can be passed to change the terminator. @@ -183,7 +183,7 @@ def writei(self, fmt, *args, **kwargs): self.write(message, **kwargs) def writef(self, fmt, *args, **kwargs): - """! @brief Write a formatted string to the output stream. + """@brief Write a formatted string to the output stream. The formatted string is written to the output stream passed to the constructor, terminated with a newline by default. The `end` keyword argument can be passed to change the terminator. @@ -196,7 +196,7 @@ def writef(self, fmt, *args, **kwargs): self.write(message, **kwargs) def attach_session(self, session): - """! @brief Associate a session with the command context. + """@brief Associate a session with the command context. Various data for the context are initialized. This includes selecting the initially selected core and MEM-AP, and getting an ELF file that was set on the target. @@ -258,12 +258,12 @@ def elf(self): @property def command_set(self): - """! @brief CommandSet with commands available in this context.""" + """@brief CommandSet with commands available in this context.""" return self._command_set @property def peripherals(self): - """! @brief Dict of SVD peripherals.""" + """@brief Dict of SVD peripherals.""" assert self.target if self.target.svd_device and not self._loaded_peripherals: for p in self.target.svd_device.peripherals: @@ -281,7 +281,7 @@ def output_stream(self, stream): @property def selected_core(self): - """! 
@brief The Target instance for the selected core.""" + """@brief The Target instance for the selected core.""" return self._selected_core @selected_core.setter @@ -305,7 +305,7 @@ def selected_ap(self): return self.target.aps[self.selected_ap_address] def process_command_line(self, line: str) -> None: - """! @brief Run a command line consisting of one or more semicolon-separated commands. + """@brief Run a command line consisting of one or more semicolon-separated commands. @param self @param line Complete command line string. @@ -316,7 +316,7 @@ def process_command_line(self, line: str) -> None: invoc.handler(invoc) def process_command_file(self, cmd_file: IO[str]) -> None: - """! @brief Run commands contained in a file. + """@brief Run commands contained in a file. @param self @param cmd_file File object containing commands to run. Must be opened in text mode. When this method returns, @@ -335,7 +335,7 @@ def process_command_file(self, cmd_file: IO[str]) -> None: cmd_file.close() def _split_commands(self, line: str) -> Iterator[List[str]]: - """! @brief Generator yielding commands separated by semicolons. + """@brief Generator yielding commands separated by semicolons. Python and system commands are handled specially. For these we yield a list of 2 elements: the command, either "$" or "!", followed by the unmodified remainder of the command line. For these commands, @@ -367,7 +367,7 @@ def _split_commands(self, line: str) -> Iterator[List[str]]: yield result def parse_command(self, cmdline: List[str]) -> CommandInvocation: - """! @brief Create a CommandInvocation from a single command.""" + """@brief Create a CommandInvocation from a single command.""" # Check for Python or system command lines. 
first_char = cmdline[0] if first_char in '$!': @@ -401,7 +401,7 @@ def parse_command(self, cmdline: List[str]) -> CommandInvocation: return CommandInvocation(matched_command, args, self.execute_command) def execute_command(self, invocation: CommandInvocation) -> None: - """! @brief Execute a single command.""" + """@brief Execute a single command.""" # Must have an attached session to run commands, except for certain commands. assert (self.session is not None) or (invocation.cmd in ('list', 'help', 'exit')) @@ -419,7 +419,7 @@ def execute_command(self, invocation: CommandInvocation) -> None: cmd_object.execute() def _build_python_namespace(self) -> None: - """! @brief Construct the dictionary used as the namespace for python commands.""" + """@brief Construct the dictionary used as the namespace for python commands.""" assert self.session assert self.target ns = self.session.user_script_proxy.namespace @@ -430,7 +430,7 @@ def _build_python_namespace(self) -> None: self._python_namespace = ns def handle_python(self, invocation: CommandInvocation) -> None: - """! @brief Evaluate a python expression.""" + """@brief Evaluate a python expression.""" assert self.session try: # Lazily build the python environment. @@ -454,7 +454,7 @@ def handle_python(self, invocation: CommandInvocation) -> None: raise exceptions.CommandError("exception while executing expression: %s" % e) def handle_system(self, invocation: CommandInvocation) -> None: - """! @brief Evaluate a system call command.""" + """@brief Evaluate a system call command.""" try: output = subprocess.check_output(invocation.cmd, stderr=subprocess.STDOUT, shell=True) self.write(six.ensure_str(output), end='') diff --git a/pyocd/commands/repl.py b/pyocd/commands/repl.py index 2579b8b30..529733aa9 100755 --- a/pyocd/commands/repl.py +++ b/pyocd/commands/repl.py @@ -25,14 +25,14 @@ LOG = logging.getLogger(__name__) class ToolExitException(Exception): - """! @brief Special exception indicating the tool should exit. 
+ """@brief Special exception indicating the tool should exit. This exception is only raised by the `exit` command. """ pass class PyocdRepl(object): - """! @brief Read-Eval-Print-Loop for pyOCD commander.""" + """@brief Read-Eval-Print-Loop for pyOCD commander.""" PROMPT = 'pyocd> ' @@ -72,7 +72,7 @@ def __init__(self, command_context): pass def run(self): - """! @brief Runs the REPL loop until EOF is encountered.""" + """@brief Runs the REPL loop until EOF is encountered.""" try: while True: try: @@ -89,7 +89,7 @@ def run(self): pass def run_one_command(self, line): - """! @brief Execute a single command line and handle exceptions.""" + """@brief Execute a single command line and handle exceptions.""" try: line = line.strip() if line: diff --git a/pyocd/core/core_registers.py b/pyocd/core/core_registers.py index 7eb92e518..7703f487f 100644 --- a/pyocd/core/core_registers.py +++ b/pyocd/core/core_registers.py @@ -28,7 +28,7 @@ CoreRegisterValueType = Union[int, float] class CoreRegisterInfo: - """! @brief Useful information about a core register. + """@brief Useful information about a core register. Provides properties for classification of the register, and utilities to convert to and from the raw integer representation of the register value. @@ -51,14 +51,14 @@ class CoreRegisterInfo: @classmethod def add_to_map(cls, all_regs: Sequence["CoreRegisterInfo"]) -> None: - """! @brief Build info map from list of CoreRegisterInfo instance.""" + """@brief Build info map from list of CoreRegisterInfo instance.""" for reg in all_regs: cls._NAME_MAP[reg.name] = reg cls._INDEX_MAP[reg.index] = reg @classmethod def get(cls, reg: "CoreRegisterNameOrNumberType") -> "CoreRegisterInfo": - """! @brief Return the CoreRegisterInfo instance for a register. + """@brief Return the CoreRegisterInfo instance for a register. @param reg Either a register name or internal register number. 
@return CoreRegisterInfo @exception KeyError @@ -82,7 +82,7 @@ def __init__( reg_num: Optional[int] = None, feature: Optional[str] = None ) -> None: - """! @brief Constructor.""" + """@brief Constructor.""" self._name = name self._index = index self._bitsize = bitsize @@ -93,56 +93,56 @@ def __init__( @property def name(self) -> str: - """! @brief Name of the register. Always lowercase.""" + """@brief Name of the register. Always lowercase.""" return self._name @property def index(self) -> int: - """! @brief Integer index of the register.""" + """@brief Integer index of the register.""" return self._index @property def bitsize(self) -> int: - """! @brief Bit width of the register..""" + """@brief Bit width of the register..""" return self._bitsize @property def group(self) -> str: - """! @brief Named group the register is contained within.""" + """@brief Named group the register is contained within.""" return self._group @property def gdb_type(self) -> str: - """! @brief Value type specific to gdb.""" + """@brief Value type specific to gdb.""" return self._gdb_type @property def gdb_regnum(self) -> Optional[int]: - """! @brief Register number specific to gdb.""" + """@brief Register number specific to gdb.""" return self._gdb_regnum @property def gdb_feature(self) -> Optional[str]: - """! @brief GDB architecture feature to which the register belongs.""" + """@brief GDB architecture feature to which the register belongs.""" return self._gdb_feature @property def is_float_register(self) -> bool: - """! @brief Returns true for registers single or double precision float registers (but not, say, FPSCR).""" + """@brief Returns true for registers single or double precision float registers (but not, say, FPSCR).""" return self.is_single_float_register or self.is_double_float_register @property def is_single_float_register(self) -> bool: - """! 
@brief Returns true for registers holding single-precision float values""" + """@brief Returns true for registers holding single-precision float values""" return self.gdb_type == 'ieee_single' @property def is_double_float_register(self) -> bool: - """! @brief Returns true for registers holding double-precision float values""" + """@brief Returns true for registers holding double-precision float values""" return self.gdb_type == 'ieee_double' def from_raw(self, value: int) -> "CoreRegisterValueType": - """! @brief Convert register value from raw (integer) to canonical type.""" + """@brief Convert register value from raw (integer) to canonical type.""" # Convert int to float. if self.is_single_float_register: return conversion.u32_to_float32(value) @@ -152,7 +152,7 @@ def from_raw(self, value: int) -> "CoreRegisterValueType": return value def to_raw(self, value: "CoreRegisterValueType") -> int: - """! @brief Convert register value from canonical type to raw (integer).""" + """@brief Convert register value from canonical type to raw (integer).""" # Convert float to int. if isinstance(value, float): if self.is_single_float_register: @@ -164,7 +164,7 @@ def to_raw(self, value: "CoreRegisterValueType") -> int: return value def clone(self) -> "CoreRegisterInfo": - """! @brief Return a copy of the register info.""" + """@brief Return a copy of the register info.""" return copy(self) def __eq__(self, other: Any) -> bool: @@ -177,7 +177,7 @@ def __repr__(self) -> str: return "<{}@{:#x} {}={} {}-bit>".format(self.__class__.__name__, id(self), self.name, self.index, self.bitsize) class CoreRegistersIndex: - """! @brief Class to hold indexes of available core registers. + """@brief Class to hold indexes of available core registers. This class is meant to be used by a core to hold the set of core registers that are actually present on a particular device, as determined by runtime inspection of the core. 
A number of properties are made @@ -193,31 +193,31 @@ def __init__(self): @property def groups(self) -> Set[str]: - """! @brief Set of unique register group names.""" + """@brief Set of unique register group names.""" return self._groups @property def as_set(self) -> Set[CoreRegisterInfo]: - """! @brief Set of available registers as CoreRegisterInfo objects.""" + """@brief Set of available registers as CoreRegisterInfo objects.""" return self._all @property def by_name(self) -> Dict[str, CoreRegisterInfo]: - """! @brief Dict of (register name) -> CoreRegisterInfo.""" + """@brief Dict of (register name) -> CoreRegisterInfo.""" return self._by_name @property def by_index(self) -> Dict[int, CoreRegisterInfo]: - """! @brief Dict of (register index) -> CoreRegisterInfo.""" + """@brief Dict of (register index) -> CoreRegisterInfo.""" return self._by_index @property def by_feature(self) -> Dict[str, List[CoreRegisterInfo]]: - """! @brief Dict of (register gdb feature) -> List[CoreRegisterInfo].""" + """@brief Dict of (register gdb feature) -> List[CoreRegisterInfo].""" return self._by_feature def iter_matching(self, predicate: Callable[[CoreRegisterInfo], bool]) -> Iterator[CoreRegisterInfo]: - """! @brief Iterate over registers matching a given predicate callable. + """@brief Iterate over registers matching a given predicate callable. @param self The object. @param predicate Callable accepting a single argument, a CoreRegisterInfo, and returning a boolean. If the predicate returns True then the iterator will include the register. @@ -227,7 +227,7 @@ def iter_matching(self, predicate: Callable[[CoreRegisterInfo], bool]) -> Iterat yield reg def add_group(self, regs: Sequence[CoreRegisterInfo]) -> None: - """! @brief Add a list of registers. + """@brief Add a list of registers. @param self The object. @param regs Iterable of CoreRegisterInfo objects. The objects are copied as they are added. 
""" diff --git a/pyocd/core/exceptions.py b/pyocd/core/exceptions.py index d407afabd..ad9194674 100644 --- a/pyocd/core/exceptions.py +++ b/pyocd/core/exceptions.py @@ -15,54 +15,54 @@ # limitations under the License. class Error(RuntimeError): - """! @brief Parent of all errors pyOCD can raise""" + """@brief Parent of all errors pyOCD can raise""" pass class InternalError(Error): - """! @brief Internal consistency or logic error. + """@brief Internal consistency or logic error. This error indicates that something has happened that shouldn't be possible. """ pass class TimeoutError(Error): - """! @brief Any sort of timeout""" + """@brief Any sort of timeout""" pass class TargetSupportError(Error): - """! @brief Error related to target support""" + """@brief Error related to target support""" pass class ProbeError(Error): - """! @brief Error communicating with the debug probe""" + """@brief Error communicating with the debug probe""" pass class ProbeDisconnected(ProbeError): - """! @brief The connection to the debug probe was lost""" + """@brief The connection to the debug probe was lost""" pass class TargetError(Error): - """! @brief An error that happens on the target""" + """@brief An error that happens on the target""" pass class DebugError(TargetError): - """! @brief Error controlling target debug resources""" + """@brief Error controlling target debug resources""" pass class CoreRegisterAccessError(DebugError): - """! @brief Failure to read or write a core register.""" + """@brief Failure to read or write a core register.""" pass class TransferError(DebugError): - """! @brief Error ocurred with a transfer over SWD or JTAG""" + """@brief Error ocurred with a transfer over SWD or JTAG""" pass class TransferTimeoutError(TransferError): - """! @brief An SWD or JTAG timeout occurred""" + """@brief An SWD or JTAG timeout occurred""" pass class TransferFaultError(TransferError): - """! @brief A memory fault occurred. + """@brief A memory fault occurred. 
This exception class is extended to optionally record the start address and an optional length of the attempted memory access that caused the fault. The address and length, if available, will be included @@ -113,7 +113,7 @@ def __str__(self): return desc class FlashFailure(TargetError): - """! @brief Exception raised when flashing fails for some reason. + """@brief Exception raised when flashing fails for some reason. Positional arguments passed to the constructor are passed through to the superclass' constructor, and thus operate like any other standard exception class. The flash address that @@ -147,14 +147,14 @@ def __str__(self): return desc class FlashEraseFailure(FlashFailure): - """! @brief An attempt to erase flash failed. """ + """@brief An attempt to erase flash failed. """ pass class FlashProgramFailure(FlashFailure): - """! @brief An attempt to program flash failed. """ + """@brief An attempt to program flash failed. """ pass class CommandError(Error): - """! @brief Raised when a command encounters an error.""" + """@brief Raised when a command encounters an error.""" pass diff --git a/pyocd/core/helpers.py b/pyocd/core/helpers.py index f2d8b4bc4..81b304dbc 100644 --- a/pyocd/core/helpers.py +++ b/pyocd/core/helpers.py @@ -33,7 +33,7 @@ colorama.init() class ConnectHelper: - """! @brief Helper class for streamlining the probe discovery and session creation process. + """@brief Helper class for streamlining the probe discovery and session creation process. This class provides several static methods that wrap the DebugProbeAggregator methods with a simple command-line user interface, or provide a single method that performs @@ -47,7 +47,7 @@ def get_sessions_for_all_connected_probes( options: Optional[Mapping[str, Any]] = None, **kwargs ) -> List[Session]: - """! @brief Return a list of Session objects for all connected debug probes. + """@brief Return a list of Session objects for all connected debug probes. 
This method is useful for listing detailed information about connected probes, especially those that have associated boards, as the Session object will have a Board instance. @@ -77,7 +77,7 @@ def get_all_connected_probes( unique_id: Optional[str] = None, print_wait_message: bool = True ) -> List["DebugProbe"]: - """! @brief Return a list of DebugProbe objects for all connected debug probes. + """@brief Return a list of DebugProbe objects for all connected debug probes. The returned list of debug probes is always sorted by the combination of the probe's description and unique ID. @@ -117,7 +117,7 @@ def get_all_connected_probes( @staticmethod def list_connected_probes() -> None: - """! @brief List the connected debug probes. + """@brief List the connected debug probes. Prints a list of all connected probes to stdout. If no probes are connected, a message saying as much is printed instead. @@ -135,7 +135,7 @@ def choose_probe( return_first: bool = False, unique_id: str = None ) -> Optional["DebugProbe"]: - """! @brief Return a debug probe possibly chosen by the user. + """@brief Return a debug probe possibly chosen by the user. This method provides an easy to use command line interface for selecting one of the connected debug probes. It has parameters that control filtering of probes by unique ID and @@ -217,7 +217,7 @@ def session_with_chosen_probe( options: Optional[Mapping[str, Any]] = None, **kwargs ) -> Optional[Session]: - """! @brief Create a session with a probe possibly chosen by the user. + """@brief Create a session with a probe possibly chosen by the user. This method provides an easy to use command line interface for selecting one of the connected debug probes, then creating and opening a Session instance. 
It has several diff --git a/pyocd/core/memory_interface.py b/pyocd/core/memory_interface.py index cc34746d9..9e1e0f871 100644 --- a/pyocd/core/memory_interface.py +++ b/pyocd/core/memory_interface.py @@ -21,10 +21,10 @@ from ..utility import conversion class MemoryInterface: - """! @brief Interface for memory access.""" + """@brief Interface for memory access.""" def write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None: - """! @brief Write a single memory location. + """@brief Write a single memory location. By default the transfer size is a word.""" raise NotImplementedError() @@ -46,33 +46,33 @@ def read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Ca ... def read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Read a memory location. + """@brief Read a memory location. By default, a word will be read.""" raise NotImplementedError() def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: - """! @brief Write an aligned block of 32-bit words.""" + """@brief Write an aligned block of 32-bit words.""" raise NotImplementedError() def read_memory_block32(self, addr: int, size: int) -> Sequence[int]: - """! @brief Read an aligned block of 32-bit words.""" + """@brief Read an aligned block of 32-bit words.""" raise NotImplementedError() def write64(self, addr: int, value: int) -> None: - """! @brief Shorthand to write a 64-bit word.""" + """@brief Shorthand to write a 64-bit word.""" self.write_memory(addr, value, 64) def write32(self, addr: int, value: int) -> None: - """! @brief Shorthand to write a 32-bit word.""" + """@brief Shorthand to write a 32-bit word.""" self.write_memory(addr, value, 32) def write16(self, addr: int, value: int) -> None: - """! @brief Shorthand to write a 16-bit halfword.""" + """@brief Shorthand to write a 16-bit halfword.""" self.write_memory(addr, value, 16) def write8(self, addr: int, value: int) -> None: - """! 
@brief Shorthand to write a byte.""" + """@brief Shorthand to write a byte.""" self.write_memory(addr, value, 8) @overload @@ -92,7 +92,7 @@ def read64(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: ... def read64(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Shorthand to read a 64-bit word.""" + """@brief Shorthand to read a 64-bit word.""" return self.read_memory(addr, 64, now) @overload @@ -112,7 +112,7 @@ def read32(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: ... def read32(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Shorthand to read a 32-bit word.""" + """@brief Shorthand to read a 32-bit word.""" return self.read_memory(addr, 32, now) @overload @@ -132,7 +132,7 @@ def read16(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: ... def read16(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Shorthand to read a 16-bit halfword.""" + """@brief Shorthand to read a 16-bit halfword.""" return self.read_memory(addr, 16, now) @overload @@ -152,11 +152,11 @@ def read8(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: ... def read8(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Shorthand to read a byte.""" + """@brief Shorthand to read a byte.""" return self.read_memory(addr, 8, now) def read_memory_block8(self, addr: int, size: int) -> Sequence[int]: - """! @brief Read a block of unaligned bytes in memory. + """@brief Read a block of unaligned bytes in memory. @return an array of byte values """ res = [] @@ -197,7 +197,7 @@ def read_memory_block8(self, addr: int, size: int) -> Sequence[int]: return res def write_memory_block8(self, addr: int, data: Sequence[int]) -> None: - """! 
@brief Write a block of unaligned bytes in memory.""" + """@brief Write a block of unaligned bytes in memory.""" size = len(data) idx = 0 diff --git a/pyocd/core/memory_map.py b/pyocd/core/memory_map.py index b47777039..bd7201849 100644 --- a/pyocd/core/memory_map.py +++ b/pyocd/core/memory_map.py @@ -28,7 +28,7 @@ from ..flash.flash import Flash class MemoryType(Enum): - """! @brief Known types of memory.""" + """@brief Known types of memory.""" OTHER = 0 RAM = 1 ROM = 2 @@ -60,7 +60,7 @@ def check_range( @total_ordering class MemoryRangeBase: - """! @brief Base class for a range of memory. + """@brief Base class for a range of memory. This base class provides the basic address range support and methods to test for containment or intersection with another range. @@ -95,7 +95,7 @@ def contains_range( length: Optional[int] = None, range: Optional["MemoryRangeBase"] = None ) -> bool: - """! @return Whether the given range is fully contained by the region.""" + """@return Whether the given range is fully contained by the region.""" start, end = check_range(start, end, length, range) return self.contains_address(start) and self.contains_address(end) @@ -106,7 +106,7 @@ def contained_by_range( length: Optional[int] = None, range: Optional["MemoryRangeBase"] = None ) -> bool: - """! @return Whether the region is fully within the bounds of the given range.""" + """@return Whether the region is fully within the bounds of the given range.""" start, end = check_range(start, end, length, range) return start <= self.start and end >= self.end @@ -117,7 +117,7 @@ def intersects_range( length: Optional[int] = None, range: Optional["MemoryRangeBase"] = None ) -> bool: - """! 
@return Whether the region and the given range intersect at any point.""" + """@return Whether the region and the given range intersect at any point.""" start, end = check_range(start, end, length, range) return (start <= self.start and end >= self.start) or (start <= self.end and end >= self.end) \ or (start >= self.start and end <= self.end) @@ -132,7 +132,7 @@ def __lt__(self, other: "MemoryRangeBase") -> bool: return self.start < other.start or (self.start == other.start and self.length == other.length) class MemoryRange(MemoryRangeBase): - """! @brief A range of memory optionally tied to a region.""" + """@brief A range of memory optionally tied to a region.""" def __init__( self, start: int = 0, @@ -161,7 +161,7 @@ def __repr__(self) -> str: id(self), self.start, self.end, self.length, self.region) class MemoryRegion(MemoryRangeBase): - """! @brief One contiguous range of memory. + """@brief One contiguous range of memory. Memory regions have attributes accessible via the normal dot syntax. @@ -226,7 +226,7 @@ def __init__( length: Optional[int] = None, **attrs: Any ) -> None: - """! Memory region constructor. + """Memory region constructor. Memory regions are required to have non-zero lengths, unlike memory ranges. @@ -320,7 +320,7 @@ def __repr__(self) -> str: return "<%s@0x%x name=%s type=%s start=0x%x end=0x%x length=0x%x access=%s>" % (self.__class__.__name__, id(self), self.name, self.type, self.start, self.end, self.length, self.access) class RamRegion(MemoryRegion): - """! @brief Contiguous region of RAM.""" + """@brief Contiguous region of RAM.""" def __init__( self, start: int = 0, @@ -332,7 +332,7 @@ def __init__( super().__init__(start=start, end=end, length=length, **attrs) class RomRegion(MemoryRegion): - """! @brief Contiguous region of ROM.""" + """@brief Contiguous region of ROM.""" # Default attribute values for ROM regions. 
DEFAULT_ATTRS = MemoryRegion.DEFAULT_ATTRS.copy() @@ -351,13 +351,13 @@ def __init__( super().__init__(start=start, end=end, length=length, **attrs) class DefaultFlashWeights: - """! @brief Default weights for flash programming operations.""" + """@brief Default weights for flash programming operations.""" PROGRAM_PAGE_WEIGHT = 0.130 ERASE_SECTOR_WEIGHT = 0.048 ERASE_ALL_WEIGHT = 0.174 class FlashRegion(MemoryRegion): - """! @brief Contiguous region of flash memory. + """@brief Contiguous region of flash memory. Flash regions have a number of attributes in addition to those available in all region types. - `blocksize`: Erase sector size in bytes. @@ -471,7 +471,7 @@ def flash(self, flash_instance: "Flash") -> None: self._flash = flash_instance def is_data_erased(self, d: Iterable[int]) -> bool: - """! @brief Helper method to check if a block of data is erased. + """@brief Helper method to check if a block of data is erased. @param self @param d List of data or bytearray. @retval True The contents of d all match the erased byte value for this flash region. @@ -507,7 +507,7 @@ def __repr__(self) -> str: self.length, self.access, self.blocksize) class DeviceRegion(MemoryRegion): - """! @brief Device or peripheral memory.""" + """@brief Device or peripheral memory.""" # Default attribute values for device regions. DEFAULT_ATTRS = MemoryRegion.DEFAULT_ATTRS.copy() @@ -537,7 +537,7 @@ def __init__( } class MemoryMap(collections.abc.Sequence): - """! @brief Memory map consisting of memory regions. + """@brief Memory map consisting of memory regions. The normal way to create a memory map is to instantiate regions directly in the call to the constructor. @@ -565,7 +565,7 @@ class MemoryMap(collections.abc.Sequence): _regions: List[MemoryRegion] def __init__(self, *more_regions: Union[Sequence[MemoryRegion], MemoryRegion]) -> None: - """! @brief Constructor. + """@brief Constructor. 
All parameters passed to the constructor are assumed to be MemoryRegion instances, and are passed to add_regions(). The resulting memory map is sorted by region start address. @@ -578,7 +578,7 @@ def __init__(self, *more_regions: Union[Sequence[MemoryRegion], MemoryRegion]) - @property def regions(self) -> List[MemoryRegion]: - """! @brief List of all memory regions. + """@brief List of all memory regions. Regions in the returned list are sorted by start address. """ @@ -586,11 +586,11 @@ def regions(self) -> List[MemoryRegion]: @property def region_count(self) -> int: - """! @brief Number of memory regions in the map.""" + """@brief Number of memory regions in the map.""" return len(self._regions) def clone(self) -> "MemoryMap": - """! @brief Create a duplicate of the memory map. + """@brief Create a duplicate of the memory map. The duplicate memory map contains shallow copies of each of the regions. This is intended to be used so that `Target` objects in different but simultaneously live sessions have @@ -599,7 +599,7 @@ def clone(self) -> "MemoryMap": return MemoryMap(*[copy.copy(r) for r in self.regions]) def add_regions(self, *more_regions: Union[Sequence[MemoryRegion], MemoryRegion]) -> None: - """! @brief Add multiple regions to the memory map. + """@brief Add multiple regions to the memory map. There are two options for passing the list of regions to be added. The first is to pass each region as a separate parameter, similar to how the constructor is intended to be used. @@ -622,7 +622,7 @@ def add_regions(self, *more_regions: Union[Sequence[MemoryRegion], MemoryRegion] self.add_region(new_region) def add_region(self, new_region: MemoryRegion) -> None: - """! @brief Add one new region to the map. + """@brief Add one new region to the map. The region list is resorted after adding the provided region. @@ -640,7 +640,7 @@ def add_region(self, new_region: MemoryRegion) -> None: self._regions.sort() def remove_region(self, region: MemoryRegion) -> None: - """! 
@brief Removes a memory region from the map. + """@brief Removes a memory region from the map. @param self @param region The region to remove. The region to remove is matched by identity, not value, so this parameter must be the exact object that you wish to remove from the map. @@ -650,7 +650,7 @@ def remove_region(self, region: MemoryRegion) -> None: del self._regions[i] def get_boot_memory(self) -> Optional[MemoryRegion]: - """! @brief Returns the first region marked as boot memory. + """@brief Returns the first region marked as boot memory. @param self @return MemoryRegion or None. @@ -661,7 +661,7 @@ def get_boot_memory(self) -> Optional[MemoryRegion]: return None def get_region_for_address(self, address: int) -> Optional[MemoryRegion]: - """! @brief Returns the first region containing the given address. + """@brief Returns the first region containing the given address. @param self @param address An integer target address. @@ -673,7 +673,7 @@ def get_region_for_address(self, address: int) -> Optional[MemoryRegion]: return None def is_valid_address(self, address: int) -> bool: - """! @brief Determines whether an address is contained by any region. + """@brief Determines whether an address is contained by any region. @param self @param address An integer target address. @@ -688,7 +688,7 @@ def get_contained_regions( length: Optional[int] = None, range: Optional["MemoryRangeBase"] = None ) -> List[MemoryRegion]: - """! @brief Get all regions fully contained by an address range. + """@brief Get all regions fully contained by an address range. @param self @param start The start address or a MemoryRange object. @@ -708,7 +708,7 @@ def get_intersecting_regions( length: Optional[int] = None, range: Optional["MemoryRangeBase"] = None ) -> List[MemoryRegion]: - """! @brief Get all regions intersected by an address range. + """@brief Get all regions intersected by an address range. @param self @param start The start address or a MemoryRange object. 
@@ -722,7 +722,7 @@ def get_intersecting_regions( return [r for r in self._regions if r.intersects_range(start, end)] def iter_matching_regions(self, **kwargs: Any) -> Iterator[MemoryRegion]: - """! @brief Iterate over regions matching given criteria. + """@brief Iterate over regions matching given criteria. Useful attributes to match on include 'type', 'name', 'is_default', and others. @@ -746,7 +746,7 @@ def iter_matching_regions(self, **kwargs: Any) -> Iterator[MemoryRegion]: yield r def get_first_matching_region(self, **kwargs: Any) -> Optional[MemoryRegion]: - """! @brief Get the first region matching a given memory type. + """@brief Get the first region matching a given memory type. The region of given type with the lowest start address is returned. If there are no regions with that type, None is returned instead. @@ -760,7 +760,7 @@ def get_first_matching_region(self, **kwargs: Any) -> Optional[MemoryRegion]: return None def get_default_region_of_type(self, type: MemoryType) -> Optional[MemoryRegion]: - """! @brief Get the default region of a given memory type. + """@brief Get the default region of a given memory type. If there are multiple regions of the specified type marked as default, then the one with the lowest start address will be returned. None is returned if there are no default regions @@ -776,15 +776,15 @@ def __eq__(self, other: "MemoryMap") -> bool: return isinstance(other, MemoryMap) and (self._regions == other._regions) def __iter__(self) -> Iterator[MemoryRegion]: - """! @brief Enable iteration over the memory map.""" + """@brief Enable iteration over the memory map.""" return iter(self._regions) def __reversed__(self) -> Iterator[MemoryRegion]: - """! @brief Reverse iteration over the memory map.""" + """@brief Reverse iteration over the memory map.""" return reversed(self._regions) def __getitem__(self, key: Union[int, str]) -> MemoryRegion: - """! 
@brief Return a region indexed by name or number.""" + """@brief Return a region indexed by name or number.""" if isinstance(key, str): result = self.get_first_matching_region(name=key) if result is None: @@ -794,7 +794,7 @@ def __getitem__(self, key: Union[int, str]) -> MemoryRegion: return self._regions[key] def __len__(self) -> int: - """! @brief Return the number of regions.""" + """@brief Return the number of regions.""" return len(self._regions) def __contains__(self, key: Union[int, str, MemoryRegion]) -> bool: diff --git a/pyocd/core/options.py b/pyocd/core/options.py index 03f51140f..6b83a4812 100644 --- a/pyocd/core/options.py +++ b/pyocd/core/options.py @@ -180,7 +180,7 @@ OPTIONS_INFO = {} def add_option_set(options): - """! @brief Merge a list of OptionInfo objects into OPTIONS_INFO.""" + """@brief Merge a list of OptionInfo objects into OPTIONS_INFO.""" OPTIONS_INFO.update({oi.name: oi for oi in options}) # Start with only builtin options. diff --git a/pyocd/core/options_manager.py b/pyocd/core/options_manager.py index 4e9403e3c..862a44595 100644 --- a/pyocd/core/options_manager.py +++ b/pyocd/core/options_manager.py @@ -58,13 +58,13 @@ class OptionsManager(Notifier): _layers: List[Dict[str, Any]] def __init__(self) -> None: - """! @brief Option manager constructor. + """@brief Option manager constructor. """ super().__init__() self._layers = [] def _update_layers(self, new_options: Optional[LayerType], update_operation: Callable[[LayerType], None]) -> None: - """! @brief Internal method to add a new layer dictionary. + """@brief Internal method to add a new layer dictionary. @param self @param new_options Dictionary of option values. @@ -80,7 +80,7 @@ def _update_layers(self, new_options: Optional[LayerType], update_operation: Cal self._notify_changes(previous_values, new_values) def add_front(self, new_options: Optional[LayerType]) -> None: - """! @brief Add a new highest priority layer of option values. 
+ """@brief Add a new highest priority layer of option values. @param self @param new_options Dictionary of option values. @@ -88,7 +88,7 @@ def add_front(self, new_options: Optional[LayerType]) -> None: self._update_layers(new_options, partial(self._layers.insert, 0)) def add_back(self, new_options: Optional[LayerType]) -> None: - """! @brief Add a new lowest priority layer of option values. + """@brief Add a new lowest priority layer of option values. @param self @param new_options Dictionary of option values. @@ -96,7 +96,7 @@ def add_back(self, new_options: Optional[LayerType]) -> None: self._update_layers(new_options, self._layers.append) def _convert_options(self, new_options: LayerType) -> LayerType: - """! @brief Prepare a dictionary of session options for use by the manager. + """@brief Prepare a dictionary of session options for use by the manager. 1. Strip dictionary entries with a value of None. 2. Replace double-underscores ("__") with a dot ("."). @@ -112,7 +112,7 @@ def _convert_options(self, new_options: LayerType) -> LayerType: return output def is_set(self, key: str) -> bool: - """! @brief Return whether a value is set for the specified option. + """@brief Return whether a value is set for the specified option. This method returns True as long as any layer has a value set for the option, even if the value is the same as the default value. If the option is not set in any layer, then False is @@ -124,45 +124,45 @@ def is_set(self, key: str) -> bool: return False def get_default(self, key: str) -> Any: - """! @brief Return the default value for the specified option.""" + """@brief Return the default value for the specified option.""" if key in OPTIONS_INFO: return OPTIONS_INFO[key].default else: return None def get(self, key: str) -> Any: - """! 
@brief Return the highest priority value for the option, or its default.""" + """@brief Return the highest priority value for the option, or its default.""" for layer in self._layers: if key in layer: return layer[key] return self.get_default(key) def set(self, key: str, value: Any) -> None: - """! @brief Set an option in the current highest priority layer.""" + """@brief Set an option in the current highest priority layer.""" self.update({key: value}) def update(self, new_options: LayerType) -> None: - """! @brief Set multiple options in the current highest priority layer.""" + """@brief Set multiple options in the current highest priority layer.""" filtered_options = self._convert_options(new_options) previous_values = {name: self.get(name) for name in filtered_options.keys()} self._layers[0].update(filtered_options) self._notify_changes(previous_values, filtered_options) def _notify_changes(self, previous: LayerType, options: LayerType) -> None: - """! @brief Send notifications that the specified options have changed.""" + """@brief Send notifications that the specified options have changed.""" for name, new_value in options.items(): previous_value = previous[name] if new_value != previous_value: self.notify(name, data=OptionChangeInfo(new_value, previous_value)) def __contains__(self, key: str) -> bool: - """! @brief Returns whether the named option has a non-default value.""" + """@brief Returns whether the named option has a non-default value.""" return self.is_set(key) def __getitem__(self, key: str) -> Any: - """! @brief Return the highest priority value for the option, or its default.""" + """@brief Return the highest priority value for the option, or its default.""" return self.get(key) def __setitem__(self, key: str, value: Any) -> None: - """! 
@brief Set an option in the current highest priority layer.""" + """@brief Set an option in the current highest priority layer.""" self.set(key, value) diff --git a/pyocd/core/plugin.py b/pyocd/core/plugin.py index 63defaa79..d86ad1155 100644 --- a/pyocd/core/plugin.py +++ b/pyocd/core/plugin.py @@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__) class Plugin: - """! @brief Class that describes a plugin for pyOCD. + """@brief Class that describes a plugin for pyOCD. Each plugin vends a subclass of Plugin that describes itself and provides meta-actions. @@ -42,11 +42,11 @@ class Plugin: """ def should_load(self) -> bool: - """! @brief Whether the plugin should be loaded.""" + """@brief Whether the plugin should be loaded.""" return True def load(self) -> Any: - """! @brief Load the plugin and return the plugin implementation. + """@brief Load the plugin and return the plugin implementation. This method can perform any actions required to load the plugin beyond simply returning the implementation. @@ -57,14 +57,14 @@ def load(self) -> Any: @property def options(self) -> List[OptionInfo]: - """! @brief A list of options added by the plugin. + """@brief A list of options added by the plugin. @return List of @ref pyocd.core.options.OptionInfo "OptionInfo" objects. """ return [] @property def version(self) -> str: - """! @brief Current release version of the plugin. + """@brief Current release version of the plugin. The default implementation returns pyOCD's version. @@ -74,16 +74,16 @@ def version(self) -> str: @property def name(self) -> str: - """! @brief Name of the plugin.""" + """@brief Name of the plugin.""" raise NotImplementedError() @property def description(self) -> str: - """! @brief Short description of the plugin.""" + """@brief Short description of the plugin.""" return "" def load_plugin_classes_of_type(plugin_group: str, plugin_dict: Dict[str, Any], base_class: type) -> None: - """! @brief Helper method to load plugins. 
+ """@brief Helper method to load plugins. Plugins are expected to return an implementation class from their Plugin.load() method. This class must be derived from `base_class`. diff --git a/pyocd/core/session.py b/pyocd/core/session.py index d1cd457c4..20e1bdae1 100644 --- a/pyocd/core/session.py +++ b/pyocd/core/session.py @@ -54,7 +54,7 @@ ] class Session(Notifier): - """! @brief Top-level object for a debug session. + """@brief Top-level object for a debug session. This class represents a debug session with a single debug probe. It is the root of the object graph, where it owns the debug probe and the board objects. @@ -95,7 +95,7 @@ class Session(Notifier): @classmethod def get_current(cls) -> "Session": - """! @brief Return the most recently created Session instance or a default Session. + """@brief Return the most recently created Session instance or a default Session. By default this method will return the most recently created Session object that is still alive. If no live session exists, a new default session will be created and returned. @@ -120,7 +120,7 @@ def __init__( option_defaults: Optional[Mapping[str, Any]] = None, **kwargs ) -> None: - """! @brief Session constructor. + """@brief Session constructor. Creates a new session using the provided debug probe. Session options are merged from the _options_ parameter and any keyword arguments. Normally a board instance is created that can @@ -236,7 +236,7 @@ def _get_config(self) -> Dict[str, Any]: return {} def find_user_file(self, option_name: Optional[str], filename_list: List[str]) -> Optional[str]: - """! @brief Search the project directory for a file. + """@brief Search the project directory for a file. @retval None No matching file was found. @retval string An absolute path to the requested file. @@ -263,7 +263,7 @@ def find_user_file(self, option_name: Optional[str], filename_list: List[str]) - return filePath def _configure_logging(self) -> None: - """! 
@brief Load a logging config dict or file.""" + """@brief Load a logging config dict or file.""" # Get logging config that could have been loaded from the config file. config_value = self.options.get('logging') @@ -303,22 +303,22 @@ def _configure_logging(self) -> None: @property def is_open(self) -> bool: - """! @brief Boolean of whether the session has been opened.""" + """@brief Boolean of whether the session has been opened.""" return self._inited and not self._closed @property def probe(self) -> Optional["DebugProbe"]: - """! @brief The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" instance.""" + """@brief The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" instance.""" return self._probe @property def board(self) -> Optional[Board]: - """! @brief The @ref pyocd.board.board.Board "Board" object.""" + """@brief The @ref pyocd.board.board.Board "Board" object.""" return self._board @property def target(self) -> Optional["SoCTarget"]: - """! @brief The @ref pyocd.core.target.soc_target "SoCTarget" object representing the SoC. + """@brief The @ref pyocd.core.target.soc_target "SoCTarget" object representing the SoC. This is the @ref pyocd.core.target.soc_target "SoCTarget" instance owned by the board. """ @@ -326,27 +326,27 @@ def target(self) -> Optional["SoCTarget"]: @property def options(self) -> OptionsManager: - """! @brief The @ref pyocd.core.options_manager.OptionsManager "OptionsManager" object.""" + """@brief The @ref pyocd.core.options_manager.OptionsManager "OptionsManager" object.""" return self._options @property def project_dir(self) -> str: - """! @brief Path to the project directory.""" + """@brief Path to the project directory.""" return self._project_dir @property def delegate(self) -> Any: - """! @brief An optional delegate object for customizing behaviour.""" + """@brief An optional delegate object for customizing behaviour.""" return self._delegate @delegate.setter def delegate(self, new_delegate: Any) -> None: - """! 
@brief Setter for the `delegate` property.""" + """@brief Setter for the `delegate` property.""" self._delegate = new_delegate @property def user_script_proxy(self) -> "UserScriptDelegateProxy": - """! @brief The UserScriptDelegateProxy object for a loaded user script.""" + """@brief The UserScriptDelegateProxy object for a loaded user script.""" # Create a proxy if there isn't already one. This is a fallback in case there isn't a user script, # yet a Python $-command is executed and needs the user script namespace in which to run. if not self._user_script_proxy: @@ -361,22 +361,22 @@ def user_script_print_proxy(self) -> "PrintProxy": @property def gdbservers(self) -> Dict[int, "GDBServer"]: - """! @brief Dictionary of core numbers to @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances.""" + """@brief Dictionary of core numbers to @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances.""" return self._gdbservers @property def probeserver(self) -> Optional["DebugProbeServer"]: - """! @brief A @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer" instance.""" + """@brief A @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer" instance.""" return self._probeserver @probeserver.setter def probeserver(self, server: "DebugProbeServer") -> None: - """! @brief Setter for the `probeserver` property.""" + """@brief Setter for the `probeserver` property.""" self._probeserver = server @property def log_tracebacks(self) -> bool: - """! @brief Quick access to debug.traceback option since it is widely used.""" + """@brief Quick access to debug.traceback option since it is widely used.""" return cast(bool, self.options.get('debug.traceback')) def __enter__(self) -> "Session": @@ -394,7 +394,7 @@ def __exit__(self, exc_type: type, value: Any, traceback: "TracebackType") -> bo return False def _init_user_script_namespace(self, script_name: str, script_path: str) -> None: - """! @brief Create the namespace dict used for user scripts. 
+ """@brief Create the namespace dict used for user scripts. This initial namespace has only those objects that are available very early in the session init process. For instance, the Target instance isn't available yet. The @@ -458,7 +458,7 @@ def _init_user_script_namespace(self, script_name: str, script_path: str) -> Non } def _update_user_script_namespace(self) -> None: - """! @brief Add objects available only after init to the user script namespace.""" + """@brief Add objects available only after init to the user script namespace.""" if self._user_script_namespace is not None: self._user_script_namespace.update({ 'probe': self.probe, @@ -496,7 +496,7 @@ def _load_user_script(self) -> None: LOG.warning("Error attempting to load user script '%s': %s", script_path, err) def open(self, init_board: bool = True) -> None: - """! @brief Open the session. + """@brief Open the session. This method does everything necessary to begin a debug session. It first loads the user script, if there is one. The user script will be available via the _user_script_proxy_ @@ -524,7 +524,7 @@ def open(self, init_board: bool = True) -> None: self._inited = True def close(self) -> None: - """! @brief Close the session. + """@brief Close the session. Uninits the board and disconnects then closes the probe. """ @@ -554,7 +554,7 @@ def close(self) -> None: LOG.error("probe exception during close:", exc_info=self.log_tracebacks) class UserScriptFunctionProxy: - """! @brief Proxy for user script functions. + """@brief Proxy for user script functions. This proxy makes arguments to user script functions optional. """ @@ -572,7 +572,7 @@ def __call__(self, **kwargs: Any) -> Any: self._fn(**args) class UserScriptDelegateProxy: - """! 
@brief Delegate proxy for user scripts.""" + """@brief Delegate proxy for user scripts.""" def __init__(self, script_namespace: Dict) -> None: super().__init__() diff --git a/pyocd/core/soc_target.py b/pyocd/core/soc_target.py index 823fdec54..6d8412ba3 100644 --- a/pyocd/core/soc_target.py +++ b/pyocd/core/soc_target.py @@ -37,7 +37,7 @@ LOG = logging.getLogger(__name__) class SoCTarget(TargetGraphNode): - """! @brief Represents a microcontroller system-on-chip. + """@brief Represents a microcontroller system-on-chip. An instance of this class is the root of the chip-level object graph. It has child nodes for the DP and all cores. As a concrete subclass of Target, it provides methods @@ -148,7 +148,7 @@ def init(self) -> None: self.call_delegate('did_init_target', target=self) def post_connect_hook(self) -> None: - """! @brief Hook function called after post_connect init task. + """@brief Hook function called after post_connect init task. This hook lets the target subclass configure the target as necessary. """ diff --git a/pyocd/core/target.py b/pyocd/core/target.py index 47c076b22..c43588542 100644 --- a/pyocd/core/target.py +++ b/pyocd/core/target.py @@ -34,7 +34,7 @@ class Target(MemoryInterface): class State(Enum): - """! @brief States a target processor can be in.""" + """@brief States a target processor can be in.""" ## Core is executing code. RUNNING = 1 ## Core is halted in debug mode. @@ -47,14 +47,14 @@ class State(Enum): LOCKUP = 5 class SecurityState(Enum): - """! @brief Security states for a processor with the Security extension.""" + """@brief Security states for a processor with the Security extension.""" ## PE is in the Non-secure state. NONSECURE = 0 ## PE is in the Secure state. SECURE = 1 class ResetType(Enum): - """! @brief Available reset methods.""" + """@brief Available reset methods.""" ## Hardware reset via the nRESET signal. HW = 1 ## Software reset using the core's default software reset method. 
@@ -73,7 +73,7 @@ class ResetType(Enum): SW_EMULATED = 5 class BreakpointType(Enum): - """! @brief Types of breakpoints.""" + """@brief Types of breakpoints.""" ## Hardware breakpoint. HW = 1 ## Software breakpoint. @@ -82,7 +82,7 @@ class BreakpointType(Enum): AUTO = 3 class WatchpointType(Enum): - """! @brief Types of watchpoints.""" + """@brief Types of watchpoints.""" ## Watchpoint on read accesses. READ = 1 ## Watchpoint on write accesses. @@ -91,7 +91,7 @@ class WatchpointType(Enum): READ_WRITE = 3 class VectorCatch: - """! Vector catch option masks. + """Vector catch option masks. These constants can be OR'd together to form any combination of vector catch settings. """ @@ -121,7 +121,7 @@ class VectorCatch: | SECURE_FAULT) class Event(Enum): - """! Target notification events.""" + """Target notification events.""" ## Sent after completing the initialisation sequence. POST_CONNECT = 1 ## Sent prior to disconnecting cores and powering down the DP. @@ -155,7 +155,7 @@ class Event(Enum): POST_FLASH_PROGRAM = 10 class RunType(Enum): - """! Run type for run notifications. + """Run type for run notifications. An enum of this type is set as the data attribute on PRE_RUN and POST_RUN notifications. """ @@ -165,7 +165,7 @@ class RunType(Enum): STEP = 2 class HaltReason(Enum): - """! Halt type for halt notifications. + """Halt type for halt notifications. An value of this type is returned from Target.get_halt_reason(). It is also used as the data attribute on PRE_HALT and POST_HALT notifications. diff --git a/pyocd/core/target_delegate.py b/pyocd/core/target_delegate.py index 333a8ff80..df0874c55 100644 --- a/pyocd/core/target_delegate.py +++ b/pyocd/core/target_delegate.py @@ -25,14 +25,14 @@ from ..utility.sequencer import CallSequence ## @brief Return type for some delegate methods. 
-# +# # For certain delegate method, the delegate can reply with a boolean or None, where True means that # it handled the actions and no further action is to be performed, and False or None means to continue # processing. DelegateResult = Optional[bool] class TargetDelegateInterface: - """! @brief Abstract class defining the delegate interface for targets. + """@brief Abstract class defining the delegate interface for targets. Note that delegates don't actually have to derive from this class due to Python's dynamic method dispatching. The primary purpose of this class is for documentation @@ -43,7 +43,7 @@ def __init__(self, session: "Session") -> None: self._session = session def will_connect(self, board: "Board") -> None: - """! @brief Pre-init hook for the board. + """@brief Pre-init hook for the board. @param self @param board A Board instance that is about to be initialized. @return Ignored. @@ -51,7 +51,7 @@ def will_connect(self, board: "Board") -> None: pass def did_connect(self, board: "Board") -> None: - """! @brief Post-initialization hook for the board. + """@brief Post-initialization hook for the board. @param self @param board A Board instance. @return Ignored. @@ -59,7 +59,7 @@ def did_connect(self, board: "Board") -> None: pass def will_init_target(self, target: "SoCTarget", init_sequence: "CallSequence") -> None: - """! @brief Hook to review and modify init call sequence prior to execution. + """@brief Hook to review and modify init call sequence prior to execution. @param self @param target An SoCTarget object about to be initialized. @param init_sequence The CallSequence that will be invoked. Because call sequences are @@ -69,7 +69,7 @@ def will_init_target(self, target: "SoCTarget", init_sequence: "CallSequence") - pass def did_init_target(self, target: "SoCTarget") -> None: - """! @brief Post-initialization hook. + """@brief Post-initialization hook. @param self @param target An SoCTarget. @return Ignored. 
@@ -77,7 +77,7 @@ def did_init_target(self, target: "SoCTarget") -> None: pass def will_start_debug_core(self, core: "Target") -> DelegateResult: - """! @brief Hook to enable debug for the given core. + """@brief Hook to enable debug for the given core. @param self @param core A CortexM object about to be initialized. @retval True Do not perform the normal procedure to start core debug. @@ -86,7 +86,7 @@ def will_start_debug_core(self, core: "Target") -> DelegateResult: pass def did_start_debug_core(self, core: "Target") -> None: - """! @brief Post-initialization hook. + """@brief Post-initialization hook. @param self @param core A CortexM object. @return Ignored. @@ -94,7 +94,7 @@ def did_start_debug_core(self, core: "Target") -> None: pass def will_stop_debug_core(self, core: "Target") -> DelegateResult: - """! @brief Pre-cleanup hook for the core. + """@brief Pre-cleanup hook for the core. @param self @param core A CortexM object. @retval True Do not perform the normal procedure to disable core debug. @@ -103,7 +103,7 @@ def will_stop_debug_core(self, core: "Target") -> DelegateResult: pass def did_stop_debug_core(self, core: "Target") -> None: - """! @brief Post-cleanup hook for the core. + """@brief Post-cleanup hook for the core. @param self @param core A CortexM object. @return Ignored. @@ -111,7 +111,7 @@ def did_stop_debug_core(self, core: "Target") -> None: pass def will_disconnect(self, target: "SoCTarget", resume: bool) -> None: - """! @brief Pre-disconnect hook. + """@brief Pre-disconnect hook. @param self @param target Either a CoreSightTarget or CortexM object. @param resume The value of the `disconnect_on_resume` option. @@ -120,7 +120,7 @@ def will_disconnect(self, target: "SoCTarget", resume: bool) -> None: pass def did_disconnect(self, target: "SoCTarget", resume: bool) -> None: - """! @brief Post-disconnect hook. + """@brief Post-disconnect hook. @param self @param target Either a CoreSightTarget or CortexM object. 
@param resume The value of the `disconnect_on_resume` option. @@ -128,7 +128,7 @@ def did_disconnect(self, target: "SoCTarget", resume: bool) -> None: pass def will_reset(self, core: "Target", reset_type: "Target.ResetType") -> DelegateResult: - """! @brief Pre-reset hook. + """@brief Pre-reset hook. @param self @param core A CortexM instance. @param reset_type One of the Target.ResetType enumerations. @@ -138,7 +138,7 @@ def will_reset(self, core: "Target", reset_type: "Target.ResetType") -> Delegate pass def did_reset(self, core: "Target", reset_type: "Target.ResetType") -> None: - """! @brief Post-reset hook. + """@brief Post-reset hook. @param self @param core A CortexM instance. @param reset_type One of the Target.ResetType enumerations. @@ -147,7 +147,7 @@ def did_reset(self, core: "Target", reset_type: "Target.ResetType") -> None: pass def set_reset_catch(self, core: "Target", reset_type: "Target.ResetType") -> DelegateResult: - """! @brief Hook to prepare target for halting on reset. + """@brief Hook to prepare target for halting on reset. @param self @param core A CortexM instance. @param reset_type One of the Target.ResetType enumerations. @@ -157,7 +157,7 @@ def set_reset_catch(self, core: "Target", reset_type: "Target.ResetType") -> Del pass def clear_reset_catch(self, core: "Target", reset_type: "Target.ResetType") -> None: - """! @brief Hook to clean up target after a reset and halt. + """@brief Hook to clean up target after a reset and halt. @param self @param core A CortexM instance. @param reset_type @@ -166,7 +166,7 @@ def clear_reset_catch(self, core: "Target", reset_type: "Target.ResetType") -> N pass def mass_erase(self, target: "SoCTarget") -> DelegateResult: - """! @brief Hook to override mass erase. + """@brief Hook to override mass erase. @param self @param target A CoreSightTarget object. @retval True Indicate that mass erase was performed by the hook. 
@@ -176,7 +176,7 @@ def mass_erase(self, target: "SoCTarget") -> DelegateResult: pass def trace_start(self, target: "SoCTarget", mode: int) -> None: - """! @brief Hook to prepare for tracing the target. + """@brief Hook to prepare for tracing the target. @param self @param target A CoreSightTarget object. @param mode The trace mode. Currently always 0 to indicate SWO. @@ -185,7 +185,7 @@ def trace_start(self, target: "SoCTarget", mode: int) -> None: pass def trace_stop(self, target: "SoCTarget", mode: int) -> None: - """! @brief Hook to clean up after tracing the target. + """@brief Hook to clean up after tracing the target. @param self @param target A CoreSightTarget object. @param mode The trace mode. Currently always 0 to indicate SWO. diff --git a/pyocd/coresight/ap.py b/pyocd/coresight/ap.py index 67dd7ac93..822756c65 100644 --- a/pyocd/coresight/ap.py +++ b/pyocd/coresight/ap.py @@ -173,7 +173,7 @@ DEMCR_TRCENA = (1 << 24) class APVersion(Enum): - """! @brief Supported versions of APs.""" + """@brief Supported versions of APs.""" ## APv1 from ADIv5.x. APv1 = 1 ## APv2 from ADIv6. @@ -181,7 +181,7 @@ class APVersion(Enum): @total_ordering class APAddressBase: - """! @brief Base class for AP addresses. + """@brief Base class for AP addresses. An instance of this class has a "nominal address", which is an integer address in terms of how it is typically referenced. For instance, for an APv1, the nominal address is the unshifted @@ -201,24 +201,24 @@ class APAddressBase: """ def __init__(self, address: int) -> None: - """! @brief Constructor accepting the nominal address.""" + """@brief Constructor accepting the nominal address.""" self._nominal_address = address @property def ap_version(self) -> APVersion: - """! @brief Version of the AP, as an APVersion enum.""" + """@brief Version of the AP, as an APVersion enum.""" raise NotImplementedError() @property def nominal_address(self) -> int: - """! @brief Integer AP address in the form in which one speaks about it. 
+ """@brief Integer AP address in the form in which one speaks about it. This value is used for comparisons and hashing.""" return self._nominal_address @property def address(self) -> int: - """! @brief Integer AP address used as a base for register accesses. + """@brief Integer AP address used as a base for register accesses. This value can be passed to the DebugPort's read_ap() or write_ap() methods. Offsets of registers can be added to this value to create register addresses.""" @@ -226,7 +226,7 @@ def address(self) -> int: @property def idr_address(self) -> int: - """! @brief Address of the IDR register.""" + """@brief Address of the IDR register.""" raise NotImplementedError() def __hash__(self) -> int: @@ -247,7 +247,7 @@ def __repr__(self) -> str: return "<{}@{:#x} {}>".format(self.__class__.__name__, id(self), str(self)) class APv1Address(APAddressBase): - """! @brief Represents the address for an APv1. + """@brief Represents the address for an APv1. The nominal address is the 8-bit APSEL value. This is written into the top byte of the DP SELECT register to select the AP to communicate with. @@ -255,12 +255,12 @@ class APv1Address(APAddressBase): @property def ap_version(self) -> APVersion: - """! @brief APVersion.APv1.""" + """@brief APVersion.APv1.""" return APVersion.APv1 @property def apsel(self) -> int: - """! @brief Alias for the _nominal_address_ property.""" + """@brief Alias for the _nominal_address_ property.""" return self._nominal_address @property @@ -269,14 +269,14 @@ def address(self) -> int: @property def idr_address(self) -> int: - """! @brief Address of the IDR register.""" + """@brief Address of the IDR register.""" return AP_IDR def __str__(self) -> str: return "#%d" % self.apsel class APv2Address(APAddressBase): - """! @brief Represents the address for an APv2. + """@brief Represents the address for an APv2. ADIv6 uses an APB bus to communicate with APv2 instances. The nominal address is simply the base address of the APB slave. 
The APB bus address width is variable from 12-52 bits in 8-bit steps. @@ -286,7 +286,7 @@ class APv2Address(APAddressBase): @property def ap_version(self) -> APVersion: - """! @brief Returns APVersion.APv2.""" + """@brief Returns APVersion.APv2.""" return APVersion.APv2 @property @@ -295,18 +295,18 @@ def address(self) -> int: @property def idr_address(self) -> int: - """! @brief Address of the IDR register.""" + """@brief Address of the IDR register.""" return APv2_IDR def __str__(self) -> str: return "@0x%x" % self.address class AccessPort: - """! @brief Base class for a CoreSight Access Port (AP) instance.""" + """@brief Base class for a CoreSight Access Port (AP) instance.""" @staticmethod def probe(dp: "DebugPort", ap_num: int) -> bool: - """! @brief Determine if an AP exists with the given AP number. + """@brief Determine if an AP exists with the given AP number. Only applicable for ADIv5. @@ -323,7 +323,7 @@ def create( ap_address: APAddressBase, cmpid: Optional["CoreSightComponentID"] = None ) -> "AccessPort": - """! @brief Create a new AP object. + """@brief Create a new AP object. Determines the type of the AP by examining the IDR value and creates a new AP object of the appropriate class. See #AP_TYPE_MAP for the mapping of IDR @@ -375,7 +375,7 @@ def __init__( flags: int = 0, cmpid: Optional["CoreSightComponentID"] = None ) -> None: - """! @brief AP constructor. + """@brief AP constructor. @param self @param dp The DebugPort object. @param ap_address APAddress object with address of this AP. @@ -402,7 +402,7 @@ def short_description(self) -> str: @property def ap_version(self) -> APVersion: - """! @brief The AP's major version determined by ADI version. + """@brief The AP's major version determined by ADI version. @retval APVersion.APv1 @retval APVersion.APv2 """ @@ -428,7 +428,7 @@ def init(self) -> None: LOG.info("%s IDR = 0x%08x (%s)", self.short_description, self.idr, desc) def find_components(self) -> None: - """! 
@brief Find CoreSight components attached to this AP.""" + """@brief Find CoreSight components attached to this AP.""" pass @overload @@ -456,16 +456,16 @@ def write_reg(self, addr: int, data: int) -> None: self.dp.write_ap(self.address.address + addr, data) def lock(self) -> None: - """! @brief Lock the AP from access by other threads.""" + """@brief Lock the AP from access by other threads.""" self.dp.probe.lock() def unlock(self) -> None: - """! @brief Unlock the AP.""" + """@brief Unlock the AP.""" self.dp.probe.unlock() @contextmanager def locked(self) -> Generator[None, None, None]: - """! @brief Context manager for locking the AP using a with statement. + """@brief Context manager for locking the AP using a with statement. All public methods of AccessPort and its subclasses are automatically locked, so manual locking usually is not necessary unless you need to hold the lock across multiple AP @@ -480,7 +480,7 @@ def __repr__(self) -> str: self.__class__.__name__, id(self), self.short_description, self.idr, self.rom_addr) class MEM_AP(AccessPort, memory_interface.MemoryInterface): - """! @brief MEM-AP component. + """@brief MEM-AP component. This class supports MEM-AP v1 and v2. @@ -575,12 +575,12 @@ def __init__( @property def supported_transfer_sizes(self) -> Set[int]: - """! @brief Tuple of transfer sizes supported by this AP.""" + """@brief Tuple of transfer sizes supported by this AP.""" return self._transfer_sizes @property def is_enabled(self) -> bool: - """! @brief Whether any memory transfers are allowed by this AP. + """@brief Whether any memory transfers are allowed by this AP. Memory transfers may be disabled by an input signal to the AP. This is often done when debug security is enabled on the device, to disallow debugger access to internal memory. @@ -588,7 +588,7 @@ def is_enabled(self) -> bool: return self.is_enabled_for(Target.SecurityState.NONSECURE) def is_enabled_for(self, security_state: Target.SecurityState) -> bool: - """! 
@brief Checks whether memory transfers are allowed by this AP for the given security state. + """@brief Checks whether memory transfers are allowed by this AP for the given security state. Memory transfers may be disabled by an input signal to the AP. This is often done when debug security is enabled on the device, to disallow debugger access to internal memory. @@ -613,7 +613,7 @@ def is_enabled_for(self, security_state: Target.SecurityState) -> bool: @locked def init(self) -> None: - """! @brief Initialize the MEM-AP. + """@brief Initialize the MEM-AP. This method interrogates the MEM-AP to determine its capabilities, and performs any initial setup that is required. @@ -637,7 +637,7 @@ def init(self) -> None: original_csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW) def _init_cfg() -> None: - """! @brief Read MEM-AP CFG register.""" + """@brief Read MEM-AP CFG register.""" cfg = self.read_reg(self._reg_offset + MEM_AP_CFG) # Check for 64-bit address support. @@ -671,7 +671,7 @@ def _init_cfg() -> None: self._dar_count = (1 << darsize) // 4 def _init_transfer_sizes() -> None: - """! @brief Determine supported transfer sizes. + """@brief Determine supported transfer sizes. If the #AP_ALL_TX_SZ flag is set, then we know a priori that this AP implementation supports 8-, 16- and 32- transfer sizes. If the Large Data extension is implemented, then this @@ -693,7 +693,7 @@ def _init_transfer_sizes() -> None: return def _test_transfer_size(sz): - """! @brief Utility to verify whether the MEM-AP supports a given transfer size. + """@brief Utility to verify whether the MEM-AP supports a given transfer size. From ADIv6: If the CSW.Size field is written with a value corresponding to a size that is not supported, @@ -722,7 +722,7 @@ def _test_transfer_size(sz): self._transfer_sizes = {8, 16, 32} def _init_hprot() -> None: - """! @brief Init HPROT HNONSEC. + """@brief Init HPROT HNONSEC. Determines the implemented bits of HPROT and HNONSEC in this MEM-AP. 
The defaults for these fields of the CSW are based on the implemented bits. @@ -746,7 +746,7 @@ def _init_hprot() -> None: self.hnonsec = self._hnonsec & self._impl_hnonsec def _init_rom_table_base() -> None: - """! @brief Read ROM table base address.""" + """@brief Read ROM table base address.""" base = self.read_reg(self._reg_offset + MEM_AP_BASE) is_adiv5_base = (base & AP_BASE_FORMAT_MASK) != 0 is_base_present = (base & AP_BASE_ENTRY_PRESENT_MASK) != 0 @@ -810,7 +810,7 @@ def hprot(self) -> int: @hprot.setter @locked def hprot(self, value: int) -> None: - """! @brief Setter for current HPROT value used for memory transactions. + """@brief Setter for current HPROT value used for memory transactions. The bits of HPROT have the following meaning. Not all bits are implemented in all MEM-APs. AHB-Lite only implements HPROT[3:0]. @@ -835,7 +835,7 @@ def hnonsec(self) -> int: @hnonsec.setter @locked def hnonsec(self, value: int) -> None: - """! @brief Setter for current HNONSEC value used for memory transactions. + """@brief Setter for current HNONSEC value used for memory transactions. Not all MEM-APs support control of HNONSEC. In particular, only the AHB5-AP used for v8-M Cortex-M systems does. The AXI-AP for Cortex-A systems also allows this control. @@ -848,7 +848,7 @@ def hnonsec(self, value: int) -> None: | (self._hnonsec << CSW_HNONSEC_SHIFT)) class _MemAttrContext: - """! @brief Context manager for temporarily setting HPROT and/or HNONSEC. + """@brief Context manager for temporarily setting HPROT and/or HNONSEC. The AP is locked during the lifetime of the context manager. This means that only the calling thread can perform memory transactions. @@ -878,22 +878,22 @@ def __exit__(self, exc_type: type, value: Any, traceback: "TracebackType") -> No self._ap.unlock() def hprot_lock(self, hprot: int) -> _MemAttrContext: - """! 
@brief Context manager to temporarily change HPROT.""" + """@brief Context manager to temporarily change HPROT.""" return self._MemAttrContext(self, hprot=hprot) def hnonsec_lock(self, hnonsec: int) -> _MemAttrContext: - """! @brief Context manager to temporarily change HNONSEC. + """@brief Context manager to temporarily change HNONSEC. @see secure_lock(), nonsecure_lock() """ return self._MemAttrContext(self, hnonsec=hnonsec) def secure_lock(self) -> _MemAttrContext: - """! @brief Context manager to temporarily set the AP to use secure memory transfers.""" + """@brief Context manager to temporarily set the AP to use secure memory transfers.""" return self.hnonsec_lock(SECURE) def nonsecure_lock(self) -> _MemAttrContext: - """! @brief Context manager to temporarily set AP to use non-secure memory transfers.""" + """@brief Context manager to temporarily set AP to use non-secure memory transfers.""" return self.hnonsec_lock(NONSECURE) @overload @@ -942,17 +942,17 @@ def write_reg(self, addr: int, data: int) -> None: raise def _invalidate_cache(self) -> None: - """! @brief Invalidate cached registers associated with this AP.""" + """@brief Invalidate cached registers associated with this AP.""" self._cached_csw = -1 def _reset_did_occur(self, notification: "Notification") -> None: - """! @brief Handles reset notifications to invalidate CSW cache.""" + """@brief Handles reset notifications to invalidate CSW cache.""" # We clear the cache on all resets just to be safe. self._invalidate_cache() @locked def _write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None: - """! @brief Write a single memory location. + """@brief Write a single memory location. By default the transfer size is a word @@ -1013,7 +1013,7 @@ def _read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, C @locked def _read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Read a memory location. 
+ """@brief Read a memory location. By default, a word will be read. @@ -1078,7 +1078,7 @@ def read_mem_cb() -> int: return read_mem_cb def _write_block32_page(self, addr: int, data: Sequence[int]) -> None: - """! @brief Write a single transaction's worth of aligned words. + """@brief Write a single transaction's worth of aligned words. The transaction must not cross the MEM-AP's auto-increment boundary. @@ -1105,7 +1105,7 @@ def _write_block32_page(self, addr: int, data: Sequence[int]) -> None: TRACE.debug("_write_block32:%06d }", num) def _read_block32_page(self, addr: int, size: int) -> Sequence[int]: - """! @brief Read a single transaction's worth of aligned words. + """@brief Read a single transaction's worth of aligned words. The transaction must not cross the MEM-AP's auto-increment boundary. @@ -1134,7 +1134,7 @@ def _read_block32_page(self, addr: int, size: int) -> Sequence[int]: @locked def _write_memory_block32(self, addr: int, data: Sequence[int]) -> None: - """! @brief Write a block of aligned words in memory.""" + """@brief Write a block of aligned words in memory.""" assert (addr & 0x3) == 0 addr &= self._address_mask size = len(data) @@ -1150,7 +1150,7 @@ def _write_memory_block32(self, addr: int, data: Sequence[int]) -> None: @locked def _read_memory_block32(self, addr: int, size: int) -> Sequence[int]: - """! @brief Read a block of aligned words in memory. + """@brief Read a block of aligned words in memory. @return A list of word values. """ @@ -1171,7 +1171,7 @@ def _handle_error(self, error: Exception, num: int) -> None: self._invalidate_cache() class AHB_AP(MEM_AP): - """! @brief AHB-AP access port subclass. + """@brief AHB-AP access port subclass. This subclass checks for the AP_MSTRTYPE flag, and if set configures that field in the CSW register to use debugger transactions. Only the M3 and M4 AHB-AP implements MSTRTYPE. @@ -1190,7 +1190,7 @@ def init(self) -> None: self._init_mstrtype() def _init_mstrtype(self) -> None: - """! 
@brief Set master type control in CSW. + """@brief Set master type control in CSW. Only the v1 AHB-AP from Cortex-M3 and Cortex-M4 implements the MSTRTYPE flag to control whether transactions appear as debugger or internal accesses. diff --git a/pyocd/coresight/component.py b/pyocd/coresight/component.py index 9570e6de0..b61207ae0 100644 --- a/pyocd/coresight/component.py +++ b/pyocd/coresight/component.py @@ -18,18 +18,18 @@ from ..utility.graph import GraphNode class CoreSightComponent(GraphNode): - """! @brief CoreSight component base class.""" + """@brief CoreSight component base class.""" @classmethod def factory(cls, ap, cmpid, address): - """! @brief Common CoreSightComponent factory.""" + """@brief Common CoreSightComponent factory.""" cmp = cls(ap, cmpid, address) if hasattr(ap, 'core') and ap.core: ap.core.add_child(cmp) return cmp def __init__(self, ap, cmpid=None, addr=None): - """! @brief Constructor.""" + """@brief Constructor.""" super(CoreSightComponent, self).__init__() self._ap = ap self._cmpid = cmpid @@ -56,7 +56,7 @@ def address(self, newAddr): self._address = newAddr class CoreSightCoreComponent(CoreSightComponent): - """! @brief CoreSight component for a CPU core. + """@brief CoreSight component for a CPU core. This class serves only as a superclass for identifying core-type components. """ diff --git a/pyocd/coresight/component_ids.py b/pyocd/coresight/component_ids.py index fe638a2bf..376e2803a 100644 --- a/pyocd/coresight/component_ids.py +++ b/pyocd/coresight/component_ids.py @@ -75,7 +75,7 @@ APFactory = Callable[["DebugPort", APAddressBase, Optional["CoreSightComponentID"]], AccessPort] class CmpInfo(NamedTuple): - """! 
@brief Combines a component and product name with a factory method.""" + """@brief Combines a component and product name with a factory method.""" name: str product: Optional[str] factory: Optional[Union[ComponentFactory, APFactory]] diff --git a/pyocd/coresight/core_ids.py b/pyocd/coresight/core_ids.py index db2bd0621..161ace368 100644 --- a/pyocd/coresight/core_ids.py +++ b/pyocd/coresight/core_ids.py @@ -51,14 +51,14 @@ } class CoreArchitecture(Enum): - """! @brief CPU architectures.""" + """@brief CPU architectures.""" ARMv6M = 1 ARMv7M = 2 ARMv8M_BASE = 3 ARMv8M_MAIN = 4 class CortexMExtension(Enum): - """! @brief Extensions for the Cortex-M architecture.""" + """@brief Extensions for the Cortex-M architecture.""" FPU = "FPU" # Single-Precision floating point DSP = "DSP" # Digital Signal Processing instructions FPU_DP = "FPU_DP" # Double-Precision floating point diff --git a/pyocd/coresight/coresight_target.py b/pyocd/coresight/coresight_target.py index 6024a9b68..002e985ef 100644 --- a/pyocd/coresight/coresight_target.py +++ b/pyocd/coresight/coresight_target.py @@ -38,7 +38,7 @@ LOG = logging.getLogger(__name__) class CoreSightTarget(SoCTarget): - """! @brief Represents an SoC that uses CoreSight debug infrastructure. + """@brief Represents an SoC that uses CoreSight debug infrastructure. This class adds Arm CoreSight-specific discovery and initialization code to SoCTarget. """ @@ -62,14 +62,14 @@ def aps(self) -> Dict["APAddressBase", "AccessPort"]: @property def svd_device(self) -> Optional["SVDDevice"]: - """! @brief Waits for SVD file to complete loading before returning.""" + """@brief Waits for SVD file to complete loading before returning.""" if not self._svd_device and self._svd_load_thread: LOG.debug("Waiting for SVD load to complete") self._svd_device = self._svd_load_thread.device return self._svd_device def _create_default_cortex_m_memory_map(self) -> MemoryMap: - """! 
@brief Create a MemoryMap for the Cortex-M system address map.""" + """@brief Create a MemoryMap for the Cortex-M system address map.""" return MemoryMap( RamRegion(name="Code", start=0x00000000, length=0x20000000, access='rwx'), RamRegion(name="SRAM", start=0x20000000, length=0x20000000, access='rwx'), @@ -109,7 +109,7 @@ def create_init_sequence(self) -> CallSequence: return seq def disconnect(self, resume: bool = True) -> None: - """! @brief Disconnect from the target. + """@brief Disconnect from the target. Same as SoCTarget.disconnect(), except that it asks the DebugPort to power down. """ @@ -124,7 +124,7 @@ def disconnect(self, resume: bool = True) -> None: self.call_delegate('did_disconnect', target=self, resume=resume) def create_discoverer(self) -> None: - """! @brief Init task to create the discovery object. + """@brief Init task to create the discovery object. Instantiates the appropriate @ref pyocd.coresight.discovery.CoreSightDiscovery CoreSightDiscovery subclass for the target's ADI version. @@ -132,7 +132,7 @@ def create_discoverer(self) -> None: self._discoverer = discovery.ADI_DISCOVERY_CLASS_MAP[self.dp.adi_version](self) def pre_connect(self) -> None: - """! @brief Handle some of the connect modes. + """@brief Handle some of the connect modes. This init task performs a connect pre-reset or asserts reset if the connect mode is under-reset. @@ -146,7 +146,7 @@ def pre_connect(self) -> None: self.dp.assert_reset(True) def perform_halt_on_connect(self) -> None: - """! @brief Halt cores. + """@brief Halt cores. This init task performs a connect pre-reset or asserts reset if the connect mode is under-reset. @@ -167,7 +167,7 @@ def perform_halt_on_connect(self) -> None: exc_info=self.session.log_tracebacks) def post_connect(self) -> None: - """! @brief Handle cleaning up some of the connect modes. + """@brief Handle cleaning up some of the connect modes. This init task de-asserts reset if the connect mode is under-reset. 
""" @@ -186,7 +186,7 @@ def post_connect(self) -> None: exc_info=self.session.log_tracebacks) def create_flash(self) -> None: - """! @brief Instantiates flash objects for memory regions. + """@brief Instantiates flash objects for memory regions. This init task iterates over flash memory regions and for each one creates the Flash instance. It uses the flash_algo and flash_class properties of the region to know how @@ -247,7 +247,7 @@ def create_flash(self) -> None: region.flash = obj def check_for_cores(self) -> None: - """! @brief Init task: verify that at least one core was discovered.""" + """@brief Init task: verify that at least one core was discovered.""" if not len(self.cores): # Allow the user to override the exception to enable uses like chip bringup. if self.session.options.get('allow_no_cores'): diff --git a/pyocd/coresight/cortex_m.py b/pyocd/coresight/cortex_m.py index 3151337e1..cd7a7d74e 100644 --- a/pyocd/coresight/cortex_m.py +++ b/pyocd/coresight/cortex_m.py @@ -50,7 +50,7 @@ LOG = logging.getLogger(__name__) class CortexM(CoreTarget, CoreSightCoreComponent): # lgtm[py/multiple-calls-to-init] - """! @brief CoreSight component for a v6-M or v7-M Cortex-M core. + """@brief CoreSight component for a v6-M or v7-M Cortex-M core. This class has basic functions to access a Cortex-M core: - init @@ -246,7 +246,7 @@ def __init__(self, self.bp_manager.add_provider(self.sw_bp) def add_child(self, cmp: "CoreSightComponent") -> None: - """! @brief Connect related CoreSight components.""" + """@brief Connect related CoreSight components.""" super().add_child(cmp) if isinstance(cmp, FPB): @@ -261,17 +261,17 @@ def core_number(self) -> int: @property def architecture(self) -> CoreArchitecture: - """! 
@brief @ref pyocd.coresight.core_ids.CoreArchitecture "CoreArchitecture" for this core.""" + """@brief @ref pyocd.coresight.core_ids.CoreArchitecture "CoreArchitecture" for this core.""" return self._architecture @property def extensions(self) -> List[CortexMExtension]: - """! @brief List of extensions supported by this core.""" + """@brief List of extensions supported by this core.""" return self._extensions @property def core_registers(self) -> CoreRegistersIndex: - """! @brief Instance of @ref pyocd.core.core_registers.CoreRegistersIndex "CoreRegistersIndex" + """@brief Instance of @ref pyocd.core.core_registers.CoreRegistersIndex "CoreRegistersIndex" describing available core registers. """ return self._core_registers @@ -299,7 +299,7 @@ def default_software_reset_type(self) -> Target.ResetType: @default_software_reset_type.setter def default_software_reset_type(self, reset_type: Target.ResetType) -> None: - """! @brief Modify the default software reset method. + """@brief Modify the default software reset method. @param self @param reset_type Must be one of the software reset types: Target.ResetType.SW_SYSRESETREQ, Target.ResetType.SW_VECTRESET, or Target.ResetType.SW_EMULATED. @@ -311,7 +311,7 @@ def default_software_reset_type(self, reset_type: Target.ResetType) -> None: @property def supported_security_states(self) -> Sequence[Target.SecurityState]: - """! @brief Tuple of security states supported by the processor. + """@brief Tuple of security states supported by the processor. @return Tuple of @ref pyocd.core.target.Target.SecurityState "Target.SecurityState". For v6-M and v7-M cores, the return value only contains SecurityState.NONSECURE. @@ -319,7 +319,7 @@ def supported_security_states(self) -> Sequence[Target.SecurityState]: return (Target.SecurityState.NONSECURE,) def init(self) -> None: - """! @brief Cortex M initialization. + """@brief Cortex M initialization. The bus must be accessible when this method is called. 
""" @@ -349,7 +349,7 @@ def disconnect(self, resume: bool = True) -> None: self.call_delegate('did_stop_debug_core', core=self) def _build_registers(self) -> None: - """! @brief Build set of core registers available on this code. + """@brief Build set of core registers available on this code. This method builds the list of core registers for this particular core. This includes all available core registers, and some variants of registers such as 'ipsr', 'iapsr', and the @@ -365,7 +365,7 @@ def _build_registers(self) -> None: self._core_registers.add_group(CoreRegisterGroups.VFP_V5) def _read_core_type(self) -> None: - """! @brief Read the CPUID register and determine core type and architecture.""" + """@brief Read the CPUID register and determine core type and architecture.""" # Read CPUID register cpuid = self.read32(CortexM.CPUID) @@ -392,7 +392,7 @@ def _read_core_type(self) -> None: LOG.warning("CPU core #%d type is unrecognized", self.core_number) def _check_for_fpu(self) -> None: - """! @brief Determine if a core has an FPU. + """@brief Determine if a core has an FPU. The core architecture must have been identified prior to calling this function. """ @@ -433,7 +433,7 @@ def _check_for_fpu(self) -> None: LOG.info("FPU present: " + fpu_type) def write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None: - """! @brief Write a single memory location. + """@brief Write a single memory location. By default the transfer size is a word.""" self.ap.write_memory(addr, data, transfer_size) @@ -455,7 +455,7 @@ def read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Ca ... def read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Read a memory location. + """@brief Read a memory location. 
By default, a word will be read.""" result = self.ap.read_memory(addr, transfer_size, now) @@ -470,27 +470,27 @@ def read_memory_cb(): return read_memory_cb def read_memory_block8(self, addr: int, size: int) -> Sequence[int]: - """! @brief Read a block of unaligned bytes in memory. + """@brief Read a block of unaligned bytes in memory. @return an array of byte values """ data = self.ap.read_memory_block8(addr, size) return self.bp_manager.filter_memory_unaligned_8(addr, size, data) def write_memory_block8(self, addr: int, data: Sequence[int]) -> None: - """! @brief Write a block of unaligned bytes in memory.""" + """@brief Write a block of unaligned bytes in memory.""" self.ap.write_memory_block8(addr, data) def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: - """! @brief Write an aligned block of 32-bit words.""" + """@brief Write an aligned block of 32-bit words.""" self.ap.write_memory_block32(addr, data) def read_memory_block32(self, addr: int, size: int) -> Sequence[int]: - """! @brief Read an aligned block of 32-bit words.""" + """@brief Read an aligned block of 32-bit words.""" data = self.ap.read_memory_block32(addr, size) return self.bp_manager.filter_memory_aligned_32(addr, size, data) def halt(self) -> None: - """! @brief Halt the core + """@brief Halt the core """ LOG.debug("halting core %d", self.core_number) @@ -501,7 +501,7 @@ def halt(self) -> None: def step(self, disable_interrupts: bool = True, start: int = 0, end: int = 0, hook_cb: Optional[Callable[[], bool]] = None) -> None: - """! @brief Perform an instruction level step. + """@brief Perform an instruction level step. This API will execute one or more individual instructions on the core. With default parameters, it masks interrupts and only steps a single instruction. The _start_ and _stop_ parameters define an @@ -616,7 +616,7 @@ def clear_debug_cause_bits(self): ) def _perform_emulated_reset(self): - """! @brief Emulate a software reset by writing registers. 
+ """@brief Emulate a software reset by writing registers. All core registers are written to reset values. This includes setting the initial PC and SP to values read from the vector table, which is assumed to be located at the based of the @@ -697,7 +697,7 @@ def _perform_emulated_reset(self): self.write_memory_block32(self.NVIC_IPR0, [0xffffffff] * (numregs * 8)) def _get_actual_reset_type(self, reset_type): - """! @brief Determine the reset type to use given defaults and passed in type.""" + """@brief Determine the reset type to use given defaults and passed in type.""" # Default to reset_type session option if reset_type parameter is None. If the session # option isn't set, then use the core's default reset type. @@ -729,7 +729,7 @@ def _get_actual_reset_type(self, reset_type): return reset_type def _perform_reset(self, reset_type): - """! @brief Perform a reset of the specified type.""" + """@brief Perform a reset of the specified type.""" assert isinstance(reset_type, Target.ResetType) if reset_type is Target.ResetType.HW: # Tell DP to not send reset notifications because we are doing it. @@ -757,7 +757,7 @@ def _perform_reset(self, reset_type): sleep(self.session.options.get('reset.post_delay')) def _post_reset_core_accessibility_test(self): - """! @brief Wait for the system to come out of reset and this core to be accessible. + """@brief Wait for the system to come out of reset and this core to be accessible. Keep reading the DHCSR until we get a good response with S_RESET_ST cleared, or we time out. There's nothing we can do if the test times out, and in fact if this is a secondary core on a multicore system then timing out @@ -787,7 +787,7 @@ def _post_reset_core_accessibility_test(self): LOG.debug("Core #%d did not come out of reset within timeout", self.core_number) def reset(self, reset_type=None): - """! @brief Reset the core. + """@brief Reset the core. 
The reset method is selectable via the reset_type parameter as well as the reset_type session option. If the reset_type parameter is not specified or None, then the reset_type @@ -830,7 +830,7 @@ def reset(self, reset_type=None): self.session.notify(Target.Event.POST_RESET, self) def set_reset_catch(self, reset_type): - """! @brief Prepare to halt core on reset.""" + """@brief Prepare to halt core on reset.""" LOG.debug("set reset catch, core %d", self.core_number) self._reset_catch_delegate_result = self.call_delegate('set_reset_catch', core=self, reset_type=reset_type) @@ -848,7 +848,7 @@ def set_reset_catch(self, reset_type): self.write_memory(CortexM.DEMCR, self._reset_catch_saved_demcr | CortexM.DEMCR_VC_CORERESET) def clear_reset_catch(self, reset_type): - """! @brief Disable halt on reset.""" + """@brief Disable halt on reset.""" LOG.debug("clear reset catch, core %d", self.core_number) self.call_delegate('clear_reset_catch', core=self, reset_type=reset_type) @@ -858,7 +858,7 @@ def clear_reset_catch(self, reset_type): self.write_memory(CortexM.DEMCR, self._reset_catch_saved_demcr) def reset_and_halt(self, reset_type=None): - """! @brief Perform a reset and stop the core on the reset handler.""" + """@brief Perform a reset and stop the core on the reset handler.""" reset_type = self._get_actual_reset_type(reset_type) # Set up reset catch. @@ -907,7 +907,7 @@ def get_state(self): return Target.State.RUNNING def get_security_state(self): - """! @brief Returns the current security state of the processor. + """@brief Returns the current security state of the processor. @return @ref pyocd.core.target.Target.SecurityState "Target.SecurityState" enumerator. For v6-M and v7-M cores, SecurityState.NONSECURE is always returned. @@ -925,7 +925,7 @@ def is_halted(self): return self.get_state() == Target.State.HALTED def resume(self): - """! @brief Resume execution of the core. + """@brief Resume execution of the core. 
""" if self.get_state() != Target.State.HALTED: LOG.debug('cannot resume: target not halted') @@ -942,7 +942,7 @@ def find_breakpoint(self, addr): return self.bp_manager.find_breakpoint(addr) def check_reg_list(self, reg_list): - """! @brief Sanity check register values and raise helpful errors.""" + """@brief Sanity check register values and raise helpful errors.""" for reg in reg_list: if reg not in self.core_registers.by_index: # Invalid register, try to give useful error. An invalid name will already @@ -954,7 +954,7 @@ def check_reg_list(self, reg_list): raise KeyError("register %s not available in this CPU", info.name) def read_core_register(self, reg): - """! @brief Read one core register. + """@brief Read one core register. The core must be halted or reads will fail. @@ -972,7 +972,7 @@ def read_core_register(self, reg): return reg_info.from_raw(regValue) def read_core_register_raw(self, reg): - """! @brief Read a core register without type conversion. + """@brief Read a core register without type conversion. The core must be halted or reads will fail. @@ -989,7 +989,7 @@ def read_core_register_raw(self, reg): return vals[0] def read_core_registers_raw(self, reg_list): - """! @brief Read one or more core registers. + """@brief Read one or more core registers. The core must be halted or reads will fail. @@ -1009,7 +1009,7 @@ def read_core_registers_raw(self, reg_list): return self._base_read_core_registers_raw(reg_list) def _base_read_core_registers_raw(self, reg_list): - """! @brief Private core register read routine. + """@brief Private core register read routine. Items in the _reg_list_ must be pre-converted to index and only include valid registers for the core. @@ -1105,7 +1105,7 @@ def _base_read_core_registers_raw(self, reg_list): return reg_vals def write_core_register(self, reg, data): - """! @brief Write a CPU register. + """@brief Write a CPU register. The core must be halted or the write will fail. 
@@ -1121,7 +1121,7 @@ def write_core_register(self, reg, data): self.write_core_register_raw(reg_info.index, reg_info.to_raw(data)) def write_core_register_raw(self, reg, data): - """! @brief Write a CPU register without type conversion. + """@brief Write a CPU register without type conversion. The core must be halted or the write will fail. @@ -1136,7 +1136,7 @@ def write_core_register_raw(self, reg, data): self.write_core_registers_raw([reg], [data]) def write_core_registers_raw(self, reg_list, data_list): - """! @brief Write one or more core registers. + """@brief Write one or more core registers. The core must be halted or writes will fail. @@ -1158,7 +1158,7 @@ def write_core_registers_raw(self, reg_list, data_list): self._base_write_core_registers_raw(reg_list, data_list) def _base_write_core_registers_raw(self, reg_list, data_list): - """! @brief Private core register write routine. + """@brief Private core register write routine. Items in the _reg_list_ must be pre-converted to index and only include valid registers for the core. Similarly, data_list items must be pre-converted to integer values. @@ -1240,7 +1240,7 @@ def _base_write_core_registers_raw(self, reg_list, data_list): ", ".join(CortexMCoreRegisterInfo.get(r).name for r in fail_list))) def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): - """! @brief Set a hardware or software breakpoint at a specific location in memory. + """@brief Set a hardware or software breakpoint at a specific location in memory. @retval True Breakpoint was set. @retval False Breakpoint could not be set. @@ -1248,7 +1248,7 @@ def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): return self.bp_manager.set_breakpoint(addr, type) def remove_breakpoint(self, addr): - """! 
@brief Remove a breakpoint at a specific location.""" + """@brief Remove a breakpoint at a specific location.""" self.bp_manager.remove_breakpoint(addr) def get_breakpoint_type(self, addr): @@ -1263,13 +1263,13 @@ def find_watchpoint(self, addr, size, type): return self.dwt.find_watchpoint(addr, size, type) def set_watchpoint(self, addr, size, type): - """! @brief Set a hardware watchpoint. + """@brief Set a hardware watchpoint. """ if self.dwt is not None: return self.dwt.set_watchpoint(addr, size, type) def remove_watchpoint(self, addr, size, type): - """! @brief Remove a hardware watchpoint. + """@brief Remove a hardware watchpoint. """ if self.dwt is not None: return self.dwt.remove_watchpoint(addr, size, type) @@ -1339,7 +1339,7 @@ def is_vector_catch(self): return self.get_halt_reason() == Target.HaltReason.VECTOR_CATCH def get_halt_reason(self): - """! @brief Returns the reason the core has halted. + """@brief Returns the reason the core has halted. @return @ref pyocd.core.target.Target.HaltReason "Target.HaltReason" enumerator or None. """ diff --git a/pyocd/coresight/cortex_m_core_registers.py b/pyocd/coresight/cortex_m_core_registers.py index 61663a728..6a889896d 100644 --- a/pyocd/coresight/cortex_m_core_registers.py +++ b/pyocd/coresight/cortex_m_core_registers.py @@ -31,7 +31,7 @@ IPSR_MASK = 0x000001FF class CortexMCoreRegisterInfo(CoreRegisterInfo): - """! @brief Core register subclass for Cortex-M registers. + """@brief Core register subclass for Cortex-M registers. For most registers, the index is the value written to the DCRSR register to read or write the core register. Other core registers not directly supported by DCRSR have special index values that @@ -48,7 +48,7 @@ class CortexMCoreRegisterInfo(CoreRegisterInfo): @classmethod def register_name_to_index(cls, reg: "CoreRegisterNameOrNumberType") -> int: - """! @brief Convert a register name to integer register index. + """@brief Convert a register name to integer register index. 
@param reg Either a register name or internal register number. @return Internal register number. @exception KeyError @@ -62,7 +62,7 @@ def register_name_to_index(cls, reg: "CoreRegisterNameOrNumberType") -> int: @classmethod def get(cls, reg: "CoreRegisterNameOrNumberType") -> "CortexMCoreRegisterInfo": - """! @brief Return the CoreRegisterInfo instance for a register. + """@brief Return the CoreRegisterInfo instance for a register. @param reg Either a register name or internal register number. @return CoreRegisterInfo @exception KeyError @@ -71,22 +71,22 @@ def get(cls, reg: "CoreRegisterNameOrNumberType") -> "CortexMCoreRegisterInfo": @property def is_fpu_register(self) -> bool: - """! @brief Returns true for FPSCR, SP, or DP registers.""" + """@brief Returns true for FPSCR, SP, or DP registers.""" return self.index == 33 or self.is_float_register @property def is_cfbp_subregister(self) -> bool: - """! @brief Whether the register is one of those combined into CFBP by the DCSR.""" + """@brief Whether the register is one of those combined into CFBP by the DCSR.""" return -4 <= self.index <= -1 @property def is_psr_subregister(self) -> bool: - """! @brief Whether the register is a combination of xPSR fields.""" + """@brief Whether the register is a combination of xPSR fields.""" return 0x100 <= self.index <= 0x107 @property def psr_mask(self) -> int: - """! @brief Generate a PSR mask based on bottom 3 bits of a MRS SYSm value""" + """@brief Generate a PSR mask based on bottom 3 bits of a MRS SYSm value""" mask = 0 if (self.index & 1) != 0: mask |= IPSR_MASK @@ -97,7 +97,7 @@ def psr_mask(self) -> int: return mask class CoreRegisterGroups: - """! @brief Namespace for lists of Cortex-M core register information.""" + """@brief Namespace for lists of Cortex-M core register information.""" _I = CortexMCoreRegisterInfo # Reduce table width. @@ -267,5 +267,5 @@ class CoreRegisterGroups: + CoreRegisterGroups.VFP_V5) def index_for_reg(name: str) -> int: - """! 
@brief Utility to easily convert register name to index.""" + """@brief Utility to easily convert register name to index.""" return CortexMCoreRegisterInfo.get(name).index diff --git a/pyocd/coresight/cortex_m_v8m.py b/pyocd/coresight/cortex_m_v8m.py index 0f47c7bf7..6ff3796d6 100644 --- a/pyocd/coresight/cortex_m_v8m.py +++ b/pyocd/coresight/cortex_m_v8m.py @@ -25,7 +25,7 @@ LOG = logging.getLogger(__name__) class CortexM_v8M(CortexM): - """! @brief Component class for a v8.x-M architecture Cortex-M core.""" + """@brief Component class for a v8.x-M architecture Cortex-M core.""" ARMv8M_BASE = 0xC ARMv8M_MAIN = 0xF @@ -62,7 +62,7 @@ def __init__(self, rootTarget, ap, memory_map=None, core_num=0, cmpid=None, addr @property def supported_security_states(self): - """! @brief Tuple of security states supported by the processor. + """@brief Tuple of security states supported by the processor. @return Tuple of @ref pyocd.core.target.Target.SecurityState "Target.SecurityState". The result depends on whether the Security extension is enabled. @@ -73,7 +73,7 @@ def supported_security_states(self): return (Target.SecurityState.NONSECURE,) def _read_core_type(self): - """! @brief Read the CPUID register and determine core type and architecture.""" + """@brief Read the CPUID register and determine core type and architecture.""" # Read CPUID register cpuid = self.read32(CortexM.CPUID) @@ -109,7 +109,7 @@ def _read_core_type(self): LOG.warning("CPU core #%d type is unrecognized", self.core_number) def _check_for_fpu(self): - """! @brief Determine if a core has an FPU. + """@brief Determine if a core has an FPU. In addition to the tests performed by CortexM, this method tests for the MVE extension. """ @@ -143,7 +143,7 @@ def _build_registers(self): self._core_registers.add_group(CoreRegisterGroups.V81M_MVE_ONLY) def get_security_state(self): - """! @brief Returns the current security state of the processor. + """@brief Returns the current security state of the processor. 
@return @ref pyocd.core.target.Target.SecurityState "Target.SecurityState" enumerator. """ @@ -163,7 +163,7 @@ def clear_debug_cause_bits(self): | CortexM.DFSR_HALTED) def get_halt_reason(self): - """! @brief Returns the reason the core has halted. + """@brief Returns the reason the core has halted. This overridden version of this method adds support for v8.x-M halt reasons. diff --git a/pyocd/coresight/dap.py b/pyocd/coresight/dap.py index 04caed38b..f20877135 100644 --- a/pyocd/coresight/dap.py +++ b/pyocd/coresight/dap.py @@ -113,12 +113,12 @@ class DPIDR(NamedTuple): mindp: int class ADIVersion(Enum): - """! @brief Supported versions of the Arm Debug Interface.""" + """@brief Supported versions of the Arm Debug Interface.""" ADIv5 = 5 ADIv6 = 6 class DPConnector: - """! @brief Establishes a connection to the DP for a given wire protocol. + """@brief Establishes a connection to the DP for a given wire protocol. This class will ask the probe to connect using a given wire protocol. Then it makes multiple attempts at sending the SWJ sequence to select the wire protocol and read the DP IDR register. @@ -135,7 +135,7 @@ def __init__(self, probe: DebugProbe) -> None: @property def idr(self) -> DPIDR: - """! @brief DPIDR instance containing values read from the DP IDR register.""" + """@brief DPIDR instance containing values read from the DP IDR register.""" return self._idr def _get_protocol(self, protocol: Optional[DebugProbe.Protocol]) -> DebugProbe.Protocol: @@ -148,7 +148,7 @@ def _get_protocol(self, protocol: Optional[DebugProbe.Protocol]) -> DebugProbe.P return protocol def connect(self, protocol: Optional[DebugProbe.Protocol] = None) -> None: - """! @brief Establish a connection to the DP. + """@brief Establish a connection to the DP. This method causes the debug probe to connect using the wire protocol. @@ -244,7 +244,7 @@ def _connect_dp(self, protocol: DebugProbe.Protocol) -> None: raise def read_idr(self): - """! 
@brief Read IDR register and get DP version""" + """@brief Read IDR register and get DP version""" dpidr = self._probe.read_dp(DP_IDR, now=True) dp_partno = (dpidr & DPIDR_PARTNO_MASK) >> DPIDR_PARTNO_SHIFT dp_version = (dpidr & DPIDR_VERSION_MASK) >> DPIDR_VERSION_SHIFT @@ -253,7 +253,7 @@ def read_idr(self): return DPIDR(dpidr, dp_partno, dp_version, dp_revision, is_mindp) class DebugPort: - """! @brief Represents the Arm Debug Interface (ADI) Debug Port (DP).""" + """@brief Represents the Arm Debug Interface (ADI) Debug Port (DP).""" ## Sleep for 50 ms between connection tests and reconnect attempts after a reset. _RESET_RECOVERY_SLEEP_INTERVAL = 0.05 @@ -262,7 +262,7 @@ class DebugPort: _RESET_RECOVERY_ATTEMPTS_BEFORE_RECONNECT = 1 def __init__(self, probe: DebugProbe, target: Target) -> None: - """! @brief Constructor. + """@brief Constructor. @param self The DebugPort object. @param probe The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" object. The probe is assumed to not have been opened yet. @@ -312,12 +312,12 @@ def adi_version(self) -> ADIVersion: @property def base_address(self) -> int: - """! @brief Base address of the first component for an ADIv6 system.""" + """@brief Base address of the first component for an ADIv6 system.""" return self._base_addr @property def apacc_memory_interface(self) -> "APAccessMemoryInterface": - """! @brief Memory interface for performing APACC transactions.""" + """@brief Memory interface for performing APACC transactions.""" if self._apacc_mem_interface is None: self._apacc_mem_interface = APAccessMemoryInterface(self) return self._apacc_mem_interface @@ -328,15 +328,15 @@ def next_access_number(self) -> int: return self._access_number def lock(self) -> None: - """! @brief Lock the DP from access by other threads.""" + """@brief Lock the DP from access by other threads.""" self.probe.lock() def unlock(self) -> None: - """! 
@brief Unlock the DP.""" + """@brief Unlock the DP.""" self.probe.unlock() def connect(self, protocol: Optional[DebugProbe.Protocol] = None) -> None: - """! @brief Connect to the target. + """@brief Connect to the target. This method causes the debug probe to connect using the selected wire protocol. The probe must have already been opened prior to this call. @@ -352,14 +352,14 @@ def connect(self, protocol: Optional[DebugProbe.Protocol] = None) -> None: self.create_connect_sequence().invoke() def disconnect(self) -> None: - """! @brief Disconnect from target. + """@brief Disconnect from target. DP debug is powered down. See power_down_debug(). """ self.power_down_debug() def create_connect_sequence(self) -> CallSequence: - """! @brief Returns call sequence to connect to the target. + """@brief Returns call sequence to connect to the target. Returns a @ref pyocd.utility.sequence.CallSequence CallSequence that will connect to the DP, power up debug and the system, check the DP version to identify whether the target uses @@ -392,7 +392,7 @@ def create_connect_sequence(self) -> CallSequence: return CallSequence(*seq) def _get_probe_capabilities(self) -> None: - """! @brief Examine the probe's capabilities.""" + """@brief Examine the probe's capabilities.""" caps = self._probe.capabilities self._probe_managed_ap_select = (DebugProbe.Capability.MANAGED_AP_SELECTION in caps) self._probe_managed_dpbanksel = (DebugProbe.Capability.MANAGED_DPBANKSEL in caps) @@ -474,7 +474,7 @@ def write_reg(self, addr: int, data: int) -> None: self.write_dp(addr, data) def power_up_debug(self) -> bool: - """! @brief Assert DP power requests. + """@brief Assert DP power requests. Request both debug and system power be enabled, and wait until the request is acked. There is a timeout for the request. @@ -495,7 +495,7 @@ def power_up_debug(self) -> bool: return True def power_down_debug(self) -> bool: - """! @brief Deassert DP power requests. + """@brief Deassert DP power requests. 
ADIv6 says that we must not clear CSYSPWRUPREQ and CDBGPWRUPREQ at the same time. ADIv5 says CSYSPWRUPREQ must not be set to 1 while CDBGPWRUPREQ is set to 0. So we @@ -529,11 +529,11 @@ def power_down_debug(self) -> bool: return True def _invalidate_cache(self) -> None: - """! @brief Invalidate cached DP registers.""" + """@brief Invalidate cached DP registers.""" self._cached_dp_select = None def _reset_did_occur(self, notification: "Notification") -> None: - """! @brief Handles reset notifications to invalidate register cache. + """@brief Handles reset notifications to invalidate register cache. The cache is cleared on all resets just to be safe. On most devices, warm resets do not reset debug logic, but it does happen on some devices. @@ -541,7 +541,7 @@ def _reset_did_occur(self, notification: "Notification") -> None: self._invalidate_cache() def post_reset_recovery(self) -> None: - """! @brief Wait for the target to recover from reset, with auto-reconnect if needed.""" + """@brief Wait for the target to recover from reset, with auto-reconnect if needed.""" # Check if we can access DP registers. If this times out, then reconnect the DP and retry. with Timeout(self.session.options.get('reset.dap_recover.timeout'), self._RESET_RECOVERY_SLEEP_INTERVAL) as time_out: @@ -575,7 +575,7 @@ def post_reset_recovery(self) -> None: LOG.error("DAP is not accessible after reset followed by attempted reconnect") def reset(self, *, send_notifications: bool = True) -> None: - """! @brief Hardware reset. + """@brief Hardware reset. Pre- and post-reset notifications are sent. @@ -598,7 +598,7 @@ def reset(self, *, send_notifications: bool = True) -> None: self.session.notify(Target.Event.POST_RESET, self) def assert_reset(self, asserted: bool, *, send_notifications: bool = True) -> None: - """! @brief Assert or deassert the hardware reset signal. + """@brief Assert or deassert the hardware reset signal. 
A pre-reset notification is sent before asserting reset, whereas a post-reset notification is sent after deasserting reset. @@ -622,7 +622,7 @@ def assert_reset(self, asserted: bool, *, send_notifications: bool = True) -> No self.session.notify(Target.Event.POST_RESET, self) def is_reset_asserted(self) -> bool: - """! @brief Returns the current state of the nRESET signal. + """@brief Returns the current state of the nRESET signal. This method can be called before the DebugPort is initalized. @@ -632,14 +632,14 @@ def is_reset_asserted(self) -> bool: return self.probe.is_reset_asserted() def set_clock(self, frequency: float) -> None: - """! @brief Change the wire protocol's clock frequency. + """@brief Change the wire protocol's clock frequency. @param self This object. @param frequency New wire protocol frequency in Hertz. """ self.probe.set_clock(frequency) def _write_dp_select(self, mask: int, value: int) -> None: - """! @brief Modify part of the DP SELECT register and write if cache is stale. + """@brief Modify part of the DP SELECT register and write if cache is stale. The DP lock must already be acquired before calling this method. """ @@ -656,7 +656,7 @@ def _write_dp_select(self, mask: int, value: int) -> None: self._cached_dp_select = select def _set_dpbanksel(self, addr: int, is_write: bool) -> bool: - """! @brief Updates the DPBANKSEL field of the SELECT register as required. + """@brief Updates the DPBANKSEL field of the SELECT register as required. Several DP registers (most, actually) ignore DPBANKSEL. If one of those is being accessed, any value of DPBANKSEL can be used. Otherwise SELECT is updated if necessary @@ -774,7 +774,7 @@ def write_dp(self, addr: int, data: int) -> None: self.unlock() def _select_ap(self, addr: int) -> bool: - """! @brief Write DP_SELECT to choose the given AP. + """@brief Write DP_SELECT to choose the given AP. 
Handles the case where the debug probe manages selecting an AP itself, in which case we never write SELECT directly. @@ -958,7 +958,7 @@ def clear_sticky_err(self) -> None: assert False class APAccessMemoryInterface(memory_interface.MemoryInterface): - """! @brief Memory interface for performing simple APACC transactions. + """@brief Memory interface for performing simple APACC transactions. This class allows the caller to generate Debug APB transactions from a DPv3. It simply adapts the MemoryInterface to APACC transactions. @@ -971,7 +971,7 @@ class APAccessMemoryInterface(memory_interface.MemoryInterface): """ def __init__(self, dp: DebugPort, ap_address: Optional["APAddressBase"] = None) -> None: - """! @brief Constructor. + """@brief Constructor. @param self @param dp The DebugPort object. @@ -997,7 +997,7 @@ def short_description(self) -> str: return "Root Component ({})".format(self._ap_address) def write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None: - """! @brief Write a single memory location. + """@brief Write a single memory location. By default the transfer size is a word.""" if transfer_size != 32: @@ -1022,7 +1022,7 @@ def read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Ca ... def read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Read a memory location. + """@brief Read a memory location. By default, a word will be read.""" if transfer_size != 32: @@ -1031,14 +1031,14 @@ def read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> U return self._dp.read_ap(self._offset + addr, now) def write_memory_block32(self, addr: int, data: Sequence[int]) -> None: - """! @brief Write an aligned block of 32-bit words.""" + """@brief Write an aligned block of 32-bit words.""" addr += self._offset for word in data: self._dp.write_ap(addr, word) addr += 4 def read_memory_block32(self, addr: int, size: int) -> Sequence[int]: - """! 
@brief Read an aligned block of 32-bit words.""" + """@brief Read an aligned block of 32-bit words.""" addr += self._offset result_cbs = [self._dp.read_ap(addr + i * 4, now=False) for i in range(size)] result = [cb() for cb in result_cbs] diff --git a/pyocd/coresight/discovery.py b/pyocd/coresight/discovery.py index 2b4ab2293..dec2a4f6f 100644 --- a/pyocd/coresight/discovery.py +++ b/pyocd/coresight/discovery.py @@ -26,10 +26,10 @@ LOG = logging.getLogger(__name__) class CoreSightDiscovery(object): - """! @brief Base class for discovering CoreSight components in a target.""" + """@brief Base class for discovering CoreSight components in a target.""" def __init__(self, target): - """! @brief Constructor.""" + """@brief Constructor.""" self._target = target @property @@ -45,7 +45,7 @@ def session(self): return self.target.session def discover(self): - """! @brief Init task for component discovery. + """@brief Init task for component discovery. @return CallSequence for the discovery process. """ raise NotImplementedError() @@ -74,7 +74,7 @@ def _apply_to_all_components(self, action, filter=None): ap.rom_table.for_each(action, filter) class ADIv5Discovery(CoreSightDiscovery): - """! @brief Component discovery process for ADIv5. + """@brief Component discovery process for ADIv5. Component discovery for ADIv5 proceeds as follows. Each of the steps is labeled with the name of the init task for that step. @@ -103,7 +103,7 @@ def discover(self): ) def _find_aps(self): - """! @brief Find valid APs using the ADIv5 method. + """@brief Find valid APs using the ADIv5 method. Scans for valid APs starting at APSEL=0. The default behaviour is to stop after reading 0 for the AP's IDR twice in succession. If the `scan_all_aps` session option is set to True, @@ -143,7 +143,7 @@ def _find_aps(self): self.dp.valid_aps = ap_list def _create_aps(self): - """! @brief Init task that returns a call sequence to create APs. + """@brief Init task that returns a call sequence to create APs. 
For each AP in the #valid_aps list, an AccessPort object is created. The new objects are added to the #aps dict, keyed by their AP number. @@ -156,7 +156,7 @@ def _create_aps(self): return seq def _create_1_ap(self, apsel): - """! @brief Init task to create a single AP object.""" + """@brief Init task to create a single AP object.""" try: ap_address = APv1Address(apsel) ap = AccessPort.create(self.dp, ap_address) @@ -166,7 +166,7 @@ def _create_1_ap(self, apsel): exc_info=self.session.log_tracebacks) def _find_components(self): - """! @brief Init task that generates a call sequence to ask each AP to find its components.""" + """@brief Init task that generates a call sequence to ask each AP to find its components.""" seq = CallSequence() for ap in [x for x in self.dp.aps.values() if x.has_rom_table]: seq.append( @@ -175,7 +175,7 @@ def _find_components(self): return seq class ADIv6Discovery(CoreSightDiscovery): - """! @brief Component discovery process for ADIv6. + """@brief Component discovery process for ADIv6. The process for discovering components in ADIv6 proceeds as follows. Each of the steps is labeled with the name of the init task for that step. @@ -193,7 +193,7 @@ class ADIv6Discovery(CoreSightDiscovery): """ def __init__(self, target): - """! @brief Constructor.""" + """@brief Constructor.""" super(ADIv6Discovery, self).__init__(target) self._top_rom_table = None @@ -206,7 +206,7 @@ def discover(self): ) def _find_root_components(self): - """! @brief Read top-level ROM table pointed to by the DP.""" + """@brief Read top-level ROM table pointed to by the DP.""" # There's not much we can do if we don't have a base address. if self.dp.base_address is None: return @@ -236,7 +236,7 @@ def _find_root_components(self): self._create_root_component(cmpid) def _create_1_ap(self, cmpid): - """! 
@brief Init task to create a single AP object.""" + """@brief Init task to create a single AP object.""" try: ap_address = APv2Address(cmpid.address) ap = AccessPort.create(self.dp, ap_address, cmpid=cmpid) @@ -246,7 +246,7 @@ def _create_1_ap(self, cmpid): exc_info=self.session.log_tracebacks) def _create_root_component(self, cmpid): - """! @brief Init task to create a component attached directly to the DP. + """@brief Init task to create a component attached directly to the DP. The newly created component is attached directly to the target instance (i.e., CoreSightTarget or subclass) in the object graph. @@ -265,7 +265,7 @@ def _create_root_component(self, cmpid): exc_info=self.session.log_tracebacks) def _find_components_on_aps(self): - """! @brief Init task that generates a call sequence to ask each AP to find its components.""" + """@brief Init task that generates a call sequence to ask each AP to find its components.""" seq = CallSequence() for ap in [x for x in self.dp.aps.values() if x.has_rom_table]: seq.append( diff --git a/pyocd/coresight/dwt.py b/pyocd/coresight/dwt.py index 82f0a8ab1..f8824123a 100644 --- a/pyocd/coresight/dwt.py +++ b/pyocd/coresight/dwt.py @@ -35,7 +35,7 @@ def __init__(self, comp_register_addr, provider): self.func = 0 class DWT(CoreSightComponent): - """! @brief Data Watchpoint and Trace version 1.0""" + """@brief Data Watchpoint and Trace version 1.0""" # DWT registers # @@ -96,7 +96,7 @@ def watchpoint_count(self): return len(self.watchpoints) def init(self): - """! @brief Inits the DWT. + """@brief Inits the DWT. Reads the number of hardware watchpoints available on the core and makes sure that they are all disabled and ready for future use. @@ -126,7 +126,7 @@ def find_watchpoint(self, addr, size, type): return None def set_watchpoint(self, addr, size, type): - """! 
@brief Set a hardware watchpoint.""" + """@brief Set a hardware watchpoint.""" if self.dwt_configured is False: self.init() @@ -163,7 +163,7 @@ def set_watchpoint(self, addr, size, type): return False def remove_watchpoint(self, addr, size, type): - """! @brief Remove a hardware watchpoint.""" + """@brief Remove a hardware watchpoint.""" watch = self.find_watchpoint(addr, size, type) if watch is None: return @@ -189,7 +189,7 @@ def cycle_count(self, value): self.ap.write32(self.address + self.DWT_CYCCNT, value) class DWTv2(DWT): - """! @brief Data Watchpoint and Trace version 2.x + """@brief Data Watchpoint and Trace version 2.x This version is present in v8-M platforms. @@ -217,7 +217,7 @@ class DWTv2(DWT): } def set_watchpoint(self, addr, size, type): - """! @brief Set a hardware watchpoint.""" + """@brief Set a hardware watchpoint.""" if self.dwt_configured is False: self.init() diff --git a/pyocd/coresight/fpb.py b/pyocd/coresight/fpb.py index 091f370c6..d67b81c6e 100644 --- a/pyocd/coresight/fpb.py +++ b/pyocd/coresight/fpb.py @@ -35,7 +35,7 @@ def __init__(self, comp_register_addr: int, provider: BreakpointProvider) -> Non self.type = Target.BreakpointType.HW class FPB(BreakpointProvider, CoreSightComponent): - """! @brief Flash Patch and Breakpoint unit""" + """@brief Flash Patch and Breakpoint unit""" # FPB registers # @@ -65,7 +65,7 @@ def revision(self) -> int: return self.fpb_rev def init(self) -> None: - """! @brief Inits the FPB. + """@brief Inits the FPB. Reads the number of hardware breakpoints available on the core and disable the FPB (Flash Patch and Breakpoint Unit), which will be enabled when the first breakpoint is set. @@ -105,7 +105,7 @@ def available_breakpoints(self) -> int: return len(self.hw_breakpoints) - self.num_hw_breakpoint_used def can_support_address(self, addr: int) -> bool: - """! @brief Test whether an address is supported by the FPB. + """@brief Test whether an address is supported by the FPB. 
For FPBv1, hardware breakpoints are only supported in the range 0x00000000 - 0x1fffffff. This was fixed for FPBv2, which supports hardware breakpoints at any address. @@ -119,7 +119,7 @@ def find_breakpoint(self, addr: int) -> Optional[Breakpoint]: return None def set_breakpoint(self, addr: int) -> Optional[Breakpoint]: - """! @brief Set a hardware breakpoint at a specific location in flash.""" + """@brief Set a hardware breakpoint at a specific location in flash.""" if not self.enabled: self.enable() @@ -150,7 +150,7 @@ def set_breakpoint(self, addr: int) -> Optional[Breakpoint]: return None def remove_breakpoint(self, bp: Breakpoint) -> None: - """! @brief Remove a hardware breakpoint at a specific location in flash.""" + """@brief Remove a hardware breakpoint at a specific location in flash.""" for hwbp in self.hw_breakpoints: if hwbp.enabled and hwbp.addr == bp.addr: hwbp.enabled = False diff --git a/pyocd/coresight/generic_mem_ap.py b/pyocd/coresight/generic_mem_ap.py index 3bbbd10f1..c0a4d3c2e 100644 --- a/pyocd/coresight/generic_mem_ap.py +++ b/pyocd/coresight/generic_mem_ap.py @@ -27,7 +27,7 @@ class GenericMemAPTarget(Target, CoreSightComponent): - """! @brief This target represents ARM debug Access Port without a CPU + """@brief This target represents ARM debug Access Port without a CPU It may be used to access the address space of the target via Access Ports without real ARM CPU core behind it. For instance Cypress PSoC64 devices have diff --git a/pyocd/coresight/gpr.py b/pyocd/coresight/gpr.py index 2713a5d83..12f28d1d9 100644 --- a/pyocd/coresight/gpr.py +++ b/pyocd/coresight/gpr.py @@ -20,7 +20,7 @@ ACK_TIMEOUT = 5.0 class GPR(CoreSightComponent): - """! @brief Granular Power Requestor. + """@brief Granular Power Requestor. Currently only supports enabling power domains. """ @@ -47,11 +47,11 @@ def __init__(self, ap, cmpid=None, addr=None): self.domain_count = 0 def init(self): - """! 
@brief Inits the GPR.""" + """@brief Inits the GPR.""" self.domain_count = self.cmpid.devid[2] & self.CPWRUPM_COUNT_MASK def _power_up(self, mask): - """! @brief Enable power to a power domaind by mask. + """@brief Enable power to a power domaind by mask. @param self @param mask Bitmask of the domains to power up. @retval True Requested domains were successfully powered on. @@ -69,7 +69,7 @@ def _power_up(self, mask): return False def power_up_all(self): - """! @brief Enable power to all available power domains. + """@brief Enable power to all available power domains. @param self @retval True All domains were successfully powered on. @return False Timeout waiting for power ack bit(s) to set. @@ -78,7 +78,7 @@ def power_up_all(self): return self._power_up(mask) def power_up_one(self, domain_id): - """! @brief Power up a single power domain by domain ID. + """@brief Power up a single power domain by domain ID. @param self @param domain_id Integer power domain ID. @retval True Requested domain was powered on successfully. diff --git a/pyocd/coresight/itm.py b/pyocd/coresight/itm.py index 581b13c39..6ae4a20ca 100644 --- a/pyocd/coresight/itm.py +++ b/pyocd/coresight/itm.py @@ -28,7 +28,7 @@ def __init__(self): pass class ITM(CoreSightComponent): - """! @brief Instrumentation Trace Macrocell""" + """@brief Instrumentation Trace Macrocell""" # Register definitions. # diff --git a/pyocd/coresight/rom_table.py b/pyocd/coresight/rom_table.py index 9bdf9a713..f34128ad6 100644 --- a/pyocd/coresight/rom_table.py +++ b/pyocd/coresight/rom_table.py @@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__) class CoreSightComponentID(object): - """! @brief Reads and parses CoreSight architectural component ID registers. + """@brief Reads and parses CoreSight architectural component ID registers. Reads the CIDR, PIDR, DEVID, and DEVARCH registers present at well known offsets in the memory map of all CoreSight components. 
The various fields from these @@ -109,7 +109,7 @@ def __init__(self, parent_rom_table, ap, top_addr, power_id=None): self.valid = False def read_id_registers(self): - """! @brief Read Component ID, Peripheral ID, and DEVID/DEVARCH registers.""" + """@brief Read Component ID, Peripheral ID, and DEVID/DEVARCH registers.""" # Read registers as a single block read for performance reasons. regs = self.ap.read_memory_block32(self.top_address + self.IDR_READ_START, self.IDR_READ_COUNT) self.cidr = self._extract_id_register_value(regs, self.CIDR0_OFFSET) @@ -193,7 +193,7 @@ def __repr__(self): class ROMTable(CoreSightComponent): - """! @brief CoreSight ROM table base class. + """@brief CoreSight ROM table base class. This abstract class provides common functionality for ROM tables. Most importantly it has the static create() factory method. @@ -214,7 +214,7 @@ class ROMTable(CoreSightComponent): @staticmethod def create(memif, cmpid, addr=None, parent_table=None): - """! @brief Factory method for creating ROM table components. + """@brief Factory method for creating ROM table components. This static method instantiates the appropriate subclass for the ROM table component described by the cmpid parameter. @@ -235,7 +235,7 @@ def create(memif, cmpid, addr=None, parent_table=None): raise exceptions.DebugError("unexpected ROM table device class (%s)" % cmpid) def __init__(self, ap, cmpid=None, addr=None, parent_table=None): - """! @brief Constructor.""" + """@brief Constructor.""" assert cmpid is not None assert cmpid.is_rom_table super(ROMTable, self).__init__(ap, cmpid, addr) @@ -249,12 +249,12 @@ def __init__(self, ap, cmpid=None, addr=None, parent_table=None): @property def depth(self): - """! @brief Number of parent ROM tables.""" + """@brief Number of parent ROM tables.""" return self._depth @property def components(self): - """! @brief List of CoreSightComponentID instances for components found in this table. 
+ """@brief List of CoreSightComponentID instances for components found in this table. This property contains only the components for this ROM table, not any child tables. @@ -265,11 +265,11 @@ def components(self): @property def depth_indent(self): - """! @brief String of whitespace with a width corresponding to the table's depth.'""" + """@brief String of whitespace with a width corresponding to the table's depth.'""" return " " * self._depth def init(self): - """! @brief Read and parse the ROM table. + """@brief Read and parse the ROM table. As table entries for CoreSight components are read, a CoreSightComponentID instance will be created and the ID registers read. These ID objects are added to the _components_ property. @@ -286,7 +286,7 @@ def _read_table(self): raise NotImplementedError() def for_each(self, action, filter=None): - """! @brief Apply an action to every component defined in the ROM table and child tables. + """@brief Apply an action to every component defined in the ROM table and child tables. This method iterates over every entry in the ROM table. For each entry it calls the filter function if provided. If the filter passes (returns True or was not provided) then @@ -313,7 +313,7 @@ def for_each(self, action, filter=None): action(component) class Class1ROMTable(ROMTable): - """! @brief CoreSight Class 0x1 ROM table component and parser. + """@brief CoreSight Class 0x1 ROM table component and parser. An object of this class represents a CoreSight Class 0x1 ROM table. It supports reading the table and any child tables. For each entry in the table, a CoreSightComponentID object is created @@ -433,7 +433,7 @@ def _handle_table_entry(self, entry, number): self.components.append(cmp) class Class9ROMTable(ROMTable): - """! @brief CoreSight Class 0x9 ROM table component and parser. + """@brief CoreSight Class 0x9 ROM table component and parser. Handles parsing of class 0x9 ROM tables as defined in ADIv6. 
@@ -491,7 +491,7 @@ class Class9ROMTable(ROMTable): POWER_REQUEST_TIMEOUT = 5.0 def __init__(self, ap, cmpid=None, addr=None, parent_table=None): - """! @brief Component constructor.""" + """@brief Component constructor.""" super(Class9ROMTable, self).__init__(ap, cmpid, addr, parent_table) self._pridr_version = None @@ -506,21 +506,21 @@ def __init__(self, ap, cmpid=None, addr=None, parent_table=None): @property def has_com_port(self): - """! @brief Whether the ROM table includes COM Port functionality.""" + """@brief Whether the ROM table includes COM Port functionality.""" return self._has_com_port @property def has_prr(self): - """! @brief Whether the ROM table includes power and reset requesting functionality.""" + """@brief Whether the ROM table includes power and reset requesting functionality.""" return self._has_prr @property def is_sysmem(self): - """! @brief Whether the ROM table is present in system memory.""" + """@brief Whether the ROM table is present in system memory.""" return self._is_sysmem def _read_table(self): - """! @brief Reads and parses the ROM table.""" + """@brief Reads and parses the ROM table.""" # Compute multipliers for 32- or 64-bit. entrySizeMultiplier = self._width // 32 actualMaxEntries = self.ROM_TABLE_MAX_ENTRIES // entrySizeMultiplier @@ -563,7 +563,7 @@ def _read_table(self): entryNumber += 1 def _power_component(self, number, powerid, entry): - """! @brief Enable power to a component defined by a ROM table entry.""" + """@brief Enable power to a component defined by a ROM table entry.""" if not self._has_prr: # Attempt GPR method of power domain enabling. return super(Class9ROMTable, self)._power_component(number, powerid, entry) @@ -583,7 +583,7 @@ def _power_component(self, number, powerid, entry): return True def _handle_table_entry(self, entry, number): - """! @brief Parse one ROM table entry.""" + """@brief Parse one ROM table entry.""" # Get the component's top 4k address. 
offset = entry & self.ROM_TABLE_ADDR_OFFSET_MASK[self._width] if (entry & self.ROM_TABLE_ADDR_OFFSET_NEG_MASK[self._width]) != 0: @@ -626,7 +626,7 @@ def _handle_table_entry(self, entry, number): self._components.append(cmp) def check_power_request_version(self): - """! @brief Verify the power request functionality version.""" + """@brief Verify the power request functionality version.""" # Cache the PRIDR0 VERSION field the first time. if self._pridr_version is None: pridr = self.ap.read32(self.address + self.ROM_TABLE_PRIDR0) @@ -635,7 +635,7 @@ def check_power_request_version(self): return self._pridr_version == self.ROM_TABLE_PRIDR0_VERSION def power_debug_domain(self, domain_id, enable=True): - """! @brief Control power for a specified power domain managed by this ROM table.""" + """@brief Control power for a specified power domain managed by this ROM table.""" # Compute register addresses for this power domain. dbgpcr_addr = self.address + self.ROM_TABLE_DBGPCRn + (4 * domain_id) dbgpsr_addr = self.address + self.ROM_TABLE_DBGPSRn + (4 * domain_id) diff --git a/pyocd/coresight/sdc600.py b/pyocd/coresight/sdc600.py index 4522d5743..7127fa4af 100644 --- a/pyocd/coresight/sdc600.py +++ b/pyocd/coresight/sdc600.py @@ -25,43 +25,43 @@ LOG = logging.getLogger(__name__) class ComPortError(exceptions.Error): - """! @brief Base class for SDC-600 exceptions.""" + """@brief Base class for SDC-600 exceptions.""" pass class UnexpectedFlagError(ComPortError): - """! @brief Received an unexpected or out of order flag byte.""" + """@brief Received an unexpected or out of order flag byte.""" pass class LinkError(ComPortError): - """! @brief Received a link error flag (LERR).""" + """@brief Received a link error flag (LERR).""" pass class LinkClosedException(ComPortError): - """! 
@brief Received an unexpected or out of order flag byte.""" + """@brief Received an unexpected or out of order flag byte.""" def __init__(self, phase): self._phase = phase @property def phase(self): - """! @brief The link phase that was closed from the other side.""" + """@brief The link phase that was closed from the other side.""" return self._phase class SDC600(CoreSightComponent): - """! @brief SDC-600 component. + """@brief SDC-600 component. """ ## Default timeout for an operation or packet transfer. TRANSFER_TIMEOUT = 30.0 class LinkPhase(Enum): - """! @brief COM Port link phases.""" + """@brief COM Port link phases.""" ## Hardware-defined link phase. PHASE1 = 1 ## Software-defined link phase. PHASE2 = 2 class Register: - """! @brief Namespace for SDC-600 register offset constants.""" + """@brief Namespace for SDC-600 register offset constants.""" # Register offsets. VIDR = 0xD00 FIDTXR = 0xD08 @@ -107,7 +107,7 @@ class Register: SR_PEN_SHIFT = (31) class Flag: - """! @brief Namespace with SDC-600 flag byte constants.""" + """@brief Namespace with SDC-600 flag byte constants.""" IDR = 0xA0 IDA = 0xA1 LPH1RA = 0xA6 @@ -152,7 +152,7 @@ def __init__(self, ap, cmpid=None, addr=None): self._current_link_phase = None def init(self): - """! @brief Inits the component. + """@brief Inits the component. Reads the RX and TX widths and whether the SDC-600 is enabled. All error flags are cleared. """ @@ -176,23 +176,23 @@ def init(self): @property def is_enabled(self): - """! @brief Whether the SDC-600 is enabled.""" + """@brief Whether the SDC-600 is enabled.""" return self._is_enabled @property def is_reboot_request_enabled(self): - """! @brief Whether the Reboot Request feature is enabled in the SDC-600.""" + """@brief Whether the Reboot Request feature is enabled in the SDC-600.""" return (self.ap.read32(self.Register.SR) & self.Register.SR_RRDIS_MASK) == 0 @property def current_link_phase(self): - """! @brief Currently established link phase. 
+ """@brief Currently established link phase. @return Either None or one of the SDC600.LinkPhase enums. """ return self._current_link_phase def _read1(self, to_): - """! @brief Read a single byte. + """@brief Read a single byte. If a NULL byte is received, it is ignored and another byte is read. No other flag bytes are processed. @@ -217,7 +217,7 @@ def _read1(self, to_): return value def _write1(self, value, to_): - """! @brief Write one or more bytes. + """@brief Write one or more bytes. @exception TimeoutError """ # Wait until room is available in the transmit FIFO. @@ -232,7 +232,7 @@ def _write1(self, value, to_): self.ap.write32(self.Register.DR, dbr_value) def _check_flags(self, value, to_): - """! @brief Handle link and error related flag bytes. + """@brief Handle link and error related flag bytes. @param self @param value Integer byte value to check. @param to_ Timeout object. @@ -259,7 +259,7 @@ def _check_flags(self, value, to_): raise UnexpectedFlagError("received reserved flag value ({:#04x})".format(value)) def _expect_flag(self, flag, to_): - """! @brief Read a byte and compare to expected value. + """@brief Read a byte and compare to expected value. @param self @param flag Integer flag byte value to match. @param to_ Timeout object. @@ -278,7 +278,7 @@ def _expect_flag(self, flag, to_): LOG.debug("got expected %s", self.Flag.NAME[value]) def _stuff(self, data): - """! @brief Perform COM Encapsulation byte stuffing. + """@brief Perform COM Encapsulation byte stuffing. @param self @param data List of integers of the original data. @return List of integers for the escaped version of _data_. @@ -297,7 +297,7 @@ def _stuff(self, data): return result def _destuff(self, data): - """! @brief Remove COM Encapsulation byte stuffing. + """@brief Remove COM Encapsulation byte stuffing. @param self @param data List of integers. The only acceptable flag byte is ESC. @return List of integers properly de-stuffed. 
@@ -321,7 +321,7 @@ def _destuff(self, data): return result def _read_packet_data_to_end(self, to_): - """! @brief Read an escaped packet from the first message byte to the end. + """@brief Read an escaped packet from the first message byte to the end. @exception UnexpectedFlagError @exception LinkClosedException @exception TimeoutError @@ -345,7 +345,7 @@ def _read_packet_data_to_end(self, to_): return self._destuff(result) def receive_packet(self, timeout=TRANSFER_TIMEOUT): - """! @brief Read a data packet. + """@brief Read a data packet. Reads a packet (PDU) from the target and removes byte stuffing. The timeout for reading the entire packet can be set via the _timeout_ parameter. @@ -370,7 +370,7 @@ def receive_packet(self, timeout=TRANSFER_TIMEOUT): return self._read_packet_data_to_end(to_) def send_packet(self, data, timeout=TRANSFER_TIMEOUT): - """! @brief Send a data packet. + """@brief Send a data packet. Sends the provided data to the target as a single packet (PDU), escaping bytes as necessary. No data is read while the packet is sent, so if the target closes the connection it will @@ -394,7 +394,7 @@ def send_packet(self, data, timeout=TRANSFER_TIMEOUT): self._write1(self.Flag.END, to_) def open_link(self, phase, timeout=TRANSFER_TIMEOUT): - """! @brief Send the LPH1RA or LPH2RA flag. + """@brief Send the LPH1RA or LPH2RA flag. @exception UnexpectedFlagError @exception LinkClosedException @exception TimeoutError @@ -423,7 +423,7 @@ def open_link(self, phase, timeout=TRANSFER_TIMEOUT): raise ValueError("unrecognized phase value") def close_link(self, phase, timeout=TRANSFER_TIMEOUT): - """! @brief Send the LPH1RL or LPH2RL flag. + """@brief Send the LPH1RL or LPH2RL flag. Link phase 1 can be closed from any state. Link phase 2 can only be closed when the connection is already in that phase. 
@@ -455,7 +455,7 @@ def _log_status(self): LOG.info("status=0x%08x phase=%s", status, self._current_link_phase) def read_protocol_id(self, timeout=TRANSFER_TIMEOUT): - """! @brief Read and return the 6-byte protocol ID. + """@brief Read and return the 6-byte protocol ID. @exception UnexpectedFlagError @exception LinkClosedException @exception TimeoutError @@ -466,7 +466,7 @@ def read_protocol_id(self, timeout=TRANSFER_TIMEOUT): return self._read_packet_data_to_end(to_) def send_reboot_request(self, timeout=TRANSFER_TIMEOUT): - """! @brief Send remote reboot request.""" + """@brief Send remote reboot request.""" with Timeout(timeout) as to_: self._write1(self.Flag.LPH2RR, to_) diff --git a/pyocd/coresight/tpiu.py b/pyocd/coresight/tpiu.py index fba0db3e7..828d8e158 100644 --- a/pyocd/coresight/tpiu.py +++ b/pyocd/coresight/tpiu.py @@ -17,7 +17,7 @@ from .component import CoreSightComponent class TPIU(CoreSightComponent): - """! @brief Trace Port Interface Unit""" + """@brief Trace Port Interface Unit""" # Register definitions. # @@ -36,17 +36,17 @@ class TPIU(CoreSightComponent): DEVID_NRZ_MASK = (1 << 11) def __init__(self, ap, cmpid=None, addr=None): - """! @brief Standard CoreSight component constructor.""" + """@brief Standard CoreSight component constructor.""" super(TPIU, self).__init__(ap, cmpid, addr) self._has_swo_uart = False @property def has_swo_uart(self): - """! @brief Whether SWO UART mode is supported by the TPIU.""" + """@brief Whether SWO UART mode is supported by the TPIU.""" return self._has_swo_uart def init(self): - """! @brief Reads TPIU capabilities. + """@brief Reads TPIU capabilities. Currently this method simply checks whether the TPIU supports SWO in asynchronous UART mode. The result of this check is available via the has_swo_uart property. @@ -55,7 +55,7 @@ def init(self): self._has_swo_uart = (devid & TPIU.DEVID_NRZ_MASK) != 0 def set_swo_clock(self, swo_clock, system_clock): - """! 
@brief Prepare TPIU for transmitting SWO at a given baud rate. + """@brief Prepare TPIU for transmitting SWO at a given baud rate. Configures the TPIU for SWO UART mode, then sets the SWO clock frequency based on the provided system clock. diff --git a/pyocd/debug/breakpoints/manager.py b/pyocd/debug/breakpoints/manager.py index 1881d9320..530dc30b1 100644 --- a/pyocd/debug/breakpoints/manager.py +++ b/pyocd/debug/breakpoints/manager.py @@ -70,14 +70,14 @@ def add_provider(self, provider: "BreakpointProvider") -> None: self._fpb = provider def get_breakpoints(self) -> Iterable[int]: - """! @brief Return a list of all breakpoint addresses.""" + """@brief Return a list of all breakpoint addresses.""" return self._breakpoints.keys() def find_breakpoint(self, addr: int) -> Optional[Breakpoint]: return self._updated_breakpoints.get(addr, None) def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): - """! @brief Set a hardware or software breakpoint at a specific location in memory. + """@brief Set a hardware or software breakpoint at a specific location in memory. @retval True Breakpoint was set. @retval False Breakpoint could not be set. @@ -110,7 +110,7 @@ def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): return True def _check_added_breakpoint(self, bp: Breakpoint) -> bool: - """! @brief Check whether a new breakpoint is likely to actually be added when we flush. + """@brief Check whether a new breakpoint is likely to actually be added when we flush. First, software breakpoints are assumed to always be addable. For hardware breakpoints, the current free hardware breakpoint count is updated based on the current set of to-be @@ -140,7 +140,7 @@ def _check_added_breakpoint(self, bp: Breakpoint) -> bool: return free_hw_bp_count > self.MIN_HW_BREAKPOINTS def remove_breakpoint(self, addr: int) -> None: - """! 
@brief Remove a breakpoint at a specific location.""" + """@brief Remove a breakpoint at a specific location.""" try: LOG.debug("remove bkpt at 0x%x", addr) @@ -153,7 +153,7 @@ def remove_breakpoint(self, addr: int) -> None: LOG.debug("Tried to remove breakpoint 0x%08x that wasn't set" % addr) def _get_updated_breakpoints(self) -> Tuple[List[Breakpoint], List[Breakpoint]]: - """! @brief Compute added and removed breakpoints since last flush. + """@brief Compute added and removed breakpoints since last flush. @return Bi-tuple of (added breakpoint list, removed breakpoint list). """ added = [] @@ -295,7 +295,7 @@ def filter_memory_aligned_32(self, addr: int, size: int, data: MutableSequence[i return data def remove_all_breakpoints(self) -> None: - """! @brief Remove all breakpoints immediately.""" + """@brief Remove all breakpoints immediately.""" for bp in self._breakpoints.values(): bp.provider.remove_breakpoint(bp) self._breakpoints = {} diff --git a/pyocd/debug/cache.py b/pyocd/debug/cache.py index 003e881f5..2a3364a82 100644 --- a/pyocd/debug/cache.py +++ b/pyocd/debug/cache.py @@ -19,7 +19,7 @@ from ..cache.register import RegisterCache class CachingDebugContext(DebugContext): - """! @brief Debug context combining register and memory caches.""" + """@brief Debug context combining register and memory caches.""" def __init__(self, parent): super(CachingDebugContext, self).__init__(parent) diff --git a/pyocd/debug/context.py b/pyocd/debug/context.py index dcd19133a..1e6b1eb5e 100644 --- a/pyocd/debug/context.py +++ b/pyocd/debug/context.py @@ -20,7 +20,7 @@ from ..coresight.cortex_m_core_registers import CortexMCoreRegisterInfo class DebugContext(MemoryInterface): - """! @brief Viewport for inspecting the system being debugged. + """@brief Viewport for inspecting the system being debugged. A debug context is used to access target registers and memory. It enables these accesses to be redirected to different locations. 
For instance, if you want to read registers from a call frame @@ -37,7 +37,7 @@ class DebugContext(MemoryInterface): """ def __init__(self, parent): - """! @brief Debug context constructor. + """@brief Debug context constructor. @param self @param parent The parent of this context. Can be either a core (CoreSightCoreComponent) or @@ -81,7 +81,7 @@ def read_memory_block32(self, addr, size): return self._parent.read_memory_block32(addr, size) def read_core_register(self, reg): - """! @brief Read one core register. + """@brief Read one core register. @param self The debug context. @param reg Either the register's name in lowercase or an integer register index. @@ -97,7 +97,7 @@ def read_core_register(self, reg): return reg_info.from_raw(regValue) def read_core_register_raw(self, reg): - """! @brief Read a core register without type conversion. + """@brief Read a core register without type conversion. @param self The debug context. @param reg Either the register's name in lowercase or an integer register index. @@ -112,7 +112,7 @@ def read_core_register_raw(self, reg): return vals[0] def read_core_registers_raw(self, reg_list): - """! @brief Read one or more core registers. + """@brief Read one or more core registers. @param self The debug context. @param reg_list List of registers to read. Each element in the list can be either the @@ -127,7 +127,7 @@ def read_core_registers_raw(self, reg_list): return self._parent.read_core_registers_raw(reg_list) def write_core_register(self, reg, data): - """! @brief Write a CPU register. + """@brief Write a CPU register. @param self The debug context. @param reg The name of the register to write. @@ -141,7 +141,7 @@ def write_core_register(self, reg, data): self.write_core_register_raw(reg_info.index, reg_info.to_raw(data)) def write_core_register_raw(self, reg, data): - """! @brief Write a CPU register without type conversion. + """@brief Write a CPU register without type conversion. @param self The debug context. 
@param reg The name of the register to write. @@ -154,7 +154,7 @@ def write_core_register_raw(self, reg, data): self.write_core_registers_raw([reg], [data]) def write_core_registers_raw(self, reg_list, data_list): - """! @brief Write one or more core registers. + """@brief Write one or more core registers. @param self The debug context. @param reg_list List of registers to read. Each element in the list can be either the diff --git a/pyocd/debug/elf/elf.py b/pyocd/debug/elf/elf.py index 88ed44705..306b69f8e 100644 --- a/pyocd/debug/elf/elf.py +++ b/pyocd/debug/elf/elf.py @@ -22,7 +22,7 @@ from .decoder import (ElfSymbolDecoder, DwarfAddressDecoder) class ELFSection(MemoryRange): - """! @brief Memory range for a section of an ELF file. + """@brief Memory range for a section of an ELF file. Objects of this class represent sections of an ELF file. See the ELFBinaryFile class documentation for details of how sections are selected and how to get instances of this class. @@ -90,7 +90,7 @@ def __repr__(self): id(self), self.name, self.type, self.flags_description, hex(self.start), hex(self.length)) class ELFBinaryFile(object): - """! @brief An ELF binary executable file. + """@brief An ELF binary executable file. Examines the ELF and provides several lists of useful data: section objects, and both used and unused ranges of memory. @@ -125,12 +125,12 @@ def __init__(self, elf, memory_map=None): self._compute_regions() def __del__(self): - """! @brief Close the ELF file if it is owned by this instance.""" + """@brief Close the ELF file if it is owned by this instance.""" if hasattr(self, '_owns_file') and self._owns_file: self.close() def _extract_sections(self): - """! Get list of interesting sections.""" + """Get list of interesting sections.""" self._sections = [] sections = self._elf.iter_sections() for s in sections: @@ -183,7 +183,7 @@ def close(self): self._owns_file = False def read(self, addr, size): - """! @brief Read program data from the elf file. 
+ """@brief Read program data from the elf file. @param addr Physical address (load address) to read from. @param size Number of bytes to read. @@ -206,21 +206,21 @@ def read(self, addr, size): @property def sections(self): - """! @brief Access the list of sections in the ELF file. + """@brief Access the list of sections in the ELF file. @return A list of ELFSection objects sorted by start address. """ return self._sections @property def used_ranges(self): - """! @brief Access the list of used ranges of memory in the ELF file. + """@brief Access the list of used ranges of memory in the ELF file. @return A list of MemoryRange objects sorted by start address. """ return self._used @property def unused_ranges(self): - """! @brief Access the list of unused ranges of memory in the ELF file. + """@brief Access the list of unused ranges of memory in the ELF file. @return A list of MemoryRange objects sorted by start address. """ return self._unused diff --git a/pyocd/debug/elf/elf_reader.py b/pyocd/debug/elf/elf_reader.py index 1d8e29c6d..c834b89c1 100644 --- a/pyocd/debug/elf/elf_reader.py +++ b/pyocd/debug/elf/elf_reader.py @@ -24,7 +24,7 @@ LOG = logging.getLogger(__name__) class ElfReaderContext(DebugContext): - """! @brief Reads flash memory regions from an ELF file instead of the target.""" + """@brief Reads flash memory regions from an ELF file instead of the target.""" def __init__(self, parent, elf): super(ElfReaderContext, self).__init__(parent) diff --git a/pyocd/debug/elf/symbols.py b/pyocd/debug/elf/symbols.py index eb5dfeeb5..c74b5b761 100644 --- a/pyocd/debug/elf/symbols.py +++ b/pyocd/debug/elf/symbols.py @@ -17,7 +17,7 @@ from ..symbols import SymbolProvider class ELFSymbolProvider(SymbolProvider): - """! 
@brief Get symbol information from an ELF file.""" + """@brief Get symbol information from an ELF file.""" def __init__(self, elf): self._symbols = elf.symbol_decoder diff --git a/pyocd/debug/semihost.py b/pyocd/debug/semihost.py index a20d6985e..0b56f445a 100644 --- a/pyocd/debug/semihost.py +++ b/pyocd/debug/semihost.py @@ -73,7 +73,7 @@ MAX_STRING_LENGTH = 2048 class SemihostIOHandler(object): - """! @brief Interface for semihosting file I/O handlers. + """@brief Interface for semihosting file I/O handlers. This class is also used as the default I/O handler if none is provided to SemihostAgent. In this case, all file I/O requests are rejected. @@ -91,7 +91,7 @@ def errno(self): return self._errno def _std_open(self, fnptr, fnlen, mode): - """! @brief Helper for standard I/O open requests. + """@brief Helper for standard I/O open requests. In the ARM semihosting spec, standard I/O files are opened using a filename of ":tt" with the open mode specifying which standard I/O file to open. This method takes care @@ -153,7 +153,7 @@ def rename(self, oldptr, oldlength, newptr, newlength): raise NotImplementedError() class InternalSemihostIOHandler(SemihostIOHandler): - """! @brief Implements semihosting requests directly in the Python process. + """@brief Implements semihosting requests directly in the Python process. This class maintains its own list of pseudo-file descriptors for files opened by the debug target. By default, this class uses the system stdin, stdout, and stderr file objects @@ -286,7 +286,7 @@ def flen(self, fd): return -1 class ConsoleIOHandler(SemihostIOHandler): - """! @brief Simple IO handler for console.""" + """@brief Simple IO handler for console.""" def __init__(self, stdin_file, stdout_file=None): super(ConsoleIOHandler, self).__init__() @@ -320,7 +320,7 @@ def readc(self): return -1 class SemihostAgent(object): - """! @brief Handler for ARM semihosting requests. + """@brief Handler for ARM semihosting requests. 
Semihosting requests are made by the target by executing a 'bkpt #0xab' instruction. The requested operation is specified by R0 and any arguments by R1. Many requests use a block @@ -396,7 +396,7 @@ def __init__(self, context, io_handler=None, console=None): } def check_and_handle_semihost_request(self): - """! @brief Handle a semihosting request. + """@brief Handle a semihosting request. This method should be called after the target has halted, to check if the halt was due to a semihosting request. It first checks to see if the target halted because @@ -457,7 +457,7 @@ def check_and_handle_semihost_request(self): return True def cleanup(self): - """! @brief Clean up any resources allocated by semihost requests. + """@brief Clean up any resources allocated by semihost requests. @note May be called more than once. """ diff --git a/pyocd/debug/svd/loader.py b/pyocd/debug/svd/loader.py index 5060ab285..0214fce24 100644 --- a/pyocd/debug/svd/loader.py +++ b/pyocd/debug/svd/loader.py @@ -47,7 +47,7 @@ def load(self): self.device = SVDParser.for_xml_file(self.filename).get_device() class SVDLoader(threading.Thread): - """! @brief Thread to read an SVD file in the background.""" + """@brief Thread to read an SVD file in the background.""" def __init__(self, svdFile, completionCallback): super(SVDLoader, self).__init__(name='load-svd') diff --git a/pyocd/debug/svd/model.py b/pyocd/debug/svd/model.py index e87b683fd..ad6674617 100644 --- a/pyocd/debug/svd/model.py +++ b/pyocd/debug/svd/model.py @@ -24,7 +24,7 @@ def _check_type(value, expected_type): - """! @brief Perform type checking on the provided value + """@brief Perform type checking on the provided value This is a helper that will raise ``TypeError`` if the provided value is not an instance of the provided type. This method should be used sparingly @@ -66,7 +66,7 @@ def default(self, obj): class SVDElement(object): - """! 
@brief Base class for all SVD Elements""" + """@brief Base class for all SVD Elements""" def __init__(self): self.parent = None @@ -155,7 +155,7 @@ def get_derived_from(self): @property def is_enumerated_type(self): - """! @brief Return True if the field is an enumerated type""" + """@brief Return True if the field is an enumerated type""" return self.enumerated_values is not None @property @@ -164,7 +164,7 @@ def is_reserved(self): class SVDRegisterArray(SVDElement): - """! @brief Represent a register array in the tree""" + """@brief Represent a register array in the tree""" def __init__(self, name, derived_from, description, address_offset, size, access, protection, reset_value, reset_mask, fields, @@ -283,7 +283,7 @@ def is_reserved(self): class SVDRegisterCluster(SVDElement): - """! @brief Represent a register cluster in the tree""" + """@brief Represent a register cluster in the tree""" def __init__(self, name, derived_from, description, address_offset, size, alternate_cluster, header_struct_name, @@ -359,7 +359,7 @@ def is_reserved(self): class SVDRegisterClusterArray(SVDElement): - """! @brief Represent a register cluster in the tree""" + """@brief Represent a register cluster in the tree""" def __init__(self, name, derived_from, description, address_offset, size, alternate_cluster, header_struct_name, diff --git a/pyocd/debug/svd/parser.py b/pyocd/debug/svd/parser.py index 853dce251..e37886e83 100644 --- a/pyocd/debug/svd/parser.py +++ b/pyocd/debug/svd/parser.py @@ -30,7 +30,7 @@ def _get_text(node, tag, default=None): - """! @brief Get the text for the provided tag from the provided node""" + """@brief Get the text for the provided tag from the provided node""" try: return node.find(tag).text except AttributeError: @@ -67,7 +67,7 @@ def _get_int(node, tag, default=None): class SVDParser(object): - """! 
@brief The SVDParser is responsible for mapping the SVD XML to Python Objects""" + """@brief The SVDParser is responsible for mapping the SVD XML to Python Objects""" @classmethod def for_xml_file(cls, path, remove_reserved=False): @@ -407,7 +407,7 @@ def _parse_device(self, device_node): ) def get_device(self): - """! @brief Get the device described by this SVD""" + """@brief Get the device described by this SVD""" return self._parse_device(self._root) diff --git a/pyocd/debug/symbols.py b/pyocd/debug/symbols.py index cf97e04ec..a7be321c6 100644 --- a/pyocd/debug/symbols.py +++ b/pyocd/debug/symbols.py @@ -15,7 +15,7 @@ # limitations under the License. class SymbolProvider(object): - """! @brief Abstract class for getting information about symbols in the target program.""" + """@brief Abstract class for getting information about symbols in the target program.""" def get_symbol_value(self, name): raise NotImplementedError() diff --git a/pyocd/flash/builder.py b/pyocd/flash/builder.py index 82dd95017..0f2b9d864 100644 --- a/pyocd/flash/builder.py +++ b/pyocd/flash/builder.py @@ -34,14 +34,14 @@ LOG = logging.getLogger(__name__) def get_page_count(count: int) -> str: - """! @brief Return string for page count with correct plurality.""" + """@brief Return string for page count with correct plurality.""" if count == 1: return "1 page" else: return "{} pages".format(count) def get_sector_count(count: int) -> str: - """! @brief Return string for sector count with correct plurality.""" + """@brief Return string for sector count with correct plurality.""" if count == 1: return "1 sector" else: @@ -93,7 +93,7 @@ def _stub_progress(percent): pass class _FlashSector: - """! 
@brief Info about an erase sector and all pages to be programmed within it.""" + """@brief Info about an erase sector and all pages to be programmed within it.""" def __init__(self, sector_info): self.addr: int = sector_info.base_addr self.size: int = sector_info.size @@ -113,11 +113,11 @@ def add_page(self, page): self.page_list.sort(key=lambda p:p.addr) def are_any_pages_not_same(self): - """! @brief Returns True if any pages in this sector might need to be programmed.""" + """@brief Returns True if any pages in this sector might need to be programmed.""" return any(page.same is not True for page in self.page_list) def mark_all_pages_not_same(self): - """! @brief Sets the same flag to False for all pages in this sector.""" + """@brief Sets the same flag to False for all pages in this sector.""" for page in self.page_list: page.same = False @@ -126,7 +126,7 @@ def __repr__(self): id(self), self.addr, self.size, self.erase_weight, self.page_list) class _FlashPage: - """! @brief A page to be programmed and its data.""" + """@brief A page to be programmed and its data.""" def __init__(self, page_info): self.addr: int = page_info.base_addr self.size: int = page_info.size @@ -138,12 +138,12 @@ def __init__(self, page_info): self.cached_estimate_data: Optional[List[int]] = None def get_program_weight(self): - """! @brief Get time to program a page including the data transfer.""" + """@brief Get time to program a page including the data transfer.""" return self.program_weight + \ float(len(self.data)) / float(DATA_TRANSFER_B_PER_S) def get_verify_weight(self): - """! @brief Get time to verify a page.""" + """@brief Get time to verify a page.""" return float(self.size) / float(DATA_TRANSFER_B_PER_S) def __repr__(self): @@ -151,13 +151,13 @@ def __repr__(self): id(self), self.addr, self.size, len(self.data), self.program_weight, self.erased, self.same) class _FlashOperation: - """! 
@brief Holds requested data to be programmed at a given address.""" + """@brief Holds requested data to be programmed at a given address.""" def __init__(self, addr, data): self.addr = addr self.data = data class FlashBuilder(MemoryBuilder): - """! @brief Manages programming flash within one flash memory region. + """@brief Manages programming flash within one flash memory region. The purpose of this class is to optimize flash programming within a single region to achieve the highest flash programming performance possible. Various methods are used to estimate the @@ -209,7 +209,7 @@ def enable_double_buffer(self, enable): self.enable_double_buffering = enable def add_data(self, addr, data): - """! @brief Add a block of data to be programmed. + """@brief Add a block of data to be programmed. @note Programming does not start until the method program() is called. @@ -248,7 +248,7 @@ def add_data(self, addr, data): prev_flash_operation = operation def _enable_read_access(self): - """! @brief Ensure flash is accessible by initing the algo for verify. + """@brief Ensure flash is accessible by initing the algo for verify. Not all flash memories are always accessible. For instance, external QSPI. Initing the flash algo for the VERIFY operation is the canonical way to ensure that the flash is @@ -263,7 +263,7 @@ def _enable_read_access(self): self.algo_inited_for_read = True def _build_sectors_and_pages(self, keep_unwritten): - """! @brief Converts the list of flash operations to flash sectors and pages. + """@brief Converts the list of flash operations to flash sectors and pages. @param self @param keep_unwritten If true, unwritten pages in an erased sector and unwritten @@ -360,7 +360,7 @@ def fill_end_of_page_gap(): self._fill_unwritten_sector_pages() def _fill_unwritten_sector_pages(self): - """! 
@brief Fill in missing pages from sectors we are going to modify.""" + """@brief Fill in missing pages from sectors we are going to modify.""" for sector in self.sector_list: sector_page_number = 0 sector_page_addr = sector.addr @@ -395,7 +395,7 @@ def add_page_with_existing_data(): sector_page_addr += page.size def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_verify=False, keep_unwritten=True): - """! @brief Determine fastest method of flashing and then run flash programming. + """@brief Determine fastest method of flashing and then run flash programming. Data must have already been added with add_data(). @@ -573,7 +573,7 @@ def _mark_all_pages_for_programming(self): page.same = False def _compute_chip_erase_pages_and_weight(self): - """! @brief Compute the number of erased pages. + """@brief Compute the number of erased pages. Determine how many pages in the new data are already erased. """ @@ -594,7 +594,7 @@ def _compute_sector_erase_pages_weight_min(self): return sum(page.get_verify_weight() for page in self.page_list) def _analyze_pages_with_partial_read(self): - """! @brief Estimate how many pages are the same by reading data. + """@brief Estimate how many pages are the same by reading data. Pages are analyzed by reading the first 32 bytes and comparing with data to be programmed. @@ -615,7 +615,7 @@ def _analyze_pages_with_partial_read(self): page.cached_estimate_data = data def _analyze_pages_with_crc32(self, assume_estimate_correct=False): - """! @brief Estimate how many pages are the same using a CRC32 analyzer. + """@brief Estimate how many pages are the same using a CRC32 analyzer. A CRC32 analyzer program is loaded into target RAM and is passed an array of pages and sizes. When executed, it computes the CRC32 for every page. @@ -652,7 +652,7 @@ def _analyze_pages_with_crc32(self, assume_estimate_correct=False): page.same = False def _compute_sector_erase_pages_and_weight(self, fast_verify): - """! 
@brief Quickly analyze flash contents and compute weights for sector erase. + """@brief Quickly analyze flash contents and compute weights for sector erase. Quickly estimate how many pages are the same. These estimates are used by _sector_erase_program so it is recommended to call this before beginning programming @@ -702,7 +702,7 @@ def _compute_sector_erase_pages_and_weight(self, fast_verify): return sector_erase_count, sector_erase_weight def _chip_erase_program(self, progress_cb=_stub_progress): - """! @brief Program by first performing an erase all.""" + """@brief Program by first performing an erase all.""" LOG.debug("%i of %i pages have erased data", len(self.page_list) - self.chip_erase_count, len(self.page_list)) progress_cb(0.0) progress = 0 @@ -736,7 +736,7 @@ def _next_unerased_page(self, i): return page, i + 1 def _chip_erase_program_double_buffer(self, progress_cb=_stub_progress): - """! @brief Double-buffered program by first performing an erase all.""" + """@brief Double-buffered program by first performing an erase all.""" LOG.debug("%i of %i pages have erased data", len(self.page_list) - self.chip_erase_count, len(self.page_list)) progress_cb(0.0) progress = 0 @@ -790,7 +790,7 @@ def _chip_erase_program_double_buffer(self, progress_cb=_stub_progress): return FlashBuilder.FLASH_CHIP_ERASE def _sector_erase_program(self, progress_cb=_stub_progress): - """! @brief Program by performing sector erases.""" + """@brief Program by performing sector erases.""" actual_sector_erase_count = 0 actual_sector_erase_weight = 0 progress = 0 @@ -839,7 +839,7 @@ def _sector_erase_program(self, progress_cb=_stub_progress): return FlashBuilder.FLASH_SECTOR_ERASE def _scan_pages_for_same(self, progress_cb=_stub_progress): - """! @brief Read the full page data to determine if it is unchanged. + """@brief Read the full page data to determine if it is unchanged. When this function exits, the same flag will be set to either True or False for every page. 
In addition, sectors that need at least one page programmed will have @@ -890,7 +890,7 @@ def _next_nonsame_page(self, i): return page, i + 1 def _sector_erase_program_double_buffer(self, progress_cb=_stub_progress): - """! @brief Double-buffered program by performing sector erases.""" + """@brief Double-buffered program by performing sector erases.""" actual_sector_erase_count = 0 actual_sector_erase_weight = 0 progress = 0 diff --git a/pyocd/flash/eraser.py b/pyocd/flash/eraser.py index 19a3ffbbc..2dfe6a724 100755 --- a/pyocd/flash/eraser.py +++ b/pyocd/flash/eraser.py @@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__) class FlashEraser(object): - """! @brief Class that manages high level flash erasing. + """@brief Class that manages high level flash erasing. Can erase a target in one of three modes: - chip erase: Erase all flash on the target. @@ -39,7 +39,7 @@ class Mode(Enum): SECTOR = 3 def __init__(self, session, mode): - """! @brief Constructor. + """@brief Constructor. @param self @param session The session instance. @@ -49,7 +49,7 @@ def __init__(self, session, mode): self._mode = mode def erase(self, addresses=None): - """! @brief Perform the type of erase operation selected when the object was created. + """@brief Perform the type of erase operation selected when the object was created. For sector erase mode, an iterable of sector addresses specifications must be provided via the _addresses_ parameter. The address iterable elements can be either strings, tuples, diff --git a/pyocd/flash/file_programmer.py b/pyocd/flash/file_programmer.py index 238a3fa3f..0d45e7260 100755 --- a/pyocd/flash/file_programmer.py +++ b/pyocd/flash/file_programmer.py @@ -35,8 +35,7 @@ LOG = logging.getLogger(__name__) def ranges(i: List[int]) -> List[Tuple[int, int]]: - """! - Accepts a sorted list of byte addresses. Breaks the addresses into contiguous ranges. + """Accepts a sorted list of byte addresses. Breaks the addresses into contiguous ranges. 
Yields 2-tuples of the start and end address for each contiguous range. For instance, the input [0, 1, 2, 3, 32, 33, 34, 35] will yield the following 2-tuples: @@ -47,7 +46,7 @@ def ranges(i: List[int]) -> List[Tuple[int, int]]: yield b[0][1], b[-1][1] class FileProgrammer(object): - """! @brief Class to manage programming a file in any supported format with many options. + """@brief Class to manage programming a file in any supported format with many options. Most specifically, this class implements the behaviour provided by the command-line flash programming tool. The code in this class simply extracts data from the given file, potentially @@ -67,7 +66,7 @@ def __init__(self, trust_crc: Optional[bool] = None, keep_unwritten: Optional[bool] = None ): - """! @brief Constructor. + """@brief Constructor. @param self @param session The session object. @@ -103,7 +102,7 @@ def __init__(self, } def program(self, file_or_path: Union[str, IO[bytes]], file_format: Optional[str] = None, **kwargs: Any): - """! @brief Program a file into flash. + """@brief Program a file into flash. @param self @param file_or_path Either a string that is a path to a file, or a file-like object. @@ -174,7 +173,7 @@ def program(self, file_or_path: Union[str, IO[bytes]], file_format: Optional[str file_obj.close() def _program_bin(self, file_obj: IO[bytes], **kwargs: Any) -> None: - """! @brief Binary file format loader""" + """@brief Binary file format loader""" assert self._loader # If no base address is specified use the start of the boot memory. @@ -195,7 +194,7 @@ def _program_bin(self, file_obj: IO[bytes], **kwargs: Any) -> None: self._loader.add_data(address, data) def _program_hex(self, file_obj: IO[bytes], **kwargs: Any) -> None: - """! 
Intel hex file format loader""" + """Intel hex file format loader""" assert self._loader hexfile = IntelHex(file_obj) diff --git a/pyocd/flash/flash.py b/pyocd/flash/flash.py index 522acc8d0..564937392 100644 --- a/pyocd/flash/flash.py +++ b/pyocd/flash/flash.py @@ -193,8 +193,7 @@ def region(self, flashRegion): self._region = flashRegion def init(self, operation, address=None, clock=0, reset=True): - """! - @brief Prepare the flash algorithm for performing operations. + """@brief Prepare the flash algorithm for performing operations. First, the target is prepared to execute flash algo operations, including loading the algo to target RAM. This step is skipped if the target is already prepared, i.e., init() has been @@ -255,7 +254,7 @@ def init(self, operation, address=None, clock=0, reset=True): self._active_operation = operation def cleanup(self): - """! @brief Deinitialize the flash algo and restore the target. + """@brief Deinitialize the flash algo and restore the target. Before further operations are executed, the algo must be reinited. Unlike uninit(), this method marks the target and unprepared to execute flash algo functions. So on the next call @@ -266,7 +265,7 @@ def cleanup(self): self._did_prepare_target = False def uninit(self): - """! @brief Uninitialize the flash algo. + """@brief Uninitialize the flash algo. Before further operations are executed, the algo must be reinited. The target is left in a state where algo does not have to be reloaded when init() is called. @@ -294,11 +293,11 @@ def uninit(self): self._active_operation = None def prepare_target(self): - """! @brief Subclasses can override this method to perform special target configuration.""" + """@brief Subclasses can override this method to perform special target configuration.""" pass def restore_target(self): - """! 
@brief Subclasses can override this method to undo any target configuration changes.""" + """@brief Subclasses can override this method to undo any target configuration changes.""" pass def compute_crcs(self, sectors): @@ -333,8 +332,7 @@ def compute_crcs(self, sectors): return data def erase_all(self): - """! - @brief Erase all the flash. + """@brief Erase all the flash. @exception FlashEraseFailure """ @@ -354,8 +352,7 @@ def erase_all(self): raise FlashEraseFailure('flash erase all failure', result_code=result) def erase_sector(self, address): - """! - @brief Erase one sector. + """@brief Erase one sector. @exception FlashEraseFailure """ @@ -374,8 +371,7 @@ def erase_sector(self, address): raise FlashEraseFailure('flash erase sector failure', address=address, result_code=result) def program_page(self, address, bytes): - """! - @brief Flash one or more pages. + """@brief Flash one or more pages. @exception FlashProgramFailure """ @@ -400,8 +396,7 @@ def program_page(self, address, bytes): raise FlashProgramFailure('flash program page failure', address=address, result_code=result) def start_program_page_with_buffer(self, buffer_number, address): - """! - @brief Start flashing one or more pages. + """@brief Start flashing one or more pages. """ assert self.region is not None assert buffer_number < len(self.page_buffers), "Invalid buffer number" @@ -413,8 +408,7 @@ def start_program_page_with_buffer(self, buffer_number, address): self._call_function(self.flash_algo['pc_program_page'], address, self.region.page_size, self.page_buffers[buffer_number]) def load_page_buffer(self, buffer_number, address, bytes): - """! - @brief Load data to a numbered page buffer. + """@brief Load data to a numbered page buffer. This method is used in conjunction with start_program_page_with_buffer() to implement double buffered programming. 
@@ -428,8 +422,7 @@ def load_page_buffer(self, buffer_number, address, bytes): self.target.write_memory_block8(self.page_buffers[buffer_number], bytes) def program_phrase(self, address, bytes): - """! - @brief Flash a portion of a page. + """@brief Flash a portion of a page. @exception FlashFailure The address or data length is not aligned to the minimum programming length specified in the flash algorithm. @@ -464,8 +457,7 @@ def program_phrase(self, address, bytes): raise FlashProgramFailure('flash program phrase failure', address=address, result_code=result) def get_sector_info(self, addr): - """! - @brief Get info about the sector that contains this address. + """@brief Get info about the sector that contains this address. """ assert self.region is not None if not self.region.contains_address(addr): @@ -479,8 +471,7 @@ def get_sector_info(self, addr): return info def get_page_info(self, addr): - """! - @brief Get info about the page that contains this address. + """@brief Get info about the page that contains this address. """ assert self.region is not None if not self.region.contains_address(addr): @@ -494,8 +485,7 @@ def get_page_info(self, addr): return info def get_flash_info(self): - """! - @brief Get info about the flash. + """@brief Get info about the flash. Override this method to return different values. """ @@ -512,8 +502,7 @@ def get_flash_builder(self): return FlashBuilder(self) def flash_block(self, addr, data, smart_flash=True, chip_erase=None, progress_cb=None, fast_verify=False): - """! - @brief Flash a block of data. + """@brief Flash a block of data. @note Deprecated. Will be removed in v1.0. """ @@ -610,8 +599,7 @@ def _flash_algo_debug_check(self): self.target.set_vector_catch(self._saved_vector_catch) def wait_for_completion(self, timeout=None): - """! - @brief Wait until the breakpoint is hit. + """@brief Wait until the breakpoint is hit. 
""" with Timeout(timeout) as time_out: while time_out.check(): @@ -632,8 +620,7 @@ def _call_function_and_wait(self, pc, r0=None, r1=None, r2=None, r3=None, init=F return self.wait_for_completion(timeout=timeout) def set_flash_algo_debug(self, enable): - """! - @brief Turn on extra flash algorithm checking + """@brief Turn on extra flash algorithm checking When set this may slow down flash algo performance. """ diff --git a/pyocd/flash/loader.py b/pyocd/flash/loader.py index fca32c264..b61034408 100755 --- a/pyocd/flash/loader.py +++ b/pyocd/flash/loader.py @@ -154,7 +154,7 @@ def __init__(self, trust_crc: Optional[bool] = None, keep_unwritten: Optional[bool] = None ): - """! @brief Constructor. + """@brief Constructor. @param self @param session The session object. @@ -199,7 +199,7 @@ def __init__(self, self._reset_state() def _reset_state(self): - """! @brief Clear all state variables. """ + """@brief Clear all state variables. """ # _builders is a dict that maps memory regions to either a FlashBuilder or, for writable memories, # a bytearray. self._builders = {} @@ -208,7 +208,7 @@ def _reset_state(self): self._current_progress_fraction = 0.0 def add_data(self, address, data): - """! @brief Add a chunk of data to be programmed. + """@brief Add a chunk of data to be programmed. The data may cross memory region boundaries, as long as the regions are contiguous. @@ -264,7 +264,7 @@ def add_data(self, address, data): return self def commit(self): - """! @brief Write all collected data to memory. + """@brief Write all collected data to memory. This routine ensures that chip erase is only used once if either the auto mode or chip erase mode are used. As an example, if two regions are to be written to and True was @@ -303,7 +303,7 @@ def commit(self): self._reset_state() def _log_performance(self, perf_list): - """! 
@brief Log a report of programming performance numbers.""" + """@brief Log a report of programming performance numbers.""" # Compute overall performance numbers. totalProgramTime = sum(perf.program_time for perf in perf_list) program_byte_count = sum(perf.total_byte_count for perf in perf_list) @@ -339,7 +339,7 @@ def _progress_cb(self, amount): @classmethod def program_binary_data(cls, session, address, data): - """! @brief Helper routine to write a single chunk of data. + """@brief Helper routine to write a single chunk of data. The session options for chip_erase and trust_crc are used. diff --git a/pyocd/gdbserver/context_facade.py b/pyocd/gdbserver/context_facade.py index 027d11f2f..0f7270df5 100644 --- a/pyocd/gdbserver/context_facade.py +++ b/pyocd/gdbserver/context_facade.py @@ -56,7 +56,7 @@ } class GDBDebugContextFacade(object): - """! @brief Provides GDB specific transformations to a DebugContext.""" + """@brief Provides GDB specific transformations to a DebugContext.""" ## The order certain target features should appear in target XML. REQUIRED_FEATURE_ORDER = ("org.gnu.gdb.arm.m-profile", "org.gnu.gdb.arm.vfp") @@ -90,7 +90,7 @@ def set_context(self, new_context): self._context = new_context def get_register_context(self): - """! @brief Return hexadecimal dump of registers as expected by GDB. + """@brief Return hexadecimal dump of registers as expected by GDB. @exception CoreRegisterAccessError """ @@ -114,7 +114,7 @@ def get_register_context(self): return resp def set_register_context(self, data): - """! @brief Set registers from GDB hexadecimal string. + """@brief Set registers from GDB hexadecimal string. @exception CoreRegisterAccessError """ @@ -135,7 +135,7 @@ def set_register_context(self, data): self._context.write_core_registers_raw(reg_num_list, reg_data_list) def set_register(self, gdb_regnum, data): - """! @brief Set single register from GDB hexadecimal string. + """@brief Set single register from GDB hexadecimal string. 
@param self The object. @param gdb_regnum The regnum of register in target XML sent to GDB. @@ -152,7 +152,7 @@ def set_register(self, gdb_regnum, data): LOG.warning("GDB: attempt to set invalid register (regnum %d)", gdb_regnum) def gdb_get_register(self, gdb_regnum): - """! @brief Set single core register. + """@brief Set single core register. @param self The object. @param gdb_regnum The regnum of register in target XML sent to GDB. @@ -175,7 +175,7 @@ def gdb_get_register(self, gdb_regnum): return resp def get_t_response(self, force_signal=None): - """! @brief Returns a GDB T response string. + """@brief Returns a GDB T response string. This includes: - The signal encountered. @@ -210,7 +210,7 @@ def get_signal_value(self): return signal def _get_reg_index_value_pairs(self, reg_list): - """! @brief Return register values as pairs for the T response. + """@brief Return register values as pairs for the T response. Returns a string like NN:MMMMMMMM;NN:MMMMMMMM;... for the T response string. NN is the index of the @@ -232,7 +232,7 @@ def _get_reg_index_value_pairs(self, reg_list): return result def get_memory_map_xml(self): - """! @brief Generate GDB memory map XML. + """@brief Generate GDB memory map XML. """ root = ElementTree.Element('memory-map') for r in self._context.core.memory_map: @@ -249,7 +249,7 @@ def get_memory_map_xml(self): return MAP_XML_HEADER + ElementTree.tostring(root) def _define_xpsr_control_fields(self, xml_feature): - """! 
@brief Define XPSR and CONTROL register types with fields.""" + """@brief Define XPSR and CONTROL register types with fields.""" control = ElementTree.SubElement(xml_feature, 'flags', id="control", size="4") ElementTree.SubElement(control, "field", name="nPRIV", start="0", end="0", type="bool") ElementTree.SubElement(control, "field", name="SPSEL", start="1", end="1", type="bool") diff --git a/pyocd/gdbserver/gdbserver.py b/pyocd/gdbserver/gdbserver.py index 500176e20..fa877371b 100644 --- a/pyocd/gdbserver/gdbserver.py +++ b/pyocd/gdbserver/gdbserver.py @@ -57,7 +57,7 @@ TRACE_MEM.setLevel(logging.CRITICAL) def unescape(data: bytes) -> List[int]: - """! @brief De-escapes binary data from Gdb. + """@brief De-escapes binary data from Gdb. @param data Bytes-like object with possibly escaped values. @return List of integers in the range 0-255, with all escaped bytes de-escaped. @@ -80,7 +80,7 @@ def unescape(data: bytes) -> List[int]: _GDB_ESCAPED_CHARS = tuple(b'#$}*') def escape(data): - """! @brief Escape binary data to be sent to Gdb. + """@brief Escape binary data to be sent to Gdb. @param data Bytes-like object containing raw binary. @return Bytes object with the characters in '#$}*' escaped as required by Gdb. @@ -95,11 +95,11 @@ def escape(data): return bytes(result) class GDBError(exceptions.Error): - """! @brief Error communicating with GDB.""" + """@brief Error communicating with GDB.""" pass class GDBServer(threading.Thread): - """! @brief GDB remote server thread. + """@brief GDB remote server thread. This class start a GDB server listening a gdb connection on a specific port. It implements the RSP (Remote Serial Protocol). @@ -263,7 +263,7 @@ def __init__(self, session, core=None): self.setDaemon(True) def _init_remote_commands(self): - """! @brief Initialize the remote command processor infrastructure.""" + """@brief Initialize the remote command processor infrastructure.""" # Create command execution context. 
The output stream will default to stdout # but we'll change it to a fresh StringIO prior to running each command. # @@ -1045,7 +1045,7 @@ def get_symbol(self, name): return symValue def handle_remote_command(self, cmd): - """! @brief Pass remote commands to the commander command processor.""" + """@brief Pass remote commands to the commander command processor.""" # Convert the command line to a string. cmd = to_str_safe(cmd) LOG.debug('Remote command: %s', cmd) @@ -1239,7 +1239,7 @@ def event_handler(self, notification): self.thread_provider.read_from_target = False def _option_did_change(self, notification): - """! @brief Handle an option changing at runtime. + """@brief Handle an option changing at runtime. For option notifications, the event is the name of the option and the `data` attribute is an OptionChangeInfo object with `new_value` and `old_value` attributes. diff --git a/pyocd/gdbserver/packet_io.py b/pyocd/gdbserver/packet_io.py index edbfab10f..6d7d99e24 100644 --- a/pyocd/gdbserver/packet_io.py +++ b/pyocd/gdbserver/packet_io.py @@ -33,11 +33,11 @@ def checksum(data: bytes) -> bytes: return ("%02x" % (sum(data) % 256)).encode() class ConnectionClosedException(Exception): - """! @brief Exception used to signal the GDB server connection closed.""" + """@brief Exception used to signal the GDB server connection closed.""" pass class GDBServerPacketIOThread(threading.Thread): - """! @brief Packet I/O thread. + """@brief Packet I/O thread. This class is a thread used by the GDBServer class to perform all RSP packet I/O. It handles verifying checksums, acking, and receiving Ctrl-C interrupts. There is a queue @@ -166,7 +166,7 @@ def _check_expected_ack(self): LOG.debug("GDB: expected n/ack but got '%s'", c) def _process_data(self): - """! 
@brief Process all incoming data until there are no more complete packets.""" + """@brief Process all incoming data until there are no more complete packets.""" while len(self._buffer): if self._expecting_ack: self._expecting_ack = False diff --git a/pyocd/gdbserver/symbols.py b/pyocd/gdbserver/symbols.py index dd5d0c1c3..bfe7848b3 100644 --- a/pyocd/gdbserver/symbols.py +++ b/pyocd/gdbserver/symbols.py @@ -18,7 +18,7 @@ from ..utility.compatibility import to_bytes_safe class GDBSymbolProvider(SymbolProvider): - """! @brief Request symbol information from gdb.""" + """@brief Request symbol information from gdb.""" def __init__(self, gdbserver): self._gdbserver = gdbserver diff --git a/pyocd/gdbserver/syscall.py b/pyocd/gdbserver/syscall.py index 59c21653a..deb52e091 100644 --- a/pyocd/gdbserver/syscall.py +++ b/pyocd/gdbserver/syscall.py @@ -32,7 +32,7 @@ FD_OFFSET = 4 class GDBSyscallIOHandler(SemihostIOHandler): - """! @brief Semihosting file I/O handler that performs GDB syscalls.""" + """@brief Semihosting file I/O handler that performs GDB syscalls.""" def __init__(self, server): super(GDBSyscallIOHandler, self).__init__() diff --git a/pyocd/probe/aggregator.py b/pyocd/probe/aggregator.py index 5b4898c27..600c1ea64 100644 --- a/pyocd/probe/aggregator.py +++ b/pyocd/probe/aggregator.py @@ -23,11 +23,11 @@ PROBE_CLASSES = {} class DebugProbeAggregator(object): - """! @brief Simple class to enable collecting probes of all supported probe types.""" + """@brief Simple class to enable collecting probes of all supported probe types.""" @staticmethod def _get_probe_classes(unique_id): - """! 
@brief Return probe classes to query based on the unique ID string.""" + """@brief Return probe classes to query based on the unique ID string.""" probe_type = None if unique_id is not None: fields = unique_id.split(':', 1) diff --git a/pyocd/probe/cmsis_dap_probe.py b/pyocd/probe/cmsis_dap_probe.py index 6939c3ae2..ca4d463aa 100644 --- a/pyocd/probe/cmsis_dap_probe.py +++ b/pyocd/probe/cmsis_dap_probe.py @@ -37,7 +37,7 @@ TRACE.setLevel(logging.CRITICAL) class CMSISDAPProbe(DebugProbe): - """! @brief Wraps a pydapaccess link as a DebugProbe. + """@brief Wraps a pydapaccess link as a DebugProbe. Supports CMSIS-DAP v1 and v2. """ @@ -112,7 +112,7 @@ def __init__(self, device: DAPAccessIntf) -> None: @property def board_id(self) -> Optional[str]: - """! @brief Unique identifier for the board. + """@brief Unique identifier for the board. Only board IDs for DAPLink firmware are supported. We can't assume other CMSIS-DAP firmware is using the same serial number format, so we cannot reliably @@ -145,7 +145,7 @@ def product_name(self) -> str: @property def supported_wire_protocols(self) -> Collection[DebugProbe.Protocol]: - """! @brief Only valid after opening.""" + """@brief Only valid after opening.""" return self._supported_protocols @property @@ -534,7 +534,7 @@ def _convert_exception(exc: Exception) -> Exception: return exc class CMSISDAPProbePlugin(Plugin): - """! @brief Plugin class for CMSISDAPProbe.""" + """@brief Plugin class for CMSISDAPProbe.""" def load(self): return CMSISDAPProbe diff --git a/pyocd/probe/common.py b/pyocd/probe/common.py index d9912b81f..f5b896012 100644 --- a/pyocd/probe/common.py +++ b/pyocd/probe/common.py @@ -30,14 +30,14 @@ libusb_error_device_set = set() def show_no_libusb_warning(): - """! 
@brief Logs a warning about missing libusb library only the first time it is called.""" + """@brief Logs a warning about missing libusb library only the first time it is called.""" global did_show_no_libusb_warning if not did_show_no_libusb_warning: LOG.warning("STLink, CMSIS-DAPv2 and PicoProbe probes are not supported because no libusb library was found.") did_show_no_libusb_warning = True def should_show_libusb_device_error(vidpid): - """! @brief Returns whether a debug warning should be shown for the given VID/PID pair. + """@brief Returns whether a debug warning should be shown for the given VID/PID pair. The first time a given VID/PID is passed to this function, the result will be True. Any subsequent times, False will be returned for the same VID/PID pair. diff --git a/pyocd/probe/debug_probe.py b/pyocd/probe/debug_probe.py index c477fe45d..03653b45d 100644 --- a/pyocd/probe/debug_probe.py +++ b/pyocd/probe/debug_probe.py @@ -27,7 +27,7 @@ from ..coresight.ap import APAddressBase class DebugProbe: - """! @brief Abstract debug probe class. + """@brief Abstract debug probe class. Subclasses of this abstract class are drivers for different debug probe interfaces, either hardware such as a USB based probe, or software such as connecting with a simulator. @@ -63,7 +63,7 @@ class DebugProbe: """ class Protocol(Enum): - """! @brief Debug wire protocols.""" + """@brief Debug wire protocols.""" DEFAULT = 0 SWD = 1 JTAG = 2 @@ -76,7 +76,7 @@ class Protocol(Enum): } class Capability(Enum): - """! @brief Probe capabilities.""" + """@brief Probe capabilities.""" ## @brief Whether the probe supports the swj_sequence() API. # # If this property is True, then the swj_sequence() method is used to move between protocols. @@ -116,7 +116,7 @@ class Capability(Enum): @classmethod def get_all_connected_probes(cls, unique_id: str = None, is_explicit: bool = False) -> Sequence["DebugProbe"]: - """! @brief Returns a list of DebugProbe instances. 
+ """@brief Returns a list of DebugProbe instances. To filter the list of returned probes, the `unique_id` parameter may be set to a string with a full or partial unique ID (canonically the serial number). Alternatively, the probe class may simply return all @@ -135,7 +135,7 @@ def get_all_connected_probes(cls, unique_id: str = None, is_explicit: bool = Fal @classmethod def get_probe_with_id(cls, unique_id: str, is_explicit: bool = False) -> Optional["DebugProbe"]: - """! @brief Returns a DebugProbe instance for a probe with the given unique ID. + """@brief Returns a DebugProbe instance for a probe with the given unique ID. If no probe is connected with a fully matching unique ID, then None will be returned. @@ -147,13 +147,13 @@ def get_probe_with_id(cls, unique_id: str, is_explicit: bool = False) -> Optiona raise NotImplementedError() def __init__(self) -> None: - """! @brief Constructor.""" + """@brief Constructor.""" self._session: Optional["Session"] = None self._lock = threading.RLock() @property def session(self) -> Optional["Session"]: - """! @brief Session associated with this probe.""" + """@brief Session associated with this probe.""" return self._session @session.setter @@ -162,22 +162,22 @@ def session(self, the_session: "Session") -> None: @property def description(self) -> str: - """! @brief Combined description of the debug probe and/or associated board.""" + """@brief Combined description of the debug probe and/or associated board.""" return self.vendor_name + " " + self.product_name @property def vendor_name(self) -> str: - """! @brief Name of the debug probe's manufacturer.""" + """@brief Name of the debug probe's manufacturer.""" raise NotImplementedError() @property def product_name(self) -> str: - """! @brief Name of the debug probe.""" + """@brief Name of the debug probe.""" raise NotImplementedError() @property def supported_wire_protocols(self) -> Collection[Protocol]: - """! @brief List of DebugProbe.Protocol supported by the probe. 
+ """@brief List of DebugProbe.Protocol supported by the probe. Only one of the values returned from this property may be passed to connect(). """ @@ -185,7 +185,7 @@ def supported_wire_protocols(self) -> Collection[Protocol]: @property def unique_id(self) -> str: - """! @brief The unique ID of this device. + """@brief The unique ID of this device. This property will be valid before open() is called. This value can be passed to get_probe_with_id(). @@ -194,7 +194,7 @@ def unique_id(self) -> str: @property def wire_protocol(self) -> Optional[Protocol]: - """! @brief Currently selected wire protocol. + """@brief Currently selected wire protocol. If the probe is not open and connected, i.e., open() and connect() have not been called, then this property will be None. If a value other than None is returned, then the probe @@ -204,7 +204,7 @@ def wire_protocol(self) -> Optional[Protocol]: @property def is_open(self) -> bool: - """! @brief Whether the probe is currently open. + """@brief Whether the probe is currently open. To open the probe, call the open() method. """ @@ -212,14 +212,14 @@ def is_open(self) -> bool: @property def capabilities(self) -> Set[Capability]: - """! @brief A set of DebugProbe.Capability enums indicating the probe's features. + """@brief A set of DebugProbe.Capability enums indicating the probe's features. This value should not be trusted until after the probe is opened. """ raise NotImplementedError() def create_associated_board(self) -> Optional["Board"]: - """! @brief Create a board instance representing the board of which the probe is a component. + """@brief Create a board instance representing the board of which the probe is a component. If the probe is part of a board, then this method will create a Board instance that represents the associated board. Usually, for an on-board debug probe, this would be the @@ -232,15 +232,15 @@ def create_associated_board(self) -> Optional["Board"]: return None def open(self) -> None: - """! 
@brief Open the USB interface to the probe for sending commands.""" + """@brief Open the USB interface to the probe for sending commands.""" raise NotImplementedError() def close(self) -> None: - """! @brief Close the probe's USB interface.""" + """@brief Close the probe's USB interface.""" raise NotImplementedError() def lock(self) -> None: - """! @brief Lock the probe from access by other threads. + """@brief Lock the probe from access by other threads. This lock is recursive, so locking multiple times from a single thread is acceptable as long as the thread unlocks the same number of times. @@ -250,7 +250,7 @@ def lock(self) -> None: self._lock.acquire() def unlock(self) -> None: - """! @brief Unlock the probe. + """@brief Unlock the probe. Only when the thread unlocks the probe the same number of times it has called lock() will the lock actually be released and other threads allowed access. @@ -261,15 +261,15 @@ def unlock(self) -> None: ##@{ def connect(self, protocol: Optional[Protocol] = None) -> None: - """! @brief Initialize DAP IO pins for JTAG or SWD""" + """@brief Initialize DAP IO pins for JTAG or SWD""" raise NotImplementedError() def disconnect(self) -> None: - """! @brief Deinitialize the DAP I/O pins""" + """@brief Deinitialize the DAP I/O pins""" raise NotImplementedError() def swj_sequence(self, length: int, bits: int) -> None: - """! @brief Transfer some number of bits on SWDIO/TMS. + """@brief Transfer some number of bits on SWDIO/TMS. @param self @param length Number of bits to transfer. Must be less than or equal to 256. @@ -278,7 +278,7 @@ def swj_sequence(self, length: int, bits: int) -> None: pass def swd_sequence(self, sequences: Sequence[Union[Tuple[int], Tuple[int, int]]]) -> Tuple[int, Sequence[bytes]]: - """! @brief Send a sequences of bits on the SWDIO signal. + """@brief Send a sequences of bits on the SWDIO signal. 
Each sequence in the _sequences_ parameter is a tuple with 1 or 2 members in this order: - 0: int: number of TCK cycles from 1-64 @@ -295,7 +295,7 @@ def swd_sequence(self, sequences: Sequence[Union[Tuple[int], Tuple[int, int]]]) raise NotImplementedError() def jtag_sequence(self, cycles: int, tms: int, read_tdo: bool, tdi: int) -> Optional[int]: - """! @brief Send JTAG sequence. + """@brief Send JTAG sequence. @param self @param cycles Number of TCK cycles, from 1-64. @@ -309,18 +309,18 @@ def jtag_sequence(self, cycles: int, tms: int, read_tdo: bool, tdi: int) -> Opti raise NotImplementedError() def set_clock(self, frequency: float) -> None: - """! @brief Set the frequency for JTAG and SWD in Hz. + """@brief Set the frequency for JTAG and SWD in Hz. This function is safe to call before connect is called. """ raise NotImplementedError() def reset(self) -> None: - """! @brief Perform a hardware reset of the target.""" + """@brief Perform a hardware reset of the target.""" raise NotImplementedError() def assert_reset(self, asserted: bool) -> None: - """! @brief Assert or de-assert target's nRESET signal. + """@brief Assert or de-assert target's nRESET signal. Because nRESET is negative logic and usually open drain, passing True will drive it low, and passing False will stop driving so nRESET will be pulled up. @@ -328,7 +328,7 @@ def assert_reset(self, asserted: bool) -> None: raise NotImplementedError() def is_reset_asserted(self) -> bool: - """! @brief Returns True if nRESET is asserted or False if de-asserted. + """@brief Returns True if nRESET is asserted or False if de-asserted. If the debug probe cannot actively read the reset signal, the value returned will be the last value passed to assert_reset(). @@ -336,7 +336,7 @@ def is_reset_asserted(self) -> bool: raise NotImplementedError() def flush(self) -> None: - """! @brief Write out all unsent commands. + """@brief Write out all unsent commands. This API may be a no-op for certain debug probe types. 
""" @@ -364,7 +364,7 @@ def read_dp(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: ... def read_dp(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Read a DP register. + """@brief Read a DP register. @param self @param addr Integer register address being one of (0x0, 0x4, 0x8, 0xC). @@ -375,7 +375,7 @@ def read_dp(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: raise NotImplementedError() def write_dp(self, addr: int, data: int) -> None: - """! @brief Write a DP register. + """@brief Write a DP register. @param self @param addr Integer register address being one of (0x0, 0x4, 0x8, 0xC). @@ -400,11 +400,11 @@ def read_ap(self, addr: int, now: bool) -> Union[int, Callable[[], int]]: ... def read_ap(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]: - """! @brief Read an AP register.""" + """@brief Read an AP register.""" raise NotImplementedError() def write_ap(self, addr: int, data) -> None: - """! @brief Write an AP register.""" + """@brief Write an AP register.""" raise NotImplementedError() @overload @@ -425,15 +425,15 @@ def read_ap_multiple(self, addr: int, count: int, now: bool) -> Union[Sequence[i def read_ap_multiple(self, addr: int, count: int = 1, now: bool = True) \ -> Union[Sequence[int], Callable[[], Sequence[int]]]: - """! @brief Read one AP register multiple times.""" + """@brief Read one AP register multiple times.""" raise NotImplementedError() def write_ap_multiple(self, addr: int, values) -> None: - """! @brief Write one AP register multiple times.""" + """@brief Write one AP register multiple times.""" raise NotImplementedError() def get_memory_interface_for_ap(self, ap_address: "APAddressBase") -> Optional["MemoryInterface"]: - """! @brief Returns a @ref pyocd.core.memory_interface.MemoryInterface "MemoryInterface" for + """@brief Returns a @ref pyocd.core.memory_interface.MemoryInterface "MemoryInterface" for the specified AP. 
Some debug probe types have accelerated memory read and write commands. This method is used @@ -452,7 +452,7 @@ def get_memory_interface_for_ap(self, ap_address: "APAddressBase") -> Optional[" ##@{ def swo_start(self, baudrate: float) -> None: - """! @brief Start receiving SWO data at the given baudrate. + """@brief Start receiving SWO data at the given baudrate. Once SWO reception has started, the swo_read() method must be called at regular intervals to receive SWO data. If this is not done, the probe's internal SWO data buffer may overflow @@ -461,11 +461,11 @@ def swo_start(self, baudrate: float) -> None: raise NotImplementedError() def swo_stop(self) -> None: - """! @brief Stop receiving SWO data.""" + """@brief Stop receiving SWO data.""" raise NotImplementedError() def swo_read(self) -> bytearray: - """! @brief Read buffered SWO data from the target. + """@brief Read buffered SWO data from the target. @eturn Bytearray of the received data. May be 0 bytes in length if no SWO data is buffered at the probe. diff --git a/pyocd/probe/jlink_probe.py b/pyocd/probe/jlink_probe.py index 267720878..c4664203b 100644 --- a/pyocd/probe/jlink_probe.py +++ b/pyocd/probe/jlink_probe.py @@ -86,7 +86,7 @@ def get_probe_with_id(cls, unique_id, is_explicit=False): @classmethod def _get_probe_info(cls, serial_number, jlink): - """! @brief Look up and return a JLinkConnectInfo for the probe with matching serial number. + """@brief Look up and return a JLinkConnectInfo for the probe with matching serial number. @param cls The class object. @param serial_number String serial number. Must be the full serial number. @return JLinkConnectInfo object or None if there was no match. @@ -100,7 +100,7 @@ def _get_probe_info(cls, serial_number, jlink): raise cls._convert_exception(exc) from exc def __init__(self, serial_number): - """! @brief Constructor. + """@brief Constructor. @param self The object. @param serial_number String. The J-Link's serial number. 
""" @@ -198,7 +198,7 @@ def close(self): # Target control functions # ------------------------------------------- # def connect(self, protocol=None): - """! @brief Connect to the target via JTAG or SWD.""" + """@brief Connect to the target via JTAG or SWD.""" # Handle default protocol. if (protocol is None) or (protocol == DebugProbe.Protocol.DEFAULT): protocol = self._default_protocol @@ -244,7 +244,7 @@ def swj_sequence(self, length, bits): self._link.swd_sync() def disconnect(self): - """! @brief Disconnect from the target.""" + """@brief Disconnect from the target.""" try: if self.session.options.get('jlink.power'): self._link.power_off() @@ -370,10 +370,10 @@ def _convert_exception(exc): return exc class JLinkProbePlugin(Plugin): - """! @brief Plugin class for JLinkProbe.""" + """@brief Plugin class for JLinkProbe.""" def should_load(self): - """! @brief Load the J-Link plugin if the J-Link library is available.""" + """@brief Load the J-Link plugin if the J-Link library is available.""" return JLinkProbe._get_jlink() is not None def load(self): @@ -389,7 +389,7 @@ def description(self): @property def options(self): - """! @brief Returns J-Link probe options.""" + """@brief Returns J-Link probe options.""" return [ OptionInfo('jlink.device', str, None, "If this option is set to a supported J-Link device name, then the J-Link will be asked connect " diff --git a/pyocd/probe/picoprobe.py b/pyocd/probe/picoprobe.py index 3f25fc90a..481e984f0 100644 --- a/pyocd/probe/picoprobe.py +++ b/pyocd/probe/picoprobe.py @@ -37,7 +37,7 @@ class PicoLink(object): - """! @brief Wrapper to handle picoprobe USB. + """@brief Wrapper to handle picoprobe USB. Just to hide details of USB and Picoprobe command layer """ @@ -105,7 +105,7 @@ def close(self): @classmethod def enumerate_picoprobes(cls, uid=None) -> List["PicoLink"]: - """! 
@brief Find and return all Picoprobes """ + """@brief Find and return all Picoprobes """ try: # Use a custom matcher to make sure the probe is a Picoprobe and accessible. return [PicoLink(probe) for probe in libusb_package.find(find_all=True, custom_match=FindPicoprobe(uid))] @@ -114,12 +114,12 @@ def enumerate_picoprobes(cls, uid=None) -> List["PicoLink"]: return [] def q_read_bits(self, bits): - """! @brief Queue a read request for 'bits' bits to the probe """ + """@brief Queue a read request for 'bits' bits to the probe """ # Cannot be called with bits = 0 self._queue_cmd_header(self.PROBE_READ_BITS, bits) def q_write_bits(self, data, bits=None): - """! @brief Queue a write reeust 'bits' bits. + """@brief Queue a write reeust 'bits' bits. @param data Values to be weritten. Either int or iterable yielding bytes (0-255). @param bits How many bits to write. Mandatory if data is int. """ @@ -130,7 +130,7 @@ def q_write_bits(self, data, bits=None): self._queue.extend(data if type(data) is not int else data.to_bytes(count, 'little')) def flush_queue(self): - """! @brief Execute all the queued probe actions""" + """@brief Execute all the queued probe actions""" # Put in the packet header (byte count) self._queue[:self.PKT_HDR_LEN] = array( 'B', self._qulen.to_bytes(4, 'little')) @@ -145,7 +145,7 @@ def flush_queue(self): self._clear_queue() def get_bits(self): - """! @briefExecute all the queued probe actions and return read values""" + """@briefExecute all the queued probe actions and return read values""" self.flush_queue() try: # A single read is enough, as the 8 kB buffer in the Picoprobe can @@ -209,13 +209,13 @@ def product_name(self): # Picoprobe intenal functions # ------------------------------------------- # def _next_id(self): - """! 
@brief Returns a progressive id for a Picoprobe command""" + """@brief Returns a progressive id for a Picoprobe command""" id = self._id self._id = (self._id + 1) % 0x100 return id def _queue_cmd_header(self, cmd, bits, length=0, id=None): - """! @brief Prepare a header structure in _queue byte array""" + """@brief Prepare a header structure in _queue byte array""" if id is None: id = self._next_id() length += self.CMD_HDR_LEN @@ -235,16 +235,16 @@ def start_queue(self): class FindPicoprobe(object): - """! @brief Custom matcher for Picoprobe to be used in core.find() """ + """@brief Custom matcher for Picoprobe to be used in core.find() """ VID_PID_CLASS = (0x2E8A, 0x0004, 0x00) # Match for a Picoprobe def __init__(self, serial=None): - """! @brief Create a new FindPicoprobe object with an optional serial number""" + """@brief Create a new FindPicoprobe object with an optional serial number""" self._serial = serial def __call__(self, dev): - """! @brief Return True if this is a Picoprobe device, False otherwise""" + """@brief Return True if this is a Picoprobe device, False otherwise""" # Check if vid, pid and the device class are valid ones for Picoprobe. if (dev.idVendor, dev.idProduct, dev.bDeviceClass) != self.VID_PID_CLASS: @@ -292,7 +292,7 @@ def __call__(self, dev): class Picoprobe(DebugProbe): - """! @brief Wraps a Picolink link as a DebugProbe. """ + """@brief Wraps a Picolink link as a DebugProbe. """ # Address of read buffer register in DP. RDBUFF = 0xC @@ -374,7 +374,7 @@ def unique_id(self): @ property def wire_protocol(self): - """! @brief Only valid after connecting.""" + """@brief Only valid after connecting.""" return DebugProbe.Protocol.SWD if self._is_connected else None @ property @@ -397,7 +397,7 @@ def close(self): # Target control functions # ------------------------------------------- # def connect(self, protocol=None): - """! 
@brief Connect to the target via SWD.""" + """@brief Connect to the target via SWD.""" # Make sure the protocol is supported if (protocol is None) or (protocol == DebugProbe.Protocol.DEFAULT): protocol = DebugProbe.Protocol.SWD @@ -425,7 +425,7 @@ def swj_sequence(self, length, bits): self._link.flush_queue() def swd_sequence(self, sequences): - """! @brief Send a sequences of bits on the SWDIO signal. + """@brief Send a sequences of bits on the SWDIO signal. Each sequence in the _sequences_ parameter is a tuple with 1 or 2 members in this order: - 0: int: number of TCK cycles from 1-64 @@ -636,7 +636,7 @@ def _write_reg(self, addr, APnDP, value): self._link.flush_queue() def _swd_command(self, RnW, APnDP, addr): - """! @brief Builds and queues an SWD command byte plus an ACK read""" + """@brief Builds and queues an SWD command byte plus an ACK read""" cmd = (APnDP << 1) + (RnW << 2) + ((addr << 1) & self.SWD_CMD_A32) cmd |= parity32_high(cmd) >> (32 - 5) cmd |= self.SWD_CMD_START | self.SWD_CMD_STOP | self.SWD_CMD_PARK @@ -681,7 +681,7 @@ def _change_options(self, notification): class PicoprobePlugin(Plugin): - """! @brief Plugin class for Picoprobe.""" + """@brief Plugin class for Picoprobe.""" def load(self): return Picoprobe @@ -696,7 +696,7 @@ def description(self): @ property def options(self): - """! @brief Returns picoprobe options.""" + """@brief Returns picoprobe options.""" return [ OptionInfo(Picoprobe.SAFESWD_OPTION, bool, False, "Use safe but slower SWD transfer functions with Picoprobe.")] diff --git a/pyocd/probe/pydapaccess/cmsis_dap_core.py b/pyocd/probe/pydapaccess/cmsis_dap_core.py index 04013fe1c..d5dbae6d2 100644 --- a/pyocd/probe/pydapaccess/cmsis_dap_core.py +++ b/pyocd/probe/pydapaccess/cmsis_dap_core.py @@ -77,7 +77,7 @@ class Pin: ] class CMSISDAPVersion: - """! @brief Known CMSIS-DAP versions. + """@brief Known CMSIS-DAP versions. The tuple fields are major, minor, patch. 
Generally, patch release versions are excluded from this list, unless there is a specific reason to know about a particular patch release. @@ -144,7 +144,7 @@ class DAPSWOStatus: DAP_ERROR = 0xff class DAPTransferResponse: - """! Responses to DAP_Transfer and DAP_TransferBlock""" + """Responses to DAP_Transfer and DAP_TransferBlock""" ACK_MASK = 0x07 # Bits [2:0] PROTOCOL_ERROR_MASK = 0x08 # Bit [3] VALUE_MISMATCH_MASK = 0x08 # Bit [4] @@ -156,13 +156,13 @@ class DAPTransferResponse: ACK_NO_ACK = 7 class CMSISDAPProtocol(object): - """! @brief This class implements the CMSIS-DAP wire protocol.""" + """@brief This class implements the CMSIS-DAP wire protocol.""" def __init__(self, interface): self.interface = interface def dap_info(self, id_): - """! @brief Sends the DAP_Info command to read info from the CMSIS-DAP probe. + """@brief Sends the DAP_Info command to read info from the CMSIS-DAP probe. @param self This object. @param id_ One of the @ref pyocd.probe.pydapaccess.dap_access_api.DAPAcessIntf.ID "DAPAcessIntf.ID" constants. @return The `id_` parameter determines the return value data type. For those IDs defined as integer values @@ -374,7 +374,7 @@ def swd_configure(self, turnaround=1, always_send_data_phase=False): return resp[1] def swd_sequence(self, sequences): - """! @brief Send the DAP_SWD_Sequence command. + """@brief Send the DAP_SWD_Sequence command. Each sequence in the _sequences_ parameter is a tuple with 1 or 2 members: - 0: int: number of TCK cycles from 1-64 diff --git a/pyocd/probe/pydapaccess/dap_access_api.py b/pyocd/probe/pydapaccess/dap_access_api.py index d51a8ff03..015db7dc4 100644 --- a/pyocd/probe/pydapaccess/dap_access_api.py +++ b/pyocd/probe/pydapaccess/dap_access_api.py @@ -21,13 +21,13 @@ class DAPAccessIntf(object): class PORT(Enum): - """! @brief Physical access ports""" + """@brief Physical access ports""" DEFAULT = 0 SWD = 1 JTAG = 2 class REG(Enum): - """! 
@brief Register for DAP access functions""" + """@brief Register for DAP access functions""" DP_0x0 = 0 DP_0x4 = 1 DP_0x8 = 2 @@ -38,7 +38,7 @@ class REG(Enum): AP_0xC = 7 class ID(Enum): - """! @brief Information ID used for call to identify""" + """@brief Information ID used for call to identify""" VENDOR = 1 PRODUCT = 2 SER_NUM = 3 @@ -57,51 +57,51 @@ class ID(Enum): MAX_PACKET_SIZE = 0xff class Error(Exception): - """! @brief Parent of all error DAPAccess can raise""" + """@brief Parent of all error DAPAccess can raise""" pass class DeviceError(Error): - """! @brief Error communicating with device""" + """@brief Error communicating with device""" pass class CommandError(DeviceError): - """! @brief The host debugger reported failure for the given command""" + """@brief The host debugger reported failure for the given command""" pass class TransferError(CommandError): - """! @brief Error occurred with a transfer over SWD or JTAG""" + """@brief Error occurred with a transfer over SWD or JTAG""" pass class TransferTimeoutError(TransferError): - """! @brief A SWD or JTAG timeout occurred""" + """@brief A SWD or JTAG timeout occurred""" pass class TransferFaultError(TransferError): - """! @brief A SWD Fault occurred""" + """@brief A SWD Fault occurred""" pass class TransferProtocolError(TransferError): - """! @brief A SWD protocol error occurred""" + """@brief A SWD protocol error occurred""" pass @staticmethod def get_connected_devices(): - """! @brief Return a list of DAPAccess devices""" + """@brief Return a list of DAPAccess devices""" raise NotImplementedError() @staticmethod def get_device(device_id): - """! @brief Return the DAPAccess device with the give ID""" + """@brief Return the DAPAccess device with the give ID""" raise NotImplementedError() @staticmethod def set_args(arg_list): - """! @brief Set arguments to configure behavior""" + """@brief Set arguments to configure behavior""" raise NotImplementedError() @property def protocol_version(self): - """! 
@brief CMSIS-DAP protocol version. + """@brief CMSIS-DAP protocol version. The version is represented as 3-tuple with elements, in order, of major version, minor version, and patch version. @@ -120,12 +120,12 @@ def product_name(self): @property def vidpid(self): - """! @brief A tuple of USB VID and PID, in that order.""" + """@brief A tuple of USB VID and PID, in that order.""" raise NotImplementedError() @property def has_swd_sequence(self): - """! @brief Boolean indicating whether the DAP_SWD_Sequence command is supported. + """@brief Boolean indicating whether the DAP_SWD_Sequence command is supported. This property is only valid after the probe is opened. Until then, the value will be None. """ @@ -135,33 +135,33 @@ def has_swd_sequence(self): # Host control functions # ------------------------------------------- # def open(self): - """! @brief Open device and lock it for exclusive access""" + """@brief Open device and lock it for exclusive access""" raise NotImplementedError() def close(self): - """! @brief Close device and unlock it""" + """@brief Close device and unlock it""" raise NotImplementedError() def get_unique_id(self): - """! @brief Get the unique ID of this device which can be used in get_device + """@brief Get the unique ID of this device which can be used in get_device This function is safe to call before open is called. """ raise NotImplementedError() def identify(self, item): - """! @brief Return the requested information for this device""" + """@brief Return the requested information for this device""" raise NotImplementedError() # ------------------------------------------- # # Target control functions # ------------------------------------------- # def connect(self, port=None): - """! @brief Initialize DAP IO pins for JTAG or SWD""" + """@brief Initialize DAP IO pins for JTAG or SWD""" raise NotImplementedError() def configure_swd(self, turnaround=1, always_send_data_phase=False): - """! @brief Modify SWD configuration. 
+ """@brief Modify SWD configuration. @param self @param turnaround Number of turnaround phase clocks, from 1-4. @@ -171,7 +171,7 @@ def configure_swd(self, turnaround=1, always_send_data_phase=False): raise NotImplementedError() def configure_jtag(self, devices_irlen=None): - """! @brief Modify JTAG configuration. + """@brief Modify JTAG configuration. @param self @param devices_irlen Sequence of IR lengths for each device, thus also specifying the @@ -180,7 +180,7 @@ def configure_jtag(self, devices_irlen=None): raise NotImplementedError() def swj_sequence(self, length, bits): - """! @brief Send sequence to activate JTAG or SWD on the target. + """@brief Send sequence to activate JTAG or SWD on the target. @param self @param length Number of bits to transfer on TCK/TMS. @@ -189,7 +189,7 @@ def swj_sequence(self, length, bits): raise NotImplementedError() def swd_sequence(self, sequences) -> Tuple[int, Sequence[bytes]]: - """! @brief Send a sequences of bits on the SWDIO signal. + """@brief Send a sequences of bits on the SWDIO signal. This method sends the DAP_SWD_Sequence CMSIS-DAP command. @@ -208,7 +208,7 @@ def swd_sequence(self, sequences) -> Tuple[int, Sequence[bytes]]: raise NotImplementedError() def jtag_sequence(self, cycles, tms, read_tdo, tdi): - """! @brief Send JTAG sequence. + """@brief Send JTAG sequence. @param self @param cycles Number of TCK cycles, from 1-64. @@ -222,63 +222,63 @@ def jtag_sequence(self, cycles, tms, read_tdo, tdi): raise NotImplementedError() def disconnect(self): - """! @brief Deinitialize the DAP I/O pins""" + """@brief Deinitialize the DAP I/O pins""" raise NotImplementedError() def set_clock(self, frequency): - """! @brief Set the frequency for JTAG and SWD in Hz + """@brief Set the frequency for JTAG and SWD in Hz This function is safe to call before connect is called. """ raise NotImplementedError() def get_swj_mode(self): - """! 
@brief Return the current port type - SWD or JTAG""" + """@brief Return the current port type - SWD or JTAG""" raise NotImplementedError() def reset(self): - """! @brief Reset the target""" + """@brief Reset the target""" raise NotImplementedError() def assert_reset(self, asserted): - """! @brief Assert or de-assert target reset line""" + """@brief Assert or de-assert target reset line""" raise NotImplementedError() def is_reset_asserted(self): - """! @brief Returns True if the target reset line is asserted or False if de-asserted""" + """@brief Returns True if the target reset line is asserted or False if de-asserted""" raise NotImplementedError() def set_deferred_transfer(self, enable): - """! @brief Allow reads and writes to be buffered for increased speed""" + """@brief Allow reads and writes to be buffered for increased speed""" raise NotImplementedError() def flush(self): - """! @brief Write out all unsent commands""" + """@brief Write out all unsent commands""" raise NotImplementedError() def vendor(self, index, data=None): - """! @brief Send a vendor specific command""" + """@brief Send a vendor specific command""" raise NotImplementedError() def has_swo(self): - """! @brief Returns bool indicating whether the link supports SWO.""" + """@brief Returns bool indicating whether the link supports SWO.""" raise NotImplementedError() def swo_configure(self, enabled, rate): - """! @brief Enable or disable SWO and set the baud rate.""" + """@brief Enable or disable SWO and set the baud rate.""" raise NotImplementedError() def swo_control(self, start): - """! @brief Pass True to start recording SWO data, False to stop.""" + """@brief Pass True to start recording SWO data, False to stop.""" raise NotImplementedError() def get_swo_status(self): - """! 
@brief Returns a 2-tuple with a status mask at index 0, and the number of buffered + """@brief Returns a 2-tuple with a status mask at index 0, and the number of buffered SWO data bytes at index 1.""" raise NotImplementedError() def swo_read(self, count=None): - """! @brief Read buffered SWO data from the target. + """@brief Read buffered SWO data from the target. The count parameter is optional. If provided, it is the number of bytes to read, which must be less than the packet size. @@ -292,17 +292,17 @@ def swo_read(self, count=None): # DAP Access functions # ------------------------------------------- # def write_reg(self, reg_id, value, dap_index=0): - """! @brief Write a single word to a DP or AP register""" + """@brief Write a single word to a DP or AP register""" raise NotImplementedError() def read_reg(self, reg_id, dap_index=0, now=True): - """! @brief Read a single word to a DP or AP register""" + """@brief Read a single word to a DP or AP register""" raise NotImplementedError() def reg_write_repeat(self, num_repeats, reg_id, data_array, dap_index=0): - """! @brief Write one or more words to the same DP or AP register""" + """@brief Write one or more words to the same DP or AP register""" raise NotImplementedError() def reg_read_repeat(self, num_repeats, reg_id, dap_index=0, now=True): - """! @brief Read one or more words from the same DP or AP register""" + """@brief Read one or more words from the same DP or AP register""" raise NotImplementedError() diff --git a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py index 99d5f3a8a..25ae20b00 100644 --- a/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py +++ b/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py @@ -63,7 +63,7 @@ class SWOStatus: TRACE.setLevel(logging.CRITICAL) def _get_interfaces(): - """! @brief Get the connected USB devices""" + """@brief Get the connected USB devices""" # Get CMSIS-DAPv1 interfaces. 
v1_interfaces = INTERFACE[USB_BACKEND].get_all_connected_interfaces() @@ -88,12 +88,12 @@ def _get_interfaces(): def _get_unique_id(interface): - """! @brief Get the unique id from an interface""" + """@brief Get the unique id from an interface""" return interface.get_serial_number() class _Transfer(object): - """! @brief A wrapper object representing a command invoked by the layer above. + """@brief A wrapper object representing a command invoked by the layer above. The transfer class contains a logical register read or a block of reads to the same register. @@ -119,12 +119,12 @@ def __init__(self, daplink, dap_index, transfer_count, self._error = None def get_data_size(self): - """! @brief Get the size in bytes of the return value of this transfer + """@brief Get the size in bytes of the return value of this transfer """ return self._size_bytes def add_response(self, data): - """! @brief Add data read from the remote device to this object. + """@brief Add data read from the remote device to this object. The size of data added must match exactly the size that get_data_size returns. @@ -138,13 +138,13 @@ def add_response(self, data): self._result = result def add_error(self, error): - """! @brief Attach an exception to this transfer rather than data. + """@brief Attach an exception to this transfer rather than data. """ assert isinstance(error, Exception) self._error = error def get_result(self): - """! @brief Get the result of this transfer. + """@brief Get the result of this transfer. """ while self._result is None: if len(self.daplink._commands_to_read) > 0: @@ -164,7 +164,7 @@ def get_result(self): return self._result class _Command(object): - """! @brief Wrapper object representing a command sent to the layer below (ex. USB). + """@brief Wrapper object representing a command sent to the layer below (ex. USB). 
This class wraps the physical commands DAP_Transfer and DAP_TransferBlock to provide a uniform way to build the command to most efficiently transfer @@ -195,7 +195,7 @@ def uid(self) -> int: return self._id def _get_free_transfers(self, blockAllowed, isRead): - """! @brief Return the number of available read or write transfers. + """@brief Return the number of available read or write transfers. """ if blockAllowed: # DAP_TransferBlock request packet: @@ -274,12 +274,12 @@ def get_full(self): (self._get_free_transfers(self._block_allowed, False) == 0) def get_empty(self): - """! @brief Return True if no transfers have been added to this packet + """@brief Return True if no transfers have been added to this packet """ return len(self._data) == 0 def add(self, count, request, data, dap_index): - """! @brief Add a single or block register transfer operation to this command + """@brief Add a single or block register transfer operation to this command """ assert self._data_encoded is False if self._dap_index is None: @@ -303,7 +303,7 @@ def add(self, count, request, data, dap_index): self._block_allowed) def _encode_transfer_data(self): - """! @brief Encode this command into a byte array that can be sent + """@brief Encode this command into a byte array that can be sent The data returned by this function is a bytearray in the format that of a DAP_Transfer CMSIS-DAP command. @@ -337,7 +337,7 @@ def _encode_transfer_data(self): return buf[:pos] def _check_response(self, response): - """! @brief Check the response status byte from CMSIS-DAP transfer commands. + """@brief Check the response status byte from CMSIS-DAP transfer commands. The ACK bits [2:0] and the protocol error bit are checked. If any error is indicated, the appropriate exception is raised. An exception is also raised for unrecognised ACK @@ -367,7 +367,7 @@ def _check_response(self, response): raise DAPAccessIntf.TransferError("SWD protocol error") def _decode_transfer_data(self, data): - """! 
@brief Take a byte array and extract the data from it + """@brief Take a byte array and extract the data from it Decode the response returned by a DAP_Transfer CMSIS-DAP command and return it as an array of bytes. @@ -389,7 +389,7 @@ def _decode_transfer_data(self, data): return data[3:3 + 4 * self._read_count] def _encode_transfer_block_data(self): - """! @brief Encode this command into a byte array that can be sent + """@brief Encode this command into a byte array that can be sent The data returned by this function is a bytearray in the format that of a DAP_TransferBlock CMSIS-DAP command. @@ -428,7 +428,7 @@ def _encode_transfer_block_data(self): return buf[:pos] def _decode_transfer_block_data(self, data): - """! @brief Take a byte array and extract the data from it + """@brief Take a byte array and extract the data from it Decode the response returned by a DAP_TransferBlock CMSIS-DAP command and return it as an array of bytes. @@ -451,7 +451,7 @@ def _decode_transfer_block_data(self, data): return data[4:4 + 4 * self._read_count] def encode_data(self): - """! @brief Encode this command into a byte array that can be sent + """@brief Encode this command into a byte array that can be sent The actual command this is encoded into depends on the data that was added. @@ -465,7 +465,7 @@ def encode_data(self): return data def decode_data(self, data): - """! @brief Decode the response data + """@brief Decode the response data """ assert self.get_empty() is False assert self._data_encoded is True @@ -476,7 +476,7 @@ def decode_data(self, data): return data class DAPAccessCMSISDAP(DAPAccessIntf): - """! 
@brief An implementation of the DAPAccessIntf layer for DAPLink boards + """@brief An implementation of the DAPAccessIntf layer for DAPLink boards @internal All methods that use the CMSISDAPProtocol instance must be locked and must flush the command queue @@ -488,7 +488,7 @@ class DAPAccessCMSISDAP(DAPAccessIntf): # ------------------------------------------- # @staticmethod def get_connected_devices(): - """! @brief Return an array of all mbed boards connected + """@brief Return an array of all mbed boards connected """ all_daplinks = [] all_interfaces = _get_interfaces() @@ -588,7 +588,7 @@ def __init__(self, unique_id, interface=None): @property def protocol_version(self) -> VersionTuple: - """! @brief Tuple of CMSIS-DAP protocol version. + """@brief Tuple of CMSIS-DAP protocol version. @return 3-tuple consisting of (major, minor, micro) version of the CMSIS-DAP protocol implemented by the debug probe. """ @@ -596,7 +596,7 @@ def protocol_version(self) -> VersionTuple: @property def firmware_version(self) -> Optional[str]: - """! @brief A string of the product firmware version, or None. + """@brief A string of the product firmware version, or None. Only probes supporting CMSIS-DAP protocol v2.1 or later can return their firmware version. """ @@ -612,7 +612,7 @@ def product_name(self): @property def vidpid(self): - """! @brief A tuple of USB VID and PID, in that order.""" + """@brief A tuple of USB VID and PID, in that order.""" return self._vidpid @property @@ -620,15 +620,15 @@ def has_swd_sequence(self): return self._cmsis_dap_version >= CMSISDAPVersion.V1_2_0 def lock(self): - """! @brief Lock the interface.""" + """@brief Lock the interface.""" self._lock.acquire() def unlock(self): - """! @brief Unlock the interface.""" + """@brief Unlock the interface.""" self._lock.release() def _read_protocol_version(self): - """! 
Determine the CMSIS-DAP protocol version.""" + """Determine the CMSIS-DAP protocol version.""" # The fallback version to use when version parsing fails depends on whether v2 bulk endpoints are used # (unfortunately conflating transport with protocol). fallback_protocol_version = (CMSISDAPVersion.V1_0_0, CMSISDAPVersion.V2_0_0)[self._interface.is_bulk] @@ -764,7 +764,7 @@ def get_swj_mode(self): return self._dap_port def set_deferred_transfer(self, enable): - """! @brief Allow transfers to be delayed and buffered + """@brief Allow transfers to be delayed and buffered By default deferred transfers are turned on. When off, all reads and writes will be completed by the time the function returns. @@ -1021,7 +1021,7 @@ def reg_read_repeat_cb(): # ------------------------------------------- # def _init_deferred_buffers(self): - """! @brief Initialize or reinitialize all the deferred transfer buffers + """@brief Initialize or reinitialize all the deferred transfer buffers Calling this method will drop all pending transactions so use with care. @@ -1041,7 +1041,7 @@ def _init_deferred_buffers(self): @locked def _read_packet(self): - """! @brief Reads and decodes a single packet + """@brief Reads and decodes a single packet Reads a single packet from the device and stores the data from it in the current Command @@ -1086,7 +1086,7 @@ def _read_packet(self): @locked def _send_packet(self): - """! @brief Send a single packet to the interface + """@brief Send a single packet to the interface This function guarantees that the number of packets that are stored in daplink's buffer (the number of @@ -1115,7 +1115,7 @@ def _send_packet(self): @locked def _write(self, dap_index, transfer_count, transfer_request, transfer_data): - """! 
@brief Write one or more commands + """@brief Write one or more commands """ assert dap_index == 0 # dap index currently unsupported assert isinstance(transfer_count, int) @@ -1166,7 +1166,7 @@ def _write(self, dap_index, transfer_count, @locked def _abort_all_transfers(self, exception): - """! @brief Abort any ongoing transfers and clear all buffers + """@brief Abort any ongoing transfers and clear all buffers """ pending_reads = len(self._commands_to_read) TRACE.debug("aborting %d pending reads after exception %r", pending_reads, exception) diff --git a/pyocd/probe/shared_probe_proxy.py b/pyocd/probe/shared_probe_proxy.py index bdbc46378..4831ecc3b 100644 --- a/pyocd/probe/shared_probe_proxy.py +++ b/pyocd/probe/shared_probe_proxy.py @@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__) class SharedDebugProbeProxy(object): - """! @brief Proxy for a DebugProbe that allows it to be shared by multiple clients. + """@brief Proxy for a DebugProbe that allows it to be shared by multiple clients. The main purpose of this class is to keep track of the number of times the probe has been opened and connected, and to perform checks to ensure that probes don't interfere with each @@ -38,7 +38,7 @@ def __init__(self, probe): @property def session(self): - """! @brief Session associated with this probe.""" + """@brief Session associated with this probe.""" return self._session @session.setter @@ -77,7 +77,7 @@ def swj_sequence(self, length, bits): self._probe.swj_sequence(length, bits) def __getattr__(self, name): - """! 
@brief Redirect to underlying probe object methods.""" + """@brief Redirect to underlying probe object methods.""" if hasattr(self._probe, name): return getattr(self._probe, name) else: diff --git a/pyocd/probe/stlink/detect/base.py b/pyocd/probe/stlink/detect/base.py index 54b1596ec..057d7c270 100644 --- a/pyocd/probe/stlink/detect/base.py +++ b/pyocd/probe/stlink/detect/base.py @@ -102,7 +102,7 @@ def _update_device_from_htm(self, device): # Private functions supporting API def _read_htm_ids(self, mount_point): - """! Function scans mbed.htm to get information about TargetID. + """Function scans mbed.htm to get information about TargetID. @param mount_point mbed mount point (disk / drive letter) @return Function returns targetID, in case of failure returns None. @details Note: This function should be improved to scan variety of boards' @@ -121,7 +121,7 @@ def _htm_lines(self, mount_point): return f.readlines() def _target_id_from_htm(self, line): - """! Extract Target id from htm line. + """Extract Target id from htm line. @return Target id or None """ # Detecting modern mbed.htm file format @@ -138,13 +138,13 @@ def _target_id_from_htm(self, line): return None def mount_point_ready(self, path): - """! Check if a mount point is ready for file operations + """Check if a mount point is ready for file operations """ return exists(path) and isdir(path) @staticmethod def _run_cli_process(cmd, shell=True): - """! Runs command as a process and return stdout, stderr and ret code + """Runs command as a process and return stdout, stderr and ret code @param cmd Command to execute @return Tuple of (stdout, stderr, returncode) """ diff --git a/pyocd/probe/stlink/detect/factory.py b/pyocd/probe/stlink/detect/factory.py index 95b3f89a3..798a6fb38 100644 --- a/pyocd/probe/stlink/detect/factory.py +++ b/pyocd/probe/stlink/detect/factory.py @@ -20,7 +20,7 @@ from . import base # noqa: F401 # lgtm[py/unused-import] def create_mbed_detector(**kwargs): - """! 
Factory used to create host OS specific mbed-lstools object + """Factory used to create host OS specific mbed-lstools object :param kwargs: keyword arguments to pass along to the constructors @return Returns MbedLsTools object or None if host OS is not supported diff --git a/pyocd/probe/stlink/detect/linux.py b/pyocd/probe/stlink/detect/linux.py index c237235fa..0213db790 100644 --- a/pyocd/probe/stlink/detect/linux.py +++ b/pyocd/probe/stlink/detect/linux.py @@ -37,7 +37,7 @@ class StlinkDetectLinuxGeneric(StlinkDetectBase): """ def __init__(self, **kwargs): - """! ctor + """ctor """ StlinkDetectBase.__init__(self, **kwargs) self.nlp = re.compile(r"(pci|usb)-[0-9a-zA-Z:_-]*_(?P[0-9a-zA-Z]*)-.*$") @@ -60,7 +60,7 @@ def find_candidates(self): ] def _dev_by_id(self, device_type): - """! Get a dict, USBID -> device, for a device class + """Get a dict, USBID -> device, for a device class @param device_type The type of devices to search. For exmaple, "serial" looks for all serial devices connected to this computer @return A dict: Device USBID -> device file in /dev @@ -84,7 +84,7 @@ def _dev_by_id(self, device_type): return {} def _fat_mounts(self): - """! Lists mounted devices with vfat file system (potential mbeds) + """Lists mounted devices with vfat file system (potential mbeds) @result Returns list of all mounted vfat devices @details Uses Linux shell command: 'mount' """ @@ -97,7 +97,7 @@ def _fat_mounts(self): yield match.group("dev"), match.group("dir") def _hex_ids(self, dev_list): - """! 
Build a USBID map for a device list + """Build a USBID map for a device list @param disk_list List of disks in a system with USBID decoration @return Returns map USBID -> device file in /dev @details Uses regular expressions to get a USBID (TargeTIDs) a "by-id" diff --git a/pyocd/probe/stlink/detect/windows.py b/pyocd/probe/stlink/detect/windows.py index e8c09e010..57866905a 100644 --- a/pyocd/probe/stlink/detect/windows.py +++ b/pyocd/probe/stlink/detect/windows.py @@ -68,7 +68,7 @@ def _is_mbed_volume(volume_string): def _get_cached_mounted_points(): - """! Get the volumes present on the system + """Get the volumes present on the system @return List of mount points and their associated target id Ex. [{ 'mount_point': 'D:', 'target_id_usb_id': 'xxxx'}, ...] """ @@ -193,7 +193,7 @@ def _determine_subdevice_capability(key): def _vid_pid_path_to_usb_info(vid_pid_path): - """! Provide the vendor ID and product ID of a device based on its entry in the registry + """Provide the vendor ID and product ID of a device based on its entry in the registry @return Returns {'vendor_id': '', 'product': ''} @details If the vendor ID or product ID can't be determined, they will be returned as None. @@ -216,21 +216,21 @@ def _vid_pid_path_to_usb_info(vid_pid_path): def _iter_keys_as_str(key): - """! Iterate over subkeys of a key returning subkey as string + """Iterate over subkeys of a key returning subkey as string """ for i in range(winreg.QueryInfoKey(key)[0]): yield winreg.EnumKey(key, i) def _iter_keys(key): - """! Iterate over subkeys of a key + """Iterate over subkeys of a key """ for i in range(winreg.QueryInfoKey(key)[0]): yield winreg.OpenKey(key, winreg.EnumKey(key, i)) def _iter_vals(key): - """! Iterate over values of a key + """Iterate over values of a key """ for i in range(winreg.QueryInfoKey(key)[1]): yield winreg.EnumValue(key, i) @@ -482,7 +482,7 @@ def find_candidates(self): return final_candidates def mount_point_ready(self, path): - """! 
Check if a mount point is ready for file operations + """Check if a mount point is ready for file operations @return Returns True if the given path exists, False otherwise @details Calling the Windows command `dir` instead of using the python `os.path.exists`. The latter causes a Python error box to appear claiming diff --git a/pyocd/probe/stlink/stlink.py b/pyocd/probe/stlink/stlink.py index 6129335a5..cf73680d3 100644 --- a/pyocd/probe/stlink/stlink.py +++ b/pyocd/probe/stlink/stlink.py @@ -31,12 +31,10 @@ LOG = logging.getLogger(__name__) class STLink(object): - """! - @brief STLink V2 and V3 command-level interface. + """@brief STLink V2 and V3 command-level interface. """ class Protocol(Enum): - """! - @brief Protocol options to pass to STLink.enter_debug() method. + """@brief Protocol options to pass to STLink.enter_debug() method. """ SWD = 1 JTAG = 2 @@ -224,7 +222,7 @@ def target_voltage(self): @property def supports_banked_dp(self): - """! @brief Whether the firmware version supports accessing banked DP registers. + """@brief Whether the firmware version supports accessing banked DP registers. This property is not valid until the connection is opened. """ @@ -471,7 +469,7 @@ def write_mem8(self, addr, data, apsel): self._write_mem(addr, data, Commands.JTAG_WRITEMEM_8BIT, self._device.max_packet_size, apsel) def _check_dp_bank(self, port, addr): - """! @brief Check if attempting to access a banked DP register with a firmware version that + """@brief Check if attempting to access a banked DP register with a firmware version that doesn't support that. 
""" if ((port == self.DP_PORT) and ((addr & 0xf0) != 0) and not self.supports_banked_dp): diff --git a/pyocd/probe/stlink/usb.py b/pyocd/probe/stlink/usb.py index 84e5680f4..c9a400af2 100644 --- a/pyocd/probe/stlink/usb.py +++ b/pyocd/probe/stlink/usb.py @@ -40,7 +40,7 @@ class STLinkInfo(NamedTuple): swv_ep: int class STLinkUSBInterface: - """!@brief Provides low-level USB enumeration and transfers for STLinkV2/3 devices.""" + """@brief Provides low-level USB enumeration and transfers for STLinkV2/3 devices.""" ## Command packet size. CMD_SIZE = 16 diff --git a/pyocd/probe/stlink_probe.py b/pyocd/probe/stlink_probe.py index add039a98..2a8efb20e 100644 --- a/pyocd/probe/stlink_probe.py +++ b/pyocd/probe/stlink_probe.py @@ -31,7 +31,7 @@ from ..utility import conversion class StlinkProbe(DebugProbe): - """! @brief Wraps an STLink as a DebugProbe.""" + """@brief Wraps an STLink as a DebugProbe.""" @classmethod def get_all_connected_probes(cls, unique_id: Optional[str] = None, @@ -245,14 +245,14 @@ def swo_read(self): return self._link.swo_read() class STLinkMemoryInterface(MemoryInterface): - """! @brief Concrete memory interface for a single AP.""" + """@brief Concrete memory interface for a single AP.""" def __init__(self, link, apsel): self._link = link self._apsel = apsel def write_memory(self, addr, data, transfer_size=32): - """! @brief Write a single memory location. + """@brief Write a single memory location. By default the transfer size is a word. """ @@ -266,7 +266,7 @@ def write_memory(self, addr, data, transfer_size=32): self._link.write_mem8(addr, [data], self._apsel) def read_memory(self, addr, transfer_size=32, now=True): - """! @brief Read a memory location. + """@brief Read a memory location. By default, a word will be read. """ @@ -292,7 +292,7 @@ def read_memory_block32(self, addr, size): return conversion.byte_list_to_u32le_list(self._link.read_mem32(addr, size * 4, self._apsel)) class StlinkProbePlugin(Plugin): - """! 
@brief Plugin class for StlLinkProbe.""" + """@brief Plugin class for StlLinkProbe.""" def should_load(self): # TODO only load the plugin when libusb is available diff --git a/pyocd/probe/swj.py b/pyocd/probe/swj.py index 6876d6060..b8e51f760 100644 --- a/pyocd/probe/swj.py +++ b/pyocd/probe/swj.py @@ -22,7 +22,7 @@ LOG = logging.getLogger(__name__) class SWJSequenceSender(object): - """! @brief Class to send canned SWJ sequences. + """@brief Class to send canned SWJ sequences. The primary usage of this class is for sending the SWJ sequences to switch between JTAG and SWD protocols in the Arm ADI SWJ-DP. The select_protocol() method is used for this purpose. @@ -44,7 +44,7 @@ def use_dormant(self, flag): self._use_dormant = flag def select_protocol(self, protocol): - """! @brief Send SWJ sequence to select chosen wire protocol. + """@brief Send SWJ sequence to select chosen wire protocol. The `use_dormant` property determines whether dormant mode will be used for the protocol selection, or if the deprecated ADIv5.0 SWJ sequences will be used. @@ -67,14 +67,14 @@ def select_protocol(self, protocol): assert False, "unhandled protocol %s in SWJSequenceSender" % protocol def jtag_enter_test_logic_reset(self): - """! @brief Execute at least >5 TCK cycles with TMS high to enter the Test-Logic-Reset state. + """@brief Execute at least >5 TCK cycles with TMS high to enter the Test-Logic-Reset state. The line_reset() method can be used instead of this method, but takes a little longer to send. """ self._probe.swj_sequence(8, 0xff) def line_reset(self): - """! @brief Execute a line reset for both SWD and JTAG. + """@brief Execute a line reset for both SWD and JTAG. For JTAG, >=5 TCK cycles with TMS high enters the Test-Logic-Reset state.
For SWD, >=50 cycles with SWDIO high performs a line reset. @@ -82,26 +82,26 @@ def line_reset(self): self._probe.swj_sequence(51, 0xffffffffffffff) def selection_alert(self): - """! @brief Send the dormant selection alert sequence. + """@brief Send the dormant selection alert sequence. The 128-bit selection alert is prefixed with 8 cycles of SWDIOTMS high. """ self._probe.swj_sequence(136, 0x19bc0ea2e3ddafe986852d956209f392ff) def jtag_activation_code(self): - """! @brief 4-bit SWDIOTMS cycles low + 8-bit JTAG activation code.""" + """@brief 4-bit SWDIOTMS cycles low + 8-bit JTAG activation code.""" self._probe.swj_sequence(12, 0x00a0) def swd_activation_code(self): - """! @brief 4-bit SWDIOTMS cycles low + 8-bit SWD activation code.""" + """@brief 4-bit SWDIOTMS cycles low + 8-bit SWD activation code.""" self._probe.swj_sequence(12, 0x01a0) def idle_cycles(self, cycles): - """! @brief Send SWD idle cycles with SWDIOTMS low.""" + """@brief Send SWD idle cycles with SWDIOTMS low.""" self._probe.swj_sequence(cycles, 0) def jtag_to_dormant(self): - """! @brief Send the JTAG to DS select sequence. + """@brief Send the JTAG to DS select sequence. Sends the recommended 31-bit JTAG-to-DS select sequence of 0x33bbbbba (LSB-first) on SWDIOTMS. See ADIv6 section B5.3.2. @@ -112,7 +112,7 @@ def jtag_to_dormant(self): self._probe.swj_sequence(39, 0x33bbbbba) def swd_to_dormant(self): - """! @brief Send the SWD to DS sequence. + """@brief Send the SWD to DS sequence. Sends the 16-bit SWD-to-DS select sequence of 0xe3bc (LSB-first) on SWDIOTMS. See ADIv6 section B5.3.3. @@ -121,7 +121,7 @@ def swd_to_dormant(self): self._probe.swj_sequence(16, 0xe3bc) def dormant_to_swd(self): - """! @brief Perform the dormant mode to SWD transition sequence.""" + """@brief Perform the dormant mode to SWD transition sequence.""" # 8 SWDIOTMS cycles high + 128-bit selection alert sequence. 
self.selection_alert() @@ -136,7 +136,7 @@ def dormant_to_swd(self): self.idle_cycles(2) def dormant_to_jtag(self): - """! @brief Perform the dormant mode to JTAG transition sequence.""" + """@brief Perform the dormant mode to JTAG transition sequence.""" # 8 SWDIOTMS cycles high + 128-bit selection alert sequence. self.selection_alert() @@ -146,7 +146,7 @@ def dormant_to_jtag(self): self.jtag_enter_test_logic_reset() def switch_to_swd(self): - """! @brief Send SWJ sequence to select SWD.""" + """@brief Send SWJ sequence to select SWD.""" # Ensure current debug interface is in reset state. A full line reset is used here instead # of the shorter JTAG TLR to support the case where the device is already in SWD mode. @@ -170,7 +170,7 @@ def switch_to_swd(self): self._probe.swj_sequence(8, 0x00) # At least 2 idle cycles (SWDIO/TMS Low) def switch_to_jtag(self): - """! @brief Send SWJ sequence to select JTAG.""" + """@brief Send SWJ sequence to select JTAG.""" # Ensure current debug interface is in reset state, for either SWD or JTAG. self.line_reset() diff --git a/pyocd/probe/tcp_client_probe.py b/pyocd/probe/tcp_client_probe.py index 8427459e4..cfdd02542 100644 --- a/pyocd/probe/tcp_client_probe.py +++ b/pyocd/probe/tcp_client_probe.py @@ -32,7 +32,7 @@ TRACE.setLevel(logging.CRITICAL) class TCPClientProbe(DebugProbe): - """! @brief Probe class that connects to a debug probe server. + """@brief Probe class that connects to a debug probe server. The protocol is a one-line JSON request and response form. @@ -63,7 +63,7 @@ class TCPClientProbe(DebugProbe): PROTOCOL_VERSION = 1 class StatusCode: - """! @brief Constants for errors reported from the server.""" + """@brief Constants for errors reported from the server.""" GENERAL_ERROR = 1 PROBE_DISCONNECTED = 2 PROBE_ERROR = 3 @@ -102,7 +102,7 @@ def get_probe_with_id(cls, unique_id, is_explicit=False): return cls(unique_id) if is_explicit else None def __init__(self, unique_id): - """! 
@brief Constructor.""" + """@brief Constructor.""" super(TCPClientProbe, self).__init__() hostname, port = self._extract_address(unique_id) self._uid = f"remote:{hostname}:{port}" @@ -142,13 +142,13 @@ def capabilities(self): @property def request_id(self): - """! @brief Generate a new request ID.""" + """@brief Generate a new request ID.""" rid = self._request_id self._request_id += 1 return rid def _perform_request_without_raise(self, request: str, *args: Any) -> Tuple[Any, Optional[BaseException]]: - """! Execute a request-reply transaction with the server. + """Execute a request-reply transaction with the server. The return value is a 2-tuple consisting of the optional result from the request and an optional exception object. The latter is only non-None if the request failed and a non-zero status code was @@ -356,7 +356,7 @@ def swo_read(self): ##@} class RemoteMemoryInterface(MemoryInterface): - """! @brief Local proxy for a remote memory interface.""" + """@brief Local proxy for a remote memory interface.""" def __init__(self, remote_probe, handle): self._remote_probe = remote_probe @@ -390,7 +390,7 @@ def read_memory_block8(self, addr, size): return self._remote_probe._perform_request('read_block8', self._handle, addr, size) class TCPClientProbePlugin(Plugin): - """! @brief Plugin class for TCPClientProbePlugin.""" + """@brief Plugin class for TCPClientProbePlugin.""" def load(self): return TCPClientProbe diff --git a/pyocd/probe/tcp_probe_server.py b/pyocd/probe/tcp_probe_server.py index 887c8cc8f..3954ca442 100644 --- a/pyocd/probe/tcp_probe_server.py +++ b/pyocd/probe/tcp_probe_server.py @@ -102,7 +102,7 @@ def __init__( self._server.server_bind() def start(self) -> None: - """! @brief Start the server thread and begin listening. + """@brief Start the server thread and begin listening. Returns once the server thread has begun executing. """ @@ -112,7 +112,7 @@ def start(self) -> None: sleep(0.005) def stop(self) -> None: - """! 
@brief Shut down the server. + """@brief Shut down the server. Any open connections will be forcibly closed. This function does not return until the server thread has exited. @@ -122,12 +122,12 @@ def stop(self) -> None: @property def is_running(self) -> bool: - """! @brief Whether the server thread is running.""" + """@brief Whether the server thread is running.""" return self._is_running @property def port(self) -> int: - """! @brief The server's port. + """@brief The server's port. If port 0 was specified in the constructor, then, after start() is called, this will reflect the actual port on which the server is listening. @@ -135,7 +135,7 @@ def port(self) -> int: return self._port def run(self) -> None: - """! @brief The server thread implementation.""" + """@brief The server thread implementation.""" self._did_start = True self._is_running = True @@ -149,7 +149,7 @@ def run(self) -> None: self._is_running = False class TCPProbeServer(ThreadingTCPServer): - """! @brief TCP server subclass that carries the session and probe being served.""" + """@brief TCP server subclass that carries the session and probe being served.""" # Change the default SO_REUSEADDR setting. allow_reuse_address = True @@ -173,8 +173,7 @@ def handle_error(self, request, client_address): exc_info=self._session.log_tracebacks) class DebugProbeRequestHandler(StreamRequestHandler): - """! - @brief Probe server request handler. + """@brief Probe server request handler. This class implements the server side for the remote probe protocol. @@ -202,7 +201,7 @@ class DebugProbeRequestHandler(StreamRequestHandler): PROTOCOL_VERSION = 1 class StatusCode: - """! @brief Constants for errors reported from the server.""" + """@brief Constants for errors reported from the server.""" GENERAL_ERROR = 1 PROBE_DISCONNECTED = 2 PROBE_ERROR = 3 @@ -379,7 +378,7 @@ def handle(self): raise def _get_exception_status_code(self, err): - """! 
@brief Convert an exception class into a status code.""" + """@brief Convert an exception class into a status code.""" # Must test the exception class in order of specific to general. if isinstance(err, exceptions.ProbeDisconnected): return self.StatusCode.PROBE_DISCONNECTED diff --git a/pyocd/rtos/argon.py b/pyocd/rtos/argon.py index 263c9fa1e..2ba15638e 100644 --- a/pyocd/rtos/argon.py +++ b/pyocd/rtos/argon.py @@ -68,7 +68,7 @@ def __iter__(self): is_valid = False class ArgonThreadContext(DebugContext): - """! @brief Thread context for Argon.""" + """@brief Thread context for Argon.""" # SP is handled specially, so it is not in these dicts. @@ -225,7 +225,7 @@ def read_core_registers_raw(self, reg_list): return reg_vals class ArgonThread(TargetThread): - """! @brief Base class representing a thread on the target.""" + """@brief Base class representing a thread on the target.""" UNKNOWN = 0 SUSPENDED = 1 @@ -329,7 +329,7 @@ def __repr__(self): return str(self) class ArgonThreadProvider(ThreadProvider): - """! @brief Base class for RTOS support plugins.""" + """@brief Base class for RTOS support plugins.""" def __init__(self, target): super(ArgonThreadProvider, self).__init__(target) @@ -444,7 +444,7 @@ def get_is_running(self): return (flags & IS_RUNNING_MASK) != 0 class ArgonTraceEvent(events.TraceEvent): - """! @brief Argon kernel trace event.""" + """@brief Argon kernel trace event.""" kArTraceThreadSwitch = 1 # 2 value: 0=previous thread's new state, 1=new thread id kArTraceThreadCreated = 2 # 1 value @@ -486,7 +486,7 @@ def __str__(self): return "[{}] Argon: {}".format(self.timestamp, desc) class ArgonTraceEventFilter(TraceEventFilter): - """! @brief Trace event filter to identify Argon kernel trace events sent via ITM. + """@brief Trace event filter to identify Argon kernel trace events sent via ITM. As Argon kernel trace events are identified, the ITM trace events are replaced with instances of ArgonTraceEvent. 
@@ -521,7 +521,7 @@ def filter(self, event): return event class ArgonPlugin(Plugin): - """! @brief Plugin class for the Argon RTOS.""" + """@brief Plugin class for the Argon RTOS.""" def load(self): return ArgonThreadProvider diff --git a/pyocd/rtos/common.py b/pyocd/rtos/common.py index 31c6d4297..3dba84fd7 100644 --- a/pyocd/rtos/common.py +++ b/pyocd/rtos/common.py @@ -27,7 +27,7 @@ EXC_RETURN_EXT_FRAME_MASK = (1 << 4) def read_c_string(context, ptr): - """! @brief Reads a null-terminated C string from the target.""" + """@brief Reads a null-terminated C string from the target.""" if ptr == 0: return "" @@ -62,7 +62,7 @@ def read_c_string(context, ptr): return s class HandlerModeThread(TargetThread): - """! @brief Class representing the handler mode.""" + """@brief Class representing the handler mode.""" UNIQUE_ID = 2 diff --git a/pyocd/rtos/freertos.py b/pyocd/rtos/freertos.py index 621a68ac7..2fbdf24f5 100644 --- a/pyocd/rtos/freertos.py +++ b/pyocd/rtos/freertos.py @@ -66,7 +66,7 @@ def __iter__(self): node = 0 class FreeRTOSThreadContext(DebugContext): - """! @brief Thread context for FreeRTOS.""" + """@brief Thread context for FreeRTOS.""" # SP/PSP are handled specially, so it is not in these dicts. @@ -233,7 +233,7 @@ def read_core_registers_raw(self, reg_list): return reg_vals class FreeRTOSThread(TargetThread): - """! @brief A FreeRTOS task.""" + """@brief A FreeRTOS task.""" RUNNING = 1 READY = 2 @@ -310,7 +310,7 @@ def __repr__(self): return str(self) class FreeRTOSThreadProvider(ThreadProvider): - """! @brief Thread provider for FreeRTOS.""" + """@brief Thread provider for FreeRTOS.""" ## Required FreeRTOS symbols. FREERTOS_SYMBOLS = [ @@ -534,7 +534,7 @@ def _get_elf_symbol_size(self, name, addr, calculated_size): return calculated_size class FreeRTOSPlugin(Plugin): - """! 
@brief Plugin class for FreeRTOS.""" + """@brief Plugin class for FreeRTOS.""" def load(self): return FreeRTOSThreadProvider diff --git a/pyocd/rtos/provider.py b/pyocd/rtos/provider.py index 31296427d..a3d922804 100644 --- a/pyocd/rtos/provider.py +++ b/pyocd/rtos/provider.py @@ -19,7 +19,7 @@ LOG = logging.getLogger(__name__) class TargetThread(object): - """! @brief Base class representing a thread on the target.""" + """@brief Base class representing a thread on the target.""" def __init__(self): pass @@ -45,7 +45,7 @@ def context(self): raise NotImplementedError() class ThreadProvider(object): - """! @brief Base class for RTOS support plugins.""" + """@brief Base class for RTOS support plugins.""" def __init__(self, target): self._target = target @@ -64,8 +64,7 @@ def _lookup_symbols(self, symbolList, symbolProvider): return syms def init(self, symbolProvider): - """! - @retval True The provider was successfully initialzed. + """@retval True The provider was successfully initialzed. @retval False The provider could not be initialized successfully. """ raise NotImplementedError() @@ -116,9 +115,9 @@ def is_valid_thread_id(self, threadId): raise NotImplementedError() def get_current_thread_id(self): - """! From GDB's point of view, where Handler Mode is a thread""" + """From GDB's point of view, where Handler Mode is a thread""" raise NotImplementedError() def get_actual_current_thread_id(self): - """! From OS's point of view, so the current OS thread even in Handler Mode""" + """From OS's point of view, so the current OS thread even in Handler Mode""" raise NotImplementedError() diff --git a/pyocd/rtos/rtx5.py b/pyocd/rtos/rtx5.py index 5e7482c0b..e3ef035bd 100644 --- a/pyocd/rtos/rtx5.py +++ b/pyocd/rtos/rtx5.py @@ -47,7 +47,7 @@ def __iter__(self): break class RTXThreadContext(DebugContext): - """! @brief Thread context for RTX5.""" + """@brief Thread context for RTX5.""" # SP/PSP are handled specially, so it is not in these dicts. 
@@ -210,7 +210,7 @@ def read_core_registers_raw(self, reg_list): return reg_vals class RTXTargetThread(TargetThread): - """! @brief Represents an RTX5 thread on the target.""" + """@brief Represents an RTX5 thread on the target.""" STATE_OFFSET = 1 NAME_OFFSET = 4 @@ -307,7 +307,7 @@ def get_stack_frame(self): return 0xFFFFFFFD class RTX5ThreadProvider(ThreadProvider): - """! @brief Thread provider for RTX5 RTOS.""" + """@brief Thread provider for RTX5 RTOS.""" # Offsets in osRtxInfo_t KERNEL_STATE_OFFSET = 8 @@ -447,7 +447,7 @@ def get_kernel_state(self): return self._target_context.read8(self._os_rtx_info + RTX5ThreadProvider.KERNEL_STATE_OFFSET) class RTX5Plugin(Plugin): - """! @brief Plugin class for the RTX5 RTOS.""" + """@brief Plugin class for the RTX5 RTOS.""" def load(self): return RTX5ThreadProvider diff --git a/pyocd/rtos/threadx.py b/pyocd/rtos/threadx.py index 3d50e1caf..ccb84f0fd 100644 --- a/pyocd/rtos/threadx.py +++ b/pyocd/rtos/threadx.py @@ -72,7 +72,7 @@ def __iter__(self): class ThreadXThreadContext(DebugContext): - """! @brief Thread context for ThreadX.""" + """@brief Thread context for ThreadX.""" # SP/PSP are handled specially, so it is not in these dicts. @@ -258,7 +258,7 @@ def read_core_registers_raw(self, reg_list): class ThreadXThread(TargetThread): - """! @brief A ThreadX task.""" + """@brief A ThreadX task.""" STATE_NAMES = { 0: "Ready", @@ -361,7 +361,7 @@ def __repr__(self): class ThreadXThreadProvider(ThreadProvider): - """! @brief Thread provider for ThreadX. + """@brief Thread provider for ThreadX. To successfully initialize, the following ThreadX symbols are needed: _tx_thread_created_ptr: pointer to list of created processes @@ -514,7 +514,7 @@ def get_actual_current_thread_id(self): class ThreadXPlugin(Plugin): - """! 
@brief Plugin class for ThreadX.""" + """@brief Plugin class for ThreadX.""" def load(self): return ThreadXThreadProvider diff --git a/pyocd/rtos/zephyr.py b/pyocd/rtos/zephyr.py index a1f02f699..ba5178a4b 100644 --- a/pyocd/rtos/zephyr.py +++ b/pyocd/rtos/zephyr.py @@ -46,7 +46,7 @@ def __iter__(self): node = 0 class ZephyrThreadContext(DebugContext): - """! @brief Thread context for Zephyr.""" + """@brief Thread context for Zephyr.""" STACK_FRAME_OFFSETS = { 0: 0, # r0 @@ -141,7 +141,7 @@ def read_core_registers_raw(self, reg_list): return reg_vals class ZephyrThread(TargetThread): - """! @brief A Zephyr task.""" + """@brief A Zephyr task.""" READY = 0 PENDING = 1 << 1 @@ -241,7 +241,7 @@ def __repr__(self): return str(self) class ZephyrThreadProvider(ThreadProvider): - """! @brief Thread provider for Zephyr.""" + """@brief Thread provider for Zephyr.""" ## Required Zephyr symbols. ZEPHYR_SYMBOLS = [ @@ -416,7 +416,7 @@ def version(self): return self._version class ZephyrPlugin(Plugin): - """! @brief Plugin class for the Zephyr RTOS.""" + """@brief Plugin class for the Zephyr RTOS.""" def load(self): return ZephyrThreadProvider diff --git a/pyocd/subcommands/base.py b/pyocd/subcommands/base.py index 275a2e5ad..16542331d 100644 --- a/pyocd/subcommands/base.py +++ b/pyocd/subcommands/base.py @@ -22,7 +22,7 @@ from ..utility.cmdline import convert_frequency class SubcommandBase: - """! @brief Base class for pyocd command line subcommand.""" + """@brief Base class for pyocd command line subcommand.""" # Subcommand descriptors. NAMES: List[str] = [] @@ -35,7 +35,7 @@ class SubcommandBase: parser: Optional[argparse.ArgumentParser] = None class CommonOptions: - """! @brief Namespace with parsers for repeated option groups.""" + """@brief Namespace with parsers for repeated option groups.""" # Define logging related options. 
LOGGING = argparse.ArgumentParser(description='logging', add_help=False) @@ -94,7 +94,7 @@ class CommonOptions: @classmethod def add_subcommands(cls, parser: argparse.ArgumentParser) -> None: - """! @brief Add declared subcommands to the given parser.""" + """@brief Add declared subcommands to the given parser.""" if cls.SUBCOMMANDS: subparsers = parser.add_subparsers(title="subcommands", metavar="", dest='cmd') for subcmd_class in cls.SUBCOMMANDS: @@ -112,7 +112,7 @@ def add_subcommands(cls, parser: argparse.ArgumentParser) -> None: @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object. + """@brief Add this subcommand to the subparsers object. @return List of argument parsers. The last element in the list _must_ be the parser for the subcommand class itself, as it is saved by the caller in cls.parser. """ @@ -120,11 +120,11 @@ class itself, as it is saved by the caller in cls.parser. @classmethod def customize_subparser(cls, subparser: argparse.ArgumentParser) -> None: - """! @brief Optionally modify a subparser after it is created.""" + """@brief Optionally modify a subparser after it is created.""" pass def __init__(self, args: argparse.Namespace): - """! @brief Constructor. + """@brief Constructor. @param self This object. @param args Namespace of parsed argument values. @@ -132,7 +132,7 @@ def __init__(self, args: argparse.Namespace): self._args = args def invoke(self) -> int: - """! @brief Run the subcommand. + """@brief Run the subcommand. @return Process status code for the command. """ if self.parser is not None: @@ -140,11 +140,11 @@ def invoke(self) -> int: return 0 def _get_log_level_delta(self) -> int: - """! 
@brief Compute the logging level delta sum from quiet and verbose counts.""" + """@brief Compute the logging level delta sum from quiet and verbose counts.""" return (self._args.quiet * 10) - (self._args.verbose * 10) def _increase_logging(self, loggers: List[str]) -> None: - """! @brief Increase logging level for a set of subloggers. + """@brief Increase logging level for a set of subloggers. @param self This object. @param loggers """ @@ -155,7 +155,7 @@ def _increase_logging(self, loggers: List[str]) -> None: logging.getLogger(logger).setLevel(level) def _get_pretty_table(self, fields: List[str], header: bool = None) -> prettytable.PrettyTable: - """! @brief Returns a PrettyTable object with formatting options set.""" + """@brief Returns a PrettyTable object with formatting options set.""" pt = prettytable.PrettyTable(fields) pt.align = 'l' if header is not None: diff --git a/pyocd/subcommands/commander_cmd.py b/pyocd/subcommands/commander_cmd.py index a7730cedc..5714eebef 100644 --- a/pyocd/subcommands/commander_cmd.py +++ b/pyocd/subcommands/commander_cmd.py @@ -23,7 +23,7 @@ from ..commands.commander import PyOCDCommander class CommanderSubcommand(SubcommandBase): - """! @brief `pyocd commander` subcommand.""" + """@brief `pyocd commander` subcommand.""" NAMES = ['commander', 'cmd'] HELP = "Interactive command console." @@ -38,7 +38,7 @@ class CommanderSubcommand(SubcommandBase): @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" commander_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) commander_options = commander_parser.add_argument_group("commander options") @@ -59,7 +59,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, commander_parser] def invoke(self) -> int: - """! 
@brief Handle 'commander' subcommand.""" + """@brief Handle 'commander' subcommand.""" # Flatten commands list then extract primary command and its arguments. if self._args.commands is not None: cmds = [] diff --git a/pyocd/subcommands/erase_cmd.py b/pyocd/subcommands/erase_cmd.py index 26603bcfb..918820ea5 100644 --- a/pyocd/subcommands/erase_cmd.py +++ b/pyocd/subcommands/erase_cmd.py @@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__) class EraseSubcommand(SubcommandBase): - """! @brief `pyocd erase` subcommand.""" + """@brief `pyocd erase` subcommand.""" NAMES = ['erase'] HELP = "Erase entire device flash or specified sectors." @@ -46,7 +46,7 @@ class EraseSubcommand(SubcommandBase): @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" erase_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) erase_options = erase_parser.add_argument_group("erase options") @@ -63,7 +63,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, erase_parser] def invoke(self) -> int: - """! @brief Handle 'erase' subcommand.""" + """@brief Handle 'erase' subcommand.""" self._increase_logging(["pyocd.flash.eraser"]) # Display a nice, helpful error describing why nothing was done and how to correct it. diff --git a/pyocd/subcommands/gdbserver_cmd.py b/pyocd/subcommands/gdbserver_cmd.py index 3a481d723..a788c2c64 100644 --- a/pyocd/subcommands/gdbserver_cmd.py +++ b/pyocd/subcommands/gdbserver_cmd.py @@ -37,7 +37,7 @@ LOG = logging.getLogger(__name__) class GdbserverSubcommand(SubcommandBase): - """! @brief `pyocd gdbserver` subcommand.""" + """@brief `pyocd gdbserver` subcommand.""" NAMES = ['gdbserver', 'gdb'] HELP = "Run the gdb remote server(s)." 
@@ -51,7 +51,7 @@ class GdbserverSubcommand(SubcommandBase): @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" gdbserver_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) gdbserver_options = gdbserver_parser.add_argument_group("gdbserver options") @@ -93,12 +93,12 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, gdbserver_parser] def __init__(self, args: argparse.Namespace): - """! @brief Constructor.""" + """@brief Constructor.""" super().__init__(args) self._echo_msg = None def _process_commands(self, commands: Optional[List[str]]): - """! @brief Handle OpenOCD commands for compatibility.""" + """@brief Handle OpenOCD commands for compatibility.""" if commands is None: return for cmd_list in commands: @@ -123,13 +123,13 @@ def _process_commands(self, commands: Optional[List[str]]): pass def _gdbserver_listening_cb(self, note: Notification): - """! @brief Callback invoked when the gdbserver starts listening on its port.""" + """@brief Callback invoked when the gdbserver starts listening on its port.""" if self._echo_msg is not None: print(self._echo_msg, file=sys.stderr) sys.stderr.flush() def invoke(self) -> int: - """! @brief Handle 'gdbserver' subcommand.""" + """@brief Handle 'gdbserver' subcommand.""" self._process_commands(self._args.commands) probe_server = None diff --git a/pyocd/subcommands/json_cmd.py b/pyocd/subcommands/json_cmd.py index c78a082b7..e71eb38d1 100644 --- a/pyocd/subcommands/json_cmd.py +++ b/pyocd/subcommands/json_cmd.py @@ -30,7 +30,7 @@ LOG = logging.getLogger(__name__) class JsonSubcommand(SubcommandBase): - """! @brief `pyocd json` subcommand.""" + """@brief `pyocd json` subcommand.""" NAMES = ['json'] HELP = "Output information as JSON." 
@@ -38,7 +38,7 @@ class JsonSubcommand(SubcommandBase): @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" json_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) json_options = json_parser.add_argument_group('json output') @@ -55,7 +55,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: @classmethod def customize_subparser(cls, subparser: argparse.ArgumentParser) -> None: - """! @brief Optionally modify a subparser after it is created.""" + """@brief Optionally modify a subparser after it is created.""" subparser.set_defaults(verbose=0, quiet=0) def __init__(self, args: argparse.Namespace): @@ -65,7 +65,7 @@ def __init__(self, args: argparse.Namespace): logging.disable(logging.CRITICAL) def invoke(self) -> int: - """! @brief Handle 'json' subcommand.""" + """@brief Handle 'json' subcommand.""" exit_status = 0 try: all_outputs = (self._args.probes, self._args.targets, self._args.boards, self._args.features) diff --git a/pyocd/subcommands/list_cmd.py b/pyocd/subcommands/list_cmd.py index e5e97e934..f5fd2be17 100644 --- a/pyocd/subcommands/list_cmd.py +++ b/pyocd/subcommands/list_cmd.py @@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__) class ListSubcommand(SubcommandBase): - """! @brief `pyocd list` subcommand.""" + """@brief `pyocd list` subcommand.""" NAMES = ['list'] HELP = "List information about probes, targets, or boards." @@ -41,7 +41,7 @@ class ListSubcommand(SubcommandBase): @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! 
@brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" list_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) list_output = list_parser.add_argument_group("list output") @@ -67,7 +67,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.COMMON, list_parser] def invoke(self) -> int: - """! @brief Handle 'list' subcommand.""" + """@brief Handle 'list' subcommand.""" all_outputs = (self._args.probes, self._args.targets, self._args.boards, self._args.plugins) # Default to listing probes. diff --git a/pyocd/subcommands/load_cmd.py b/pyocd/subcommands/load_cmd.py index 1f60c572d..e414faa58 100644 --- a/pyocd/subcommands/load_cmd.py +++ b/pyocd/subcommands/load_cmd.py @@ -30,7 +30,7 @@ LOG = logging.getLogger(__name__) class LoadSubcommand(SubcommandBase): - """! @brief `pyocd load` and `flash` subcommand.""" + """@brief `pyocd load` and `flash` subcommand.""" NAMES = ['load', 'flash'] HELP = "Load one or more images into target device memory." @@ -46,7 +46,7 @@ class LoadSubcommand(SubcommandBase): @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) parser_options = parser.add_argument_group("load options") @@ -70,7 +70,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, parser] def invoke(self) -> int: - """! @brief Handle 'load' subcommand.""" + """@brief Handle 'load' subcommand.""" self._increase_logging(["pyocd.flash.loader", __name__]) # Validate arguments. 
diff --git a/pyocd/subcommands/pack_cmd.py b/pyocd/subcommands/pack_cmd.py index fabd6738e..0c82e3667 100644 --- a/pyocd/subcommands/pack_cmd.py +++ b/pyocd/subcommands/pack_cmd.py @@ -33,12 +33,12 @@ LOG = logging.getLogger(__name__) class PackSubcommandBase(SubcommandBase): - """! @brief Base class for `pyocd pack` subcommands.""" + """@brief Base class for `pyocd pack` subcommands.""" # cmsis_pack_manager.Cache is used in quotes in the return type annotation because it may have # not been imported successfully. def _get_cache(self) -> "cmsis_pack_manager.Cache": - """! @brief Handle 'clean' subcommand.""" + """@brief Handle 'clean' subcommand.""" if not CPM_AVAILABLE: raise exceptions.CommandError("'pack' subcommand is not available because cmsis-pack-manager is not installed") @@ -64,19 +64,19 @@ def _get_matches(self, cache: "cmsis_pack_manager.Cache") -> Set[str]: return matches class PackCleanSubcommand(PackSubcommandBase): - """! @brief `pyocd pack clean` subcommand.""" + """@brief `pyocd pack clean` subcommand.""" NAMES = ['clean'] HELP = "Delete the pack index and all installed packs." @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) return [cls.CommonOptions.LOGGING, parser] def invoke(self) -> int: - """! @brief Handle 'clean' subcommand.""" + """@brief Handle 'clean' subcommand.""" cache = self._get_cache() LOG.info("Removing all pack data...") @@ -85,14 +85,14 @@ def invoke(self) -> int: return 0 class PackUpdateSubcommand(PackSubcommandBase): - """! @brief `pyocd pack update` subcommand.""" + """@brief `pyocd pack update` subcommand.""" NAMES = ['update'] HELP = "Update the pack index." @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! 
@brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) parser.add_argument("-c", "--clean", action='store_true', @@ -101,7 +101,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.LOGGING, parser] def invoke(self) -> int: - """! @brief Handle 'update' subcommand.""" + """@brief Handle 'update' subcommand.""" cache = self._get_cache() if self._args.clean: @@ -114,14 +114,14 @@ def invoke(self) -> int: return 0 class PackShowSubcommand(PackSubcommandBase): - """! @brief `pyocd pack show` subcommand.""" + """@brief `pyocd pack show` subcommand.""" NAMES = ['show'] HELP = "Show the list of installed packs." @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) display_options = parser.add_argument_group('display options') @@ -131,7 +131,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.LOGGING, parser] def invoke(self) -> int: - """! @brief Handle 'show' subcommand.""" + """@brief Handle 'show' subcommand.""" cache = self._get_cache() packs = pack_target.ManagedPacks.get_installed_packs(cache) @@ -145,14 +145,14 @@ def invoke(self) -> int: return 0 class PackFindSubcommand(PackSubcommandBase): - """! @brief `pyocd pack find` subcommand.""" + """@brief `pyocd pack find` subcommand.""" NAMES = ['find'] HELP = "Report pack(s) in the index containing matching device part numbers." @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! 
@brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) index_options = parser.add_argument_group("index operations") @@ -171,7 +171,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.LOGGING, parser] def invoke(self) -> int: - """! @brief Handle 'find' subcommand.""" + """@brief Handle 'find' subcommand.""" cache = self._get_cache() if self._args.update: @@ -207,14 +207,14 @@ def invoke(self) -> int: return 0 class PackInstallSubcommand(PackSubcommandBase): - """! @brief `pyocd pack install` subcommand.""" + """@brief `pyocd pack install` subcommand.""" NAMES = ['install'] HELP = "Download and install pack(s) containing matching device part numbers." @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) index_options = parser.add_argument_group("index operations") @@ -233,7 +233,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.LOGGING, parser] def invoke(self) -> int: - """! @brief Handle 'find' subcommand.""" + """@brief Handle 'find' subcommand.""" cache = self._get_cache() if self._args.update: @@ -264,7 +264,7 @@ def invoke(self) -> int: return 0 class PackSubcommand(PackSubcommandBase): - """! @brief `pyocd pack` subcommand.""" + """@brief `pyocd pack` subcommand.""" NAMES = ['pack'] HELP = "Manage CMSIS-Packs for target support." @@ -278,7 +278,7 @@ class PackSubcommand(PackSubcommandBase): @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! 
@brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" pack_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False) cls.add_subcommands(pack_parser) @@ -303,7 +303,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.LOGGING, pack_parser] def invoke(self) -> int: - """! @brief Handle 'pack' subcommand.""" + """@brief Handle 'pack' subcommand.""" if not any([self._args.clean, self._args.update, self._args.show, bool(self._args.find_devices), bool(self._args.install_devices)]): self.parser.print_help() diff --git a/pyocd/subcommands/reset_cmd.py b/pyocd/subcommands/reset_cmd.py index 4008fcc7c..8a0a899be 100644 --- a/pyocd/subcommands/reset_cmd.py +++ b/pyocd/subcommands/reset_cmd.py @@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__) class ResetSubcommand(SubcommandBase): - """! @brief `pyocd reset` subcommand.""" + """@brief `pyocd reset` subcommand.""" NAMES = ['reset'] HELP = "Reset a target device." @@ -39,7 +39,7 @@ class ResetSubcommand(SubcommandBase): @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" reset_parser = argparse.ArgumentParser(description='reset', add_help=False) reset_options = reset_parser.add_argument_group("reset options") @@ -55,7 +55,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, reset_parser] def invoke(self) -> None: - """! @brief Handle 'reset' subcommand.""" + """@brief Handle 'reset' subcommand.""" # Verify selected reset type. 
try: the_reset_type = convert_reset_type(self._args.reset_type) diff --git a/pyocd/subcommands/rtt_cmd.py b/pyocd/subcommands/rtt_cmd.py index 29a0d8c0b..e3c8fa98f 100644 --- a/pyocd/subcommands/rtt_cmd.py +++ b/pyocd/subcommands/rtt_cmd.py @@ -32,7 +32,7 @@ class SEGGER_RTT_BUFFER_UP(Structure): - """! @brief `SEGGER RTT Ring Buffer` target to host.""" + """@brief `SEGGER RTT Ring Buffer` target to host.""" _fields_ = [ ("sName", c_uint32), @@ -45,7 +45,7 @@ class SEGGER_RTT_BUFFER_UP(Structure): class SEGGER_RTT_BUFFER_DOWN(Structure): - """! @brief `SEGGER RTT Ring Buffer` host to target.""" + """@brief `SEGGER RTT Ring Buffer` host to target.""" _fields_ = [ ("sName", c_uint32), @@ -58,7 +58,7 @@ class SEGGER_RTT_BUFFER_DOWN(Structure): class SEGGER_RTT_CB(Structure): - """! @brief `SEGGER RTT control block` structure. """ + """@brief `SEGGER RTT control block` structure. """ _fields_ = [ ("acID", c_char * 16), @@ -70,14 +70,14 @@ class SEGGER_RTT_CB(Structure): class RTTSubcommand(SubcommandBase): - """! @brief `pyocd rtt` subcommand.""" + """@brief `pyocd rtt` subcommand.""" NAMES = ["rtt"] HELP = "SEGGER RTT Viewer." @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! @brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" rtt_parser = argparse.ArgumentParser(cls.HELP, add_help=False) diff --git a/pyocd/subcommands/server_cmd.py b/pyocd/subcommands/server_cmd.py index c23b34e90..a707bb278 100644 --- a/pyocd/subcommands/server_cmd.py +++ b/pyocd/subcommands/server_cmd.py @@ -28,14 +28,14 @@ LOG = logging.getLogger(__name__) class ServerSubcommand(SubcommandBase): - """! @brief `pyocd server` subcommand.""" + """@brief `pyocd server` subcommand.""" NAMES = ['server'] HELP = "Run debug probe server." @classmethod def get_args(cls) -> List[argparse.ArgumentParser]: - """! 
@brief Add this subcommand to the subparsers object.""" + """@brief Add this subcommand to the subparsers object.""" server_parser = argparse.ArgumentParser(description='server', add_help=False) server_config_options = server_parser.add_argument_group('configuration') @@ -67,7 +67,7 @@ def get_args(cls) -> List[argparse.ArgumentParser]: return [cls.CommonOptions.LOGGING, server_parser] def invoke(self) -> None: - """! @brief Handle 'server' subcommand.""" + """@brief Handle 'server' subcommand.""" # Create a session to load config, particularly logging config. Even though we do have a # probe, we don't set it in the session because we don't want the board, target, etc objects # to be created. diff --git a/pyocd/target/builtin/target_MKL28Z512xxx7.py b/pyocd/target/builtin/target_MKL28Z512xxx7.py index 15a06ca0d..dd6eb12a5 100644 --- a/pyocd/target/builtin/target_MKL28Z512xxx7.py +++ b/pyocd/target/builtin/target_MKL28Z512xxx7.py @@ -133,8 +133,7 @@ def __init__(self, target): self._saved_rccr = 0 def prepare_target(self): - """! - This function sets up target clocks to ensure that flash is clocked at the maximum + """This function sets up target clocks to ensure that flash is clocked at the maximum of 24MHz. Doing so gets the best flash programming performance. The FIRC clock source is used so that there is no dependency on an external crystal frequency. """ @@ -154,7 +153,7 @@ def prepare_target(self): LOG.debug("SCG_CSR = 0x%08x", csr) def restore_target(self): - """! Restore clock registers to original values.""" + """Restore clock registers to original values.""" self.target.write32(SCG_FIRCCSR, self._saved_firccsr) self.target.write32(SCG_RCCR, self._saved_rccr) @@ -203,7 +202,7 @@ def create_init_sequence(self): return seq def create_kl28_aps(self): - """! 
@brief Set the fixed list of valid AP numbers for KL28.""" + """@brief Set the fixed list of valid AP numbers for KL28.""" self.dp.valid_aps = [0, 1, 2] def detect_dual_core(self): diff --git a/pyocd/target/builtin/target_RP2040.py b/pyocd/target/builtin/target_RP2040.py index a27971a9e..8aadceee5 100644 --- a/pyocd/target/builtin/target_RP2040.py +++ b/pyocd/target/builtin/target_RP2040.py @@ -84,7 +84,7 @@ def _parity32(value): return parity & 1 class RP2040Base(CoreSightTarget): - """! @brief Raspberry Pi RP2040. + """@brief Raspberry Pi RP2040. This device is very strange in that it as three DPs. The first two DPs each have a single AHB-AP for the two Cortex-M0+ cores. The third DP is a "Rescue DP" that has no APs, but the CDBGPWRUPREQ @@ -92,7 +92,7 @@ class RP2040Base(CoreSightTarget): """ class Targetsel: - """! @brief DP TARGETEL values for each DP.""" + """@brief DP TARGETEL values for each DP.""" CORE_0 = 0x01002927 CORE_1 = 0x11002927 RESCUE_DP = 0xf1002927 @@ -143,7 +143,7 @@ def _select_core(self): self.select_dp(self._core_targetsel) def select_dp(self, targetsel): - """! @brief Select the DP with the matching TARGETSEL.""" + """@brief Select the DP with the matching TARGETSEL.""" probe = self.session.probe # Have to connect the probe first, or SWCLK will not be enabled. @@ -198,14 +198,14 @@ def select_dp(self, targetsel): probe.write_dp(0x8, 0x0) # restore DPBANKSEL=0 class RP2040Core0(RP2040Base): - """! @brief RP2040 target for core 0.""" + """@brief RP2040 target for core 0.""" def __init__(self, session): super().__init__(session) self._core_targetsel = self.Targetsel.CORE_0 class RP2040Core1(RP2040Base): - """! 
@brief RP2040 target for core 1.""" + """@brief RP2040 target for core 1.""" def __init__(self, session): super().__init__(session) diff --git a/pyocd/target/family/flash_kinetis.py b/pyocd/target/family/flash_kinetis.py index 51b9ca986..0d55ec0cb 100644 --- a/pyocd/target/family/flash_kinetis.py +++ b/pyocd/target/family/flash_kinetis.py @@ -35,10 +35,10 @@ FDPROT_VAL = 0xFF class Flash_Kinetis(Flash): - """! @brief Base flash algorithm class for Freescale Kinetis devices.""" + """@brief Base flash algorithm class for Freescale Kinetis devices.""" def override_security_bits(self, address, data): - """! @brief Check security bytes. + """@brief Check security bytes. Override Flash Configuration Field bytes at address 0x400-0x40f to ensure that flash security won't be enabled. If flash security is enabled, then the chip is inaccessible via SWD. diff --git a/pyocd/target/family/target_kinetis.py b/pyocd/target/family/target_kinetis.py index 88c2474ba..dbfa87a86 100644 --- a/pyocd/target/family/target_kinetis.py +++ b/pyocd/target/family/target_kinetis.py @@ -53,7 +53,7 @@ LOG = logging.getLogger(__name__) class Kinetis(CoreSightTarget): - """! @brief Family class for NXP Kinetis devices. + """@brief Family class for NXP Kinetis devices. """ VENDOR = "NXP" @@ -89,7 +89,7 @@ def check_mdm_ap_idr(self): LOG.debug("MDM-AP version %d", self.mdm_ap_version) def check_flash_security(self): - """! @brief Check security and unlock device. + """@brief Check security and unlock device. This init task determines whether the device is locked (flash security enabled). If it is, and if auto unlock is enabled, then perform a mass erase to unlock the device. @@ -171,7 +171,7 @@ def check_flash_security(self): LOG.info("%s not in secure state", self.part_number) def perform_halt_on_connect(self): - """! 
This init task runs *after* cores are created.""" + """This init task runs *after* cores are created.""" if self.session.options.get('connect_mode') == 'under-reset' or self._force_halt_on_connect: if not self.mdm_ap: return @@ -239,7 +239,7 @@ def _wait_for_flash_init(self): return not to.did_time_out def mass_erase(self): - """! @brief Perform a mass erase operation. + """@brief Perform a mass erase operation. @note Reset is held for the duration of this function. @return True Mass erase succeeded. @return False Mass erase failed or is disabled. @@ -258,7 +258,7 @@ def mass_erase(self): return result def _mass_erase(self): - """! @brief Private mass erase routine.""" + """@brief Private mass erase routine.""" # Flash must finish initing before we can mass erase. if not self._wait_for_flash_init(): LOG.error("Mass erase timeout waiting for flash to finish init") diff --git a/pyocd/target/family/target_lpc5500.py b/pyocd/target/family/target_lpc5500.py index 3fa98c349..668b68b7a 100644 --- a/pyocd/target/family/target_lpc5500.py +++ b/pyocd/target/family/target_lpc5500.py @@ -119,7 +119,7 @@ def _modify_ap1(self, seq): return seq def check_locked_state(self, seq): - """! @brief Attempt to unlock cores if they are locked (flash is empty etc.)""" + """@brief Attempt to unlock cores if they are locked (flash is empty etc.)""" # The device is not locked if AP#0 was found and is enabled. if (0 in self.aps) and self.aps[0].is_enabled: return @@ -210,7 +210,7 @@ def trace_start(self): self._enable_traceclk() def unlock(self, dm_ap): - """! @brief Unlock Cores. See UM11126 51.6.1 """ + """@brief Unlock Cores. See UM11126 51.6.1 """ assert self.dp.probe.is_open LOG.info("attempting unlock procedure") @@ -242,7 +242,7 @@ def unlock(self, dm_ap): class CortexM_LPC5500(CortexM_v8M): def reset_and_halt(self, reset_type=None): - """! @brief Perform a reset and stop the core on the reset handler. """ + """@brief Perform a reset and stop the core on the reset handler. 
""" halt_only = False catch_mode = 0 diff --git a/pyocd/target/family/target_nRF52.py b/pyocd/target/family/target_nRF52.py index a4798007a..885bbf65e 100644 --- a/pyocd/target/family/target_nRF52.py +++ b/pyocd/target/family/target_nRF52.py @@ -85,7 +85,7 @@ def check_ctrl_ap_idr(self): LOG.debug("CTRL-AP version %d", ctrl_ap_version) def check_flash_security(self): - """! @brief Check security and unlock device. + """@brief Check security and unlock device. This init task determines whether the device is locked (APPROTECT enabled). If it is, and if auto unlock is enabled, then perform a mass erase to unlock the device. diff --git a/pyocd/target/pack/cmsis_pack.py b/pyocd/target/pack/cmsis_pack.py index 115a874cb..983fde2c1 100644 --- a/pyocd/target/pack/cmsis_pack.py +++ b/pyocd/target/pack/cmsis_pack.py @@ -32,11 +32,11 @@ LOG = logging.getLogger(__name__) class MalformedCmsisPackError(exceptions.TargetSupportError): - """! @brief Exception raised for errors parsing a CMSIS-Pack.""" + """@brief Exception raised for errors parsing a CMSIS-Pack.""" pass class _DeviceInfo: - """! @brief Simple container class to hold XML elements describing a device.""" + """@brief Simple container class to hold XML elements describing a device.""" def __init__(self, element: Element, **kwargs): self.element: Element = element self.families: List[str] = kwargs.get('families', []) @@ -45,7 +45,7 @@ def __init__(self, element: Element, **kwargs): self.debugs: List[Element] = kwargs.get('debugs', []) def _get_part_number_from_element(element: Element) -> str: - """! @brief Extract the part number from a device or variant XML element.""" + """@brief Extract the part number from a device or variant XML element.""" assert element.tag in ("device", "variant") if element.tag == "device": return element.attrib['Dname'] @@ -55,7 +55,7 @@ def _get_part_number_from_element(element: Element) -> str: raise ValueError("element is neither device nor variant") class CmsisPack: - """! 
@brief Wraps a CMSIS Device Family Pack. + """@brief Wraps a CMSIS Device Family Pack. This class provides a top-level interface for extracting device information from CMSIS-Packs. After an instance is constructed, a list of the devices described within the pack is available @@ -72,7 +72,7 @@ class CmsisPack: the parsing of each element type into pyOCD-compatible data. """ def __init__(self, file_or_path: Union[str, zipfile.ZipFile, IO[bytes]]) -> None: - """! @brief Constructor. + """@brief Constructor. Opens the CMSIS-Pack and builds instances of CmsisPackDevice for all the devices and variants defined within the pack. @@ -105,21 +105,21 @@ def __init__(self, file_or_path: Union[str, zipfile.ZipFile, IO[bytes]]) -> None @property def filename(self) -> Optional[str]: - """! @brief Accessor for the filename or path of the .pack file.""" + """@brief Accessor for the filename or path of the .pack file.""" return self._pack_file.filename @property def pdsc(self) -> "CmsisPackDescription": - """! @brief Accessor for the CmsisPackDescription instance for the pack's PDSC file.""" + """@brief Accessor for the CmsisPackDescription instance for the pack's PDSC file.""" return self._pdsc @property def devices(self) -> List["CmsisPackDevice"]: - """! @brief A list of CmsisPackDevice objects for every part number defined in the pack.""" + """@brief A list of CmsisPackDevice objects for every part number defined in the pack.""" return self._pdsc.devices def get_file(self, filename) -> IO[bytes]: - """! @brief Return file-like object for a file within the pack. + """@brief Return file-like object for a file within the pack. @param self @param filename Relative path within the pack. May use forward or back slashes. @@ -135,7 +135,7 @@ class CmsisPackDescription: """ def __init__(self, pack: CmsisPack, pdsc_file: IO) -> None: - """! @brief Constructor. + """@brief Constructor. @param self This object. @param pack Reference to the CmsisPack instance. 
@@ -159,12 +159,12 @@ def __init__(self, pack: CmsisPack, pdsc_file: IO) -> None: @property def pack(self) -> CmsisPack: - """! @brief Reference to the containing CmsisPack object.""" + """@brief Reference to the containing CmsisPack object.""" return self._pack @property def devices(self) -> List["CmsisPackDevice"]: - """! @brief A list of CmsisPackDevice objects for every part number defined in the pack.""" + """@brief A list of CmsisPackDevice objects for every part number defined in the pack.""" return self._devices def _parse_devices(self, parent: Element) -> None: @@ -205,7 +205,7 @@ def _parse_devices(self, parent: Element) -> None: self._state_stack.pop() def _extract_families(self) -> List[str]: - """! @brief Generate list of family names for a device.""" + """@brief Generate list of family names for a device.""" families = [] for state in self._state_stack: elem = state.element @@ -219,7 +219,7 @@ def _extract_families(self) -> List[str]: V = TypeVar('V') def _extract_items(self, state_info_name: str, filter: Callable[[Dict[Any, V], Element], None]) -> List[V]: - """! @brief Generic extractor utility. + """@brief Generic extractor utility. Iterates over saved elements for the specified device state info for each level of the device state stack, from outer to inner, calling the provided filter callback each @@ -243,7 +243,7 @@ def _extract_items(self, state_info_name: str, filter: Callable[[Dict[Any, V], E return list(map.values()) def _extract_memories(self) -> List[Element]: - """! @brief Extract memory elements. + """@brief Extract memory elements. The unique identifier is a bi-tuple of the memory's name, which is either the 'name' or 'id' attribute, in that order, plus the pname. If neither attribute exists, the region base and size are turned into @@ -307,7 +307,7 @@ def filter(map: Dict, elem: Element) -> None: return self._extract_items('memories', filter) def _extract_algos(self) -> List[Element]: - """! @brief Extract algorithm elements. 
+ """@brief Extract algorithm elements. The unique identifier is the algorithm's memory address range. @@ -331,7 +331,7 @@ def filter(map: Dict, elem: Element) -> None: return self._extract_items('algos', filter) def _extract_debugs(self) -> List[Element]: - """! @brief Extract debug elements. + """@brief Extract debug elements. If the debug element does not have a 'Pname' element, its identifier is set to "*" to represent that it applies to all processors. @@ -358,7 +358,7 @@ def filter(map: Dict, elem: Element) -> None: return self._extract_items('debugs', filter) def _get_bool_attribute(elem: Element, name: str, default: bool = False) -> bool: - """! @brief Extract an XML attribute with a boolean value. + """@brief Extract an XML attribute with a boolean value. Supports "true"/"false" or "1"/"0" as the attribute values. Leading and trailing whitespace is stripped, and the comparison is case-insensitive. @@ -380,7 +380,7 @@ def _get_bool_attribute(elem: Element, name: str, default: bool = False) -> bool return default class CmsisPackDevice: - """! @brief Wraps a device defined in a CMSIS Device Family Pack. + """@brief Wraps a device defined in a CMSIS Device Family Pack. Responsible for converting the XML elements that describe the device into objects usable by pyOCD. This includes the memory map and flash algorithms. @@ -390,7 +390,7 @@ class CmsisPackDevice: """ def __init__(self, pack: CmsisPack, device_info: _DeviceInfo): - """! @brief Constructor. + """@brief Constructor. @param self @param pack The CmsisPack object that contains this device. @param device_info A _DeviceInfo object with the XML elements that describe this device. @@ -404,7 +404,7 @@ def __init__(self, pack: CmsisPack, device_info: _DeviceInfo): self._memory_map: Optional[MemoryMap] = None def _build_memory_regions(self) -> None: - """! @brief Creates memory region instances for the device. + """@brief Creates memory region instances for the device. 
For each `` element in the device info, a memory region object is created and added to the `_regions` attribute. IROM or non-writable memories are created as RomRegions @@ -475,7 +475,7 @@ def _get_containing_region(self, addr: int) -> Optional[MemoryRegion]: return None def _build_flash_regions(self) -> None: - """! @brief Converts ROM memory regions to flash regions. + """@brief Converts ROM memory regions to flash regions. Each ROM region in the `_regions` attribute is converted to a flash region if a matching flash algo can be found. If the flash has multiple sector sizes, then separate flash @@ -625,7 +625,7 @@ def _split_flash_region_by_sector_size(self, alias=region.alias) def _find_matching_algo(self, region: MemoryRegion) -> Element: - """! @brief Searches for a flash algo covering the regions's address range.'""" + """@brief Searches for a flash algo covering the regions's address range.'""" for algo in self._info.algos: # Both start and size are required attributes. algoStart = int(algo.attrib['start'], base=0) @@ -638,7 +638,7 @@ def _find_matching_algo(self, region: MemoryRegion) -> Element: raise KeyError("no matching flash algorithm") def _load_flash_algo(self, filename: str) -> Optional[PackFlashAlgo]: - """! @brief Return the PackFlashAlgo instance for the given flash algo filename.""" + """@brief Return the PackFlashAlgo instance for the given flash algo filename.""" if self.pack is not None: try: algo_data = self.pack.get_file(filename) @@ -650,12 +650,12 @@ def _load_flash_algo(self, filename: str) -> Optional[PackFlashAlgo]: @property def pack(self) -> CmsisPack: - """! @brief The CmsisPack object that defines this device.""" + """@brief The CmsisPack object that defines this device.""" return self._pack @property def part_number(self) -> str: - """! @brief Part number for this device. + """@brief Part number for this device. 
This value comes from either the `Dname` or `Dvariant` attribute, depending on whether the device was created from a `` or `` element. @@ -664,17 +664,17 @@ def part_number(self) -> str: @property def vendor(self) -> str: - """! @brief Vendor or manufacturer name.""" + """@brief Vendor or manufacturer name.""" return self._info.families[0].split(':')[0] @property def families(self) -> List[str]: - """! @brief List of families the device belongs to, ordered most generic to least.""" + """@brief List of families the device belongs to, ordered most generic to least.""" return [f for f in self._info.families[1:]] @property def memory_map(self) -> MemoryMap: - """! @brief MemoryMap object.""" + """@brief MemoryMap object.""" # Lazily construct the memory map. if self._memory_map is None: self._build_memory_regions() @@ -690,7 +690,7 @@ def memory_map(self) -> MemoryMap: @property def svd(self) -> Optional[IO[bytes]]: - """! @brief File-like object for the device's SVD file. + """@brief File-like object for the device's SVD file. @todo Support multiple cores. """ try: @@ -701,7 +701,7 @@ def svd(self) -> Optional[IO[bytes]]: @property def default_reset_type(self) -> Target.ResetType: - """! @brief One of the Target.ResetType enums. + """@brief One of the Target.ResetType enums. @todo Support multiple cores. """ try: diff --git a/pyocd/target/pack/flash_algo.py b/pyocd/target/pack/flash_algo.py index 830ced8a2..91a5019f0 100644 --- a/pyocd/target/pack/flash_algo.py +++ b/pyocd/target/pack/flash_algo.py @@ -31,12 +31,11 @@ FLASH_ALGO_STACK_SIZE = 512 class FlashAlgoException(exceptions.TargetSupportError): - """! @brief Exception class for errors parsing an FLM file.""" + """@brief Exception class for errors parsing an FLM file.""" pass class PackFlashAlgo(object): - """! 
- @brief Class to wrap a flash algo + """@brief Class to wrap a flash algo This class is intended to provide easy access to the information provided by a flash algorithm, such as symbols and the flash @@ -80,7 +79,7 @@ class PackFlashAlgo(object): _FLASH_BLOB_HEADER_SIZE = len(_FLASH_BLOB_HEADER) * 4 def __init__(self, data): - """! @brief Construct a PackFlashAlgo from a file-like object.""" + """@brief Construct a PackFlashAlgo from a file-like object.""" self.elf = ELFBinaryFile(data) self.flash_info = PackFlashInfo(self.elf) @@ -112,7 +111,7 @@ def __init__(self, data): self.algo_data = self._create_algo_bin(ro_rw_zi) def get_pyocd_flash_algo(self, blocksize, ram_region): - """! @brief Return a dictionary representing a pyOCD flash algorithm, or None. + """@brief Return a dictionary representing a pyOCD flash algorithm, or None. The most interesting operation this method performs is dynamically allocating memory for the flash algo from a given RAM region. Note that the .data and .bss sections are @@ -182,7 +181,7 @@ def get_pyocd_flash_algo(self, blocksize, ram_region): return flash_algo def _extract_symbols(self, symbols, default=None): - """! @brief Fill 'symbols' field with required flash algo symbols""" + """@brief Fill 'symbols' field with required flash algo symbols""" to_ret = {} for symbol in symbols: symbolInfo = self.elf.symbol_decoder.get_symbol_for_name(symbol) @@ -195,7 +194,7 @@ def _extract_symbols(self, symbols, default=None): return to_ret def _find_sections(self, name_type_pairs): - """! @brief Return a list of sections the same length and order of the input list""" + """@brief Return a list of sections the same length and order of the input list""" sections = [None] * len(name_type_pairs) for section in self.elf.sections: section_name = to_str_safe(section.name) @@ -210,7 +209,7 @@ def _find_sections(self, name_type_pairs): return sections def _algo_fill_zi_if_missing(self, ro_rw_zi): - """! 
@brief Create an empty zi section if it is missing""" + """@brief Create an empty zi section if it is missing""" s_ro, s_rw, s_zi = ro_rw_zi if s_rw is None: return ro_rw_zi @@ -220,7 +219,7 @@ def _algo_fill_zi_if_missing(self, ro_rw_zi): return s_ro, s_rw, s_zi def _algo_check_for_section_problems(self, ro_rw_zi): - """! @brief Return a string describing any errors with the layout or None if good""" + """@brief Return a string describing any errors with the layout or None if good""" s_ro, s_rw, s_zi = ro_rw_zi if s_ro is None: return "RO section is missing" @@ -251,7 +250,7 @@ def _create_algo_bin(self, ro_rw_zi): class PackFlashInfo(object): - """! @brief Wrapper class for the non-executable information in an FLM file""" + """@brief Wrapper class for the non-executable information in an FLM file""" FLASH_DEVICE_STRUCT = " None: pass class ManagedPacksImpl: - """! @brief Namespace for managed CMSIS-Pack utilities. + """@brief Namespace for managed CMSIS-Pack utilities. By managed, we mean managed by the cmsis-pack-manager package. All the methods on this class apply only to those packs managed by cmsis-pack-manager, not any targets from packs specified @@ -62,7 +62,7 @@ class ManagedPacksImpl: @staticmethod def get_installed_packs(cache: Optional[cmsis_pack_manager.Cache] = None) -> List["CmsisPackRef"]: # type:ignore - """! @brief Return a list containing CmsisPackRef objects for all installed packs.""" + """@brief Return a list containing CmsisPackRef objects for all installed packs.""" if cache is None: cache = cmsis_pack_manager.Cache(True, True) results = [] @@ -78,7 +78,7 @@ def get_installed_packs(cache: Optional[cmsis_pack_manager.Cache] = None) -> Lis @staticmethod def get_installed_targets(cache: Optional[cmsis_pack_manager.Cache] = None) -> List[CmsisPackDevice]: # type:ignore - """! 
@brief Return a list of CmsisPackDevice objects for installed pack targets.""" + """@brief Return a list of CmsisPackDevice objects for installed pack targets.""" if cache is None: cache = cmsis_pack_manager.Cache(True, True) results = [] @@ -90,7 +90,7 @@ def get_installed_targets(cache: Optional[cmsis_pack_manager.Cache] = None) -> L @staticmethod def populate_target(device_name: str) -> None: - """! @brief Add targets from cmsis-pack-manager matching the given name. + """@brief Add targets from cmsis-pack-manager matching the given name. Targets are added to the `#TARGET` list. A case-insensitive comparison against the device part number is used to find the target to populate. If multiple packs are installed @@ -108,11 +108,11 @@ def populate_target(device_name: str) -> None: ManagedPacks = ManagedPacksStub class _PackTargetMethods: - """! @brief Container for methods added to the dynamically generated pack target subclass.""" + """@brief Container for methods added to the dynamically generated pack target subclass.""" @staticmethod def _pack_target__init__(self, session: "Session") -> None: # type:ignore - """! @brief Constructor for dynamically created target class.""" + """@brief Constructor for dynamically created target class.""" super(self.__class__, self).__init__(session, self._pack_device.memory_map) self.vendor = self._pack_device.vendor @@ -123,7 +123,7 @@ def _pack_target__init__(self, session: "Session") -> None: # type:ignore @staticmethod def _pack_target_create_init_sequence(self) -> "CallSequence": # type:ignore - """! 
@brief Creates an init task to set the default reset type.""" + """@brief Creates an init task to set the default reset type.""" seq = super(self.__class__, self).create_init_sequence() seq.wrap_task('discovery', lambda seq: seq.insert_after('create_cores', @@ -134,16 +134,16 @@ def _pack_target_create_init_sequence(self) -> "CallSequence": # type:ignore @staticmethod def _pack_target_set_default_reset_type(self) -> None: # type:ignore - """! @brief Set's the first core's default reset type to the one specified in the pack.""" + """@brief Set's the first core's default reset type to the one specified in the pack.""" if 0 in self.cores: self.cores[0].default_reset_type = self._pack_device.default_reset_type class PackTargets: - """! @brief Namespace for CMSIS-Pack target generation utilities. """ + """@brief Namespace for CMSIS-Pack target generation utilities. """ @staticmethod def _find_family_class(dev: CmsisPackDevice) -> Type[CoreSightTarget]: - """! @brief Search the families list for matching entry.""" + """@brief Search the families list for matching entry.""" for familyInfo in FAMILIES: # Skip if wrong vendor. if dev.vendor != familyInfo.vendor: @@ -161,7 +161,7 @@ def _find_family_class(dev: CmsisPackDevice) -> Type[CoreSightTarget]: @staticmethod def _generate_pack_target_class(dev: CmsisPackDevice) -> Optional[type]: - """! @brief Generates a new target class from a CmsisPackDevice. + """@brief Generates a new target class from a CmsisPackDevice. @param dev A CmsisPackDevice object. @return A new subclass of either CoreSightTarget or one of the family classes. @@ -187,7 +187,7 @@ def _generate_pack_target_class(dev: CmsisPackDevice) -> Optional[type]: @staticmethod def populate_device(dev: CmsisPackDevice) -> None: - """! @brief Generates and populates the target defined by a CmsisPackDevice. + """@brief Generates and populates the target defined by a CmsisPackDevice. The new target class is added to the `#TARGET` list. 
@@ -211,7 +211,7 @@ def populate_device(dev: CmsisPackDevice) -> None: @staticmethod def populate_targets_from_pack(pack_list: Union[PackReferenceType, List[PackReferenceType], Tuple[PackReferenceType]]) -> None: - """! @brief Adds targets defined in the provided CMSIS-Pack. + """@brief Adds targets defined in the provided CMSIS-Pack. Targets are added to the `#TARGET` list. diff --git a/pyocd/tools/gdb_server.py b/pyocd/tools/gdb_server.py index f29c71905..4a25bbaeb 100644 --- a/pyocd/tools/gdb_server.py +++ b/pyocd/tools/gdb_server.py @@ -146,7 +146,7 @@ def setup_logging(self, args): logging.basicConfig(level=level, format=format) def process_commands(self, commands): - """! @brief Handle OpenOCD commands for compatibility.""" + """@brief Handle OpenOCD commands for compatibility.""" if commands is None: return for cmd_list in commands: diff --git a/pyocd/tools/lists.py b/pyocd/tools/lists.py index 7dd7e5d40..1dbca1fb5 100644 --- a/pyocd/tools/lists.py +++ b/pyocd/tools/lists.py @@ -37,7 +37,7 @@ def unique_id(self) -> str: class ListGenerator(object): @staticmethod def list_probes(): - """! @brief Generate dictionary with info about the connected debug probes. + """@brief Generate dictionary with info about the connected debug probes. Output version history: - 1.0, initial version @@ -77,7 +77,7 @@ def list_probes(): @staticmethod def list_boards(name_filter=None): - """! @brief Generate dictionary with info about supported boards. + """@brief Generate dictionary with info about supported boards. Output version history: - 1.0, initial version @@ -118,7 +118,7 @@ def list_boards(name_filter=None): @staticmethod def list_targets(name_filter=None, vendor_filter=None, source_filter=None): - """! @brief Generate dictionary with info about all supported targets. + """@brief Generate dictionary with info about all supported targets. 
Output version history: - 1.0, initial version @@ -195,7 +195,7 @@ def list_targets(name_filter=None, vendor_filter=None, source_filter=None): @staticmethod def list_plugins(): - """! @brief Generate dictionary with lists of available plugins. + """@brief Generate dictionary with lists of available plugins. Output version history: - 1.0, initial version with debug probe and RTOS plugins @@ -238,7 +238,7 @@ def list_plugins(): @staticmethod def list_features(): - """! @brief Generate dictionary with info about supported features and options. + """@brief Generate dictionary with info about supported features and options. Output version history: - 1.1, added 'plugins' feature diff --git a/pyocd/trace/events.py b/pyocd/trace/events.py index 419047624..51b303ee5 100644 --- a/pyocd/trace/events.py +++ b/pyocd/trace/events.py @@ -15,7 +15,7 @@ # limitations under the License. class TraceEvent(object): - """! @brief Base trace event class.""" + """@brief Base trace event class.""" def __init__(self, desc="", ts=0): self._desc = desc self._timestamp = ts @@ -35,12 +35,12 @@ def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, str(self)) class TraceOverflow(TraceEvent): - """! @brief Trace overflow event.""" + """@brief Trace overflow event.""" def __init__(self, ts=0): super(TraceOverflow, self).__init__("overflow", ts) class TraceTimestamp(TraceEvent): - """! @brief Trace local timestamp.""" + """@brief Trace local timestamp.""" def __init__(self, tc, ts=0): super(TraceTimestamp, self).__init__("timestamp", ts) self._tc = 0 @@ -53,7 +53,7 @@ def __str__(self): return "[{}] local timestamp TC={:#x} {}".format(self._timestamp, self.tc, self.timestamp) class TraceITMEvent(TraceEvent): - """! 
@brief Trace ITM stimulus port event.""" + """@brief Trace ITM stimulus port event.""" def __init__(self, port, data, width, ts=0): super(TraceITMEvent, self).__init__("itm", ts) self._port = port @@ -83,7 +83,7 @@ def __str__(self): return "[{}] ITM: port={:d} data={}".format(self.timestamp, self.port, d) class TraceEventCounter(TraceEvent): - """! @brief Trace DWT counter overflow event.""" + """@brief Trace DWT counter overflow event.""" CPI_MASK = 0x01 EXC_MASK = 0x02 SLEEP_MASK = 0x04 @@ -119,7 +119,7 @@ def __str__(self): return "[{}] DWT: Event:{}".format(self.timestamp, self._get_event_desc(self.counter_mask)) class TraceExceptionEvent(TraceEvent): - """! @brief Exception trace event.""" + """@brief Exception trace event.""" ENTERED = 1 EXITED = 2 RETURNED = 3 @@ -153,7 +153,7 @@ def __str__(self): return "[{}] DWT: Exception #{:d} {} {}".format(self.timestamp, self.exception_number, action, self.exception_name) class TracePeriodicPC(TraceEvent): - """! @brief Periodic PC trace event.""" + """@brief Periodic PC trace event.""" def __init__(self, pc, ts=0): super(TracePeriodicPC, self).__init__("pc", ts) self._pc = pc @@ -166,7 +166,7 @@ def __str__(self): return "[{}] DWT: PC={:#010x}".format(self.timestamp, self.pc) class TraceDataTraceEvent(TraceEvent): - """! @brief DWT data trace event. + """@brief DWT data trace event. Valid combinations: - PC value. diff --git a/pyocd/trace/sink.py b/pyocd/trace/sink.py index 8047c1db6..e8bd49730 100644 --- a/pyocd/trace/sink.py +++ b/pyocd/trace/sink.py @@ -18,26 +18,26 @@ import collections.abc class TraceEventSink(object): - """! @brief Abstract interface for a trace event sink.""" + """@brief Abstract interface for a trace event sink.""" def receive(self, event): - """! @brief Handle a single trace event. + """@brief Handle a single trace event. @param self @param event An instance of TraceEvent or one of its subclasses. """ raise NotImplementedError() class TraceEventFilter(TraceEventSink): - """! 
@brief Abstract interface for a trace event filter.""" + """@brief Abstract interface for a trace event filter.""" def __init__(self, sink=None): self._sink = sink def connect(self, sink): - """! @brief Connect the downstream trace sink or filter.""" + """@brief Connect the downstream trace sink or filter.""" self._sink = sink def receive(self, event): - """! @brief Handle a single trace event. + """@brief Handle a single trace event. Passes the event through the filter() method. If one or more objects are returned, they are then passed to the trace sink connected to this filter (which may be another filter). @@ -54,7 +54,7 @@ def receive(self, event): self._sink.receive(event) def filter(self, event): - """! @brief Filter a single trace event. + """@brief Filter a single trace event. @param self @param event An instance of TraceEvent or one of its subclasses. @@ -63,13 +63,13 @@ def filter(self, event): raise NotImplementedError() class TraceEventTee(TraceEventSink): - """! @brief Trace event sink that replicates events to multiple sinks.""" + """@brief Trace event sink that replicates events to multiple sinks.""" def __init__(self): self._sinks = [] def connect(self, sinks): - """! @brief Connect one or more downstream trace sinks. + """@brief Connect one or more downstream trace sinks. @param self @param sinks If this parameter is a single object, it will be added to the list of @@ -82,7 +82,7 @@ def connect(self, sinks): self._sinks.append(sinks) def receive(self, event): - """! @brief Replicate a single trace event to all connected downstream trace event sinks. + """@brief Replicate a single trace event to all connected downstream trace event sinks. @param self @param event An instance of TraceEvent or one of its subclasses. diff --git a/pyocd/trace/swo.py b/pyocd/trace/swo.py index 5939ab020..769fb706a 100644 --- a/pyocd/trace/swo.py +++ b/pyocd/trace/swo.py @@ -18,7 +18,7 @@ from . import events class SWOParser(object): - """! @brief SWO data stream parser. 
+ """@brief SWO data stream parser. Processes a stream of SWO data and generates TraceEvent objects. SWO data is passed to the parse() method. It processes the data and creates TraceEvent objects which are passed to an @@ -45,16 +45,16 @@ def reset(self): next(self._parser) def connect(self, sink): - """! @brief Connect the downstream trace sink or filter.""" + """@brief Connect the downstream trace sink or filter.""" self._sink = sink @property def bytes_parsed(self): - """! @brief The number of bytes of SWO data parsed thus far.""" + """@brief The number of bytes of SWO data parsed thus far.""" return self._bytes_parsed def parse(self, data): - """! @brief Process SWO data. + """@brief Process SWO data. This method will return once the provided data is consumed, and can be called again when more data is available. There is no minimum or maximum limit on the size of the provided @@ -69,14 +69,14 @@ def parse(self, data): self._bytes_parsed += 1 def _flush_events(self): - """! @brief Send all pending events to event sink.""" + """@brief Send all pending events to event sink.""" if self._sink is not None: for event in self._pending_events: self._sink.receive(event) self._pending_events = [] def _merge_data_trace_events(self, event): - """! @brief Look for pairs of data trace events and merge.""" + """@brief Look for pairs of data trace events and merge.""" if isinstance(event, events.TraceDataTraceEvent): # Record the first data trace event. if self._pending_data_trace is None: @@ -107,7 +107,7 @@ def _merge_data_trace_events(self, event): return False def _send_event(self, event): - """! @brief Process event objects and decide when to send to event sink. + """@brief Process event objects and decide when to send to event sink. This method handles the logic to associate a timestamp event with the prior other event. 
A list of pending events is built up until either a timestamp or overflow event @@ -133,7 +133,7 @@ def _send_event(self, event): self._flush_events() def _parse(self): - """! @brief SWO parser as generator function coroutine. + """@brief SWO parser as generator function coroutine. The generator yields every time it needs a byte of SWO data. The caller must use the generator's send() method to provide the next byte. diff --git a/pyocd/trace/swv.py b/pyocd/trace/swv.py index 0b3e89093..a013ea55c 100644 --- a/pyocd/trace/swv.py +++ b/pyocd/trace/swv.py @@ -33,17 +33,17 @@ LOG = logging.getLogger(__name__) class SWVEventSink(TraceEventSink): - """! @brief Trace event sink that converts ITM packets to a text stream.""" + """@brief Trace event sink that converts ITM packets to a text stream.""" def __init__(self, console): - """! @brief Constructor. + """@brief Constructor. @param self @param console File-like object to which SWV data will be written. """ self._console = console def receive(self, event): - """! @brief Handle an SWV trace event. + """@brief Handle an SWV trace event. @param self @param event An instance of TraceITMEvent. If the event is not this class, or isn't for ITM port 0, then it will be ignored. The individual bytes of 16- or 32-bit events @@ -66,10 +66,10 @@ def receive(self, event): self._console.write(data) class SWVReader(threading.Thread): - """! @brief Sets up SWV and processes data in a background thread.""" + """@brief Sets up SWV and processes data in a background thread.""" def __init__(self, session, core_number=0, lock=None): - """! @brief Constructor. + """@brief Constructor. @param self @param session The Session instance. @param core_number The number of the core being traced. Default is core 0. @@ -86,7 +86,7 @@ def __init__(self, session, core_number=0, lock=None): self._session.subscribe(self._reset_handler, Target.Event.POST_RESET, self._session.target.cores[core_number]) def init(self, sys_clock, swo_clock, console): - """! 
@brief Configures trace graph and starts thread. + """@brief Configures trace graph and starts thread. This method performs all steps required to start up SWV. It first calls the target's trace_start() method, which allows for target-specific trace initialization. Then it @@ -128,7 +128,7 @@ def init(self, sys_clock, swo_clock, console): self.start() def stop(self): - """! @brief Stops processing SWV data. + """@brief Stops processing SWV data. The reader thread is terminated first, then the ITM is disabled. The last step is to call the target's trace_stop() method. @@ -147,7 +147,7 @@ def stop(self): self._session.target.trace_stop() def run(self): - """! @brief SWV reader thread routine. + """@brief SWV reader thread routine. Starts the probe receiving SWO data by calling DebugProbe.swo_start(). For as long as the thread runs, it reads SWO data from the probe and passes it to the SWO parser created in @@ -195,7 +195,7 @@ def run(self): self._lock.release() def _reset_handler(self, notification): - """! @brief Reset notification handler. + """@brief Reset notification handler. If the target is reset while the SWV reader is running, then the Target::trace_start() method is called to reinit trace output. diff --git a/pyocd/utility/autoflush.py b/pyocd/utility/autoflush.py index 0f38b86f4..87d176488 100644 --- a/pyocd/utility/autoflush.py +++ b/pyocd/utility/autoflush.py @@ -24,7 +24,7 @@ from types import TracebackType class Autoflush: - """! @brief Context manager for performing flushes. + """@brief Context manager for performing flushes. Pass a Target instance to the constructor, and when the context exits, the target will be automatically flushed. If a TransferError or subclass, such as TransferFaultError, is raised @@ -35,7 +35,7 @@ class Autoflush: """ def __init__(self, target: "Target") -> None: - """! @brief Constructor. + """@brief Constructor. @param self The object. @param target Object on which the flush will be performed. 
Normally this is a Target diff --git a/pyocd/utility/cmdline.py b/pyocd/utility/cmdline.py index 2518ca7d0..1f0bb8ea9 100644 --- a/pyocd/utility/cmdline.py +++ b/pyocd/utility/cmdline.py @@ -25,7 +25,7 @@ LOG = logging.getLogger(__name__) def split_command(cmd: str) -> List[str]: - """! @brief Split command by whitespace, supporting quoted strings.""" + """@brief Split command by whitespace, supporting quoted strings.""" result: List[str] = [] state = 0 word = '' @@ -82,7 +82,7 @@ def split_command(cmd: str) -> List[str]: return result def split_command_line(cmd_line: Union[str, List[str]]) -> List[str]: - """! @brief Split command line by whitespace, supporting quoted strings.""" + """@brief Split command line by whitespace, supporting quoted strings.""" result: List[str] = [] if isinstance(cmd_line, str): args = [cmd_line] @@ -108,7 +108,7 @@ def split_command_line(cmd_line: Union[str, List[str]]) -> List[str]: } def convert_vector_catch(vcvalue: Union[str, bytes]) -> int: - """! @brief Convert a vector catch string to a mask. + """@brief Convert a vector catch string to a mask. @exception ValueError Raised if an invalid vector catch character is encountered. """ @@ -129,7 +129,7 @@ def convert_vector_catch(vcvalue: Union[str, bytes]) -> int: raise ValueError("invalid vector catch option '{}'".format(e.args[0])) def convert_session_options(option_list: Iterable[str]) -> Dict[str, Any]: - """! @brief Convert a list of session option settings to a dictionary.""" + """@brief Convert a list of session option settings to a dictionary.""" options = {} if option_list is not None: for o in option_list: @@ -201,7 +201,7 @@ def convert_session_options(option_list: Iterable[str]) -> Dict[str, Any]: } def convert_reset_type(value: str) -> Optional[Target.ResetType]: - """! @brief Convert a reset_type session option value to the Target.ResetType enum. + """@brief Convert a reset_type session option value to the Target.ResetType enum. 
@param value The value of the reset_type session option. @exception ValueError Raised if an unknown reset_type value is passed. """ @@ -211,7 +211,7 @@ def convert_reset_type(value: str) -> Optional[Target.ResetType]: return RESET_TYPE_MAP[value] def convert_frequency(value: str) -> int: - """! @brief Applies scale suffix to frequency value string. + """@brief Applies scale suffix to frequency value string. @param value String with a float and possible 'k' or 'm' suffix (case-insensitive). "Hz" may also follow. No space is allowed between the float and suffix. Leading and trailing whitespace is allowed. @@ -233,11 +233,11 @@ def convert_frequency(value: str) -> int: def int_base_0(x: str) -> int: - """! @brief Converts a string to an int with support for base prefixes.""" + """@brief Converts a string to an int with support for base prefixes.""" return int(x, base=0) def flatten_args(args: Iterable[Iterable[Any]]) -> List[Any]: - """! @brief Converts a list of lists to a single list.""" + """@brief Converts a list of lists to a single list.""" return [item for sublist in args for item in sublist] diff --git a/pyocd/utility/columns.py b/pyocd/utility/columns.py index 8336976f4..136877754 100644 --- a/pyocd/utility/columns.py +++ b/pyocd/utility/columns.py @@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__) class ColumnFormatter: - """! @brief Formats a set of values in multiple columns. + """@brief Formats a set of values in multiple columns. The value_list must be a list of bi-tuples (name, value) sorted in the desired display order. @@ -32,7 +32,7 @@ class ColumnFormatter: """ def __init__(self, maxwidth: Optional[int] = None, inset: int = 2) -> None: - """! @brief Constructor. + """@brief Constructor. @param self The object. @param maxwidth Number of characters to which the output width must be constrained. If not provided, then the width of the stdout terminal is used. 
If getting the terminal width fails, for instance @@ -46,7 +46,7 @@ def __init__(self, maxwidth: Optional[int] = None, inset: int = 2) -> None: self._max_value_width = 0 def add_items(self, item_list: Iterable[Tuple[str, str]]) -> None: - """! @brief Add items to the output. + """@brief Add items to the output. @param self The object. @param item_list Must be a list of bi-tuples (name, value) sorted in the desired display order. """ @@ -58,7 +58,7 @@ def add_items(self, item_list: Iterable[Tuple[str, str]]) -> None: self._max_value_width = max(self._max_value_width, len(value)) def format(self) -> str: - """! @brief Return the formatted columns as a string. + """@brief Return the formatted columns as a string. @param self The object. @return String containing the output of the column printer. """ @@ -81,7 +81,7 @@ def format(self) -> str: return txt def write(self, output_file: IO[str] = None) -> None: - """! @brief Write the formatted columns to stdout or the specified file. + """@brief Write the formatted columns to stdout or the specified file. @param self The object. @param output_file Optional file to which the column printer output will be written. If no specified, then sys.stdout is used. diff --git a/pyocd/utility/concurrency.py b/pyocd/utility/concurrency.py index 30f6fd57e..c12caa072 100644 --- a/pyocd/utility/concurrency.py +++ b/pyocd/utility/concurrency.py @@ -18,7 +18,7 @@ from typing import (Any, Callable) def locked(func: Callable) -> Callable: - """! @brief Decorator to automatically lock a method of a class. + """@brief Decorator to automatically lock a method of a class. The class is required to have `lock()` and `unlock()` methods. 
""" diff --git a/pyocd/utility/conversion.py b/pyocd/utility/conversion.py index 92efa99a4..636bd7022 100644 --- a/pyocd/utility/conversion.py +++ b/pyocd/utility/conversion.py @@ -24,7 +24,7 @@ ByteList = Sequence[int] def byte_list_to_nbit_le_list(data: ByteList, bitwidth: int, pad: int = 0x00) -> Sequence[int]: - """! @brief Convert a list of bytes to a list of n-bit integers (little endian) + """@brief Convert a list of bytes to a list of n-bit integers (little endian) If the length of the data list is not a multiple of `bitwidth` // 8, then the pad value is used for the additional required bytes. @@ -47,7 +47,7 @@ def byte_list_to_nbit_le_list(data: ByteList, bitwidth: int, pad: int = 0x00) -> return res def nbit_le_list_to_byte_list(data: Sequence[int], bitwidth: int) -> ByteList: - """! @brief Convert a list of n-bit values into a byte list. + """@brief Convert a list of n-bit values into a byte list. @param data List of n-bit values. @param bitwidth Width in bits of the input vales. @@ -56,7 +56,7 @@ def nbit_le_list_to_byte_list(data: Sequence[int], bitwidth: int) -> ByteList: return [(x >> shift) & 0xff for x in data for shift in range(0, bitwidth, 8)] def byte_list_to_u32le_list(data: ByteList, pad: int = 0x00) -> Sequence[int]: - """! @brief Convert a list of bytes to a list of 32-bit integers (little endian) + """@brief Convert a list of bytes to a list of 32-bit integers (little endian) If the length of the data list is not a multiple of 4, then the pad value is used for the additional required bytes. @@ -74,7 +74,7 @@ def byte_list_to_u32le_list(data: ByteList, pad: int = 0x00) -> Sequence[int]: return res def u32le_list_to_byte_list(data: Sequence[int]) -> ByteList: - """! 
@brief Convert a word array into a byte array""" + """@brief Convert a word array into a byte array""" res = [] for x in data: res.append((x >> 0) & 0xff) @@ -84,41 +84,41 @@ def u32le_list_to_byte_list(data: Sequence[int]) -> ByteList: return res def u16le_list_to_byte_list(data: Sequence[int]) -> ByteList: - """! @brief Convert a halfword array into a byte array""" + """@brief Convert a halfword array into a byte array""" byteData = [] for h in data: byteData.extend([h & 0xff, (h >> 8) & 0xff]) return byteData def byte_list_to_u16le_list(byteData: ByteList) -> Sequence[int]: - """! @brief Convert a byte array into a halfword array""" + """@brief Convert a byte array into a halfword array""" data = [] for i in range(0, len(byteData), 2): data.append(byteData[i] | (byteData[i + 1] << 8)) return data def u32_to_float32(data: int) -> float: - """! @brief Convert a 32-bit int to an IEEE754 float""" + """@brief Convert a 32-bit int to an IEEE754 float""" d = struct.pack(">I", data & 0xffff_ffff) return struct.unpack(">f", d)[0] def float32_to_u32(data: float) -> int: - """! @brief Convert an IEEE754 float to a 32-bit int""" + """@brief Convert an IEEE754 float to a 32-bit int""" d = struct.pack(">f", data) return struct.unpack(">I", d)[0] def u64_to_float64(data: int) -> float: - """! @brief Convert a 64-bit int to an IEEE754 float""" + """@brief Convert a 64-bit int to an IEEE754 float""" d = struct.pack(">Q", data & 0xffff_ffff_ffff_ffff) return struct.unpack(">d", d)[0] def float64_to_u64(data: float) -> int: - """! @brief Convert an IEEE754 float to a 64-bit int""" + """@brief Convert an IEEE754 float to a 64-bit int""" d = struct.pack(">d", data) return struct.unpack(">Q", d)[0] def uint_to_hex_le(value: int, width: int) -> str: - """! @brief Create an n-digit hexadecimal string from an integer value. + """@brief Create an n-digit hexadecimal string from an integer value. @param value Integer value to format. @param width The width in bits. 
@return A string with the number of hex bytes required to fit `width` bits, rounded up to the @@ -128,7 +128,7 @@ def uint_to_hex_le(value: int, width: int) -> str: return ''.join("%02x" % ((value >> b) & 0xff) for b in range(0, align_up(width, 8), 8)) def hex_le_to_uint(value: str, width: int) -> int: - """! @brief Create an an integer value from an n-digit hexadecimal string. + """@brief Create an an integer value from an n-digit hexadecimal string. @param value String consisting of pairs of hex digits with no intervening whitespace. Must have at least enough hex bytes to meet the desired width. The first hex byte is the LSB. @param width The width in bits. The width can be shorter then the input `value` width, in which case @@ -138,55 +138,55 @@ def hex_le_to_uint(value: str, width: int) -> int: return sum((int(value[i:i+2], base=16) << (i * 4)) for i in range(0, align_up(width, 8) // 4, 2)) def u32_to_hex8le(val: int) -> str: - """! @brief Create 8-digit hexadecimal string from 32-bit register value""" + """@brief Create 8-digit hexadecimal string from 32-bit register value""" return uint_to_hex_le(val, 32) def u64_to_hex16le(val: int) -> str: - """! @brief Create 16-digit hexadecimal string from 64-bit register value""" + """@brief Create 16-digit hexadecimal string from 64-bit register value""" return uint_to_hex_le(val, 64) def hex8_to_u32be(data: str) -> int: - """! @brief Build 32-bit register value from big-endian 8-digit hexadecimal string + """@brief Build 32-bit register value from big-endian 8-digit hexadecimal string @note Endianness in this function name is backwards. """ return hex_le_to_uint(data, 32) def hex16_to_u64be(data: str) -> int: - """! @brief Build 64-bit register value from big-endian 16-digit hexadecimal string + """@brief Build 64-bit register value from big-endian 16-digit hexadecimal string @note Endianness in this function name is backwards. """ return hex_le_to_uint(data, 64) def hex8_to_u32le(data: str) -> int: - """! 
@brief Build 32-bit register value from little-endian 8-digit hexadecimal string + """@brief Build 32-bit register value from little-endian 8-digit hexadecimal string @note Endianness in this function name is backwards. """ return int(data[0:8], 16) def hex16_to_u64le(data: str) -> int: - """! @brief Build 64-bit register value from little-endian 16-digit hexadecimal string + """@brief Build 64-bit register value from little-endian 16-digit hexadecimal string @note Endianness in this function name is backwards. """ return int(data[0:16], 16) def byte_to_hex2(val: int) -> str: - """! @brief Create 2-digit hexadecimal string from 8-bit value""" + """@brief Create 2-digit hexadecimal string from 8-bit value""" return "%02x" % int(val) def hex_to_byte_list(data: str) -> ByteList: - """! @brief Convert string of hex bytes to list of integers""" + """@brief Convert string of hex bytes to list of integers""" return list(binascii.unhexlify(data)) def hex_decode(cmd: str) -> bytes: - """! @brief Return the binary data represented by the hexadecimal string.""" + """@brief Return the binary data represented by the hexadecimal string.""" return binascii.unhexlify(cmd) def hex_encode(string: bytes) -> bytes: - """! @brief Return the hexadecimal representation of the binary data.""" + """@brief Return the hexadecimal representation of the binary data.""" return binascii.hexlify(string) def pairwise(iterable: Iterator[Any]) -> Iterator[Tuple[Any, Any]]: - """! s -> (s0,s1), (s2,s3), (s3, s4), ...""" + """s -> (s0,s1), (s2,s3), (s3, s4), ...""" r = [] for x in iterable: r.append(x) diff --git a/pyocd/utility/graph.py b/pyocd/utility/graph.py index a004b91a1..9cf01dbaa 100644 --- a/pyocd/utility/graph.py +++ b/pyocd/utility/graph.py @@ -18,7 +18,7 @@ from typing import (Callable, List, Optional, Sequence, Type, Union) class GraphNode: - """! @brief Simple graph node. + """@brief Simple graph node. All nodes have a parent, which is None for a root node, and zero or more children. 
@@ -26,33 +26,33 @@ class GraphNode: """ def __init__(self) -> None: - """! @brief Constructor.""" + """@brief Constructor.""" super().__init__() self._parent: Optional[GraphNode] = None self._children: List[GraphNode] = [] @property def parent(self) -> Optional["GraphNode"]: - """! @brief This node's parent in the object graph.""" + """@brief This node's parent in the object graph.""" return self._parent @property def children(self) -> Sequence["GraphNode"]: - """! @brief Child nodes in the object graph.""" + """@brief Child nodes in the object graph.""" return self._children @property def is_leaf(self) -> bool: - """! @brief Returns true if the node has no children.""" + """@brief Returns true if the node has no children.""" return len(self.children) == 0 def add_child(self, node: "GraphNode") -> None: - """! @brief Link a child node onto this object.""" + """@brief Link a child node onto this object.""" node._parent = self self._children.append(node) def find_root(self) -> "GraphNode": - """! @brief Returns the root node of the object graph.""" + """@brief Returns the root node of the object graph.""" root = self while root.parent is not None: root = root.parent @@ -62,7 +62,7 @@ def find_children(self, predicate: Callable[["GraphNode"], bool], breadth_first: bool = True ) -> Sequence["GraphNode"]: - """! @brief Recursively search for children that match a given predicate. + """@brief Recursively search for children that match a given predicate. @param self @param predicate A callable accepting a single argument for the node to examine. If the predicate returns True, then that node is added to the result list and no further @@ -90,7 +90,7 @@ def _search(node: GraphNode): return _search(self) def get_first_child_of_type(self, klass: Type) -> Optional["GraphNode"]: - """! @brief Breadth-first search for a child of the given class. + """@brief Breadth-first search for a child of the given class. @param self @param klass The class type to search for. 
The first child at any depth that is an instance of this class or a subclass thereof will be returned. Matching children at more shallow @@ -104,22 +104,22 @@ def get_first_child_of_type(self, klass: Type) -> Optional["GraphNode"]: return None def __getitem__(self, key: Union[int, slice]) -> Union["GraphNode", List["GraphNode"]]: - """! @brief Returns the indexed child. + """@brief Returns the indexed child. Slicing is supported. """ return self._children[key] def __iter__(self): - """! @brief Iterate over the node's children.""" + """@brief Iterate over the node's children.""" return iter(self.children) def _dump_desc(self) -> str: - """! @brief Similar to __repr__ by used for dump_to_str().""" + """@brief Similar to __repr__ by used for dump_to_str().""" return str(self) def dump_to_str(self) -> str: - """! @brief Returns a string describing the object graph.""" + """@brief Returns a string describing the object graph.""" def _dump(node, level): result = (" " * level) + "- " + node._dump_desc() + "\n" @@ -130,5 +130,5 @@ def _dump(node, level): return _dump(self, 0) def dump(self) -> None: - """! @brief Pretty print the object graph to stdout.""" + """@brief Pretty print the object graph to stdout.""" print(self.dump_to_str()) diff --git a/pyocd/utility/hex.py b/pyocd/utility/hex.py index a0b09d004..f1b7276f3 100644 --- a/pyocd/utility/hex.py +++ b/pyocd/utility/hex.py @@ -26,7 +26,7 @@ _PRINTABLE = string.digits + string.ascii_letters + string.punctuation + ' ' def format_hex_width(value: int, width: int) -> str: - """! @brief Formats the value as hex of the specified bit width. + """@brief Formats the value as hex of the specified bit width. @param value Integer value to be formatted. @param width Bit width, must be one of 8, 16, 32, 64. @@ -50,7 +50,7 @@ def dump_hex_data( output: Optional[IO[str]] = None, print_ascii: bool = True ) -> None: - """! @brief Prints a canonical hex dump of the given data. + """@brief Prints a canonical hex dump of the given data. 
Each line of the output consists of an address column, the data as hex, and a printable ASCII representation of the data. @@ -138,7 +138,7 @@ def line_width_in_chars(elements: int) -> int: output.write("\n") def dump_hex_data_to_str(data, **kwargs): - """! @brief Returns a string with data formatted as hex. + """@brief Returns a string with data formatted as hex. @see dump_hex_data() """ sio = io.StringIO() diff --git a/pyocd/utility/mask.py b/pyocd/utility/mask.py index 97dccc76a..db23905db 100644 --- a/pyocd/utility/mask.py +++ b/pyocd/utility/mask.py @@ -19,7 +19,7 @@ from typing import (Any, Optional, Sequence, Tuple, Union) def bitmask(*args: Union[int, Sequence[int], Tuple[int, int]]) -> int: - """! @brief Returns a mask with specified bit ranges set. + """@brief Returns a mask with specified bit ranges set. An integer mask is generated based on the bits and bit ranges specified by the arguments. Any number of arguments can be provided. Each argument may be either @@ -56,7 +56,7 @@ def bitmask(*args: Union[int, Sequence[int], Tuple[int, int]]) -> int: return mask def bit_invert(value: int, width: int = 32) -> int: - """! @brief Return the bitwise inverted value of the argument given a specified width. + """@brief Return the bitwise inverted value of the argument given a specified width. @param value Integer value to be inverted. @param width Bit width of both the input and output. If not supplied, this defaults to 32. @@ -65,27 +65,27 @@ def bit_invert(value: int, width: int = 32) -> int: return ((1 << width) - 1) & (~value) invert32 = bit_invert -"""! @brief Return the 32-bit inverted value of the argument.""" +"""@brief Return the 32-bit inverted value of the argument.""" def bfx(value: int, msb: int, lsb: int) -> int: - """! @brief Extract a value from a bitfield.""" + """@brief Extract a value from a bitfield.""" mask = bitmask((msb, lsb)) return (value & mask) >> lsb def bfxw(value: int, lsb: int, width: int) -> int: - """! 
@brief Extract a value from a bitfield given the LSb and width.""" + """@brief Extract a value from a bitfield given the LSb and width.""" mask = bitmask((lsb + width, lsb)) return (value & mask) >> lsb def bfi(value: int, msb: int, lsb: int, field: int) -> int: - """! @brief Change a bitfield value.""" + """@brief Change a bitfield value.""" mask = bitmask((msb, lsb)) value &= ~mask value |= (field << lsb) & mask return value class Bitfield: - """! @brief Represents a bitfield of a register.""" + """@brief Represents a bitfield of a register.""" def __init__(self, msb: int, lsb: Optional[int] = None, name: Optional[str] = None): self._msb = msb @@ -98,7 +98,7 @@ def width(self) -> int: return self._msb - self._lsb + 1 def get(self, value: int) -> int: - """! @brief Extract the bitfield value from a register value. + """@brief Extract the bitfield value from a register value. @param self The Bitfield object. @param value Integer register value. @return Integer value of the bitfield extracted from `value`. @@ -106,7 +106,7 @@ def get(self, value: int) -> int: return bfx(value, self._msb, self._lsb) def set(self, register_value: int, field_value: int) -> int: - """! @brief Modified the bitfield in a register value. + """@brief Modified the bitfield in a register value. @param self The Bitfield object. @param register_value Integer register value. @param field_value New value for the bitfield. Must not be shifted into place already. @@ -118,7 +118,7 @@ def __repr__(self) -> str: return "<{}@{:x} name={} {}:{}>".format(self.__class__.__name__, id(self), self._name, self._msb, self._lsb) def msb(n: int) -> int: - """! @brief Return the bit number of the highest set bit.""" + """@brief Return the bit number of the highest set bit.""" ndx = 0 while ( 1 < n ): n = ( n >> 1 ) @@ -126,7 +126,7 @@ def msb(n: int) -> int: return ndx def same(d1: Sequence[Any], d2: Sequence[Any]) -> bool: - """! @brief Test whether two sequences contain the same values. 
+ """@brief Test whether two sequences contain the same values. Unlike a simple equality comparison, this function works as expected when the two sequences are of different types, such as a list and bytearray. The sequences must return @@ -140,19 +140,19 @@ def same(d1: Sequence[Any], d2: Sequence[Any]) -> bool: return True def align_down(value: int, multiple: int) -> int: - """! @brief Return value aligned down to multiple.""" + """@brief Return value aligned down to multiple.""" return value // multiple * multiple def align_up(value: int, multiple: int) -> int: - """! @brief Return value aligned up to multiple.""" + """@brief Return value aligned up to multiple.""" return (value + multiple - 1) // multiple * multiple def round_up_div(value: int, divisor: int) -> int: - """! @brief Return value divided by the divisor, rounding up to the nearest multiple of the divisor.""" + """@brief Return value divided by the divisor, rounding up to the nearest multiple of the divisor.""" return (value + divisor - 1) // divisor def parity32_high(n: int) -> int: - """! @brief Compute parity over a 32-bit value. + """@brief Compute parity over a 32-bit value. This function is intended to be used for computing parity over a 32-bit value transferred in an Arm ADI AP/DP register transfer. 
The result is returned in bit 32, ready to be OR'd into the register diff --git a/pyocd/utility/notification.py b/pyocd/utility/notification.py index 7c0a2c349..672439e0f 100644 --- a/pyocd/utility/notification.py +++ b/pyocd/utility/notification.py @@ -22,7 +22,7 @@ TRACE.setLevel(logging.CRITICAL) class Notification(object): - """!@brief Holds information about a notification to subscribers.""" + """@brief Holds information about a notification to subscribers.""" def __init__(self, event, source, data=None): self._event = event @@ -45,7 +45,7 @@ def __repr__(self): return "" % (id(self), repr(self.event), repr(self.source), repr(self.data)) class Notifier(object): - """!@brief Mix-in class that provides notification broadcast capabilities. + """@brief Mix-in class that provides notification broadcast capabilities. In this notification model, subscribers register callbacks for one or more events. The events are simply a Python object of any kind, as long as it is hashable. Typically integers or Enums @@ -74,7 +74,7 @@ def __init__(self): self._subscribers = {} def subscribe(self, cb, events, source=None): - """!@brief Subscribe to selection of events from an optional source. + """@brief Subscribe to selection of events from an optional source. @param self @param cb The callable that will be invoked when a matching notification is sent. Must @@ -101,7 +101,7 @@ def subscribe(self, cb, events, source=None): event_info[1][source].append(cb) def unsubscribe(self, cb, events=None): - """!@brief Remove a callback from the subscribers list. + """@brief Remove a callback from the subscribers list. @param self @param cb The callback to remove from all subscriptions. @@ -126,7 +126,7 @@ def unsubscribe(self, cb, events=None): source_info.remove(cb) def notify(self, event, source=None, data=None): - """!@brief Notify subscribers of an event. + """@brief Notify subscribers of an event. @param self @param event Event to send. Must be a hashable object. 
It is acceptable to notify for an diff --git a/pyocd/utility/progress.py b/pyocd/utility/progress.py index 4144f3519..ff03f7430 100644 --- a/pyocd/utility/progress.py +++ b/pyocd/utility/progress.py @@ -21,8 +21,7 @@ LOG = logging.getLogger(__name__) class ProgressReport(object): - """! - @brief Base progress report class. + """@brief Base progress report class. This base class implements the logic but no output. """ @@ -74,8 +73,7 @@ def _finish(self): raise NotImplementedError() class ProgressReportTTY(ProgressReport): - """! - @brief Progress report subclass for TTYs. + """@brief Progress report subclass for TTYs. The progress bar is fully redrawn onscreen as progress is updated to give the impression of animation. @@ -94,8 +92,7 @@ def _finish(self): self._file.write("\n") class ProgressReportNoTTY(ProgressReport): - """! - @brief Progress report subclass for non-TTY output. + """@brief Progress report subclass for non-TTY output. A simpler progress bar is used than for the TTY version. Only the difference between the previous and current progress is drawn for each update, making the output suitable @@ -123,8 +120,7 @@ def _finish(self): self._file.flush() def print_progress(file=None): - """! - @brief Progress printer factory. + """@brief Progress printer factory. This factory function checks whether the output file is a TTY, and instantiates the appropriate subclass of ProgressReport. diff --git a/pyocd/utility/sequencer.py b/pyocd/utility/sequencer.py index 499983a1d..66e6d3f0a 100644 --- a/pyocd/utility/sequencer.py +++ b/pyocd/utility/sequencer.py @@ -21,7 +21,7 @@ LOG = logging.getLogger(__name__) class CallSequence(object): - """! @brief Call sequence manager. + """@brief Call sequence manager. Contains an ordered sequence of tasks. Each task has a name and associated callable. The CallSequence class itself is callable, so instances can be nested @@ -35,7 +35,7 @@ class CallSequence(object): """ def __init__(self, *args): - """! @brief Constructor. 
+ """@brief Constructor. The constructor accepts an arbitrary number of parameters describing an ordered set of tasks. Each parameter must be a 2-tuple with the first element being the @@ -53,7 +53,7 @@ def _validate_tasks(self, tasks): @property def sequence(self): - """! @brief Returns an OrderedDict of the call sequence. + """@brief Returns an OrderedDict of the call sequence. Task names are keys. """ @@ -61,7 +61,7 @@ def sequence(self): @sequence.setter def sequence(self, seq): - """! @brief Replace the entire call sequence. + """@brief Replace the entire call sequence. Accepts either an OrderedDict or a list of 2-tuples like the constructor. """ @@ -72,38 +72,38 @@ def sequence(self, seq): @property def count(self): - """! @brief Returns the number of tasks in the sequence.""" + """@brief Returns the number of tasks in the sequence.""" return len(self._calls) def clear(self): - """! @brief Remove all tasks from the sequence.""" + """@brief Remove all tasks from the sequence.""" self._calls = OrderedDict() def copy(self): - """! @brief Duplicate the sequence.""" + """@brief Duplicate the sequence.""" new_seq = CallSequence() new_seq._calls = self._calls.copy() return new_seq def remove_task(self, name): - """! @brief Remove a task with the given name. + """@brief Remove a task with the given name. @exception KeyError Raised if no task with the specified name exists. """ del self._calls[name] return self def has_task(self, name): - """! @brief Returns a boolean indicating presence of the named task in the sequence.""" + """@brief Returns a boolean indicating presence of the named task in the sequence.""" return name in self._calls def get_task(self, name): - """! @brief Return the callable for the named task. + """@brief Return the callable for the named task. @exception KeyError Raised if no task with the specified name exists. """ return self._calls[name] def replace_task(self, name, replacement): - """! 
@brief Change the callable associated with a task.""" + """@brief Change the callable associated with a task.""" assert isinstance(replacement, Callable) if name not in self._calls: raise KeyError(name) @@ -114,7 +114,7 @@ def replace_task(self, name, replacement): return self def wrap_task(self, name, wrapper): - """! @brief Wrap an existing task with a new callable. + """@brief Wrap an existing task with a new callable. The wrapper is expected to take a single parameter, the return value from the original task. This allows for easy filtering of a new call sequence returned by @@ -132,7 +132,7 @@ def wrap_task(self, name, wrapper): return self def append(self, *args): - """! @brief Append a new task or tasks to the sequence. + """@brief Append a new task or tasks to the sequence. Like the constructor, this method takes any number of arguments. Each must be a 2-tuple task description. @@ -144,7 +144,7 @@ def append(self, *args): return self def insert_before(self, beforeTaskName, *args): - """! @brief Insert a task or tasks before a named task. + """@brief Insert a task or tasks before a named task. @param beforeTaskName The name of an existing task. The new tasks will be inserted prior to this task. @@ -171,7 +171,7 @@ def insert_before(self, beforeTaskName, *args): return self def insert_after(self, afterTaskName, *args): - """! @brief Insert a task or tasks after a named task. + """@brief Insert a task or tasks after a named task. @param afterTaskName The name of an existing task. The new tasks will be inserted after this task. @@ -198,7 +198,7 @@ def insert_after(self, afterTaskName, *args): return self def invoke(self): - """! @brief Execute each task in order. + """@brief Execute each task in order. A task may return a CallSequence, in which case the new sequence is immediately executed. @@ -213,14 +213,14 @@ def invoke(self): resultSequence.invoke() def __call__(self, *args, **kwargs): - """! @brief Another way to execute the tasks. 
+ """@brief Another way to execute the tasks. Supports nested CallSequences. """ self.invoke() def __iter__(self): - """! @brief Iterate over the sequence.""" + """@brief Iterate over the sequence.""" return iter(self._calls.items()) def __repr__(self): diff --git a/pyocd/utility/server.py b/pyocd/utility/server.py index fc34a6663..301e173ed 100644 --- a/pyocd/utility/server.py +++ b/pyocd/utility/server.py @@ -25,7 +25,7 @@ LOG = logging.getLogger(__name__) class StreamServer(threading.Thread): - """! @brief File-like object that serves data over a TCP socket. + """@brief File-like object that serves data over a TCP socket. The user can connect to the socket with telnet or netcat. @@ -34,7 +34,7 @@ class StreamServer(threading.Thread): """ def __init__(self, port, serve_local_only=True, name=None, is_read_only=True, extra_info=None): - """! @brief Constructor. + """@brief Constructor. Starts the server immediately. @@ -126,7 +126,7 @@ def run(self): LOG.info("%sserver stopped", self._formatted_name) def write(self, data): - """! @brief Write bytes into the connection.""" + """@brief Write bytes into the connection.""" # If nobody is connected, act like all data was written anyway. if self.connected is None: return 0 @@ -141,7 +141,7 @@ def write(self, data): return size def _get_input(self, length=-1): - """! @brief Extract requested amount of data from the read buffer.""" + """@brief Extract requested amount of data from the read buffer.""" self._buffer_lock.acquire() try: if length == -1: @@ -158,7 +158,7 @@ def _get_input(self, length=-1): self._buffer_lock.release() def read(self, size=-1): - """! @brief Return bytes read from the connection.""" + """@brief Return bytes read from the connection.""" if self.connected is None: return None @@ -168,7 +168,7 @@ def read(self, size=-1): return data def readinto(self, b): - """! 
@brief Read bytes into a mutable buffer.""" + """@brief Read bytes into a mutable buffer.""" if self.connected is None: return None diff --git a/pyocd/utility/sockets.py b/pyocd/utility/sockets.py index 79f79f9c4..61d365da5 100644 --- a/pyocd/utility/sockets.py +++ b/pyocd/utility/sockets.py @@ -78,7 +78,7 @@ def set_timeout(self, timeout): self.conn.settimeout(timeout) class ClientSocket(object): - """! @brief Simple client-side TCP socket. + """@brief Simple client-side TCP socket. Provides a file-like interface to a TCP socket. Blocking and timeout are configurable. """ @@ -106,7 +106,7 @@ def set_blocking(self, blocking): self._socket.setblocking(blocking) def set_timeout(self, timeout): - """! @brief Change the socket to blocking with timeout mode.""" + """@brief Change the socket to blocking with timeout mode.""" self._socket.settimeout(timeout) def read(self, packet_size=None): diff --git a/pyocd/utility/strings.py b/pyocd/utility/strings.py index e37508bab..6e3df1e25 100644 --- a/pyocd/utility/strings.py +++ b/pyocd/utility/strings.py @@ -18,24 +18,24 @@ from typing import (Iterable, Sequence, Optional, Tuple) class UniquePrefixMatcher: - """! @brief Manages detection of shortest unique prefix match of a set of strings.""" + """@brief Manages detection of shortest unique prefix match of a set of strings.""" def __init__(self, items: Optional[Iterable[str]] = None): - """! @brief Constructor. + """@brief Constructor. @param self This object. @param items Optional sequence of strings. """ self._items = set(items) if (items is not None) else set() def add_items(self, items: Iterable[str]) -> None: - """! @brief Add some items to be matched. + """@brief Add some items to be matched. @param self This object. @param items Sequence of strings. """ self._items.update(items) def find_all(self, prefix: str) -> Tuple[str, ...]: - """! @brief Return all items matching the given prefix. + """@brief Return all items matching the given prefix. @param self This object. 
@param prefix String that is compared as a prefix against the items passed to the constructor. Must not be the empty string. @@ -50,7 +50,7 @@ def find_all(self, prefix: str) -> Tuple[str, ...]: return tuple(i for i in self._items if i.startswith(prefix)) def find_one(self, prefix: str) -> Optional[str]: - """! @brief Return the item matching the given prefix, or None. + """@brief Return the item matching the given prefix, or None. @param self This object. @param prefix String that is compared as a prefix against the items passed to the constructor. @return The full value of the matching item where `prefix` is a valid prefix. diff --git a/pyocd/utility/timeout.py b/pyocd/utility/timeout.py index 0fbdbcb68..d9fc7b5f6 100644 --- a/pyocd/utility/timeout.py +++ b/pyocd/utility/timeout.py @@ -22,7 +22,7 @@ from types import TracebackType class Timeout: - """! @brief Timeout helper context manager. + """@brief Timeout helper context manager. The recommended way to use this class is demonstrated here. It uses an else block on a while loop to handle the timeout. The code in the while loop must use a break statement @@ -63,7 +63,7 @@ class Timeout: """ def __init__(self, timeout: Optional[float], sleeptime: float = 0) -> None: - """! @brief Constructor. + """@brief Constructor. @param self @param timeout The timeout in seconds. May be None to indicate no timeout. @param sleeptime Time in seconds to sleep during calls to check(). Defaults to 0, thus @@ -84,7 +84,7 @@ def __exit__(self, exc_type: type, value: Any, traceback: "TracebackType") -> bo return False def start(self) -> None: - """! @brief Start or restart the timeout timer. + """@brief Start or restart the timeout timer. This has precisely the same effect as entering `self` when used as a context manager. @@ -97,7 +97,7 @@ def start(self) -> None: self._is_first_check = True def clear(self) -> None: - """! @brief Reset the timeout back to initial, non-running state. 
+ """@brief Reset the timeout back to initial, non-running state. The timeout can be made to run again by calling start(). """ @@ -106,7 +106,7 @@ def clear(self) -> None: self._is_first_check = True def check(self, autosleep: bool = True) -> bool: - """! @brief Check for timeout and possibly sleep. + """@brief Check for timeout and possibly sleep. Starting with the second call to this method, it will automatically sleep before returning if: @@ -137,12 +137,12 @@ def check(self, autosleep: bool = True) -> bool: @property def is_running(self) -> bool: - """! @brief Whether the timeout object has started timing.""" + """@brief Whether the timeout object has started timing.""" return self._is_running @property def did_time_out(self) -> bool: - """! @brief Whether the timeout has occurred as of the time when this property is accessed.""" + """@brief Whether the timeout has occurred as of the time when this property is accessed.""" self.check(autosleep=False) return self._timed_out diff --git a/test/automated_test.py b/test/automated_test.py index 9864e7d7c..46881d16d 100755 --- a/test/automated_test.py +++ b/test/automated_test.py @@ -193,7 +193,7 @@ class BoardTestConfig: test_list: List[Test] def test_board(config: BoardTestConfig): - """! @brief Run all tests on a given board. + """@brief Run all tests on a given board. When multiple test jobs are being used, this function is the entry point executed in child processes. @@ -306,7 +306,7 @@ def test_board(config: BoardTestConfig): return result_list def filter_tests(args): - """! 
@brief Generate the list of tests to run based on arguments.""" + """@brief Generate the list of tests to run based on arguments.""" if args.exclude_tests and args.include_tests: print("Please only include or exclude tests, not both simultaneously.") sys.exit(1) diff --git a/test/test_user_script.py b/test/test_user_script.py index bcad53b9b..ab630b4bb 100644 --- a/test/test_user_script.py +++ b/test/test_user_script.py @@ -15,7 +15,7 @@ def testcmd2(*args): # Provides stub implementations of all hooks. def will_connect(board): - """! @brief Pre-init hook for the board. + """@brief Pre-init hook for the board. @param self @param board A Board instance that is about to be initialized. @return Ignored. @@ -23,7 +23,7 @@ def will_connect(board): pass def did_connect(board): - """! @brief Post-initialization hook for the board. + """@brief Post-initialization hook for the board. @param self @param board A Board instance. @return Ignored. @@ -31,7 +31,7 @@ def did_connect(board): pass def will_init_target(target, init_sequence): - """! @brief Hook to review and modify init call sequence prior to execution. + """@brief Hook to review and modify init call sequence prior to execution. @param self @param target A CoreSightTarget object about to be initialized. @param init_sequence The CallSequence that will be invoked. Because call sequences are @@ -41,7 +41,7 @@ def will_init_target(target, init_sequence): pass def did_init_target(target): - """! @brief Post-initialization hook. + """@brief Post-initialization hook. @param self @param target A CoreSightTarget. @return Ignored. @@ -49,7 +49,7 @@ def did_init_target(target): pass def will_start_debug_core(core): - """! @brief Hook to enable debug for the given core. + """@brief Hook to enable debug for the given core. @param self @param core A CortexM object about to be initialized. @retval True Do not perform the normal procedure to start core debug. 
@@ -58,7 +58,7 @@ def will_start_debug_core(core): pass def did_start_debug_core(core): - """! @brief Post-initialization hook. + """@brief Post-initialization hook. @param self @param core A CortexM object. @return Ignored. @@ -66,7 +66,7 @@ def did_start_debug_core(core): pass def will_stop_debug_core(core): - """! @brief Pre-cleanup hook for the core. + """@brief Pre-cleanup hook for the core. @param self @param core A CortexM object. @retval True Do not perform the normal procedure to disable core debug. @@ -75,7 +75,7 @@ def will_stop_debug_core(core): pass def did_stop_debug_core(core): - """! @brief Post-cleanup hook for the core. + """@brief Post-cleanup hook for the core. @param self @param core A CortexM object. @return Ignored. @@ -83,7 +83,7 @@ def did_stop_debug_core(core): pass def will_disconnect(target, resume): - """! @brief Pre-disconnect hook. + """@brief Pre-disconnect hook. @param self @param target Either a CoreSightTarget or CortexM object. @param resume The value of the `disconnect_on_resume` option. @@ -92,7 +92,7 @@ def will_disconnect(target, resume): pass def did_disconnect(target, resume): - """! @brief Post-disconnect hook. + """@brief Post-disconnect hook. @param self @param target Either a CoreSightTarget or CortexM object. @param resume The value of the `disconnect_on_resume` option. @@ -100,7 +100,7 @@ def did_disconnect(target, resume): pass def will_reset(core, reset_type): - """! @brief Pre-reset hook. + """@brief Pre-reset hook. @param self @param core A CortexM instance. @param reset_type One of the Target.ResetType enumerations. @@ -110,7 +110,7 @@ def will_reset(core, reset_type): pass def did_reset(core, reset_type): - """! @brief Post-reset hook. + """@brief Post-reset hook. @param self @param core A CortexM instance. @param reset_type One of the Target.ResetType enumerations. @@ -119,7 +119,7 @@ def did_reset(core, reset_type): pass def set_reset_catch(core, reset_type): - """! 
@brief Hook to prepare target for halting on reset. + """@brief Hook to prepare target for halting on reset. @param self @param core A CortexM instance. @param reset_type One of the Target.ResetType enumerations. @@ -129,7 +129,7 @@ def set_reset_catch(core, reset_type): pass def clear_reset_catch(core, reset_type): - """! @brief Hook to clean up target after a reset and halt. + """@brief Hook to clean up target after a reset and halt. @param self @param core A CortexM instance. @param reset_type @@ -138,7 +138,7 @@ def clear_reset_catch(core, reset_type): pass def mass_erase(target): - """! @brief Hook to override mass erase. + """@brief Hook to override mass erase. @param self @param target A CoreSightTarget object. @retval True Indicate that mass erase was performed by the hook. @@ -148,7 +148,7 @@ def mass_erase(target): pass def trace_start(target, mode): - """! @brief Hook to prepare for tracing the target. + """@brief Hook to prepare for tracing the target. @param self @param target A CoreSightTarget object. @param mode The trace mode. Currently always 0 to indicate SWO. @@ -157,7 +157,7 @@ def trace_start(target, mode): pass def trace_stop(target, mode): - """! @brief Hook to clean up after tracing the target. + """@brief Hook to clean up after tracing the target. @param self @param target A CoreSightTarget object. @param mode The trace mode. Currently always 0 to indicate SWO. diff --git a/test/test_util.py b/test/test_util.py index f83f92b31..c13a26088 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -225,7 +225,7 @@ def get_test_case(self): return case def filter_output(self, output): - """! 
@brief Hex-encode null byte and control characters.""" + """@brief Hex-encode null byte and control characters.""" result = six.text_type() for c in output: if (c not in ('\n', '\r', '\t')) and (0 <= ord(c) <= 31): diff --git a/test/unit/test_rom_table.py b/test/unit/test_rom_table.py index 4902f86c1..a583329e5 100644 --- a/test/unit/test_rom_table.py +++ b/test/unit/test_rom_table.py @@ -31,7 +31,7 @@ from pyocd.debug.context import DebugContext class MockCoreForMemCache(CoreSightCoreComponent): - """! @brief Just enough of a core to satisfy MemoryCache. + """@brief Just enough of a core to satisfy MemoryCache. Most importantly, it defines a memory map with a single RAM region covering almost the full 4 GB address space. @@ -48,13 +48,13 @@ def is_running(self): MockDebugContext = mock.Mock(spec=DebugContext) class RomMemory(MemoryCache, MemoryInterface): - """! @brief Memory interface for reading constant values. + """@brief Memory interface for reading constant values. Uses the memory cache as readily-available component to store data at fixed addresses. We just have to make sure the cache is never invalidated. """ def __init__(self, ranges): - """! @brief Constructor. + """@brief Constructor. @param self @param ranges Dict of start address -> list of word values. @@ -70,9 +70,9 @@ def _check_cache(self): pass class MockCoreSight(RomMemory): - """! @brief RomMemory based on a list of MockCoreSightComponent objects.""" + """@brief RomMemory based on a list of MockCoreSightComponent objects.""" def __init__(self, components): - """! @brief Constructor. + """@brief Constructor. @param self @param components List of component dicts, where each component dict consists of start @@ -86,14 +86,14 @@ def short_description(self): return "MockCoreSight" class MockCoreSightComponent(object): - """! 
@brief Generates a data dict from CoreSight component ID register values.""" + """@brief Generates a data dict from CoreSight component ID register values.""" # Start offset within the 4 kB CoreSight component memory window of the ID registers # we care about, particularly those read by CoreSightComponentID. CMPID_REGS_OFFSET = 0xfbc def __init__(self, base, cidr, pidr, **kwargs): - """! @brief Constructor. + """@brief Constructor. @param self @param base Base address of the component. @param cidr 32-bit combined CIDR register value. @@ -139,7 +139,7 @@ def data(self): return d class MockM4Components: - """! @ brief Namespace for mock Cortex-M4 Class 0x1 ROM table and core complex components.""" + """@ brief Namespace for mock Cortex-M4 Class 0x1 ROM table and core complex components.""" # ROM table #0 @ 0xe00ff000 (designer=244 part=00d) M4_ROM_TABLE_BASE = 0xe00ff000 @@ -181,7 +181,7 @@ class MockM4Components: ETM = MockCoreSightComponent(ETM_BASE, cidr=0xb105900d, pidr=0x4000bb925, devtype=0x13) class MockCSSOC600Components: - """! @ brief Namespace for mock Class 0x9 ROM table and CoreSight SoC-600 components.""" + """@ brief Namespace for mock Class 0x9 ROM table and CoreSight SoC-600 components.""" C9_ROM_TABLE_BASE = 0x00000000 C9_ROM_TABLE = MockCoreSightComponent(C9_ROM_TABLE_BASE, cidr=0xb105900d, pidr=0x4000bb7d5, diff --git a/test/unit/test_semihosting.py b/test/unit/test_semihosting.py index 93d0f41da..31b4d91d6 100644 --- a/test/unit/test_semihosting.py +++ b/test/unit/test_semihosting.py @@ -98,7 +98,7 @@ def run_til_halt(tgt, semihostagent): BKPT_AB = 0xbeab class RecordingSemihostIOHandler(semihost.SemihostIOHandler): - """! @brief Semihost IO handler that records output. + """@brief Semihost IO handler that records output. This handler is only meant to be used for console I/O since it doesn't implement open() or close(). @@ -142,7 +142,7 @@ def readc(self): return -1 class SemihostRequestBuilder: - """! 
@brief Utility to build code and set registers to perform a semihost request.""" + """@brief Utility to build code and set registers to perform a semihost request.""" def __init__(self, tgt, semihostagent, ramrgn): self.tgt = tgt self.ctx = tgt.get_target_context() @@ -324,7 +324,7 @@ def delete_it(): request.addfinalizer(delete_it) class TestSemihosting: - """! @brief Tests for semihost requests.""" + """@brief Tests for semihost requests.""" def test_open_stdio(self, semihost_builder): fd = semihost_builder.do_open(":tt", 'r') # stdin assert fd == 1 From e2adf0cf4ae68de01652e3285a0a0b7e2cbd8f6e Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Tue, 18 Jan 2022 19:01:00 +0000 Subject: [PATCH 119/123] command line: fix an issue with --color=always being equivalent to auto. (#1311) * main: refactor color log setup into utility function. - Add build_color_logger() utility to encapsulate color log setup. - Move colorama init to build_color_logger(). - Configure colorama to fully support --color=always option. * docs: update configuring_logging and env_vars. --- docs/configuring_logging.md | 8 +++---- docs/env_vars.md | 32 +++++++++++++++++---------- pyocd/__main__.py | 23 ++++++------------- pyocd/core/helpers.py | 5 +---- pyocd/utility/color_log.py | 44 ++++++++++++++++++++++++++++++++++++- 5 files changed, 74 insertions(+), 38 deletions(-) diff --git a/docs/configuring_logging.md b/docs/configuring_logging.md index 832fcef24..5ab164f63 100644 --- a/docs/configuring_logging.md +++ b/docs/configuring_logging.md @@ -50,16 +50,14 @@ level of WARNING to INFO. ## Color logging -By default, log output to the console is colorised. Control over colorised log output is possible two ways. +By default, log output to a tty is colorised. Control over colorised log output is possible two ways. The command-line `--color` argument accepts an optional parameter that must be one of `auto`, `always`, or `never`. 
The default is `auto`, which will enable color only when outputting to a tty. Another option for controlling color output is the `PYOCD_COLOR` environment variable. It should be set to one of the -same values supported by `--color`, or left empty. This environment variable changes the default color output setting, -and is overridden by `--color` on the command line. - -Currently, due to limitations in the colorisation support, `always` behaves the same as `auto`. +same values supported by `--color`. This environment variable changes the default color output setting, and is +overridden by `--color` on the command line. ## Loggers diff --git a/docs/env_vars.md b/docs/env_vars.md index c6dde0bf9..96717696e 100644 --- a/docs/env_vars.md +++ b/docs/env_vars.md @@ -6,6 +6,26 @@ title: Environment variables VariableDescription + +

PYOCD_COLOR

+ +

Changes the default color output setting. Must be one of auto, always, or +never. If not defined, the default is auto, which will enable color only when +outputting to a tty. Overridden by --color on the command line.

+ + + +

PYOCD_HISTORY

+ +

Path to the pyocd commander command history file. The default is ~/.pyocd_history.

+ + + +

PYOCD_HISTORY_LENGTH

+ +

Maximum number of entries in the command history file. Set to -1 for unlimited. Default is 1000.

+ +

PYOCD_PROJECT_DIR

@@ -24,17 +44,5 @@ unset. CMSIS-DAP v2 probes are unaffected by the environment variable; pyusb is by default.

- -

PYOCD_HISTORY

- -

Path to the pyocd commander command history file. The default is ~/.pyocd_history.

- - - -

PYOCD_HISTORY_LENGTH

- -

Maximum number of entries in the command history file. Set to -1 for unlimited. Default is 1000.

- - diff --git a/pyocd/__main__.py b/pyocd/__main__.py index 9857d4964..a6e5a49a3 100644 --- a/pyocd/__main__.py +++ b/pyocd/__main__.py @@ -3,7 +3,7 @@ # pyOCD debugger # Copyright (c) 2018-2020 Arm Limited # Copyright (c) 2020 Cypress Semiconductor Corporation -# Copyright (c) 2021 Chris Reed +# Copyright (c) 2021-2022 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,7 +31,7 @@ from .core import exceptions from .probe.pydapaccess import DAPAccess from .core import options -from .utility.color_log import ColorFormatter +from .utility.color_log import build_color_logger from .subcommands.base import SubcommandBase from .subcommands.commander_cmd import CommanderSubcommand from .subcommands.erase_cmd import EraseSubcommand @@ -105,24 +105,15 @@ def _setup_logging(self) -> None: Log level for specific loggers are also configured here. """ - is_tty = sys.stderr.isatty() - color_setting = ((hasattr(self._args, 'color') and self._args.color) or os.environ.get('PYOCD_COLOR', 'auto')) - use_color = (color_setting == "always") or (color_setting == "auto" and is_tty) + # Get the color setting to use, defaulting to 'auto'. + color_setting = ((hasattr(self._args, 'color') and self._args.color) \ + or os.environ.get('PYOCD_COLOR', 'auto')) # Compute global log level. level = max(1, self._args.command_class.DEFAULT_LOG_LEVEL + self._get_log_level_delta()) - # Create handler to output logging to stderr. - console = logging.StreamHandler() - - # Create the color formatter and attach to our stream handler. - color_formatter = ColorFormatter(ColorFormatter.FORMAT, use_color, is_tty) - console.setFormatter(color_formatter) - - # Set stream handler and log level on root logger. - root_logger = logging.getLogger() - root_logger.addHandler(console) - root_logger.setLevel(level) + # Build the logger to output to stderr (the default). 
+ build_color_logger(level=level, color_setting=color_setting) # Handle settings for individual loggers from --log-level arguments. for logger_setting in self._args.log_level: diff --git a/pyocd/core/helpers.py b/pyocd/core/helpers.py index 81b304dbc..63d21881f 100644 --- a/pyocd/core/helpers.py +++ b/pyocd/core/helpers.py @@ -1,6 +1,6 @@ # pyOCD debugger # Copyright (c) 2018-2019 Arm Limited -# Copyright (c) 2021 Chris Reed +# Copyright (c) 2021-2022 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,9 +29,6 @@ from .session import Session from ..probe.aggregator import DebugProbeAggregator -# Init colorama here since this is currently the only module that uses it. -colorama.init() - class ConnectHelper: """@brief Helper class for streamlining the probe discovery and session creation process. diff --git a/pyocd/utility/color_log.py b/pyocd/utility/color_log.py index 4a5b45514..6f0813f41 100644 --- a/pyocd/utility/color_log.py +++ b/pyocd/utility/color_log.py @@ -1,6 +1,6 @@ # pyOCD debugger # Copyright (c) 2018-2020 Arm Limited -# Copyright (c) 2021 Chris Reed +# Copyright (c) 2021-2022 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import colorama from colorama import (Fore, Style) import logging from shutil import get_terminal_size +import sys +from typing import (IO, Optional) class ColorFormatter(logging.Formatter): """@brief Log formatter that applies colours based on the record's log level.""" @@ -96,3 +99,42 @@ def format(self, record) -> str: log_msg += "\n" + Style.DIM + self.formatStack(stack_info) + Style.RESET_ALL return log_msg + + +def build_color_logger( + level: int = logging.INFO, + color_setting: str = 'auto', + stream: Optional[IO[str]] = None, + is_tty: Optional[bool] = None, + ) -> logging.Logger: + """@brief Sets up color logging for the root logger. + + @param level Log level of the root logger. + @param color_setting One of 'auto', 'always', or 'never'. The default 'auto' enables color if `is_tty` is True. + @param stream The stream to which the log will be output. The default is stderr. + @param is_tty Whether the output stream is a tty. Affects the 'auto' color_setting. If not provided, the `isatty()` + method of `stream` is called if it exists. If `stream` doesn't have `isatty()` then the default is False. + """ + if stream is None: + stream = sys.stderr + if is_tty is None: + is_tty = stream.isatty() if hasattr(stream, 'isatty') else False + use_color = (color_setting == "always") or (color_setting == "auto" and is_tty) + + # Init colorama with appropriate color setting. + colorama.init(strip=(not use_color)) + + # Create handler to output logging to stderr. + console = logging.StreamHandler(stream) + + # Create the color formatter and attach to our stream handler. + color_formatter = ColorFormatter(ColorFormatter.FORMAT, use_color, is_tty) + console.setFormatter(color_formatter) + + # Set stream handler and log level on root logger. 
+ root_logger = logging.getLogger() + root_logger.addHandler(console) + root_logger.setLevel(level) + + return root_logger + From a6d018a4afc832f220adfabd4e548ab89fb1db2d Mon Sep 17 00:00:00 2001 From: j4cbo Date: Tue, 18 Jan 2022 11:47:27 -0800 Subject: [PATCH 120/123] coresight: don't read CoreSight registers for non-CoreSight components (#1213) The CoreSight spec (IHI0029D B2.3) says "All component classes require the implementation of the Component and Peripheral Identification registers", but non-CoreSight components may sometimes fault on reading registers outside this space. In particular, a Cortex-M7 SCS, while the core is in reset, will fault on an attempt to read 0xe000efbc, which relates to the data cache and not component identification. To fix, we should avoid reading registers outside PIDR and CIDR unless the component has the CoreSight component class. --- pyocd/coresight/rom_table.py | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/pyocd/coresight/rom_table.py b/pyocd/coresight/rom_table.py index f34128ad6..2ef91c030 100644 --- a/pyocd/coresight/rom_table.py +++ b/pyocd/coresight/rom_table.py @@ -1,6 +1,7 @@ # pyOCD debugger # Copyright (c) 2015-2020 Arm Limited # Copyright (c) 2021 Chris Reed +# Copyright (c) 2021 Jacob Berg Potter # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,10 +37,7 @@ class CoreSightComponentID(object): registers are made available as attributes. """ - # CoreSight identification register offsets. - DEVARCH = 0xfbc - DEVID = 0xfc8 - DEVTYPE = 0xfcc + # Component identification register offsets. PIDR4 = 0xfd0 PIDR0 = 0xfe0 CIDR0 = 0xff0 @@ -47,16 +45,27 @@ class CoreSightComponentID(object): # Range of identification registers to read at once and offsets in results. # - # To improve component identification performance, we read all of a components - # CoreSight ID registers in a single read. 
Reading starts at the DEVARCH register. - IDR_READ_START = DEVARCH + # To improve component identification performance, we read all of a component's + # ID registers in a single read. + IDR_READ_START = PIDR4 IDR_READ_COUNT = (IDR_END - IDR_READ_START) // 4 - DEVARCH_OFFSET = (DEVARCH - IDR_READ_START) // 4 - DEVTYPE_OFFSET = (DEVTYPE - IDR_READ_START) // 4 PIDR4_OFFSET = (PIDR4 - IDR_READ_START) // 4 PIDR0_OFFSET = (PIDR0 - IDR_READ_START) // 4 CIDR0_OFFSET = (CIDR0 - IDR_READ_START) // 4 + # CoreSight identification register offsets. + DEVARCH = 0xfbc + DEVTYPE = 0xfcc + CORESIGHT_IDR_END = 0xfd0 + + # Range of CoreSight-specific registers to read. Non-CoreSight components may not + # implement these registers, and may even error on attempting to read them, so we + # only read them if the component's class is CORESIGHT_CLASS. + CORESIGHT_IDR_READ_START = DEVARCH + CORESIGHT_IDR_READ_COUNT = (CORESIGHT_IDR_END - CORESIGHT_IDR_READ_START) // 4 + DEVARCH_OFFSET = (DEVARCH - CORESIGHT_IDR_READ_START) // 4 + DEVTYPE_OFFSET = (DEVTYPE - CORESIGHT_IDR_READ_START) // 4 + # Component ID register fields. CIDR_PREAMBLE_MASK = 0xffff0fff CIDR_PREAMBLE_VALUE = 0xb105000d @@ -134,11 +143,14 @@ def read_id_registers(self): # Class 0x1 ROM table. self.is_rom_table = True elif self.component_class == self.CORESIGHT_CLASS: + coresight_regs = self.ap.read_memory_block32( + self.top_address + self.CORESIGHT_IDR_READ_START, self.CORESIGHT_IDR_READ_COUNT) + # For CoreSight-class components, extract additional fields. 
- self.devarch = regs[self.DEVARCH_OFFSET] - self.devid = regs[1:4] + self.devarch = coresight_regs[self.DEVARCH_OFFSET] + self.devid = coresight_regs[1:4] self.devid.reverse() - self.devtype = regs[self.DEVTYPE_OFFSET] + self.devtype = coresight_regs[self.DEVTYPE_OFFSET] if self.devarch & self.DEVARCH_PRESENT_MASK: self.archid = self.devarch & self.DEVARCH_ARCHID_MASK From 91ba2dfca5bfeb3c4177041dcbff61796312110d Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Fri, 21 Jan 2022 18:40:33 +0000 Subject: [PATCH 121/123] gdbserver: packet_io: fix timeout exception class; some cleanup. (#1316) - Catch socket.timeout instead of TimeoutError. The former was only made an alias of the latter in Python 3.10. - Add RECEIVE_TIMEOUT constant. - Change socket timeout to 100 ms (RECEIVE_TIMEOUT). - Cleanup super() call. --- pyocd/gdbserver/packet_io.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/pyocd/gdbserver/packet_io.py b/pyocd/gdbserver/packet_io.py index 6d7d99e24..ed60b2cb5 100644 --- a/pyocd/gdbserver/packet_io.py +++ b/pyocd/gdbserver/packet_io.py @@ -18,6 +18,7 @@ import logging import threading import queue +import socket CTRL_C = b'\x03' @@ -45,8 +46,11 @@ class GDBServerPacketIOThread(threading.Thread): method writes outgoing packets to the socket immediately. """ + ## 100 ms timeout for socket and receive queue reads. + RECEIVE_TIMEOUT = 0.1 + def __init__(self, abstract_socket): - super(GDBServerPacketIOThread, self).__init__() + super().__init__() self.name = "gdb-packet-thread-port%d" % abstract_socket.port self._abstract_socket = abstract_socket self._receive_queue = queue.Queue() @@ -89,7 +93,7 @@ def receive(self, block=True): # If block is false, we'll get an Empty exception immediately if there # are no packets in the queue. Same if block is true and it times out # waiting on an empty queue. 
- return self._receive_queue.get(block, 0.1) + return self._receive_queue.get(block, self.RECEIVE_TIMEOUT) except queue.Empty: # Only exit the loop if block is false or connection closed. if not block: @@ -100,7 +104,7 @@ def receive(self, block=True): def run(self): LOG.debug("Starting GDB server packet I/O thread") - self._abstract_socket.set_timeout(0.01) + self._abstract_socket.set_timeout(self.RECEIVE_TIMEOUT) while not self._shutdown_event.is_set(): try: @@ -119,6 +123,9 @@ def run(self): LOG.warning("GDB packet thread: connection unexpectedly closed during receive (%s)", err) self._closed = True break + except socket.timeout: + # Ignore timeouts. + pass except OSError as err: LOG.debug("Error in packet IO thread: %s", err) From f34e2de8c72201ae2f0861a7e0a5a2f21481b4bf Mon Sep 17 00:00:00 2001 From: David Runge Date: Sat, 22 Jan 2022 23:39:13 +0100 Subject: [PATCH 122/123] Switch from naturalsort to natsort (#1318) pyocd/commands/commands.py: Use natsort's `natsorted()` instead of naturalsort's `natsort()`. setup.cfg: Replace naturalsort with natsort. 
Fixes #1317 --- pyocd/commands/commands.py | 5 +++-- setup.cfg | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pyocd/commands/commands.py b/pyocd/commands/commands.py index db4b23cf7..ef50720b3 100755 --- a/pyocd/commands/commands.py +++ b/pyocd/commands/commands.py @@ -1,6 +1,7 @@ # pyOCD debugger # Copyright (c) 2015-2020 Arm Limited # Copyright (c) 2021 Chris Reed +# Copyright (c) 2022 David Runge # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +18,7 @@ import logging import os -from natsort import natsort +from natsort import natsorted import textwrap from time import sleep from shutil import get_terminal_size @@ -116,7 +117,7 @@ def execute(self): class RegisterCommandBase(CommandBase): def dump_register_group(self, group_name): - regs = natsort(self.context.selected_core.core_registers.iter_matching( + regs = natsorted(self.context.selected_core.core_registers.iter_matching( lambda r: r.group == group_name), key=lambda r: r.name) reg_values = self.context.selected_core.read_core_registers_raw(r.name for r in regs) diff --git a/setup.cfg b/setup.cfg index e927e448d..b1c6afee6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -55,7 +55,7 @@ install_requires = intelhex>=2.0,<3.0 intervaltree>=3.0.2,<4.0 libusb-package>=1.0,<2.0 - naturalsort>=1.5,<2.0 + natsort>=8.0.0,<9.0 prettytable>=2.0,<3.0 pyelftools<1.0 pylink-square>=0.11.1,<1.0 From bade0ae0b32b26f08202f79100c053d4ec67a73a Mon Sep 17 00:00:00 2001 From: Chris Reed Date: Sun, 23 Jan 2022 18:48:55 +0000 Subject: [PATCH 123/123] coresight: ap: set CSW.DBGSWEN for CSSoC-400 APB-AP. (#1321) If this bit in CSW is not set on this particular APB-AP, software running on the device will not be able to access the memory space downstream of the AP. 
--- pyocd/coresight/ap.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pyocd/coresight/ap.py b/pyocd/coresight/ap.py index 822756c65..a9f49c0fc 100644 --- a/pyocd/coresight/ap.py +++ b/pyocd/coresight/ap.py @@ -130,6 +130,7 @@ CSW_MSTRTYPE = 0x20000000 # Only present in M3/M3 AHB-AP, RES0 in others CSW_MSTRCORE = 0x00000000 CSW_MSTRDBG = 0x20000000 +CSW_DBGSWEN = 0x80000000 # Only present in CSSoC-400 APB-AP, RES0 in others DEFAULT_CSW_VALUE = CSW_SADDRINC @@ -532,6 +533,12 @@ def __init__( ## Base CSW value to use. self._csw: int = DEFAULT_CSW_VALUE + # Certain MEM-APs support a DBGSWEN control in the AP's CSW register. When set to zero, software running + # on the device is prevented from accessing the memory space downstream from the MEM-AP. This feature is + # deprecated in ADIv6, and Arm recommends to never clear the bit when implemented. + if self._flags & AP_DBGSWEN: + self._csw |= CSW_DBGSWEN + ## Cached current CSW value. self._cached_csw: int = -1 @@ -1236,10 +1243,11 @@ def find_components(self) -> None: AP_4K_WRAP = 0x1 # The AP has a 4 kB auto-increment modulus. AP_ALL_TX_SZ = 0x2 # The AP is known to support 8-, 16-, and 32-bit transfers, *unless* Large Data is implemented. AP_MSTRTYPE = 0x4 # The AP is known to support the MSTRTYPE field. +AP_DBGSWEN = 0x8 # The AP is known to support the DBGSWEN flag. ## Map from AP IDR fields to AccessPort subclass. # -# The dict maps from a 4-tuple of (JEP106 code, AP class, variant, type) to 2-tuple (name, class, flags). +# The dict maps from a 4-tuple of (JEP106 code, AP class, variant, type) to 3-tuple (name, class, flags). # # Known AP IDRs: # 0x24770011 AHB-AP with 0x1000 wrap and MSTRTYPE @@ -1255,7 +1263,8 @@ def find_components(self) -> None: # 0x84770001 AHB-AP Used on K32W042 # 0x14770005 AHB5-AP Used on M33. Note that M33 r0p0 incorrect fails to report this IDR. # 0x04770025 AHB5-AP Used on M23. -# 0x54770002 APB-AP used on M33. 
+# 0x54770002 APB-AP used on STM32H743, from CSSoC-400 +# 0x34770017 AXI5-AP from Corstone-700 AP_TYPE_MAP: Dict[Tuple[int, int, int, int], Tuple[str, Type[AccessPort], int]] = { # |JEP106 |Class |Var|Type |Name |Class (AP_JEP106_ARM, AP_CLASS_JTAG_AP, 0, 0): ("JTAG-AP", AccessPort, 0 ), @@ -1265,12 +1274,13 @@ def find_components(self) -> None: (AP_JEP106_ARM, AP_CLASS_MEM_AP, 2, AP_TYPE_AHB): ("AHB-AP", AHB_AP, AP_ALL_TX_SZ ), (AP_JEP106_ARM, AP_CLASS_MEM_AP, 3, AP_TYPE_AHB): ("AHB-AP", AHB_AP, AP_ALL_TX_SZ ), (AP_JEP106_ARM, AP_CLASS_MEM_AP, 4, AP_TYPE_AHB): ("AHB-AP", AHB_AP, AP_ALL_TX_SZ ), - (AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_APB): ("APB-AP", MEM_AP, 0 ), + (AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_APB): ("APB-AP", MEM_AP, AP_DBGSWEN ), (AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_AXI): ("AXI-AP", MEM_AP, AP_ALL_TX_SZ ), (AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_AHB5): ("AHB5-AP", AHB_AP, AP_ALL_TX_SZ ), (AP_JEP106_ARM, AP_CLASS_MEM_AP, 1, AP_TYPE_AHB5): ("AHB5-AP", AHB_AP, AP_ALL_TX_SZ ), (AP_JEP106_ARM, AP_CLASS_MEM_AP, 2, AP_TYPE_AHB5): ("AHB5-AP", AHB_AP, AP_ALL_TX_SZ ), (AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_APB4): ("APB4-AP", MEM_AP, 0 ), (AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_AXI5): ("AXI5-AP", MEM_AP, AP_ALL_TX_SZ ), + (AP_JEP106_ARM, AP_CLASS_MEM_AP, 1, AP_TYPE_AXI5): ("AXI5-AP", MEM_AP, AP_ALL_TX_SZ ), (AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_AHB5_HPROT): ("AHB5-AP", MEM_AP, AP_ALL_TX_SZ ), }