#include "sd_mmc_spi.h" #include "atmel_start_pins.h" #include "hal_io.h" #include "hal_spi_m_sync.h" #include "sd_mmc_protocol.h" static inline void sd_mmc_device_enable() { PORT->Group[1].OUT.reg &= ~(1 << SPI_CS_PIN); } static inline void sd_mmc_device_disable() { PORT->Group[1].OUT.reg |= (1 << SPI_CS_PIN); } static sd_mmc_spi_errno_t sd_mmc_spi_err; // 32 bits response of the last command static uint32_t sd_mmc_spi_response_32; // Current position (byte) of the transfer started by spi_m_sync_adtc_start() static uint32_t sd_mmc_spi_transfert_pos; // Size block requested by last spi_m_sync_adtc_start() static uint16_t sd_mmc_spi_block_size; // Total number of block requested by last spi_m_sync_adtc_start() static uint16_t sd_mmc_spi_nb_block; static uint8_t spi_m_sync_crc7(uint8_t * buf, uint8_t size) { uint8_t crc, value, i; crc = 0; while (size--) { value = *buf++; for (i = 0; i < 8; i++) { crc <<= 1; if ((value & 0x80) ^ (crc & 0x80)) { crc ^= 0x09; } value <<= 1; } } crc = (crc << 1) | 1; return crc; } static bool spi_m_sync_wait_busy(struct spi_m_sync_descriptor* spi) { uint8_t line = 0xFF; uint8_t dummy = 0xFF; /* Delay before check busy * Nbr timing minimum = 8 cylces */ struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); io_write(spi_inst, &dummy, 1); io_read(spi_inst, &line, 1); /* Wait end of busy signal * Nec timing: 0 to unlimited * However a timeout is used. * 200 000 * 8 cycles */ uint32_t nec_timeout = 200000; io_write(spi_inst, &dummy, 1); io_read(spi_inst, &line, 1); do { io_write(spi_inst, &dummy, 1); io_read(spi_inst, &line, 1); if (!(nec_timeout--)) { return false; } } while (line != 0xFF); return true; } static void spi_m_sync_start_write_block(struct spi_m_sync_descriptor* spi) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint8_t dummy = 0xFF; assert(!(sd_mmc_spi_transfert_pos %sd_mmc_spi_block_size), ">>>"); // Delay before start writing block: // Nwr timing minimum = 8 cycles io_write(spi_inst, &dummy, 1); // Send start token uint8_t token; if (1 == sd_mmc_spi_nb_block) { token = SPI_TOKEN_SINGLE_WRITE; } else { token = SPI_TOKEN_MULTI_WRITE; } io_write(spi_inst, &token, 1); } static bool spi_m_sync_stop_write_block(struct spi_m_sync_descriptor* spi) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint8_t resp; uint16_t crc; uint8_t dummy = 0xFF; // Send CRC crc = 0xFFFF; /// CRC is disabled in SPI mode io_write(spi_inst, (uint8_t*)&crc, 2); // Receiv data response token io_write(spi_inst, &dummy, 1); io_read(spi_inst, &resp, 1); if (!SPI_TOKEN_DATA_RESP_VALID(resp)) { sd_mmc_spi_err = SD_MMC_SPI_ERR; sd_mmc_spi_debug("%s: Invalid Data Response Token 0x%x\n\r", __func__, resp); return false; } // Check data response switch (SPI_TOKEN_DATA_RESP_CODE(resp)) { case SPI_TOKEN_DATA_RESP_ACCEPTED: break; case SPI_TOKEN_DATA_RESP_CRC_ERR: sd_mmc_spi_err = SD_MMC_SPI_ERR_WRITE_CRC; sd_mmc_spi_debug("%s: Write blocks, SD_MMC_SPI_ERR_CRC, resp 0x%x\n\r", __func__, resp); return false; case SPI_TOKEN_DATA_RESP_WRITE_ERR: default: sd_mmc_spi_err = SD_MMC_SPI_ERR_WRITE; sd_mmc_spi_debug("%s: Write blocks SD_MMC_SPI_ERR_WR, resp 0x%x\n\r", __func__, resp); return false; } return true; } static bool spi_m_sync_stop_multiwrite_block(struct spi_m_sync_descriptor* spi) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint8_t value; if (1 == sd_mmc_spi_nb_block) { return true; // Single block write } if (sd_mmc_spi_nb_block > (sd_mmc_spi_transfert_pos / sd_mmc_spi_block_size)) { return true; // It is not the End of multi write } // Delay before start write block: // Nwr timing minimum = 8 cylces value = 0xFF; io_write(spi_inst, &value, 1); // Send stop token value = SPI_TOKEN_STOP_TRAN; io_write(spi_inst, &value, 1); // Wait busy if (!spi_m_sync_wait_busy(spi)) { sd_mmc_spi_err = SD_MMC_SPI_ERR_WRITE_TIMEOUT; sd_mmc_spi_debug("%s: Stop write blocks timeout\n\r", __func__); return false; } return true; } static bool spi_m_sync_start_read_block(struct spi_m_sync_descriptor* spi) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint32_t i; uint8_t token; uint8_t dummy = 0xFF; assert(!(sd_mmc_spi_transfert_pos % sd_mmc_spi_block_size), ">>>"); /* Wait for start data token: * The read timeout is the Nac timing. * Nac must be computed trough CSD values, * or it is 100ms for SDHC / SDXC * Compute the maximum timeout: * Frequency maximum = 25MHz * 1 byte = 8 cycles * 100ms = 312500 x spi_read_buffer_wait() maximum */ token = 0; i = 500000; do { if (i-- == 0) { sd_mmc_spi_err = SD_MMC_SPI_ERR_READ_TIMEOUT; sd_mmc_spi_debug("%s: Read blocks timeout\n\r", __func__); return false; } io_write(spi_inst, &dummy, 1); io_read(spi_inst, &token, 1); if (SPI_TOKEN_DATA_ERROR_VALID(token)) { assert(SPI_TOKEN_DATA_ERROR_ERRORS & token, ">>>"); if (token & (SPI_TOKEN_DATA_ERROR_ERROR | SPI_TOKEN_DATA_ERROR_ECC_ERROR | SPI_TOKEN_DATA_ERROR_CC_ERROR)) { sd_mmc_spi_debug("%s: CRC data error token\n\r", __func__); sd_mmc_spi_err = SD_MMC_SPI_ERR_READ_CRC; } else { sd_mmc_spi_debug("%s: Out of range data error token\n\r", __func__); sd_mmc_spi_err = SD_MMC_SPI_ERR_OUT_OF_RANGE; } return false; } } while (token != SPI_TOKEN_SINGLE_MULTI_READ); return true; } static void spi_m_sync_stop_read_block(struct spi_m_sync_descriptor* spi) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint8_t crc[2]; uint8_t dummy = 0xFF; // Read 16-bit CRC (not cheked) io_write(spi_inst, &dummy, 1); io_read(spi_inst, crc, 2); } bool spi_m_sync_send_cmd(struct spi_m_sync_descriptor* spi, uint32_t cmd, uint32_t arg) { return spi_m_sync_adtc_start(spi, cmd, arg, 0, 0, false); } bool spi_m_sync_adtc_start(struct spi_m_sync_descriptor* spi, uint32_t cmd, uint32_t arg, uint16_t block_size, uint16_t nb_block, bool access_block) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint8_t dummy = 0xFF; uint8_t cmd_token[6]; uint8_t ncr_timeout; uint8_t r1; uint8_t dummy2 = 0xFF; (void)access_block; assert(cmd & SDMMC_RESP_PRESENT, "No SD Card response was present..."); sd_mmc_spi_err = SD_MMC_SPI_NO_ERR; cmd_token[0] = SPI_CMD_ENCODE(SDMMC_CMD_GET_INDEX(cmd)); cmd_token[1] = arg >> 24; cmd_token[2] = arg >> 16; cmd_token[3] = arg >> 8; cmd_token[4] = arg; cmd_token[5] = spi_m_sync_crc7(cmd_token, 5); // 8 cycles to respect Ncs timing io_write(spi_inst, &dummy, 1); // send command io_write(spi_inst, cmd_token, sizeof(cmd_token)); // Wait for response // Two retries will be done to manage the Ncr timing between command and response // Ncr: Min. 1x8 clock cycle, Max 8x8 clock cycles // WORKAROUND for no compliance (Atmel Internal ref. SD13) r1 = 0xFF; // Ignore first byte because Ncr min. = 8 clock cycles io_read(spi_inst, &r1, 1); ncr_timeout = 7; while(1) { io_read(spi_inst, &r1, 1); if((r1 & R1_SPI_ERROR) == 0) { // Valid response break; } if(--ncr_timeout == 0) { // Here valid r1 response received sd_mmc_spi_debug("%s: cmd %02d, arg 0x%08lX, R1 timeout\r\n", __func__, (int)SDMMC_CMD_GET_INDEX(cmd), arg); sd_mmc_spi_err = SD_MMC_SPI_ERR_RESP_TIMEOUT; return false; } } // Save R1 (specific to spi interface) in 32 bit response // The R1_SPI_IDLE bit can be checked by high level sd_mmc_spi_response_32 = r1; // Manage error in R1 if (r1 & R1_SPI_COM_CRC) { sd_mmc_spi_debug("%s: cmd %02d, arg 0x%08lx, r1 0x%02x, R1_SPI_COM_CRC\n\r", __func__, (int)SDMMC_CMD_GET_INDEX(cmd), arg, r1); sd_mmc_spi_err = SD_MMC_SPI_ERR_RESP_CRC; return false; } if (r1 & R1_SPI_ILLEGAL_COMMAND) { sd_mmc_spi_debug("%s: cmd %02d, arg 0x%08lx, r1 0x%x, R1 ILLEGAL_COMMAND\n\r", __func__, (int)SDMMC_CMD_GET_INDEX(cmd), arg, r1); sd_mmc_spi_err = SD_MMC_SPI_ERR_ILLEGAL_COMMAND; return false; } if (r1 & ~R1_SPI_IDLE) { // Other error sd_mmc_spi_debug("%s: cmd %02d, arg 0x%08lx, r1 0x%x, R1 error\n\r", __func__, (int)SDMMC_CMD_GET_INDEX(cmd), arg, r1); sd_mmc_spi_err = SD_MMC_SPI_ERR; return false; } // Manage other responses if (cmd & SDMMC_RESP_BUSY) { if (!spi_m_sync_wait_busy(spi)) { sd_mmc_spi_err = SD_MMC_SPI_ERR_RESP_BUSY_TIMEOUT; sd_mmc_spi_debug("%s: cmd %02d, arg 0x%08lx, Busy signal always high\n\r", __func__, (int)SDMMC_CMD_GET_INDEX(cmd), arg); return false; } } if (cmd & SDMMC_RESP_8) { sd_mmc_spi_response_32 = 0; io_write(spi_inst, &dummy2, 1); io_read(spi_inst, (uint8_t*)&sd_mmc_spi_response_32, 1); sd_mmc_spi_response_32 = LE32(sd_mmc_spi_response_32); } if (cmd & SDMMC_RESP_32) { io_write(spi_inst, &dummy2, 1); io_read(spi_inst, (uint8_t*)&sd_mmc_spi_response_32, 4); sd_mmc_spi_response_32 = BE32(sd_mmc_spi_response_32); } sd_mmc_spi_block_size = block_size; sd_mmc_spi_nb_block = nb_block; sd_mmc_spi_transfert_pos = 0; return true; } bool spi_m_sync_start_read_blocks(struct spi_m_sync_descriptor* spi, void *dst, uint16_t nb_block) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint32_t pos; uint8_t dummy = 0xFF; sd_mmc_spi_err = SD_MMC_SPI_NO_ERR; pos = 0; while (nb_block--) { assert(sd_mmc_spi_nb_block > (sd_mmc_spi_transfert_pos / sd_mmc_spi_block_size), ">>>"); if (!spi_m_sync_start_read_block(spi)) { return false; } // Read block io_write(spi_inst, &dummy, 1); io_read(spi_inst, &((uint8_t*)dst)[pos], sd_mmc_spi_block_size); pos += sd_mmc_spi_block_size; sd_mmc_spi_transfert_pos += sd_mmc_spi_block_size; spi_m_sync_stop_read_block(spi); } return true; } bool spi_m_sync_start_write_blocks(struct spi_m_sync_descriptor* spi, const void *src, uint16_t nb_block) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint32_t pos; sd_mmc_spi_err = SD_MMC_SPI_NO_ERR; pos = 0; while (nb_block--) { assert(sd_mmc_spi_nb_block > (sd_mmc_spi_transfert_pos / sd_mmc_spi_block_size), ">>>"); spi_m_sync_start_write_block(spi); // Write block io_write(spi_inst, &((uint8_t*)src)[pos], sd_mmc_spi_block_size); pos += sd_mmc_spi_block_size; sd_mmc_spi_transfert_pos += sd_mmc_spi_block_size; if (!spi_m_sync_stop_write_block(spi)) { return false; } // Do not check busy of last block // but delay it to mci_wait_end_of_write_blocks() if (nb_block) { // Wait busy due to data programmation if (!spi_m_sync_wait_busy(spi)) { sd_mmc_spi_err = SD_MMC_SPI_ERR_WRITE_TIMEOUT; sd_mmc_spi_debug("%s: Write blocks timeout\n\r", __func__); return false; } } } return true; } bool spi_m_sync_wait_end_of_write_blocks(struct spi_m_sync_descriptor* spi) { // Wait busy due to data programmation of last block writed if (!spi_m_sync_wait_busy(spi)) { sd_mmc_spi_err = SD_MMC_SPI_ERR_WRITE_TIMEOUT; sd_mmc_spi_debug("%s: Write blocks timeout\n\r", __func__); return false; } return spi_m_sync_stop_multiwrite_block(spi); } bool spi_m_sync_wait_end_of_read_blocks(struct spi_m_sync_descriptor* spi) { return true; } bool spi_m_sync_write_word(struct spi_m_sync_descriptor* spi, uint32_t value) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); sd_mmc_spi_err = SD_MMC_SPI_NO_ERR; assert(sd_mmc_spi_nb_block > (sd_mmc_spi_transfert_pos / sd_mmc_spi_block_size), "<<<"); if(!(sd_mmc_spi_transfert_pos % sd_mmc_spi_block_size)) { spi_m_sync_start_write_block(spi); } // Write data value = LE32(value); io_write(spi_inst, (uint8_t*)&value, 4); sd_mmc_spi_transfert_pos += 4; if (!(sd_mmc_spi_transfert_pos % sd_mmc_spi_block_size)) { // End of block if (!spi_m_sync_stop_write_block(spi)) { return false; } // Wait busy due to data programmation if (!spi_m_sync_wait_busy(spi)) { sd_mmc_spi_err = SD_MMC_SPI_ERR_WRITE_TIMEOUT; sd_mmc_spi_debug("%s: Write blocks timeout\n\r", __func__); return false; } } return spi_m_sync_stop_multiwrite_block(spi); } bool spi_m_sync_read_word(struct spi_m_sync_descriptor* spi, uint32_t* value) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint8_t dummy = 0xFF; sd_mmc_spi_err = SD_MMC_SPI_NO_ERR; assert(sd_mmc_spi_nb_block > (sd_mmc_spi_transfert_pos / sd_mmc_spi_block_size), ">>>"); if (!(sd_mmc_spi_transfert_pos % sd_mmc_spi_block_size)) { // New block if (!spi_m_sync_start_read_block(spi)) { return false; } } // Read data io_write(spi_inst, &dummy, 1); io_read(spi_inst, (uint8_t*)&value, 4); *value = LE32(*value); sd_mmc_spi_transfert_pos += 4; if (!(sd_mmc_spi_transfert_pos % sd_mmc_spi_block_size)) { // End of block spi_m_sync_stop_read_block(spi); } return true;} uint32_t spi_m_sync_get_response(struct spi_m_sync_descriptor* spi) { return sd_mmc_spi_response_32; } void spi_m_sync_send_clock(struct spi_m_sync_descriptor* spi) { struct io_descriptor* spi_inst = NULL; spi_m_sync_get_io_descriptor(spi, &spi_inst); uint8_t i; uint8_t dummy = 0xFF; sd_mmc_spi_err = SD_MMC_SPI_NO_ERR; // Send 80 cycles for(i = 0; i < 10; i++) { io_write(spi_inst, &dummy, 1); // 8 cycles } } int32_t spi_m_sync_select_device(struct spi_m_sync_descriptor* spi, uint8_t slot, uint32_t clock, uint8_t bus_width, bool high_speed) { UNUSED(bus_width); UNUSED(high_speed); sd_mmc_spi_err = SD_MMC_SPI_NO_ERR; PORT->Group[SPI_CS_PORT].OUT.reg &= ~(1 << SPI_CS_PIN); return 0; } int32_t spi_m_sync_deselect_device(struct spi_m_sync_descriptor* spi, uint8_t slot) { sd_mmc_spi_err = SD_MMC_SPI_NO_ERR; PORT->Group[1].OUT.reg |= (1 << SPI_CS_PIN); return 0; }