esp_async_memcpy.c
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "hal/dma_types.h"
#include "esp_compiler.h"
#include "esp_heap_caps.h"
#include "esp_log.h"
#include "esp_async_memcpy.h"
#include "esp_async_memcpy_impl.h"

static const char *TAG = "async_memcpy";

#define ASMCP_CHECK(a, msg, tag, ret, ...)                                            \
    do                                                                                \
    {                                                                                 \
        if (unlikely(!(a)))                                                           \
        {                                                                             \
            ESP_LOGE(TAG, "%s(%d): " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__);     \
            ret_code = ret;                                                           \
            goto tag;                                                                 \
        }                                                                             \
    } while (0)

/**
 * @brief Type of async mcp stream
 *        An mcp stream inherits from the DMA descriptor and additionally carries a callback function member
 */
typedef struct {
    dma_descriptor_t desc;
    async_memcpy_isr_cb_t cb;
    void *cb_args;
} async_memcpy_stream_t;

/**
 * @brief Type of async mcp driver context
 */
typedef struct async_memcpy_context_t {
    async_memcpy_impl_t mcp_impl;            // implementation layer
    intr_handle_t intr_hdl;                  // interrupt handle
    uint32_t flags;                          // extra driver flags
    dma_descriptor_t *tx_desc;               // pointer to the next free TX descriptor
    dma_descriptor_t *rx_desc;               // pointer to the next free RX descriptor
    dma_descriptor_t *next_rx_desc_to_check; // pointer to the next RX descriptor to recycle
    uint32_t max_stream_num;                 // maximum number of streams
    async_memcpy_stream_t *out_streams;      // pointer to the first TX stream
    async_memcpy_stream_t *in_streams;       // pointer to the first RX stream
    async_memcpy_stream_t streams_pool[0];   // stream pool (TX + RX), the size is configured during driver installation
} async_memcpy_context_t;

esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_memcpy_t *asmcp)
{
    esp_err_t ret_code = ESP_OK;
    async_memcpy_context_t *mcp_hdl = NULL;

    ASMCP_CHECK(config, "configuration can't be null", err, ESP_ERR_INVALID_ARG);
    ASMCP_CHECK(asmcp, "can't assign mcp handle to null", err, ESP_ERR_INVALID_ARG);

    // context memory size + stream pool size
    size_t total_malloc_size = sizeof(async_memcpy_context_t) + sizeof(async_memcpy_stream_t) * config->backlog * 2;
    // to keep working when the cache is disabled, the driver handle should be located in SRAM
    mcp_hdl = heap_caps_calloc(1, total_malloc_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    ASMCP_CHECK(mcp_hdl, "allocate context memory failed", err, ESP_ERR_NO_MEM);

    int int_flags = ESP_INTR_FLAG_IRAM; // interrupt can still work when cache is disabled
    // allocate the interrupt handle; this step is target dependent
    ret_code = async_memcpy_impl_allocate_intr(&mcp_hdl->mcp_impl, int_flags, &mcp_hdl->intr_hdl);
    ASMCP_CHECK(ret_code == ESP_OK, "allocate interrupt handle failed", err, ret_code);

    mcp_hdl->flags = config->flags;
    mcp_hdl->out_streams = mcp_hdl->streams_pool;
    mcp_hdl->in_streams = mcp_hdl->streams_pool + config->backlog;
    mcp_hdl->max_stream_num = config->backlog;

    // link TX/RX descriptors into circular lists
    for (int i = 0; i < mcp_hdl->max_stream_num; i++) {
        mcp_hdl->out_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
        mcp_hdl->out_streams[i].desc.next = &mcp_hdl->out_streams[i + 1].desc;
        mcp_hdl->in_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
        mcp_hdl->in_streams[i].desc.next = &mcp_hdl->in_streams[i + 1].desc;
    }
    mcp_hdl->out_streams[mcp_hdl->max_stream_num - 1].desc.next = &mcp_hdl->out_streams[0].desc;
    mcp_hdl->in_streams[mcp_hdl->max_stream_num - 1].desc.next = &mcp_hdl->in_streams[0].desc;

    mcp_hdl->tx_desc = &mcp_hdl->out_streams[0].desc;
    mcp_hdl->rx_desc = &mcp_hdl->in_streams[0].desc;
    mcp_hdl->next_rx_desc_to_check = &mcp_hdl->in_streams[0].desc;

    // initialize implementation layer
    async_memcpy_impl_init(&mcp_hdl->mcp_impl, &mcp_hdl->out_streams[0].desc, &mcp_hdl->in_streams[0].desc);

    *asmcp = mcp_hdl;

    async_memcpy_impl_start(&mcp_hdl->mcp_impl);

    return ESP_OK;
err:
    if (mcp_hdl) {
        if (mcp_hdl->intr_hdl) {
            esp_intr_free(mcp_hdl->intr_hdl);
        }
        free(mcp_hdl);
    }
    if (asmcp) {
        *asmcp = NULL;
    }
    return ret_code;
}
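/*
 * Usage sketch (illustrative only, not part of the driver): installing and
 * uninstalling the driver. This assumes the ASYNC_MEMCPY_DEFAULT_CONFIG()
 * helper from esp_async_memcpy.h; `backlog` sets how many descriptor pairs
 * (and thus how many in-flight copy chunks) the stream pool can hold.
 *
 *     async_memcpy_config_t config = ASYNC_MEMCPY_DEFAULT_CONFIG();
 *     config.backlog = 8; // up to 8 outstanding descriptor pairs
 *     async_memcpy_t driver = NULL;
 *     ESP_ERROR_CHECK(esp_async_memcpy_install(&config, &driver));
 *     // ... issue copies with esp_async_memcpy() ...
 *     ESP_ERROR_CHECK(esp_async_memcpy_uninstall(driver));
 */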
esp_err_t esp_async_memcpy_uninstall(async_memcpy_t asmcp)
{
    esp_err_t ret_code = ESP_OK;
    ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);

    esp_intr_free(asmcp->intr_hdl);
    async_memcpy_impl_stop(&asmcp->mcp_impl);
    async_memcpy_impl_deinit(&asmcp->mcp_impl);
    free(asmcp);
    return ESP_OK;
err:
    return ret_code;
}

static int async_memcpy_prepare_receive(async_memcpy_t asmcp, void *buffer, size_t size, dma_descriptor_t **start_desc, dma_descriptor_t **end_desc)
{
    uint32_t prepared_length = 0;
    uint8_t *buf = (uint8_t *)buffer;
    dma_descriptor_t *desc = asmcp->rx_desc; // descriptor iterator
    dma_descriptor_t *start = desc;
    dma_descriptor_t *end = desc;

    while (size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            desc->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to the next descriptor
            prepared_length += DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            size -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
        } else {
            // out of RX descriptors
            goto _exit;
        }
    }
    if (size) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            end = desc; // the last descriptor used
            desc->dw0.size = size;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to the next descriptor
            prepared_length += size;
        } else {
            // out of RX descriptors
            goto _exit;
        }
    }

_exit:
    *start_desc = start;
    *end_desc = end;
    return prepared_length;
}

static int async_memcpy_prepare_transmit(async_memcpy_t asmcp, void *buffer, size_t len, dma_descriptor_t **start_desc, dma_descriptor_t **end_desc)
{
    uint32_t prepared_length = 0;
    uint8_t *buf = (uint8_t *)buffer;
    dma_descriptor_t *desc = asmcp->tx_desc; // descriptor iterator
    dma_descriptor_t *start = desc;
    dma_descriptor_t *end = desc;

    while (len > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            desc->dw0.suc_eof = 0; // not the end of the transaction
            desc->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            desc->dw0.length = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to the next descriptor
            prepared_length += DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            len -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
        } else {
            // out of TX descriptors
            goto _exit;
        }
    }
    if (len) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            end = desc; // the last descriptor used
            desc->dw0.suc_eof = 1; // end of the transaction
            desc->dw0.size = len;
            desc->dw0.length = len;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to the next descriptor
            prepared_length += len;
        } else {
            // out of TX descriptors
            goto _exit;
        }
    }

    *start_desc = start;
    *end_desc = end;
_exit:
    return prepared_length;
}
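/*
 * Worked example of the splitting logic above, assuming the usual
 * DMA_DESCRIPTOR_BUFFER_MAX_SIZE of 4095 bytes: a 10000-byte copy consumes
 * three TX descriptors (the RX side is split into the same sizes, but only
 * the TX path marks the end of the transaction):
 *
 *     desc[0]: size = 4095, suc_eof = 0
 *     desc[1]: size = 4095, suc_eof = 0
 *     desc[2]: size = 1810, suc_eof = 1  // 10000 - 2 * 4095
 *
 * If fewer than three descriptors are currently owned by the CPU, preparation
 * stops early and the caller sees prepared_length < n, which esp_async_memcpy()
 * below reports as ESP_FAIL.
 */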
static bool async_memcpy_get_next_rx_descriptor(async_memcpy_t asmcp, dma_descriptor_t *eof_desc, dma_descriptor_t **next_desc)
{
    dma_descriptor_t *next = asmcp->next_rx_desc_to_check;
    // additional check, to avoid acting on an interrupt that was triggered by mistake
    if (next->dw0.owner == DMA_DESCRIPTOR_BUFFER_OWNER_CPU) {
        asmcp->next_rx_desc_to_check = asmcp->next_rx_desc_to_check->next;
        *next_desc = next;
        // return true if the EOF descriptor hasn't been reached yet, i.e. more descriptors need checking
        return eof_desc == next ? false : true;
    }

    *next_desc = NULL;
    return false;
}

esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n, async_memcpy_isr_cb_t cb_isr, void *cb_args)
{
    esp_err_t ret_code = ESP_OK;
    dma_descriptor_t *rx_start_desc = NULL;
    dma_descriptor_t *rx_end_desc = NULL;
    dma_descriptor_t *tx_start_desc = NULL;
    dma_descriptor_t *tx_end_desc = NULL;
    int rx_prepared_size = 0;
    int tx_prepared_size = 0;
    ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);
    ASMCP_CHECK(async_memcpy_impl_is_buffer_address_valid(&asmcp->mcp_impl, src, dst), "buffer address not valid", err, ESP_ERR_INVALID_ARG);
    ASMCP_CHECK(n <= DMA_DESCRIPTOR_BUFFER_MAX_SIZE * asmcp->max_stream_num, "buffer size too large", err, ESP_ERR_INVALID_ARG);

    // prepare TX and RX descriptors
    portENTER_CRITICAL_SAFE(&asmcp->mcp_impl.hal_lock);
    rx_prepared_size = async_memcpy_prepare_receive(asmcp, dst, n, &rx_start_desc, &rx_end_desc);
    tx_prepared_size = async_memcpy_prepare_transmit(asmcp, src, n, &tx_start_desc, &tx_end_desc);
    if ((rx_prepared_size == n) && (tx_prepared_size == n)) {
        // register the user callback on the last RX descriptor
        async_memcpy_stream_t *mcp_stream = __containerof(rx_end_desc, async_memcpy_stream_t, desc);
        mcp_stream->cb = cb_isr;
        mcp_stream->cb_args = cb_args;
        // hand the RX descriptors over to the DMA first
        dma_descriptor_t *desc = rx_start_desc;
        while (desc != rx_end_desc) {
            desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
            desc = desc->next;
        }
        desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        asmcp->rx_desc = desc->next;
        // then hand over the TX descriptors
        desc = tx_start_desc;
        while (desc != tx_end_desc) {
            desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
            desc = desc->next;
        }
        desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        asmcp->tx_desc = desc->next;
        async_memcpy_impl_restart(&asmcp->mcp_impl);
    }
    portEXIT_CRITICAL_SAFE(&asmcp->mcp_impl.hal_lock);

    // It's unlikely that we have space for an RX descriptor but not for a TX descriptor;
    // both TX and RX descriptors move at the same pace
    ASMCP_CHECK(rx_prepared_size == n, "out of rx descriptor", err, ESP_FAIL);
    ASMCP_CHECK(tx_prepared_size == n, "out of tx descriptor", err, ESP_FAIL);

    return ESP_OK;
err:
    return ret_code;
}
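/*
 * Usage sketch for the call above (illustrative only): start a copy and block
 * until the callback, which runs in ISR context, signals completion. The
 * callback signature matches async_memcpy_isr_cb_t as used in this file;
 * `driver`, `dst_buf`, `src_buf` and `buf_len` are placeholders of this
 * example, and both buffers must be DMA capable (e.g. allocated with
 * heap_caps_malloc(..., MALLOC_CAP_DMA)).
 *
 *     static bool example_cb(async_memcpy_t hdl, async_memcpy_event_t *event, void *cb_args)
 *     {
 *         BaseType_t high_task_wakeup = pdFALSE;
 *         xSemaphoreGiveFromISR((SemaphoreHandle_t)cb_args, &high_task_wakeup);
 *         return high_task_wakeup == pdTRUE; // true -> driver should yield at ISR exit
 *     }
 *
 *     SemaphoreHandle_t done = xSemaphoreCreateBinary();
 *     ESP_ERROR_CHECK(esp_async_memcpy(driver, dst_buf, src_buf, buf_len, example_cb, done));
 *     xSemaphoreTake(done, portMAX_DELAY); // the CPU is free to do other work meanwhile
 */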
tx descriptor", err, ESP_FAIL); 278 279 return ESP_OK; 280 err: 281 return ret_code; 282 } 283 284 IRAM_ATTR void async_memcpy_isr_on_rx_done_event(async_memcpy_impl_t *impl) 285 { 286 bool to_continue = false; 287 async_memcpy_stream_t *in_stream = NULL; 288 dma_descriptor_t *next_desc = NULL; 289 async_memcpy_context_t *asmcp = __containerof(impl, async_memcpy_context_t, mcp_impl); 290 291 // get the RX eof descriptor address 292 dma_descriptor_t *eof = async_memcpy_impl_get_rx_suc_eof_descriptor(impl); 293 // traversal all unchecked descriptors 294 do { 295 portENTER_CRITICAL_ISR(&impl->hal_lock); 296 // There is an assumption that the usage of rx descriptors are in the same pace as tx descriptors (this is determined by M2M DMA working mechanism) 297 // And once the rx descriptor is recycled, the corresponding tx desc is guaranteed to be returned by DMA 298 to_continue = async_memcpy_get_next_rx_descriptor(asmcp, eof, &next_desc); 299 portEXIT_CRITICAL_ISR(&impl->hal_lock); 300 if (next_desc) { 301 in_stream = __containerof(next_desc, async_memcpy_stream_t, desc); 302 // invoke user registered callback if available 303 if (in_stream->cb) { 304 async_memcpy_event_t e = {0}; 305 if (in_stream->cb(asmcp, &e, in_stream->cb_args)) { 306 impl->isr_need_yield = true; 307 } 308 in_stream->cb = NULL; 309 in_stream->cb_args = NULL; 310 } 311 } 312 } while (to_continue); 313 }