   1  // Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
   2  //
   3  // Licensed under the Apache License, Version 2.0 (the "License");
   4  // you may not use this file except in compliance with the License.
   5  // You may obtain a copy of the License at
   6  //
   7  //     http://www.apache.org/licenses/LICENSE-2.0
   8  //
   9  // Unless required by applicable law or agreed to in writing, software
  10  // distributed under the License is distributed on an "AS IS" BASIS,
  11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12  // See the License for the specific language governing permissions and
  13  // limitations under the License.
  14  #include <stdlib.h>
  15  #include <string.h>
  16  #include <sys/lock.h>
  17  #include "esp_intr_alloc.h"
  18  #include "esp_log.h"
  19  #include "driver/gpio.h"
  20  #include "driver/periph_ctrl.h"
  21  #include "driver/rmt.h"
  22  #include "freertos/FreeRTOS.h"
  23  #include "freertos/task.h"
  24  #include "freertos/semphr.h"
  25  #include "freertos/ringbuf.h"
  26  #include "soc/soc_memory_layout.h"
  27  #include "hal/rmt_hal.h"
  28  #include "hal/rmt_ll.h"
  29  #include "esp_rom_gpio.h"
  30  
  31  #define RMT_CHANNEL_ERROR_STR "RMT CHANNEL ERR"
  32  #define RMT_ADDR_ERROR_STR "RMT ADDRESS ERR"
  33  #define RMT_MEM_CNT_ERROR_STR "RMT MEM BLOCK NUM ERR"
  34  #define RMT_CARRIER_ERROR_STR "RMT CARRIER LEVEL ERR"
  35  #define RMT_MEM_OWNER_ERROR_STR "RMT MEM OWNER ERR"
  36  #define RMT_BASECLK_ERROR_STR "RMT BASECLK ERR"
  37  #define RMT_WR_MEM_OVF_ERROR_STR "RMT WR MEM OVERFLOW"
  38  #define RMT_GPIO_ERROR_STR "RMT GPIO ERROR"
  39  #define RMT_MODE_ERROR_STR "RMT MODE ERROR"
  40  #define RMT_CLK_DIV_ERROR_STR "RMT CLK DIV ERR"
  41  #define RMT_DRIVER_ERROR_STR "RMT DRIVER ERR"
  42  #define RMT_DRIVER_LENGTH_ERROR_STR "RMT PARAM LEN ERROR"
  43  #define RMT_PSRAM_BUFFER_WARN_STR "Using buffer allocated from psram"
  44  #define RMT_TRANSLATOR_NULL_STR "RMT translator is null"
  45  #define RMT_TRANSLATOR_UNINIT_STR "RMT translator not init"
  46  #define RMT_PARAM_ERR_STR "RMT param error"
  47  
  48  static const char *RMT_TAG = "rmt";
  49  #define RMT_CHECK(a, str, ret_val)                                    \
  50      if (!(a))                                                         \
  51      {                                                                 \
  52          ESP_LOGE(RMT_TAG, "%s(%d): %s", __FUNCTION__, __LINE__, str); \
  53          return (ret_val);                                             \
  54      }
  55  
  56  // Spinlock for protecting concurrent register-level access only
  57  #define RMT_ENTER_CRITICAL()  portENTER_CRITICAL_SAFE(&(rmt_contex.rmt_spinlock))
  58  #define RMT_EXIT_CRITICAL()   portEXIT_CRITICAL_SAFE(&(rmt_contex.rmt_spinlock))
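// Note: the *_SAFE variants take the spinlock correctly from both task and ISR context;
// the default ISR below acquires this lock via rmt_fill_memory() on the TX threshold path.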
  59  
  60  typedef struct {
  61      rmt_hal_context_t hal;
  62      _lock_t rmt_driver_isr_lock;
  63      portMUX_TYPE rmt_spinlock; // Mutex lock for protecting concurrent register/unregister of RMT channels' ISR
  64      rmt_isr_handle_t rmt_driver_intr_handle;
  65      rmt_tx_end_callback_t rmt_tx_end_callback; // Callback invoked when a transmission ends
  66      uint8_t rmt_driver_channels; // Bitmask of installed drivers' channels
  67      bool rmt_module_enabled;
  68  } rmt_contex_t;
  69  
  70  typedef struct {
  71      size_t tx_offset;
  72      size_t tx_len_rem;
  73      size_t tx_sub_len;
  74      bool translator;
  75      bool wait_done; // Whether the caller is blocking until TX is done
  76      rmt_channel_t channel;
  77      const rmt_item32_t *tx_data;
  78      xSemaphoreHandle tx_sem;
  79  #if CONFIG_SPIRAM_USE_MALLOC
  80      int intr_alloc_flags;
  81      StaticSemaphore_t tx_sem_buffer;
  82  #endif
  83      rmt_item32_t *tx_buf;
  84      RingbufHandle_t rx_buf;
  85  #if SOC_RMT_SUPPORT_RX_PINGPONG
  86      rmt_item32_t *rx_item_buf;
  87      uint32_t rx_item_buf_size;
  88      uint32_t rx_item_len;
  89      uint32_t rx_item_start_idx;
  90  #endif
  91      sample_to_rmt_t sample_to_rmt;
  92      size_t sample_size_remain;
  93      const uint8_t *sample_cur;
  94  } rmt_obj_t;
  95  
  96  static rmt_contex_t rmt_contex = {
  97      .hal.regs = RMT_LL_HW_BASE,
  98      .hal.mem = RMT_LL_MEM_BASE,
  99      .rmt_spinlock = portMUX_INITIALIZER_UNLOCKED,
 100      .rmt_driver_intr_handle = NULL,
 101      .rmt_tx_end_callback = {
 102          .function = NULL,
 103      },
 104      .rmt_driver_channels = 0,
 105      .rmt_module_enabled = false,
 106  };
 107  
 108  static rmt_obj_t *p_rmt_obj[RMT_CHANNEL_MAX] = {0};
 109  
 110  //Enable RMT module
 111  static void rmt_module_enable(void)
 112  {
 113      RMT_ENTER_CRITICAL();
 114      if (rmt_contex.rmt_module_enabled == false) {
 115          periph_module_reset(PERIPH_RMT_MODULE);
 116          periph_module_enable(PERIPH_RMT_MODULE);
 117          rmt_contex.rmt_module_enabled = true;
 118      }
 119      RMT_EXIT_CRITICAL();
 120  }
 121  
 122  //Disable RMT module
 123  static void rmt_module_disable(void)
 124  {
 125      RMT_ENTER_CRITICAL();
 126      if (rmt_contex.rmt_module_enabled == true) {
 127          periph_module_disable(PERIPH_RMT_MODULE);
 128          rmt_contex.rmt_module_enabled = false;
 129      }
 130      RMT_EXIT_CRITICAL();
 131  }
 132  
 133  esp_err_t rmt_set_clk_div(rmt_channel_t channel, uint8_t div_cnt)
 134  {
 135      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 136      RMT_ENTER_CRITICAL();
 137      rmt_ll_set_counter_clock_div(rmt_contex.hal.regs, channel, div_cnt);
 138      RMT_EXIT_CRITICAL();
 139      return ESP_OK;
 140  }
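// Example (illustrative values, not part of the driver): with the 80 MHz APB clock as
// the counter source, div_cnt = 80 yields a 1 MHz counter, i.e. one RMT tick per microsecond:
//   rmt_set_clk_div(RMT_CHANNEL_0, 80);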
 141  
 142  esp_err_t rmt_get_clk_div(rmt_channel_t channel, uint8_t *div_cnt)
 143  {
 144      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 145      RMT_CHECK(div_cnt != NULL, RMT_ADDR_ERROR_STR, ESP_ERR_INVALID_ARG);
 146      RMT_ENTER_CRITICAL();
 147      *div_cnt = (uint8_t)rmt_ll_get_counter_clock_div(rmt_contex.hal.regs, channel);
 148      RMT_EXIT_CRITICAL();
 149      return ESP_OK;
 150  }
 151  
 152  esp_err_t rmt_set_rx_idle_thresh(rmt_channel_t channel, uint16_t thresh)
 153  {
 154      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 155      RMT_ENTER_CRITICAL();
 156      rmt_ll_set_rx_idle_thres(rmt_contex.hal.regs, channel, thresh);
 157      RMT_EXIT_CRITICAL();
 158      return ESP_OK;
 159  }
 160  
 161  esp_err_t rmt_get_rx_idle_thresh(rmt_channel_t channel, uint16_t *thresh)
 162  {
 163      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 164      RMT_CHECK(thresh != NULL, RMT_ADDR_ERROR_STR, ESP_ERR_INVALID_ARG);
 165      RMT_ENTER_CRITICAL();
 166      *thresh = (uint16_t)rmt_ll_get_rx_idle_thres(rmt_contex.hal.regs, channel);
 167      RMT_EXIT_CRITICAL();
 168      return ESP_OK;
 169  }
 170  
 171  esp_err_t rmt_set_mem_block_num(rmt_channel_t channel, uint8_t rmt_mem_num)
 172  {
 173      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 174      RMT_CHECK(rmt_mem_num <= RMT_CHANNEL_MAX - channel, RMT_MEM_CNT_ERROR_STR, ESP_ERR_INVALID_ARG);
 175      RMT_ENTER_CRITICAL();
 176      rmt_ll_set_mem_blocks(rmt_contex.hal.regs, channel, rmt_mem_num);
 177      RMT_EXIT_CRITICAL();
 178      return ESP_OK;
 179  }
 180  
 181  esp_err_t rmt_get_mem_block_num(rmt_channel_t channel, uint8_t *rmt_mem_num)
 182  {
 183      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 184      RMT_CHECK(rmt_mem_num != NULL, RMT_ADDR_ERROR_STR, ESP_ERR_INVALID_ARG);
 185      RMT_ENTER_CRITICAL();
 186      *rmt_mem_num = (uint8_t)rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel);
 187      RMT_EXIT_CRITICAL();
 188      return ESP_OK;
 189  }
 190  
 191  esp_err_t rmt_set_tx_carrier(rmt_channel_t channel, bool carrier_en, uint16_t high_level, uint16_t low_level,
 192                               rmt_carrier_level_t carrier_level)
 193  {
 194      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 195      RMT_CHECK(carrier_level < RMT_CARRIER_LEVEL_MAX, RMT_CARRIER_ERROR_STR, ESP_ERR_INVALID_ARG);
 196      RMT_ENTER_CRITICAL();
 197      rmt_ll_set_tx_carrier_high_low_ticks(rmt_contex.hal.regs, channel, high_level, low_level);
 198      rmt_ll_set_carrier_on_level(rmt_contex.hal.regs, channel, carrier_level);
 199      rmt_ll_enable_carrier(rmt_contex.hal.regs, channel, carrier_en);
 200      RMT_EXIT_CRITICAL();
 201      return ESP_OK;
 202  }
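// Usage sketch (illustrative values): high_level/low_level are counted in source-clock
// ticks, mirroring the duty computation in rmt_internal_config() below. With the 80 MHz
// APB clock, a ~38 kHz carrier at ~50% duty needs 80e6 / 38000 ≈ 2105 ticks per period:
//   rmt_set_tx_carrier(RMT_CHANNEL_0, true, 1052, 1053, RMT_CARRIER_LEVEL_HIGH);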
 203  
 204  esp_err_t rmt_set_mem_pd(rmt_channel_t channel, bool pd_en)
 205  {
 206      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 207      RMT_ENTER_CRITICAL();
 208      rmt_ll_power_down_mem(rmt_contex.hal.regs, pd_en);
 209      RMT_EXIT_CRITICAL();
 210      return ESP_OK;
 211  }
 212  
 213  esp_err_t rmt_get_mem_pd(rmt_channel_t channel, bool *pd_en)
 214  {
 215      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 216      RMT_ENTER_CRITICAL();
 217      *pd_en = rmt_ll_is_mem_power_down(rmt_contex.hal.regs);
 218      RMT_EXIT_CRITICAL();
 219      return ESP_OK;
 220  }
 221  
 222  esp_err_t rmt_tx_start(rmt_channel_t channel, bool tx_idx_rst)
 223  {
 224      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 225      RMT_ENTER_CRITICAL();
 226      if (tx_idx_rst) {
 227          rmt_ll_reset_tx_pointer(rmt_contex.hal.regs, channel);
 228      }
 229      rmt_ll_clear_tx_end_interrupt(rmt_contex.hal.regs, channel);
 230      // enable tx end interrupt in non-loop mode
 231      if (!rmt_ll_is_tx_loop_enabled(rmt_contex.hal.regs, channel)) {
 232          rmt_ll_enable_tx_end_interrupt(rmt_contex.hal.regs, channel, true);
 233      } else {
 234  #if SOC_RMT_SUPPORT_TX_LOOP_COUNT
 235          rmt_ll_reset_tx_loop(rmt_contex.hal.regs, channel);
 236          rmt_ll_enable_tx_loop_count(rmt_contex.hal.regs, channel, true);
 237          rmt_ll_clear_tx_loop_interrupt(rmt_contex.hal.regs, channel);
 238          rmt_ll_enable_tx_loop_interrupt(rmt_contex.hal.regs, channel, true);
 239  #endif
 240      }
 241      rmt_ll_start_tx(rmt_contex.hal.regs, channel);
 242      RMT_EXIT_CRITICAL();
 243      return ESP_OK;
 244  }
 245  
 246  esp_err_t rmt_tx_stop(rmt_channel_t channel)
 247  {
 248      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 249      RMT_ENTER_CRITICAL();
 250      rmt_ll_stop_tx(rmt_contex.hal.regs, channel);
 251      rmt_ll_reset_tx_pointer(rmt_contex.hal.regs, channel);
 252      RMT_EXIT_CRITICAL();
 253      return ESP_OK;
 254  }
 255  
 256  esp_err_t rmt_rx_start(rmt_channel_t channel, bool rx_idx_rst)
 257  {
 258      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 259      RMT_ENTER_CRITICAL();
 260      rmt_ll_enable_rx(rmt_contex.hal.regs, channel, false);
 261      if (rx_idx_rst) {
 262          rmt_ll_reset_rx_pointer(rmt_contex.hal.regs, channel);
 263      }
 264      rmt_ll_clear_rx_end_interrupt(rmt_contex.hal.regs, channel);
 265      rmt_ll_enable_rx_end_interrupt(rmt_contex.hal.regs, channel, true);
 266  
 267  #if SOC_RMT_SUPPORT_RX_PINGPONG
 268      const uint32_t item_block_len = rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
 269      p_rmt_obj[channel]->rx_item_start_idx = 0;
 270      p_rmt_obj[channel]->rx_item_len = 0;
 271      rmt_set_rx_thr_intr_en(channel, true, item_block_len / 2);
 272  #endif
 273  
 274      rmt_ll_enable_rx(rmt_contex.hal.regs, channel, true);
 275      RMT_EXIT_CRITICAL();
 276      return ESP_OK;
 277  }
 278  
 279  esp_err_t rmt_rx_stop(rmt_channel_t channel)
 280  {
 281      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 282      RMT_ENTER_CRITICAL();
 283      rmt_ll_enable_rx_end_interrupt(rmt_contex.hal.regs, channel, false);
 284      rmt_ll_enable_rx(rmt_contex.hal.regs, channel, false);
 285      rmt_ll_reset_rx_pointer(rmt_contex.hal.regs, channel);
 286  #if SOC_RMT_SUPPORT_RX_PINGPONG
 287      rmt_ll_enable_rx_thres_interrupt(rmt_contex.hal.regs, channel, false);
 288  #endif
 289      RMT_EXIT_CRITICAL();
 290      return ESP_OK;
 291  }
 292  
 293  esp_err_t rmt_memory_rw_rst(rmt_channel_t channel)
 294  {
 295      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 296      RMT_ENTER_CRITICAL();
 297      rmt_ll_reset_tx_pointer(rmt_contex.hal.regs, channel);
 298      rmt_ll_reset_rx_pointer(rmt_contex.hal.regs, channel);
 299      RMT_EXIT_CRITICAL();
 300      return ESP_OK;
 301  }
 302  
 303  esp_err_t rmt_set_memory_owner(rmt_channel_t channel, rmt_mem_owner_t owner)
 304  {
 305      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 306      RMT_CHECK(owner < RMT_MEM_OWNER_MAX, RMT_MEM_OWNER_ERROR_STR, ESP_ERR_INVALID_ARG);
 307      RMT_ENTER_CRITICAL();
 308      rmt_ll_set_mem_owner(rmt_contex.hal.regs, channel, owner);
 309      RMT_EXIT_CRITICAL();
 310      return ESP_OK;
 311  }
 312  
 313  esp_err_t rmt_get_memory_owner(rmt_channel_t channel, rmt_mem_owner_t *owner)
 314  {
 315      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 316      RMT_CHECK(owner != NULL, RMT_MEM_OWNER_ERROR_STR, ESP_ERR_INVALID_ARG);
 317      RMT_ENTER_CRITICAL();
 318      *owner = (rmt_mem_owner_t)rmt_ll_get_mem_owner(rmt_contex.hal.regs, channel);
 319      RMT_EXIT_CRITICAL();
 320      return ESP_OK;
 321  }
 322  
 323  esp_err_t rmt_set_tx_loop_mode(rmt_channel_t channel, bool loop_en)
 324  {
 325      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 326      RMT_ENTER_CRITICAL();
 327      rmt_ll_enable_tx_loop(rmt_contex.hal.regs, channel, loop_en);
 328      RMT_EXIT_CRITICAL();
 329      return ESP_OK;
 330  }
 331  
 332  esp_err_t rmt_get_tx_loop_mode(rmt_channel_t channel, bool *loop_en)
 333  {
 334      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 335      RMT_ENTER_CRITICAL();
 336      *loop_en = rmt_ll_is_tx_loop_enabled(rmt_contex.hal.regs, channel);
 337      RMT_EXIT_CRITICAL();
 338      return ESP_OK;
 339  }
 340  
 341  esp_err_t rmt_set_rx_filter(rmt_channel_t channel, bool rx_filter_en, uint8_t thresh)
 342  {
 343      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 344      RMT_ENTER_CRITICAL();
 345      rmt_ll_enable_rx_filter(rmt_contex.hal.regs, channel, rx_filter_en);
 346      rmt_ll_set_rx_filter_thres(rmt_contex.hal.regs, channel, thresh);
 347      RMT_EXIT_CRITICAL();
 348      return ESP_OK;
 349  }
 350  
 351  esp_err_t rmt_set_source_clk(rmt_channel_t channel, rmt_source_clk_t base_clk)
 352  {
 353      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 354      RMT_CHECK(base_clk < RMT_BASECLK_MAX, RMT_BASECLK_ERROR_STR, ESP_ERR_INVALID_ARG);
 355      RMT_ENTER_CRITICAL();
 356      rmt_ll_set_counter_clock_src(rmt_contex.hal.regs, channel, base_clk);
 357      RMT_EXIT_CRITICAL();
 358      return ESP_OK;
 359  }
 360  
 361  esp_err_t rmt_get_source_clk(rmt_channel_t channel, rmt_source_clk_t *src_clk)
 362  {
 363      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 364      RMT_ENTER_CRITICAL();
 365      *src_clk = (rmt_source_clk_t)rmt_ll_get_counter_clock_src(rmt_contex.hal.regs, channel);
 366      RMT_EXIT_CRITICAL();
 367      return ESP_OK;
 368  }
 369  
 370  esp_err_t rmt_set_idle_level(rmt_channel_t channel, bool idle_out_en, rmt_idle_level_t level)
 371  {
 372      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 373      RMT_CHECK(level < RMT_IDLE_LEVEL_MAX, "RMT IDLE LEVEL ERR", ESP_ERR_INVALID_ARG);
 374      RMT_ENTER_CRITICAL();
 375      rmt_ll_enable_tx_idle(rmt_contex.hal.regs, channel, idle_out_en);
 376      rmt_ll_set_tx_idle_level(rmt_contex.hal.regs, channel, level);
 377      RMT_EXIT_CRITICAL();
 378      return ESP_OK;
 379  }
 380  
 381  esp_err_t rmt_get_idle_level(rmt_channel_t channel, bool *idle_out_en, rmt_idle_level_t *level)
 382  {
 383      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 384      RMT_ENTER_CRITICAL();
 385      *idle_out_en = rmt_ll_is_tx_idle_enabled(rmt_contex.hal.regs, channel);
 386      *level = rmt_ll_get_tx_idle_level(rmt_contex.hal.regs, channel);
 387      RMT_EXIT_CRITICAL();
 388      return ESP_OK;
 389  }
 390  
 391  esp_err_t rmt_get_status(rmt_channel_t channel, uint32_t *status)
 392  {
 393      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 394      RMT_ENTER_CRITICAL();
 395      *status = rmt_ll_get_channel_status(rmt_contex.hal.regs, channel);
 396      RMT_EXIT_CRITICAL();
 397      return ESP_OK;
 398  }
 399  
 400  void rmt_set_intr_enable_mask(uint32_t mask)
 401  {
 402      RMT_ENTER_CRITICAL();
 403      rmt_ll_set_intr_enable_mask(mask);
 404      RMT_EXIT_CRITICAL();
 405  }
 406  
 407  void rmt_clr_intr_enable_mask(uint32_t mask)
 408  {
 409      RMT_ENTER_CRITICAL();
 410      rmt_ll_clr_intr_enable_mask(mask);
 411      RMT_EXIT_CRITICAL();
 412  }
 413  
 414  esp_err_t rmt_set_rx_intr_en(rmt_channel_t channel, bool en)
 415  {
 416      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 417      RMT_ENTER_CRITICAL();
 418      rmt_ll_enable_rx_end_interrupt(rmt_contex.hal.regs, channel, en);
 419      RMT_EXIT_CRITICAL();
 420      return ESP_OK;
 421  }
 422  
 423  #if SOC_RMT_SUPPORT_RX_PINGPONG
 424  esp_err_t rmt_set_rx_thr_intr_en(rmt_channel_t channel, bool en, uint16_t evt_thresh)
 425  {
 426      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 427      if (en) {
 428          uint32_t item_block_len = rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
 429          RMT_CHECK(evt_thresh <= item_block_len, "RMT EVT THRESH ERR", ESP_ERR_INVALID_ARG);
 430          RMT_ENTER_CRITICAL();
 431          rmt_ll_set_rx_limit(rmt_contex.hal.regs, channel, evt_thresh);
 432          rmt_ll_enable_rx_thres_interrupt(rmt_contex.hal.regs, channel, true);
 433          RMT_EXIT_CRITICAL();
 434      } else {
 435          RMT_ENTER_CRITICAL();
 436          rmt_ll_enable_rx_thres_interrupt(rmt_contex.hal.regs, channel, false);
 437          RMT_EXIT_CRITICAL();
 438      }
 439      return ESP_OK;
 440  }
 441  #endif
 442  
 443  esp_err_t rmt_set_err_intr_en(rmt_channel_t channel, bool en)
 444  {
 445      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 446      RMT_ENTER_CRITICAL();
 447      rmt_ll_enable_err_interrupt(rmt_contex.hal.regs, channel, en);
 448      RMT_EXIT_CRITICAL();
 449      return ESP_OK;
 450  }
 451  
 452  esp_err_t rmt_set_tx_intr_en(rmt_channel_t channel, bool en)
 453  {
 454      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 455      RMT_ENTER_CRITICAL();
 456      rmt_ll_enable_tx_end_interrupt(rmt_contex.hal.regs, channel, en);
 457      RMT_EXIT_CRITICAL();
 458      return ESP_OK;
 459  }
 460  
 461  esp_err_t rmt_set_tx_thr_intr_en(rmt_channel_t channel, bool en, uint16_t evt_thresh)
 462  {
 463      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 464      if (en) {
 465          uint32_t item_block_len = rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
 466          RMT_CHECK(evt_thresh <= item_block_len, "RMT EVT THRESH ERR", ESP_ERR_INVALID_ARG);
 467          RMT_ENTER_CRITICAL();
 468          rmt_ll_set_tx_limit(rmt_contex.hal.regs, channel, evt_thresh);
 469          rmt_ll_enable_tx_thres_interrupt(rmt_contex.hal.regs, channel, true);
 470          RMT_EXIT_CRITICAL();
 471      } else {
 472          RMT_ENTER_CRITICAL();
 473          rmt_ll_enable_tx_thres_interrupt(rmt_contex.hal.regs, channel, false);
 474          RMT_EXIT_CRITICAL();
 475      }
 476      return ESP_OK;
 477  }
 478  
 479  esp_err_t rmt_set_pin(rmt_channel_t channel, rmt_mode_t mode, gpio_num_t gpio_num)
 480  {
 481      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 482      RMT_CHECK(mode < RMT_MODE_MAX, RMT_MODE_ERROR_STR, ESP_ERR_INVALID_ARG);
 483      RMT_CHECK(((GPIO_IS_VALID_GPIO(gpio_num) && (mode == RMT_MODE_RX)) ||
 484                 (GPIO_IS_VALID_OUTPUT_GPIO(gpio_num) && (mode == RMT_MODE_TX))),
 485                RMT_GPIO_ERROR_STR, ESP_ERR_INVALID_ARG);
 486  
 487      PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[gpio_num], PIN_FUNC_GPIO);
 488      if (mode == RMT_MODE_TX) {
 489          gpio_set_direction(gpio_num, GPIO_MODE_OUTPUT);
 490          esp_rom_gpio_connect_out_signal(gpio_num, RMT_SIG_OUT0_IDX + channel, 0, 0);
 491      } else {
 492          gpio_set_direction(gpio_num, GPIO_MODE_INPUT);
 493          esp_rom_gpio_connect_in_signal(gpio_num, RMT_SIG_IN0_IDX + channel, 0);
 494      }
 495      return ESP_OK;
 496  }
 497  
 498  static esp_err_t rmt_internal_config(rmt_dev_t *dev, const rmt_config_t *rmt_param)
 499  {
 500      uint8_t mode = rmt_param->rmt_mode;
 501      uint8_t channel = rmt_param->channel;
 502      uint8_t gpio_num = rmt_param->gpio_num;
 503      uint8_t mem_cnt = rmt_param->mem_block_num;
 504      uint8_t clk_div = rmt_param->clk_div;
 505      uint32_t carrier_freq_hz = rmt_param->tx_config.carrier_freq_hz;
 506      bool carrier_en = rmt_param->tx_config.carrier_en;
 507      uint32_t rmt_source_clk_hz;
 508  
 509      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 510      RMT_CHECK((mem_cnt + channel <= 8 && mem_cnt > 0), RMT_MEM_CNT_ERROR_STR, ESP_ERR_INVALID_ARG);
 511      RMT_CHECK((clk_div > 0), RMT_CLK_DIV_ERROR_STR, ESP_ERR_INVALID_ARG);
 512  
 513      if (mode == RMT_MODE_TX) {
 514          RMT_CHECK((!carrier_en || carrier_freq_hz > 0), "RMT carrier frequency can't be zero", ESP_ERR_INVALID_ARG);
 515      }
 516  
 517      RMT_ENTER_CRITICAL();
 518      rmt_ll_set_counter_clock_div(dev, channel, clk_div);
 519      rmt_ll_enable_mem_access(dev, true);
 520      rmt_ll_reset_tx_pointer(dev, channel);
 521      rmt_ll_reset_rx_pointer(dev, channel);
 522      if (rmt_param->flags & RMT_CHANNEL_FLAGS_ALWAYS_ON) {
 523          // clock src: REF_CLK
 524          rmt_source_clk_hz = REF_CLK_FREQ;
 525          rmt_ll_set_counter_clock_src(dev, channel, RMT_BASECLK_REF);
 526      } else {
 527          // clock src: APB_CLK
 528          rmt_source_clk_hz = APB_CLK_FREQ;
 529          rmt_ll_set_counter_clock_src(dev, channel, RMT_BASECLK_APB);
 530      }
 531      rmt_ll_set_mem_blocks(dev, channel, mem_cnt);
 532      rmt_ll_set_mem_owner(dev, channel, RMT_MEM_OWNER_HW);
 533      RMT_EXIT_CRITICAL();
 534  
 535      if (mode == RMT_MODE_TX) {
 536          uint16_t carrier_duty_percent = rmt_param->tx_config.carrier_duty_percent;
 537          uint8_t carrier_level = rmt_param->tx_config.carrier_level;
 538          uint8_t idle_level = rmt_param->tx_config.idle_level;
 539  
 540          RMT_ENTER_CRITICAL();
 541          rmt_ll_enable_tx_loop(dev, channel, rmt_param->tx_config.loop_en);
 542  #if SOC_RMT_SUPPORT_TX_LOOP_COUNT
 543          if (rmt_param->tx_config.loop_en) {
 544              rmt_ll_set_tx_loop_count(dev, channel, rmt_param->tx_config.loop_count);
 545          }
 546  #endif
 547          /* always enable tx ping-pong */
 548          rmt_ll_enable_tx_pingpong(dev, true);
 549          /* Set idle level */
 550          rmt_ll_enable_tx_idle(dev, channel, rmt_param->tx_config.idle_output_en);
 551          rmt_ll_set_tx_idle_level(dev, channel, idle_level);
 552          /* Set carrier */
 553          rmt_ll_enable_carrier(dev, channel, carrier_en);
 554          if (carrier_en) {
 555              uint32_t duty_div, duty_h, duty_l;
 556              duty_div = rmt_source_clk_hz / carrier_freq_hz;
 557              duty_h = duty_div * carrier_duty_percent / 100;
 558              duty_l = duty_div - duty_h;
 559              rmt_ll_set_carrier_on_level(dev, channel, carrier_level);
 560              rmt_ll_set_tx_carrier_high_low_ticks(dev, channel, duty_h, duty_l);
 561          } else {
 562              rmt_ll_set_carrier_on_level(dev, channel, 0);
 563              rmt_ll_set_tx_carrier_high_low_ticks(dev, channel, 0, 0);
 564          }
 565          RMT_EXIT_CRITICAL();
 566  
 567          ESP_LOGD(RMT_TAG, "Rmt Tx Channel %u|Gpio %u|Sclk_Hz %u|Div %u|Carrier_Hz %u|Duty %u",
 568                   channel, gpio_num, rmt_source_clk_hz, clk_div, carrier_freq_hz, carrier_duty_percent);
 569      } else if (RMT_MODE_RX == mode) {
 570          uint8_t filter_cnt = rmt_param->rx_config.filter_ticks_thresh;
 571          uint16_t threshold = rmt_param->rx_config.idle_threshold;
 572  
 573          RMT_ENTER_CRITICAL();
 574          /* Set idle threshold */
 575          rmt_ll_set_rx_idle_thres(dev, channel, threshold);
 576          /* Set RX filter */
 577          rmt_ll_set_rx_filter_thres(dev, channel, filter_cnt);
 578          rmt_ll_enable_rx_filter(dev, channel, rmt_param->rx_config.filter_en);
 579  
 580  #if SOC_RMT_SUPPORT_RX_PINGPONG
 581          /* always enable rx ping-pong */
 582          rmt_ll_enable_rx_pingpong(dev, channel, true);
 583  #endif
 584  
 585  #if SOC_RMT_SUPPORT_RX_DEMODULATION
 586          rmt_ll_enable_carrier(dev, channel, rmt_param->rx_config.rm_carrier);
 587          if (rmt_param->rx_config.rm_carrier) {
 588              uint32_t duty_total = rmt_source_clk_hz / rmt_ll_get_counter_clock_div(dev, channel) / rmt_param->rx_config.carrier_freq_hz;
 589              uint32_t duty_high = duty_total * rmt_param->rx_config.carrier_duty_percent / 100;
 590              // the carrier pulse timing may carry residual error, so double the theoretical tick counts
 591              rmt_ll_set_rx_carrier_high_low_ticks(dev, channel, duty_high * 2, (duty_total - duty_high) * 2);
 592              rmt_ll_set_carrier_on_level(dev, channel, rmt_param->rx_config.carrier_level);
 593          }
 594  #endif
 595          RMT_EXIT_CRITICAL();
 596  
 597          ESP_LOGD(RMT_TAG, "Rmt Rx Channel %u|Gpio %u|Sclk_Hz %u|Div %u|Threshold %u|Filter %u",
 598                   channel, gpio_num, rmt_source_clk_hz, clk_div, threshold, filter_cnt);
 599      }
 600      return ESP_OK;
 601  }
 602  
 603  esp_err_t rmt_config(const rmt_config_t *rmt_param)
 604  {
 605      rmt_module_enable();
 606  
 607      RMT_CHECK(rmt_set_pin(rmt_param->channel, rmt_param->rmt_mode, rmt_param->gpio_num) == ESP_OK,
 608                "set gpio for RMT driver failed", ESP_ERR_INVALID_ARG);
 609  
 610      RMT_CHECK(rmt_internal_config(&RMT, rmt_param) == ESP_OK,
 611                "initialize RMT driver failed", ESP_ERR_INVALID_ARG);
 612  
 613      return ESP_OK;
 614  }
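/*
 * Usage sketch (illustrative, not part of the driver): a minimal TX setup. The GPIO
 * number and timing values are arbitrary examples.
 *
 *   rmt_config_t cfg = {
 *       .rmt_mode = RMT_MODE_TX,
 *       .channel = RMT_CHANNEL_0,
 *       .gpio_num = 18,
 *       .mem_block_num = 1,
 *       .clk_div = 80,                        // 80 MHz APB / 80 -> 1 us per RMT tick
 *       .tx_config = {
 *           .carrier_en = false,
 *           .loop_en = false,
 *           .idle_output_en = true,
 *           .idle_level = RMT_IDLE_LEVEL_LOW,
 *       },
 *   };
 *   ESP_ERROR_CHECK(rmt_config(&cfg));
 *   ESP_ERROR_CHECK(rmt_driver_install(cfg.channel, 0, 0));
 */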
 615  
 616  static void IRAM_ATTR rmt_fill_memory(rmt_channel_t channel, const rmt_item32_t *item,
 617                                        uint16_t item_num, uint16_t mem_offset)
 618  {
 619      RMT_ENTER_CRITICAL();
 620      rmt_ll_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_SW);
 621      rmt_ll_write_memory(rmt_contex.hal.mem, channel, item, item_num, mem_offset);
 622      rmt_ll_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_HW);
 623      RMT_EXIT_CRITICAL();
 624  }
 625  
 626  esp_err_t rmt_fill_tx_items(rmt_channel_t channel, const rmt_item32_t *item, uint16_t item_num, uint16_t mem_offset)
 627  {
 628      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 629      RMT_CHECK((item != NULL), RMT_ADDR_ERROR_STR, ESP_ERR_INVALID_ARG);
 630      RMT_CHECK((item_num > 0), RMT_DRIVER_LENGTH_ERROR_STR, ESP_ERR_INVALID_ARG);
 631  
 632      /* Each block holds 64 x 32-bit items */
 633      uint8_t mem_cnt = rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel);
 634      RMT_CHECK((mem_cnt * RMT_MEM_ITEM_NUM >= item_num), RMT_WR_MEM_OVF_ERROR_STR, ESP_ERR_INVALID_ARG);
 635      rmt_fill_memory(channel, item, item_num, mem_offset);
 636      return ESP_OK;
 637  }
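// Note: a zero-duration item acts as the end-of-transmission marker; callers that fill
// memory directly should terminate their sequence with an all-zero rmt_item32_t, as
// rmt_write_items() does with its stop_data item (see also rmt_get_mem_len()).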
 638  
 639  esp_err_t rmt_isr_register(void (*fn)(void *), void *arg, int intr_alloc_flags, rmt_isr_handle_t *handle)
 640  {
 641      RMT_CHECK((fn != NULL), RMT_ADDR_ERROR_STR, ESP_ERR_INVALID_ARG);
 642      RMT_CHECK(rmt_contex.rmt_driver_channels == 0, "RMT driver installed, cannot install generic ISR handler", ESP_FAIL);
 643  
 644      return esp_intr_alloc(ETS_RMT_INTR_SOURCE, intr_alloc_flags, fn, arg, handle);
 645  }
 646  
 647  esp_err_t rmt_isr_deregister(rmt_isr_handle_t handle)
 648  {
 649      return esp_intr_free(handle);
 650  }
 651  
 652  static int IRAM_ATTR rmt_get_mem_len(rmt_channel_t channel)
 653  {
 654      int block_num = rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel);
 655      int item_block_len = block_num * RMT_MEM_ITEM_NUM;
 656      volatile rmt_item32_t *data = (rmt_item32_t *)RMTMEM.chan[channel].data32;
 657      int idx;
 658      for (idx = 0; idx < item_block_len; idx++) {
 659          if (data[idx].duration0 == 0) {
 660              return idx;
 661          } else if (data[idx].duration1 == 0) {
 662              return idx + 1;
 663          }
 664      }
 665      return idx;
 666  }
 667  
 668  static void IRAM_ATTR rmt_driver_isr_default(void *arg)
 669  {
 670      uint32_t status = 0;
 671      rmt_item32_t volatile *addr = NULL;
 672      uint8_t channel = 0;
 673      rmt_hal_context_t *hal = (rmt_hal_context_t *)arg;
 674      portBASE_TYPE HPTaskAwoken = pdFALSE;
 675  
 676      // Tx end interrupt
 677      status = rmt_ll_get_tx_end_interrupt_status(hal->regs);
 678      while (status) {
 679          channel = __builtin_ffs(status) - 1;
 680          status &= ~(1 << channel);
 681          rmt_obj_t *p_rmt = p_rmt_obj[channel];
 682          if (p_rmt) {
 683              xSemaphoreGiveFromISR(p_rmt->tx_sem, &HPTaskAwoken);
 684              rmt_ll_reset_tx_pointer(rmt_contex.hal.regs, channel);
 685              p_rmt->tx_data = NULL;
 686              p_rmt->tx_len_rem = 0;
 687              p_rmt->tx_offset = 0;
 688              p_rmt->tx_sub_len = 0;
 689              p_rmt->sample_cur = NULL;
 690              p_rmt->translator = false;
 691              if (rmt_contex.rmt_tx_end_callback.function != NULL) {
 692                  rmt_contex.rmt_tx_end_callback.function(channel, rmt_contex.rmt_tx_end_callback.arg);
 693              }
 694          }
 695          rmt_ll_clear_tx_end_interrupt(hal->regs, channel);
 696      }
 697  
 698      // Tx thres interrupt
 699      status = rmt_ll_get_tx_thres_interrupt_status(hal->regs);
 700      while (status) {
 701          channel = __builtin_ffs(status) - 1;
 702          status &= ~(1 << channel);
 703          rmt_obj_t *p_rmt = p_rmt_obj[channel];
 704          if (p_rmt) {
 705              if (p_rmt->translator) {
 706                  if (p_rmt->sample_size_remain > 0) {
 707                      size_t translated_size = 0;
 708                      p_rmt->sample_to_rmt((void *)p_rmt->sample_cur,
 709                                           p_rmt->tx_buf,
 710                                           p_rmt->sample_size_remain,
 711                                           p_rmt->tx_sub_len,
 712                                           &translated_size,
 713                                           &p_rmt->tx_len_rem);
 714                      p_rmt->sample_size_remain -= translated_size;
 715                      p_rmt->sample_cur += translated_size;
 716                      p_rmt->tx_data = p_rmt->tx_buf;
 717                  } else {
 718                      p_rmt->sample_cur = NULL;
 719                      p_rmt->translator = false;
 720                  }
 721              }
 722              const rmt_item32_t *pdata = p_rmt->tx_data;
 723              int len_rem = p_rmt->tx_len_rem;
 724              if (len_rem >= p_rmt->tx_sub_len) {
 725                  rmt_fill_memory(channel, pdata, p_rmt->tx_sub_len, p_rmt->tx_offset);
 726                  p_rmt->tx_data += p_rmt->tx_sub_len;
 727                  p_rmt->tx_len_rem -= p_rmt->tx_sub_len;
 728              } else if (len_rem == 0) {
 729                  rmt_item32_t stop_data = {0};
 730                  rmt_ll_write_memory(rmt_contex.hal.mem, channel, &stop_data, 1, p_rmt->tx_offset);
 731              } else {
 732                  rmt_fill_memory(channel, pdata, len_rem, p_rmt->tx_offset);
 733                  rmt_item32_t stop_data = {0};
 734                  rmt_ll_write_memory(rmt_contex.hal.mem, channel, &stop_data, 1, p_rmt->tx_offset + len_rem);
 735                  p_rmt->tx_data += len_rem;
 736                  p_rmt->tx_len_rem -= len_rem;
 737              }
 738              if (p_rmt->tx_offset == 0) {
 739                  p_rmt->tx_offset = p_rmt->tx_sub_len;
 740              } else {
 741                  p_rmt->tx_offset = 0;
 742              }
 743          }
 744          rmt_ll_clear_tx_thres_interrupt(hal->regs, channel);
 745      }
 746  
 747      // Rx end interrupt
 748      status = rmt_ll_get_rx_end_interrupt_status(hal->regs);
 749      while (status) {
 750          channel = __builtin_ffs(status) - 1;
 751          status &= ~(1 << channel);
 752          rmt_obj_t *p_rmt = p_rmt_obj[channel];
 753          if (p_rmt) {
 754              rmt_ll_enable_rx(rmt_contex.hal.regs, channel, false);
 755              int item_len = rmt_get_mem_len(channel);
 756              rmt_ll_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_SW);
 757              if (p_rmt->rx_buf) {
 758                  addr = RMTMEM.chan[channel].data32;
 759  #if SOC_RMT_SUPPORT_RX_PINGPONG
 760                  if (item_len > p_rmt->rx_item_start_idx) {
 761                      item_len = item_len - p_rmt->rx_item_start_idx;
 762                  }
 763                  memcpy((void *)(p_rmt->rx_item_buf + p_rmt->rx_item_len), (void *)(addr + p_rmt->rx_item_start_idx), item_len * 4);
 764                  p_rmt->rx_item_len += item_len;
 765                  BaseType_t res = xRingbufferSendFromISR(p_rmt->rx_buf, (void *)(p_rmt->rx_item_buf), p_rmt->rx_item_len * 4, &HPTaskAwoken);
 766  #else
 767                  BaseType_t res = xRingbufferSendFromISR(p_rmt->rx_buf, (void *)addr, item_len * 4, &HPTaskAwoken);
 768  #endif
 769                  if (res == pdFALSE) {
 770                      ESP_EARLY_LOGE(RMT_TAG, "RMT RX BUFFER FULL");
 771                  }
 772              } else {
 773                  ESP_EARLY_LOGE(RMT_TAG, "RMT RX BUFFER ERROR");
 774              }
 775  
 776  #if SOC_RMT_SUPPORT_RX_PINGPONG
 777              p_rmt->rx_item_start_idx = 0;
 778              p_rmt->rx_item_len = 0;
 779              memset((void *)p_rmt->rx_item_buf, 0, p_rmt->rx_item_buf_size);
 780  #endif
 781              rmt_ll_reset_rx_pointer(rmt_contex.hal.regs, channel);
 782              rmt_ll_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_HW);
 783              rmt_ll_enable_rx(rmt_contex.hal.regs, channel, true);
 784          }
 785          rmt_ll_clear_rx_end_interrupt(hal->regs, channel);
 786      }
 787  
 788  #if SOC_RMT_SUPPORT_RX_PINGPONG
 789      // Rx thres interrupt
 790      status = rmt_ll_get_rx_thres_interrupt_status(hal->regs);
 791      while (status) {
 792          channel = __builtin_ffs(status) - 1;
 793          status &= ~(1 << channel);
 794          rmt_obj_t *p_rmt = p_rmt_obj[channel];
 795          int mem_item_size = rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
 796          int rx_thres_lim = rmt_ll_get_rx_limit(rmt_contex.hal.regs, channel);
 797          int item_len = (p_rmt->rx_item_start_idx == 0) ? rx_thres_lim : (mem_item_size - rx_thres_lim);
 798          if ((p_rmt->rx_item_len + item_len) < (p_rmt->rx_item_buf_size / 4)) {
 799              rmt_ll_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_SW);
 800              memcpy((void *)(p_rmt->rx_item_buf + p_rmt->rx_item_len), (void *)(RMTMEM.chan[channel].data32 + p_rmt->rx_item_start_idx), item_len * 4);
 801              rmt_ll_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_HW);
 802              p_rmt->rx_item_len += item_len;
 803              p_rmt->rx_item_start_idx += item_len;
 804              if (p_rmt->rx_item_start_idx >= mem_item_size) {
 805                  p_rmt->rx_item_start_idx = 0;
 806              }
 807          } else {
 808              ESP_EARLY_LOGE(RMT_TAG, "RX buffer too small: %u", p_rmt->rx_item_buf_size);
 809          }
 810          rmt_ll_clear_rx_thres_interrupt(hal->regs, channel);
 811      }
 812  #endif
 813  
 814  #if SOC_RMT_SUPPORT_TX_LOOP_COUNT
 815      // loop count interrupt
 816      status = rmt_ll_get_tx_loop_interrupt_status(hal->regs);
 817      while (status) {
 818          channel = __builtin_ffs(status) - 1;
 819          status &= ~(1 << channel);
 820          rmt_obj_t *p_rmt = p_rmt_obj[channel];
 821          if (p_rmt) {
 822              xSemaphoreGiveFromISR(p_rmt->tx_sem, &HPTaskAwoken);
 823              if (rmt_contex.rmt_tx_end_callback.function != NULL) {
 824                  rmt_contex.rmt_tx_end_callback.function(channel,  rmt_contex.rmt_tx_end_callback.arg);
 825              }
 826          }
 827          rmt_ll_clear_tx_loop_interrupt(hal->regs, channel);
 828      }
 829  #endif
 830  
 831      // Err interrupt
 832      status = rmt_ll_get_err_interrupt_status(hal->regs);
 833      while (status) {
 834          channel = __builtin_ffs(status) - 1;
 835          status &= ~(1 << channel);
 836          rmt_obj_t *p_rmt = p_rmt_obj[channel];
 837          if (p_rmt) {
 838              // Reset the receiver/transmitter's write/read addresses to prevent endless err interrupts.
 839              rmt_ll_reset_tx_pointer(rmt_contex.hal.regs, channel);
 840              rmt_ll_reset_rx_pointer(rmt_contex.hal.regs, channel);
 841              ESP_EARLY_LOGD(RMT_TAG, "RMT[%d] ERR", channel);
 842              ESP_EARLY_LOGD(RMT_TAG, "status: 0x%08x", rmt_ll_get_channel_status(rmt_contex.hal.regs, channel));
 843          }
 844          rmt_ll_clear_err_interrupt(hal->regs, channel);
 845      }
 846  
 847      if (HPTaskAwoken == pdTRUE) {
 848          portYIELD_FROM_ISR();
 849      }
 850  }
 851  
 852  esp_err_t rmt_driver_uninstall(rmt_channel_t channel)
 853  {
 854      esp_err_t err = ESP_OK;
 855      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 856      RMT_CHECK((rmt_contex.rmt_driver_channels & BIT(channel)) != 0, "No RMT driver for this channel", ESP_ERR_INVALID_STATE);
 857      if (p_rmt_obj[channel] == NULL) {
 858          return ESP_OK;
 859      }
 860      // Avoid blocking here when the interrupt is disabled and no TX-done wait is pending.
 861      if (p_rmt_obj[channel]->wait_done) {
 862          xSemaphoreTake(p_rmt_obj[channel]->tx_sem, portMAX_DELAY);
 863      }
 864      rmt_set_rx_intr_en(channel, 0);
 865      rmt_set_err_intr_en(channel, 0);
 866      rmt_set_tx_intr_en(channel, 0);
 867      rmt_set_tx_thr_intr_en(channel, false, 0xffff);
 868  #if SOC_RMT_SUPPORT_RX_PINGPONG
 869      rmt_set_rx_thr_intr_en(channel, false, 0xffff);
 870  #endif
 871  
 872      _lock_acquire_recursive(&(rmt_contex.rmt_driver_isr_lock));
 873  
 874      rmt_contex.rmt_driver_channels &= ~BIT(channel);
 875      if (rmt_contex.rmt_driver_channels == 0) {
 876          rmt_module_disable();
 877          // all channels have driver disabled
 878          err = rmt_isr_deregister(rmt_contex.rmt_driver_intr_handle);
 879          rmt_contex.rmt_driver_intr_handle = NULL;
 880      }
 881  
 882      _lock_release_recursive(&(rmt_contex.rmt_driver_isr_lock));
 883  
 884      if (err != ESP_OK) {
 885          return err;
 886      }
 887  
 888      if (p_rmt_obj[channel]->tx_sem) {
 889          vSemaphoreDelete(p_rmt_obj[channel]->tx_sem);
 890          p_rmt_obj[channel]->tx_sem = NULL;
 891      }
 892      if (p_rmt_obj[channel]->rx_buf) {
 893          vRingbufferDelete(p_rmt_obj[channel]->rx_buf);
 894          p_rmt_obj[channel]->rx_buf = NULL;
 895      }
 896      if (p_rmt_obj[channel]->tx_buf) {
 897          free(p_rmt_obj[channel]->tx_buf);
 898          p_rmt_obj[channel]->tx_buf = NULL;
 899      }
 900      if (p_rmt_obj[channel]->sample_to_rmt) {
 901          p_rmt_obj[channel]->sample_to_rmt = NULL;
 902      }
 903  #if SOC_RMT_SUPPORT_RX_PINGPONG
 904      if (p_rmt_obj[channel]->rx_item_buf) {
 905          free(p_rmt_obj[channel]->rx_item_buf);
 906          p_rmt_obj[channel]->rx_item_buf = NULL;
 907          p_rmt_obj[channel]->rx_item_buf_size = 0;
 908      }
 909  #endif
 910  
 911      free(p_rmt_obj[channel]);
 912      p_rmt_obj[channel] = NULL;
 913      return ESP_OK;
 914  }
 915  
 916  esp_err_t rmt_driver_install(rmt_channel_t channel, size_t rx_buf_size, int intr_alloc_flags)
 917  {
 918      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
 919      RMT_CHECK((rmt_contex.rmt_driver_channels & BIT(channel)) == 0,
 920                "RMT driver already installed for channel", ESP_ERR_INVALID_STATE);
 921  
 922      esp_err_t err = ESP_OK;
 923  
 924      if (p_rmt_obj[channel] != NULL) {
 925          ESP_LOGD(RMT_TAG, "RMT driver already installed");
 926          return ESP_ERR_INVALID_STATE;
 927      }
 928  
 929  #if !CONFIG_SPIRAM_USE_MALLOC
 930      p_rmt_obj[channel] = calloc(1, sizeof(rmt_obj_t));
 931  #else
 932      if (!(intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
 933          p_rmt_obj[channel] = calloc(1, sizeof(rmt_obj_t));
 934      } else {
 935          p_rmt_obj[channel] = heap_caps_calloc(1, sizeof(rmt_obj_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
 936      }
 937  #endif
 938  
 939      if (p_rmt_obj[channel] == NULL) {
 940          ESP_LOGE(RMT_TAG, "RMT driver malloc error");
 941          return ESP_ERR_NO_MEM;
 942      }
 943  
 944      p_rmt_obj[channel]->tx_len_rem = 0;
 945      p_rmt_obj[channel]->tx_data = NULL;
 946      p_rmt_obj[channel]->channel = channel;
 947      p_rmt_obj[channel]->tx_offset = 0;
 948      p_rmt_obj[channel]->tx_sub_len = 0;
 949      p_rmt_obj[channel]->wait_done = false;
 950      p_rmt_obj[channel]->translator = false;
 951      p_rmt_obj[channel]->sample_to_rmt = NULL;
 952      if (p_rmt_obj[channel]->tx_sem == NULL) {
 953  #if !CONFIG_SPIRAM_USE_MALLOC
 954          p_rmt_obj[channel]->tx_sem = xSemaphoreCreateBinary();
 955  #else
 956          p_rmt_obj[channel]->intr_alloc_flags = intr_alloc_flags;
 957          if (!(intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
 958              p_rmt_obj[channel]->tx_sem = xSemaphoreCreateBinary();
 959          } else {
 960              p_rmt_obj[channel]->tx_sem = xSemaphoreCreateBinaryStatic(&p_rmt_obj[channel]->tx_sem_buffer);
 961          }
 962  #endif
 963          xSemaphoreGive(p_rmt_obj[channel]->tx_sem);
 964      }
 965      if (p_rmt_obj[channel]->rx_buf == NULL && rx_buf_size > 0) {
 966          p_rmt_obj[channel]->rx_buf = xRingbufferCreate(rx_buf_size, RINGBUF_TYPE_NOSPLIT);
 967      }
 968  
 969  #if SOC_RMT_SUPPORT_RX_PINGPONG
 970      if (p_rmt_obj[channel]->rx_item_buf == NULL && rx_buf_size > 0) {
 971  #if !CONFIG_SPIRAM_USE_MALLOC
 972          p_rmt_obj[channel]->rx_item_buf = calloc(1, rx_buf_size);
 973  #else
 974          if (!(p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
 975              p_rmt_obj[channel]->rx_item_buf = calloc(1, rx_buf_size);
 976          } else {
 977              p_rmt_obj[channel]->rx_item_buf = heap_caps_calloc(1, rx_buf_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
 978          }
 979  #endif
 980          if (p_rmt_obj[channel]->rx_item_buf == NULL) {
 981              ESP_LOGE(RMT_TAG, "RMT malloc fail");
 982              return ESP_FAIL;
 983          }
 984          p_rmt_obj[channel]->rx_item_buf_size = rx_buf_size;
 985      }
 986  #endif
 987  
 988      _lock_acquire_recursive(&(rmt_contex.rmt_driver_isr_lock));
 989  
 990      if (rmt_contex.rmt_driver_channels == 0) {
 991          // first RMT channel using driver
 992          err = rmt_isr_register(rmt_driver_isr_default, &rmt_contex.hal, intr_alloc_flags, &(rmt_contex.rmt_driver_intr_handle));
 993      }
 994      if (err == ESP_OK) {
 995          rmt_contex.rmt_driver_channels |= BIT(channel);
 996      }
 997      _lock_release_recursive(&(rmt_contex.rmt_driver_isr_lock));
 998      rmt_module_enable();
 999      rmt_set_err_intr_en(channel, 0);
1000      rmt_hal_channel_reset(&rmt_contex.hal, channel);
1001      rmt_set_err_intr_en(channel, 1);
1002  
1003      return err;
1004  }
1005  
1006  esp_err_t rmt_write_items(rmt_channel_t channel, const rmt_item32_t *rmt_item, int item_num, bool wait_tx_done)
1007  {
1008      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
1009      RMT_CHECK(p_rmt_obj[channel] != NULL, RMT_DRIVER_ERROR_STR, ESP_FAIL);
1010      RMT_CHECK(rmt_item != NULL, RMT_ADDR_ERROR_STR, ESP_FAIL);
1011      RMT_CHECK(item_num > 0, RMT_DRIVER_LENGTH_ERROR_STR, ESP_ERR_INVALID_ARG);
1012  #if CONFIG_SPIRAM_USE_MALLOC
1013      if (p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM) {
1014          if (!esp_ptr_internal(rmt_item)) {
1015              ESP_LOGE(RMT_TAG, RMT_PSRAM_BUFFER_WARN_STR);
1016              return ESP_ERR_INVALID_ARG;
1017          }
1018      }
1019  #endif
1020      rmt_obj_t *p_rmt = p_rmt_obj[channel];
1021      int block_num = rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel);
1022      int item_block_len = block_num * RMT_MEM_ITEM_NUM;
1023      int item_sub_len = block_num * RMT_MEM_ITEM_NUM / 2;
1024      int len_rem = item_num;
1025      xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
1026      // fill the memory block first
1027      if (item_num >= item_block_len) {
1028          rmt_fill_memory(channel, rmt_item, item_block_len, 0);
1029          len_rem -= item_block_len;
1030          rmt_set_tx_loop_mode(channel, false);
1031          rmt_set_tx_thr_intr_en(channel, 1, item_sub_len);
1032          p_rmt->tx_data = rmt_item + item_block_len;
1033          p_rmt->tx_len_rem = len_rem;
1034          p_rmt->tx_offset = 0;
1035          p_rmt->tx_sub_len = item_sub_len;
1036      } else {
1037          rmt_fill_memory(channel, rmt_item, len_rem, 0);
1038          rmt_item32_t stop_data = {0};
1039          rmt_ll_write_memory(rmt_contex.hal.mem, channel, &stop_data, 1, len_rem);
1040          p_rmt->tx_len_rem = 0;
1041      }
1042      rmt_tx_start(channel, true);
1043      p_rmt->wait_done = wait_tx_done;
1044      if (wait_tx_done) {
1045          // wait loop done
1046          if (rmt_ll_is_tx_loop_enabled(rmt_contex.hal.regs, channel)) {
1047  #if SOC_RMT_SUPPORT_TX_LOOP_COUNT
1048              xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
1049              xSemaphoreGive(p_rmt->tx_sem);
1050  #endif
1051          } else {
1052              // wait tx end
1053              xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
1054              xSemaphoreGive(p_rmt->tx_sem);
1055          }
1056      }
1057      return ESP_OK;
1058  }
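/*
 * Usage sketch (illustrative durations, assuming a 1 us counter tick as in the
 * rmt_config() example above):
 *
 *   rmt_item32_t items[2] = {
 *       { .duration0 = 350, .level0 = 1, .duration1 = 800, .level1 = 0 },
 *       { .duration0 = 700, .level0 = 1, .duration1 = 600, .level1 = 0 },
 *   };
 *   rmt_write_items(RMT_CHANNEL_0, items, 2, true);   // blocks until TX completes
 */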
1059  
1060  esp_err_t rmt_wait_tx_done(rmt_channel_t channel, TickType_t wait_time)
1061  {
1062      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
1063      RMT_CHECK(p_rmt_obj[channel] != NULL, RMT_DRIVER_ERROR_STR, ESP_FAIL);
1064      if (xSemaphoreTake(p_rmt_obj[channel]->tx_sem, wait_time) == pdTRUE) {
1065          p_rmt_obj[channel]->wait_done = false;
1066          xSemaphoreGive(p_rmt_obj[channel]->tx_sem);
1067          return ESP_OK;
1068      } else {
1069          if (wait_time != 0) {
1070              // Don't emit error message if just polling.
1071              ESP_LOGE(RMT_TAG, "Timeout on wait_tx_done");
1072          }
1073          return ESP_ERR_TIMEOUT;
1074      }
1075  }
1076  
1077  esp_err_t rmt_get_ringbuf_handle(rmt_channel_t channel, RingbufHandle_t *buf_handle)
1078  {
1079      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
1080      RMT_CHECK(p_rmt_obj[channel] != NULL, RMT_DRIVER_ERROR_STR, ESP_FAIL);
1081      RMT_CHECK(buf_handle != NULL, RMT_ADDR_ERROR_STR, ESP_ERR_INVALID_ARG);
1082      *buf_handle = p_rmt_obj[channel]->rx_buf;
1083      return ESP_OK;
1084  }
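/*
 * RX usage sketch (illustrative): the default ISR above posts received items to the
 * ring buffer, so a typical reader loop looks like:
 *
 *   RingbufHandle_t rb = NULL;
 *   rmt_get_ringbuf_handle(RMT_CHANNEL_0, &rb);
 *   rmt_rx_start(RMT_CHANNEL_0, true);
 *   size_t rx_size = 0;
 *   rmt_item32_t *items = (rmt_item32_t *)xRingbufferReceive(rb, &rx_size, portMAX_DELAY);
 *   if (items) {
 *       // rx_size is in bytes; rx_size / sizeof(rmt_item32_t) items were captured
 *       vRingbufferReturnItem(rb, (void *)items);
 *   }
 */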
1085  
1086  rmt_tx_end_callback_t rmt_register_tx_end_callback(rmt_tx_end_fn_t function, void *arg)
1087  {
1088      rmt_tx_end_callback_t previous = rmt_contex.rmt_tx_end_callback;
1089      rmt_contex.rmt_tx_end_callback.function = function;
1090      rmt_contex.rmt_tx_end_callback.arg = arg;
1091      return previous;
1092  }
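// Example (illustrative): the callback runs in ISR context and receives the channel
// number plus the user argument given at registration, matching the invocation in
// rmt_driver_isr_default() above:
//   static void IRAM_ATTR on_tx_end(rmt_channel_t channel, void *arg) { /* ... */ }
//   rmt_register_tx_end_callback(on_tx_end, NULL);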
1093  
1094  esp_err_t rmt_translator_init(rmt_channel_t channel, sample_to_rmt_t fn)
1095  {
1096      RMT_CHECK(fn != NULL, RMT_TRANSLATOR_NULL_STR, ESP_ERR_INVALID_ARG);
1097      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
1098      RMT_CHECK(p_rmt_obj[channel] != NULL, RMT_DRIVER_ERROR_STR, ESP_FAIL);
1099      const uint32_t block_size = rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel) *
1100                                  RMT_MEM_ITEM_NUM * sizeof(rmt_item32_t);
1101      if (p_rmt_obj[channel]->tx_buf == NULL) {
1102  #if !CONFIG_SPIRAM_USE_MALLOC
1103          p_rmt_obj[channel]->tx_buf = (rmt_item32_t *)malloc(block_size);
1104  #else
1105          if (!(p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
1106              p_rmt_obj[channel]->tx_buf = (rmt_item32_t *)malloc(block_size);
1107          } else {
1108              p_rmt_obj[channel]->tx_buf = (rmt_item32_t *)heap_caps_calloc(1, block_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
1109          }
1110  #endif
1111          if (p_rmt_obj[channel]->tx_buf == NULL) {
1112              ESP_LOGE(RMT_TAG, "RMT translator buffer create fail");
1113              return ESP_FAIL;
1114          }
1115      }
1116      p_rmt_obj[channel]->sample_to_rmt = fn;
1117      p_rmt_obj[channel]->sample_size_remain = 0;
1118      p_rmt_obj[channel]->sample_cur = NULL;
1119      ESP_LOGD(RMT_TAG, "RMT translator init done");
1120      return ESP_OK;
1121  }
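/*
 * Translator sketch (illustrative): a sample_to_rmt_t callback converts raw sample bytes
 * into RMT items on demand; the parameter meanings follow the calls made in
 * rmt_write_sample() and rmt_driver_isr_default() above.
 *
 *   static void IRAM_ATTR u8_to_rmt(const void *src, rmt_item32_t *dest, size_t src_size,
 *                                   size_t wanted_num, size_t *translated_size, size_t *item_num)
 *   {
 *       // translate at most wanted_num items from src into dest, then report how many
 *       // source bytes were consumed (*translated_size) and items produced (*item_num)
 *   }
 *
 *   rmt_translator_init(RMT_CHANNEL_0, u8_to_rmt);
 */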
1122  
1123  esp_err_t rmt_write_sample(rmt_channel_t channel, const uint8_t *src, size_t src_size, bool wait_tx_done)
1124  {
1125      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
1126      RMT_CHECK(p_rmt_obj[channel] != NULL, RMT_DRIVER_ERROR_STR, ESP_FAIL);
1127      RMT_CHECK(p_rmt_obj[channel]->sample_to_rmt != NULL, RMT_TRANSLATOR_UNINIT_STR, ESP_FAIL);
1128  #if CONFIG_SPIRAM_USE_MALLOC
1129      if (p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM) {
1130          if (!esp_ptr_internal(src)) {
1131              ESP_LOGE(RMT_TAG, RMT_PSRAM_BUFFER_WARN_STR);
1132              return ESP_ERR_INVALID_ARG;
1133          }
1134      }
1135  #endif
1136      size_t item_num = 0;
1137      size_t translated_size = 0;
1138      rmt_obj_t *p_rmt = p_rmt_obj[channel];
1139      const uint32_t item_block_len = rmt_ll_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
1140      const uint32_t item_sub_len = item_block_len / 2;
1141      xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
1142      p_rmt->sample_to_rmt((void *)src, p_rmt->tx_buf, src_size, item_block_len, &translated_size, &item_num);
1143      p_rmt->sample_size_remain = src_size - translated_size;
1144      p_rmt->sample_cur = src + translated_size;
1145      rmt_fill_memory(channel, p_rmt->tx_buf, item_num, 0);
1146      if (item_num == item_block_len) {
1147          rmt_set_tx_thr_intr_en(channel, 1, item_sub_len);
1148          p_rmt->tx_data = p_rmt->tx_buf;
1149          p_rmt->tx_offset = 0;
1150          p_rmt->tx_sub_len = item_sub_len;
1151          p_rmt->translator = true;
1152      } else {
1153          rmt_item32_t stop_data = {0};
1154          rmt_ll_write_memory(rmt_contex.hal.mem, channel, &stop_data, 1, item_num);
1155          p_rmt->tx_len_rem = 0;
1156          p_rmt->sample_cur = NULL;
1157          p_rmt->translator = false;
1158      }
1159      rmt_tx_start(channel, true);
1160      p_rmt->wait_done = wait_tx_done;
1161      if (wait_tx_done) {
1162          xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
1163          xSemaphoreGive(p_rmt->tx_sem);
1164      }
1165      return ESP_OK;
1166  }
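// Example (illustrative): with a translator installed via rmt_translator_init(), raw
// sample bytes are converted in chunks as the TX memory drains:
//   uint8_t pixels[24] = {0};
//   rmt_write_sample(RMT_CHANNEL_0, pixels, sizeof(pixels), true);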
1167  
1168  esp_err_t rmt_get_channel_status(rmt_channel_status_result_t *channel_status)
1169  {
1170      RMT_CHECK(channel_status != NULL, RMT_PARAM_ERR_STR, ESP_ERR_INVALID_ARG);
1171      for (int i = 0; i < RMT_CHANNEL_MAX; i++) {
1172          channel_status->status[i] = RMT_CHANNEL_UNINIT;
1173          if (p_rmt_obj[i] != NULL) {
1174              if (p_rmt_obj[i]->tx_sem != NULL) {
1175                  if (xSemaphoreTake(p_rmt_obj[i]->tx_sem, (TickType_t)0) == pdTRUE) {
1176                      channel_status->status[i] = RMT_CHANNEL_IDLE;
1177                      xSemaphoreGive(p_rmt_obj[i]->tx_sem);
1178                  } else {
1179                      channel_status->status[i] = RMT_CHANNEL_BUSY;
1180                  }
1181              }
1182          }
1183      }
1184      return ESP_OK;
1185  }
1186  
1187  esp_err_t rmt_get_counter_clock(rmt_channel_t channel, uint32_t *clock_hz)
1188  {
1189      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
1190      RMT_CHECK(clock_hz, "parameter clock_hz can't be null", ESP_ERR_INVALID_ARG);
1191      RMT_ENTER_CRITICAL();
1192      if (rmt_ll_get_counter_clock_src(rmt_contex.hal.regs, channel) == RMT_BASECLK_REF) {
1193          *clock_hz = rmt_hal_get_counter_clock(&rmt_contex.hal, channel, REF_CLK_FREQ);
1194      } else {
1195          *clock_hz = rmt_hal_get_counter_clock(&rmt_contex.hal, channel, APB_CLK_FREQ);
1196      }
1197      RMT_EXIT_CRITICAL();
1198      return ESP_OK;
1199  }
1200  
1201  #if SOC_RMT_SUPPORT_TX_GROUP
1202  esp_err_t rmt_add_channel_to_group(rmt_channel_t channel)
1203  {
1204      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
1205      RMT_ENTER_CRITICAL();
1206      rmt_ll_enable_tx_sync(rmt_contex.hal.regs, true);
1207      rmt_ll_add_channel_to_group(rmt_contex.hal.regs, channel);
1208      rmt_ll_reset_counter_clock_div(rmt_contex.hal.regs, channel);
1209      RMT_EXIT_CRITICAL();
1210      return ESP_OK;
1211  }
1212  
1213  esp_err_t rmt_remove_channel_from_group(rmt_channel_t channel)
1214  {
1215      RMT_CHECK(channel < RMT_CHANNEL_MAX, RMT_CHANNEL_ERROR_STR, ESP_ERR_INVALID_ARG);
1216      RMT_ENTER_CRITICAL();
1217      if (rmt_ll_remove_channel_from_group(rmt_contex.hal.regs, channel) == 0) {
1218          rmt_ll_enable_tx_sync(rmt_contex.hal.regs, false);
1219      }
1220      RMT_EXIT_CRITICAL();
1221      return ESP_OK;
1222  }
1223  #endif