// stdatomic.c
1 // Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 //replacement for gcc built-in functions 16 17 #include "sdkconfig.h" 18 #include <stdbool.h> 19 #include "xtensa/config/core-isa.h" 20 #include "xtensa/xtruntime.h" 21 22 //reserved to measure atomic operation time 23 #define atomic_benchmark_intr_disable() 24 #define atomic_benchmark_intr_restore(STATE) 25 26 // This allows nested interrupts disabling and restoring via local registers or stack. 27 // They can be called from interrupts too. 28 // WARNING: Only applies to current CPU. 
29 #define _ATOMIC_ENTER_CRITICAL(void) ({ \ 30 unsigned state = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); \ 31 atomic_benchmark_intr_disable(); \ 32 state; \ 33 }) 34 35 #define _ATOMIC_EXIT_CRITICAL(state) do { \ 36 atomic_benchmark_intr_restore(state); \ 37 XTOS_RESTORE_JUST_INTLEVEL(state); \ 38 } while (0) 39 40 #define CMP_EXCHANGE(n, type) bool __atomic_compare_exchange_ ## n (type* mem, type* expect, type desired, int success, int failure) \ 41 { \ 42 bool ret = false; \ 43 unsigned state = _ATOMIC_ENTER_CRITICAL(); \ 44 if (*mem == *expect) { \ 45 ret = true; \ 46 *mem = desired; \ 47 } else { \ 48 *expect = *mem; \ 49 } \ 50 _ATOMIC_EXIT_CRITICAL(state); \ 51 return ret; \ 52 } 53 54 #define FETCH_ADD(n, type) type __atomic_fetch_add_ ## n (type* ptr, type value, int memorder) \ 55 { \ 56 unsigned state = _ATOMIC_ENTER_CRITICAL(); \ 57 type ret = *ptr; \ 58 *ptr = *ptr + value; \ 59 _ATOMIC_EXIT_CRITICAL(state); \ 60 return ret; \ 61 } 62 63 #define FETCH_SUB(n, type) type __atomic_fetch_sub_ ## n (type* ptr, type value, int memorder) \ 64 { \ 65 unsigned state = _ATOMIC_ENTER_CRITICAL(); \ 66 type ret = *ptr; \ 67 *ptr = *ptr - value; \ 68 _ATOMIC_EXIT_CRITICAL(state); \ 69 return ret; \ 70 } 71 72 #define FETCH_AND(n, type) type __atomic_fetch_and_ ## n (type* ptr, type value, int memorder) \ 73 { \ 74 unsigned state = _ATOMIC_ENTER_CRITICAL(); \ 75 type ret = *ptr; \ 76 *ptr = *ptr & value; \ 77 _ATOMIC_EXIT_CRITICAL(state); \ 78 return ret; \ 79 } 80 81 #define FETCH_OR(n, type) type __atomic_fetch_or_ ## n (type* ptr, type value, int memorder) \ 82 { \ 83 unsigned state = _ATOMIC_ENTER_CRITICAL(); \ 84 type ret = *ptr; \ 85 *ptr = *ptr | value; \ 86 _ATOMIC_EXIT_CRITICAL(state); \ 87 return ret; \ 88 } 89 90 #define FETCH_XOR(n, type) type __atomic_fetch_xor_ ## n (type* ptr, type value, int memorder) \ 91 { \ 92 unsigned state = _ATOMIC_ENTER_CRITICAL(); \ 93 type ret = *ptr; \ 94 *ptr = *ptr ^ value; \ 95 _ATOMIC_EXIT_CRITICAL(state); \ 96 return ret; \ 
97 } 98 99 #ifndef XCHAL_HAVE_S32C1I 100 #error "XCHAL_HAVE_S32C1I not defined, include correct header!" 101 #endif 102 103 //this piece of code should only be compiled if the cpu doesn't support atomic compare and swap (s32c1i) 104 #if XCHAL_HAVE_S32C1I == 0 105 106 #pragma GCC diagnostic ignored "-Wbuiltin-declaration-mismatch" 107 108 CMP_EXCHANGE(1, uint8_t) 109 CMP_EXCHANGE(2, uint16_t) 110 CMP_EXCHANGE(4, uint32_t) 111 CMP_EXCHANGE(8, uint64_t) 112 113 FETCH_ADD(1, uint8_t) 114 FETCH_ADD(2, uint16_t) 115 FETCH_ADD(4, uint32_t) 116 FETCH_ADD(8, uint64_t) 117 118 FETCH_SUB(1, uint8_t) 119 FETCH_SUB(2, uint16_t) 120 FETCH_SUB(4, uint32_t) 121 FETCH_SUB(8, uint64_t) 122 123 FETCH_AND(1, uint8_t) 124 FETCH_AND(2, uint16_t) 125 FETCH_AND(4, uint32_t) 126 FETCH_AND(8, uint64_t) 127 128 FETCH_OR(1, uint8_t) 129 FETCH_OR(2, uint16_t) 130 FETCH_OR(4, uint32_t) 131 FETCH_OR(8, uint64_t) 132 133 FETCH_XOR(1, uint8_t) 134 FETCH_XOR(2, uint16_t) 135 FETCH_XOR(4, uint32_t) 136 FETCH_XOR(8, uint64_t) 137 138 #endif