// pthread.c
1 // Copyright 2018 Espressif Systems (Shanghai) PTE LTD 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 // 15 // This module implements pthread API on top of FreeRTOS. API is implemented to the level allowing 16 // libstdcxx threading framework to operate correctly. So not all original pthread routines are supported. 17 // 18 19 #include <time.h> 20 #include <errno.h> 21 #include <pthread.h> 22 #include <string.h> 23 #include "esp_err.h" 24 #include "esp_attr.h" 25 #include "sys/queue.h" 26 #include "freertos/FreeRTOS.h" 27 #include "freertos/task.h" 28 #include "freertos/semphr.h" 29 #include "soc/soc_memory_layout.h" 30 31 #include "pthread_internal.h" 32 #include "esp_pthread.h" 33 34 #define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL 35 #include "esp_log.h" 36 const static char *TAG = "pthread"; 37 38 /** task state */ 39 enum esp_pthread_task_state { 40 PTHREAD_TASK_STATE_RUN, 41 PTHREAD_TASK_STATE_EXIT 42 }; 43 44 /** pthread thread FreeRTOS wrapper */ 45 typedef struct esp_pthread_entry { 46 SLIST_ENTRY(esp_pthread_entry) list_node; ///< Tasks list node struct. 
47 TaskHandle_t handle; ///< FreeRTOS task handle 48 TaskHandle_t join_task; ///< Handle of the task waiting to join 49 enum esp_pthread_task_state state; ///< pthread task state 50 bool detached; ///< True if pthread is detached 51 void *retval; ///< Value supplied to calling thread during join 52 void *task_arg; ///< Task arguments 53 } esp_pthread_t; 54 55 /** pthread wrapper task arg */ 56 typedef struct { 57 void *(*func)(void *); ///< user task entry 58 void *arg; ///< user task argument 59 esp_pthread_cfg_t cfg; ///< pthread configuration 60 } esp_pthread_task_arg_t; 61 62 /** pthread mutex FreeRTOS wrapper */ 63 typedef struct { 64 SemaphoreHandle_t sem; ///< Handle of the task waiting to join 65 int type; ///< Mutex type. Currently supported PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_RECURSIVE 66 } esp_pthread_mutex_t; 67 68 69 static SemaphoreHandle_t s_threads_mux = NULL; 70 static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED; 71 static SLIST_HEAD(esp_thread_list_head, esp_pthread_entry) s_threads_list 72 = SLIST_HEAD_INITIALIZER(s_threads_list); 73 static pthread_key_t s_pthread_cfg_key; 74 75 76 static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo); 77 78 static void esp_pthread_cfg_key_destructor(void *value) 79 { 80 free(value); 81 } 82 83 esp_err_t esp_pthread_init(void) 84 { 85 if (pthread_key_create(&s_pthread_cfg_key, esp_pthread_cfg_key_destructor) != 0) { 86 return ESP_ERR_NO_MEM; 87 } 88 s_threads_mux = xSemaphoreCreateMutex(); 89 if (s_threads_mux == NULL) { 90 pthread_key_delete(s_pthread_cfg_key); 91 return ESP_ERR_NO_MEM; 92 } 93 return ESP_OK; 94 } 95 96 static void *pthread_list_find_item(void *(*item_check)(esp_pthread_t *, void *arg), void *check_arg) 97 { 98 esp_pthread_t *it; 99 SLIST_FOREACH(it, &s_threads_list, list_node) { 100 void *val = item_check(it, check_arg); 101 if (val) { 102 return val; 103 } 104 } 105 return NULL; 106 } 107 108 static void 
*pthread_get_handle_by_desc(esp_pthread_t *item, void *desc) 109 { 110 if (item == desc) { 111 return item->handle; 112 } 113 return NULL; 114 } 115 116 static void *pthread_get_desc_by_handle(esp_pthread_t *item, void *hnd) 117 { 118 if (hnd == item->handle) { 119 return item; 120 } 121 return NULL; 122 } 123 124 static inline TaskHandle_t pthread_find_handle(pthread_t thread) 125 { 126 return pthread_list_find_item(pthread_get_handle_by_desc, (void *)thread); 127 } 128 129 static esp_pthread_t *pthread_find(TaskHandle_t task_handle) 130 { 131 return pthread_list_find_item(pthread_get_desc_by_handle, task_handle); 132 } 133 134 static void pthread_delete(esp_pthread_t *pthread) 135 { 136 SLIST_REMOVE(&s_threads_list, pthread, esp_pthread_entry, list_node); 137 free(pthread); 138 } 139 140 /* Call this function to configure pthread stacks in Pthreads */ 141 esp_err_t esp_pthread_set_cfg(const esp_pthread_cfg_t *cfg) 142 { 143 if (cfg->stack_size < PTHREAD_STACK_MIN) { 144 return ESP_ERR_INVALID_ARG; 145 } 146 147 /* If a value is already set, update that value */ 148 esp_pthread_cfg_t *p = pthread_getspecific(s_pthread_cfg_key); 149 if (!p) { 150 p = malloc(sizeof(esp_pthread_cfg_t)); 151 if (!p) { 152 return ESP_ERR_NO_MEM; 153 } 154 } 155 *p = *cfg; 156 pthread_setspecific(s_pthread_cfg_key, p); 157 return 0; 158 } 159 160 esp_err_t esp_pthread_get_cfg(esp_pthread_cfg_t *p) 161 { 162 esp_pthread_cfg_t *cfg = pthread_getspecific(s_pthread_cfg_key); 163 if (cfg) { 164 *p = *cfg; 165 return ESP_OK; 166 } 167 memset(p, 0, sizeof(*p)); 168 return ESP_ERR_NOT_FOUND; 169 } 170 171 static int get_default_pthread_core(void) 172 { 173 return CONFIG_PTHREAD_TASK_CORE_DEFAULT == -1 ? 
tskNO_AFFINITY : CONFIG_PTHREAD_TASK_CORE_DEFAULT; 174 } 175 176 esp_pthread_cfg_t esp_pthread_get_default_config(void) 177 { 178 esp_pthread_cfg_t cfg = { 179 .stack_size = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT, 180 .prio = CONFIG_PTHREAD_TASK_PRIO_DEFAULT, 181 .inherit_cfg = false, 182 .thread_name = NULL, 183 .pin_to_core = get_default_pthread_core() 184 }; 185 186 return cfg; 187 } 188 189 static void pthread_task_func(void *arg) 190 { 191 void *rval = NULL; 192 esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg; 193 194 ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func); 195 196 // wait for start 197 xTaskNotifyWait(0, 0, NULL, portMAX_DELAY); 198 199 if (task_arg->cfg.inherit_cfg) { 200 /* If inherit option is set, then do a set_cfg() ourselves for future forks, 201 but first set thread_name to NULL to enable inheritance of the name too. 202 (This also to prevents dangling pointers to name of tasks that might 203 possibly have been deleted when we use the configuration).*/ 204 esp_pthread_cfg_t *cfg = &task_arg->cfg; 205 cfg->thread_name = NULL; 206 esp_pthread_set_cfg(cfg); 207 } 208 ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func); 209 rval = task_arg->func(task_arg->arg); 210 ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func); 211 212 pthread_exit(rval); 213 214 ESP_LOGV(TAG, "%s EXIT", __FUNCTION__); 215 } 216 217 int pthread_create(pthread_t *thread, const pthread_attr_t *attr, 218 void *(*start_routine) (void *), void *arg) 219 { 220 TaskHandle_t xHandle = NULL; 221 222 ESP_LOGV(TAG, "%s", __FUNCTION__); 223 esp_pthread_task_arg_t *task_arg = calloc(1, sizeof(esp_pthread_task_arg_t)); 224 if (task_arg == NULL) { 225 ESP_LOGE(TAG, "Failed to allocate task args!"); 226 return ENOMEM; 227 } 228 229 esp_pthread_t *pthread = calloc(1, sizeof(esp_pthread_t)); 230 if (pthread == NULL) { 231 ESP_LOGE(TAG, "Failed to allocate pthread data!"); 232 free(task_arg); 233 return ENOMEM; 234 } 235 236 uint32_t stack_size = 
CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT; 237 BaseType_t prio = CONFIG_PTHREAD_TASK_PRIO_DEFAULT; 238 BaseType_t core_id = get_default_pthread_core(); 239 const char *task_name = CONFIG_PTHREAD_TASK_NAME_DEFAULT; 240 241 esp_pthread_cfg_t *pthread_cfg = pthread_getspecific(s_pthread_cfg_key); 242 if (pthread_cfg) { 243 if (pthread_cfg->stack_size) { 244 stack_size = pthread_cfg->stack_size; 245 } 246 if (pthread_cfg->prio && pthread_cfg->prio < configMAX_PRIORITIES) { 247 prio = pthread_cfg->prio; 248 } 249 250 if (pthread_cfg->inherit_cfg) { 251 if (pthread_cfg->thread_name == NULL) { 252 // Inherit task name from current task. 253 task_name = pcTaskGetTaskName(NULL); 254 } else { 255 // Inheriting, but new task name. 256 task_name = pthread_cfg->thread_name; 257 } 258 } else if (pthread_cfg->thread_name == NULL) { 259 task_name = CONFIG_PTHREAD_TASK_NAME_DEFAULT; 260 } else { 261 task_name = pthread_cfg->thread_name; 262 } 263 264 if (pthread_cfg->pin_to_core >= 0 && pthread_cfg->pin_to_core < portNUM_PROCESSORS) { 265 core_id = pthread_cfg->pin_to_core; 266 } 267 268 task_arg->cfg = *pthread_cfg; 269 } 270 271 if (attr) { 272 /* Overwrite attributes */ 273 stack_size = attr->stacksize; 274 275 switch (attr->detachstate) { 276 case PTHREAD_CREATE_DETACHED: 277 pthread->detached = true; 278 break; 279 case PTHREAD_CREATE_JOINABLE: 280 default: 281 pthread->detached = false; 282 } 283 } 284 285 task_arg->func = start_routine; 286 task_arg->arg = arg; 287 pthread->task_arg = task_arg; 288 BaseType_t res = xTaskCreatePinnedToCore(&pthread_task_func, 289 task_name, 290 // stack_size is in bytes. This transformation ensures that the units are 291 // transformed to the units used in FreeRTOS. 
292 // Note: float division of ceil(m / n) == 293 // integer division of (m + n - 1) / n 294 (stack_size + sizeof(StackType_t) - 1) / sizeof(StackType_t), 295 task_arg, 296 prio, 297 &xHandle, 298 core_id); 299 300 if (res != pdPASS) { 301 ESP_LOGE(TAG, "Failed to create task!"); 302 free(pthread); 303 free(task_arg); 304 if (res == errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY) { 305 return ENOMEM; 306 } else { 307 return EAGAIN; 308 } 309 } 310 pthread->handle = xHandle; 311 312 if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { 313 assert(false && "Failed to lock threads list!"); 314 } 315 SLIST_INSERT_HEAD(&s_threads_list, pthread, list_node); 316 xSemaphoreGive(s_threads_mux); 317 318 // start task 319 xTaskNotify(xHandle, 0, eNoAction); 320 321 *thread = (pthread_t)pthread; // pointer value fit into pthread_t (uint32_t) 322 323 ESP_LOGV(TAG, "Created task %x", (uint32_t)xHandle); 324 325 return 0; 326 } 327 328 int pthread_join(pthread_t thread, void **retval) 329 { 330 esp_pthread_t *pthread = (esp_pthread_t *)thread; 331 int ret = 0; 332 bool wait = false; 333 void *child_task_retval = 0; 334 335 ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread); 336 337 // find task 338 if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { 339 assert(false && "Failed to lock threads list!"); 340 } 341 TaskHandle_t handle = pthread_find_handle(thread); 342 if (!handle) { 343 // not found 344 ret = ESRCH; 345 } else if (pthread->detached) { 346 // Thread is detached 347 ret = EDEADLK; 348 } else if (pthread->join_task) { 349 // already have waiting task to join 350 ret = EINVAL; 351 } else if (handle == xTaskGetCurrentTaskHandle()) { 352 // join to self not allowed 353 ret = EDEADLK; 354 } else { 355 esp_pthread_t *cur_pthread = pthread_find(xTaskGetCurrentTaskHandle()); 356 if (cur_pthread && cur_pthread->join_task == handle) { 357 // join to each other not allowed 358 ret = EDEADLK; 359 } else { 360 if (pthread->state == PTHREAD_TASK_STATE_RUN) { 361 
pthread->join_task = xTaskGetCurrentTaskHandle(); 362 wait = true; 363 } else { 364 child_task_retval = pthread->retval; 365 pthread_delete(pthread); 366 } 367 } 368 } 369 xSemaphoreGive(s_threads_mux); 370 371 if (ret == 0) { 372 if (wait) { 373 xTaskNotifyWait(0, 0, NULL, portMAX_DELAY); 374 if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { 375 assert(false && "Failed to lock threads list!"); 376 } 377 child_task_retval = pthread->retval; 378 pthread_delete(pthread); 379 xSemaphoreGive(s_threads_mux); 380 } 381 vTaskDelete(handle); 382 } 383 384 if (retval) { 385 *retval = child_task_retval; 386 } 387 388 ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret); 389 return ret; 390 } 391 392 int pthread_detach(pthread_t thread) 393 { 394 esp_pthread_t *pthread = (esp_pthread_t *)thread; 395 int ret = 0; 396 397 if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { 398 assert(false && "Failed to lock threads list!"); 399 } 400 TaskHandle_t handle = pthread_find_handle(thread); 401 if (!handle) { 402 ret = ESRCH; 403 } else if (pthread->detached) { 404 // already detached 405 ret = EINVAL; 406 } else if (pthread->join_task) { 407 // already have waiting task to join 408 ret = EINVAL; 409 } else if (pthread->state == PTHREAD_TASK_STATE_RUN) { 410 // pthread still running 411 pthread->detached = true; 412 } else { 413 // pthread already stopped 414 pthread_delete(pthread); 415 vTaskDelete(handle); 416 } 417 xSemaphoreGive(s_threads_mux); 418 ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret); 419 return ret; 420 } 421 422 void pthread_exit(void *value_ptr) 423 { 424 bool detached = false; 425 /* preemptively clean up thread local storage, rather than 426 waiting for the idle task to clean up the thread */ 427 pthread_internal_local_storage_destructor_callback(); 428 429 if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { 430 assert(false && "Failed to lock threads list!"); 431 } 432 esp_pthread_t *pthread = 
pthread_find(xTaskGetCurrentTaskHandle()); 433 if (!pthread) { 434 assert(false && "Failed to find pthread for current task!"); 435 } 436 if (pthread->task_arg) { 437 free(pthread->task_arg); 438 } 439 if (pthread->detached) { 440 // auto-free for detached threads 441 pthread_delete(pthread); 442 detached = true; 443 } else { 444 // Set return value 445 pthread->retval = value_ptr; 446 // Remove from list, it indicates that task has exited 447 if (pthread->join_task) { 448 // notify join 449 xTaskNotify(pthread->join_task, 0, eNoAction); 450 } else { 451 pthread->state = PTHREAD_TASK_STATE_EXIT; 452 } 453 } 454 xSemaphoreGive(s_threads_mux); 455 456 ESP_LOGD(TAG, "Task stk_wm = %d", uxTaskGetStackHighWaterMark(NULL)); 457 458 if (detached) { 459 vTaskDelete(NULL); 460 } else { 461 vTaskSuspend(NULL); 462 } 463 464 // Should never be reached 465 abort(); 466 } 467 468 int pthread_cancel(pthread_t thread) 469 { 470 ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__); 471 return ENOSYS; 472 } 473 474 int sched_yield( void ) 475 { 476 vTaskDelay(0); 477 return 0; 478 } 479 480 pthread_t pthread_self(void) 481 { 482 if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { 483 assert(false && "Failed to lock threads list!"); 484 } 485 esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle()); 486 if (!pthread) { 487 assert(false && "Failed to find current thread ID!"); 488 } 489 xSemaphoreGive(s_threads_mux); 490 return (pthread_t)pthread; 491 } 492 493 int pthread_equal(pthread_t t1, pthread_t t2) 494 { 495 return t1 == t2 ? 
1 : 0; 496 } 497 498 /***************** ONCE ******************/ 499 int pthread_once(pthread_once_t *once_control, void (*init_routine)(void)) 500 { 501 if (once_control == NULL || init_routine == NULL || !once_control->is_initialized) { 502 ESP_LOGE(TAG, "%s: Invalid args!", __FUNCTION__); 503 return EINVAL; 504 } 505 506 uint32_t res = 1; 507 #if defined(CONFIG_SPIRAM) 508 if (esp_ptr_external_ram(once_control)) { 509 uxPortCompareSetExtram((uint32_t *) &once_control->init_executed, 0, &res); 510 } else { 511 #endif 512 uxPortCompareSet((uint32_t *) &once_control->init_executed, 0, &res); 513 #if defined(CONFIG_SPIRAM) 514 } 515 #endif 516 // Check if compare and set was successful 517 if (res == 0) { 518 ESP_LOGV(TAG, "%s: call init_routine %p", __FUNCTION__, once_control); 519 init_routine(); 520 } 521 522 return 0; 523 } 524 525 /***************** MUTEX ******************/ 526 static int mutexattr_check(const pthread_mutexattr_t *attr) 527 { 528 if (attr->type != PTHREAD_MUTEX_NORMAL && 529 attr->type != PTHREAD_MUTEX_RECURSIVE && 530 attr->type != PTHREAD_MUTEX_ERRORCHECK) { 531 return EINVAL; 532 } 533 return 0; 534 } 535 536 int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr) 537 { 538 int type = PTHREAD_MUTEX_NORMAL; 539 540 if (!mutex) { 541 return EINVAL; 542 } 543 544 if (attr) { 545 if (!attr->is_initialized) { 546 return EINVAL; 547 } 548 int res = mutexattr_check(attr); 549 if (res) { 550 return res; 551 } 552 type = attr->type; 553 } 554 555 esp_pthread_mutex_t *mux = (esp_pthread_mutex_t *)malloc(sizeof(esp_pthread_mutex_t)); 556 if (!mux) { 557 return ENOMEM; 558 } 559 mux->type = type; 560 561 if (mux->type == PTHREAD_MUTEX_RECURSIVE) { 562 mux->sem = xSemaphoreCreateRecursiveMutex(); 563 } else { 564 mux->sem = xSemaphoreCreateMutex(); 565 } 566 if (!mux->sem) { 567 free(mux); 568 return EAGAIN; 569 } 570 571 *mutex = (pthread_mutex_t)mux; // pointer value fit into pthread_mutex_t (uint32_t) 572 573 return 0; 574 } 
575 576 int pthread_mutex_destroy(pthread_mutex_t *mutex) 577 { 578 esp_pthread_mutex_t *mux; 579 580 ESP_LOGV(TAG, "%s %p", __FUNCTION__, mutex); 581 582 if (!mutex) { 583 return EINVAL; 584 } 585 mux = (esp_pthread_mutex_t *)*mutex; 586 if (!mux) { 587 return EINVAL; 588 } 589 590 // check if mux is busy 591 int res = pthread_mutex_lock_internal(mux, 0); 592 if (res == EBUSY) { 593 return EBUSY; 594 } 595 596 vSemaphoreDelete(mux->sem); 597 free(mux); 598 599 return 0; 600 } 601 602 static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo) 603 { 604 if (!mux) { 605 return EINVAL; 606 } 607 608 if ((mux->type == PTHREAD_MUTEX_ERRORCHECK) && 609 (xSemaphoreGetMutexHolder(mux->sem) == xTaskGetCurrentTaskHandle())) { 610 return EDEADLK; 611 } 612 613 if (mux->type == PTHREAD_MUTEX_RECURSIVE) { 614 if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) { 615 return EBUSY; 616 } 617 } else { 618 if (xSemaphoreTake(mux->sem, tmo) != pdTRUE) { 619 return EBUSY; 620 } 621 } 622 623 return 0; 624 } 625 626 static int pthread_mutex_init_if_static(pthread_mutex_t *mutex) 627 { 628 int res = 0; 629 if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) { 630 portENTER_CRITICAL(&s_mutex_init_lock); 631 if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) { 632 res = pthread_mutex_init(mutex, NULL); 633 } 634 portEXIT_CRITICAL(&s_mutex_init_lock); 635 } 636 return res; 637 } 638 639 int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex) 640 { 641 if (!mutex) { 642 return EINVAL; 643 } 644 int res = pthread_mutex_init_if_static(mutex); 645 if (res != 0) { 646 return res; 647 } 648 return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY); 649 } 650 651 int IRAM_ATTR pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *timeout) 652 { 653 if (!mutex) { 654 return EINVAL; 655 } 656 int res = pthread_mutex_init_if_static(mutex); 657 if (res != 0) { 658 return res; 659 } 660 661 struct timespec currtime; 662 
clock_gettime(CLOCK_REALTIME, &currtime); 663 TickType_t tmo = ((timeout->tv_sec - currtime.tv_sec)*1000 + 664 (timeout->tv_nsec - currtime.tv_nsec)/1000000)/portTICK_PERIOD_MS; 665 666 res = pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, tmo); 667 if (res == EBUSY) { 668 return ETIMEDOUT; 669 } 670 return res; 671 } 672 673 int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex) 674 { 675 if (!mutex) { 676 return EINVAL; 677 } 678 int res = pthread_mutex_init_if_static(mutex); 679 if (res != 0) { 680 return res; 681 } 682 return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0); 683 } 684 685 int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex) 686 { 687 esp_pthread_mutex_t *mux; 688 689 if (!mutex) { 690 return EINVAL; 691 } 692 mux = (esp_pthread_mutex_t *)*mutex; 693 if (!mux) { 694 return EINVAL; 695 } 696 697 if (((mux->type == PTHREAD_MUTEX_RECURSIVE) || 698 (mux->type == PTHREAD_MUTEX_ERRORCHECK)) && 699 (xSemaphoreGetMutexHolder(mux->sem) != xTaskGetCurrentTaskHandle())) { 700 return EPERM; 701 } 702 703 int ret; 704 if (mux->type == PTHREAD_MUTEX_RECURSIVE) { 705 ret = xSemaphoreGiveRecursive(mux->sem); 706 } else { 707 ret = xSemaphoreGive(mux->sem); 708 } 709 if (ret != pdTRUE) { 710 assert(false && "Failed to unlock mutex!"); 711 } 712 return 0; 713 } 714 715 int pthread_mutexattr_init(pthread_mutexattr_t *attr) 716 { 717 if (!attr) { 718 return EINVAL; 719 } 720 attr->type = PTHREAD_MUTEX_NORMAL; 721 attr->is_initialized = 1; 722 return 0; 723 } 724 725 int pthread_mutexattr_destroy(pthread_mutexattr_t *attr) 726 { 727 if (!attr) { 728 return EINVAL; 729 } 730 attr->is_initialized = 0; 731 return 0; 732 } 733 734 int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type) 735 { 736 if (!attr) { 737 return EINVAL; 738 } 739 *type = attr->type; 740 return 0; 741 } 742 743 int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type) 744 { 745 if (!attr) { 746 return EINVAL; 747 } 748 
pthread_mutexattr_t tmp_attr = {.type = type}; 749 int res = mutexattr_check(&tmp_attr); 750 if (!res) { 751 attr->type = type; 752 } 753 return res; 754 } 755 756 /***************** ATTRIBUTES ******************/ 757 int pthread_attr_init(pthread_attr_t *attr) 758 { 759 if (attr) { 760 /* Nothing to allocate. Set everything to default */ 761 attr->stacksize = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT; 762 attr->detachstate = PTHREAD_CREATE_JOINABLE; 763 return 0; 764 } 765 return EINVAL; 766 } 767 768 int pthread_attr_destroy(pthread_attr_t *attr) 769 { 770 if (attr) { 771 /* Nothing to deallocate. Reset everything to default */ 772 attr->stacksize = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT; 773 attr->detachstate = PTHREAD_CREATE_JOINABLE; 774 return 0; 775 } 776 return EINVAL; 777 } 778 779 int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize) 780 { 781 if (attr) { 782 *stacksize = attr->stacksize; 783 return 0; 784 } 785 return EINVAL; 786 } 787 788 int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize) 789 { 790 if (attr && !(stacksize < PTHREAD_STACK_MIN)) { 791 attr->stacksize = stacksize; 792 return 0; 793 } 794 return EINVAL; 795 } 796 797 int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate) 798 { 799 if (attr) { 800 *detachstate = attr->detachstate; 801 return 0; 802 } 803 return EINVAL; 804 } 805 806 int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate) 807 { 808 if (attr) { 809 switch (detachstate) { 810 case PTHREAD_CREATE_DETACHED: 811 attr->detachstate = PTHREAD_CREATE_DETACHED; 812 break; 813 case PTHREAD_CREATE_JOINABLE: 814 attr->detachstate = PTHREAD_CREATE_JOINABLE; 815 break; 816 default: 817 return EINVAL; 818 } 819 return 0; 820 } 821 return EINVAL; 822 } 823 824 /* Hook function to force linking this file */ 825 void pthread_include_pthread_impl(void) 826 { 827 }