tasklets.c
/**
 * @ingroup tasklets
 * @file
 *
 * Implementation of the @ref tasklets "RTAI tasklets module".
 *
 * @author Paolo Mantegazza
 *
 * @note Copyright © 1999-2006 Paolo Mantegazza <mantegazza@aero.polimi.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/**
 * @defgroup tasklets module
 *
 * The tasklets module adds an interesting feature along the line, pioneered
 * by RTAI, of a symmetric usage of all its services inter-intra kernel and
 * user space, both for soft and hard real time applications. In such a way
 * you have opened a whole spectrum of development and implementation
 * lanes, allowing maximum flexibility with uncompromised performance.
 *
 * The new services provided can be useful when you have many tasks, both
 * in kernel and user space, that must execute simple, often repetitive,
 * functions, both in soft and hard real time, asynchronously within their
 * parent application. Such tasks are here called tasklets and can be of
 * two kinds: normal tasklets and timed tasklets (timers).
 *
 * It must be noted that only timers should need to be made available both in
 * user and kernel space.
In fact, normal tasklets in kernel space are nothing
 * but standard functions that can be directly executed by calling them, so
 * there would be no need for any special treatment. However, to maintain full
 * usage symmetry and to ease any possible porting from one address space to
 * the other, plain tasklets can be used in the same way from whatever address
 * space.
 *
 * Tasklets should be used where and whenever the standard hard real time
 * RTAI tasks are used. Instances of such applications are timed polling and
 * simple Programmable Logic Controller (PLC) like sequences of services.
 * Obviously there are many other instances for which the use of tasklets,
 * either normal or timers, can be sufficient. In general such an approach can
 * be a very useful complement to fully featured tasks in controlling complex
 * machines and systems, both for basic and support services.
 *
 * It is remarked that the implementation found here for timed tasklets relies
 * on server support tasks, one per cpu, that execute the related timer
 * functions, either in oneshot or periodic mode, on the basis of their time
 * deadline and according to their, user assigned, priority. Instead, as told
 * above, plain tasklets are just functions executed from kernel space; their
 * execution needs no server and is simply triggered by calling a given service
 * function at due time, either from a kernel task or interrupt handler
 * requiring, or in charge of, their execution whenever they are needed.
 *
 * Note that in user space you run within the memory of the process owning the
 * tasklet function, so you MUST lock all of your task's memory in core, by
 * using mlockall, to prevent it being swapped out. Pre-grow also your stack
 * to the largest size needed during the execution of your application, see
 * mlockall usage in the Linux man pages.
71 * 72 * The RTAI distribution contains many useful examples that demonstrate the use 73 * of most tasklets services, both in kernel and user space. 74 * 75 *@{*/ 76 77 #include <linux/module.h> 78 #include <linux/version.h> 79 #include <linux/proc_fs.h> 80 #include <linux/sched.h> 81 #include <linux/slab.h> 82 #include <linux/interrupt.h> 83 #include <asm/uaccess.h> 84 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) 85 #include <asm/system.h> 86 #endif 87 #include <asm/rtai_sched.h> 88 #include <rtai_tasklets.h> 89 #include <rtai_lxrt.h> 90 #include <rtai_malloc.h> 91 #include <rtai_schedcore.h> 92 93 MODULE_LICENSE("GPL"); 94 95 extern struct epoch_struct boot_epoch; 96 97 DEFINE_LINUX_CR0 98 99 #ifdef CONFIG_SMP 100 #define NUM_CPUS RTAI_NR_CPUS 101 #define TIMED_TIMER_CPUID (timed_timer->cpuid) 102 #define TIMER_CPUID (timer->cpuid) 103 #define LIST_CPUID (cpuid) 104 #else 105 #define NUM_CPUS 1 106 #define TIMED_TIMER_CPUID (0) 107 #define TIMER_CPUID (0) 108 #define LIST_CPUID (0) 109 #endif 110 111 112 static struct rt_tasklet_struct timers_list[NUM_CPUS] = 113 { { &timers_list[0], &timers_list[0], RT_SCHED_LOWEST_PRIORITY, 0, 0, RT_TIME_END, 0LL, NULL, 0UL, 0UL, 0, NULL, NULL, 0, 114 #ifdef CONFIG_RTAI_LONG_TIMED_LIST 115 { NULL } 116 #endif 117 }, }; 118 119 static struct rt_tasklet_struct tasklets_list = 120 { &tasklets_list, &tasklets_list, }; 121 122 // static spinlock_t timers_lock[NUM_CPUS] = { SPIN_LOCK_UNLOCKED, }; 123 static spinlock_t timers_lock[NUM_CPUS] = { __SPIN_LOCK_UNLOCKED(timers_lock[0]), }; 124 static DEFINE_SPINLOCK(tasklets_lock); 125 126 static struct rt_fun_entry rt_tasklet_fun[] __attribute__ ((__unused__)); 127 128 static struct rt_fun_entry rt_tasklet_fun[] = { 129 { 0, rt_init_tasklet }, // 0 130 { 0, rt_delete_tasklet }, // 1 131 { 0, rt_insert_tasklet }, // 2 132 { 0, rt_remove_tasklet }, // 3 133 { 0, rt_tasklet_use_fpu }, // 4 134 { 0, rt_insert_timer }, // 5 135 { 0, rt_remove_timer }, // 6 136 { 0, rt_set_timer_priority }, 
// 7 137 { 0, rt_set_timer_firing_time }, // 8 138 { 0, rt_set_timer_period }, // 9 139 { 0, rt_set_tasklet_handler }, // 10 140 { 0, rt_set_tasklet_data }, // 11 141 { 0, rt_exec_tasklet }, // 12 142 { 0, rt_wait_tasklet_is_hard }, // 13 143 { 0, rt_set_tasklet_priority }, // 14 144 { 0, rt_register_task }, // 15 145 { 0, rt_get_timer_times }, // 16 146 { 0, rt_get_timer_overrun }, // 17 147 148 /* Posix timers support */ 149 150 { 0, rt_ptimer_create }, // 18 151 { 0, rt_ptimer_settime }, // 19 152 { 0, rt_ptimer_overrun }, // 20 153 { 0, rt_ptimer_gettime }, // 21 154 { 0, rt_ptimer_delete } // 22 155 156 /* End Posix timers support */ 157 158 }; 159 160 #ifdef CONFIG_RTAI_LONG_TIMED_LIST 161 162 /* BINARY TREE */ 163 static inline void enq_timer(struct rt_tasklet_struct *timed_timer) 164 { 165 struct rt_tasklet_struct *timerh, *tmrnxt, *timer; 166 rb_node_t **rbtn, *rbtpn = NULL; 167 timer = timerh = &timers_list[TIMED_TIMER_CPUID]; 168 rbtn = &timerh->rbr.rb_node; 169 170 while (*rbtn) { 171 rbtpn = *rbtn; 172 tmrnxt = rb_entry(rbtpn, struct rt_tasklet_struct, rbn); 173 if (timer->firing_time > tmrnxt->firing_time) { 174 rbtn = &(rbtpn)->rb_right; 175 } else { 176 rbtn = &(rbtpn)->rb_left; 177 timer = tmrnxt; 178 } 179 } 180 rb_link_node(&timed_timer->rbn, rbtpn, rbtn); 181 rb_insert_color(&timed_timer->rbn, &timerh->rbr); 182 timer->prev = (timed_timer->prev = timer->prev)->next = timed_timer; 183 timed_timer->next = timer; 184 } 185 186 #define rb_erase_timer(timer) \ 187 rb_erase(&(timer)->rbn, &timers_list[NUM_CPUS > 1 ? 
(timer)->cpuid : 0].rbr) 188 189 #else /* !CONFIG_RTAI_LONG_TIMED_LIST */ 190 191 /* LINEAR */ 192 static inline void enq_timer(struct rt_tasklet_struct *timed_timer) 193 { 194 struct rt_tasklet_struct *timer; 195 timer = &timers_list[TIMED_TIMER_CPUID]; 196 while (timed_timer->firing_time > (timer = timer->next)->firing_time); 197 timer->prev = (timed_timer->prev = timer->prev)->next = timed_timer; 198 timed_timer->next = timer; 199 } 200 201 #define rb_erase_timer(timer) 202 203 #endif /* CONFIG_RTAI_LONG_TIMED_LIST */ 204 205 static inline void rem_timer(struct rt_tasklet_struct *timer) 206 { 207 (timer->next)->prev = timer->prev; 208 (timer->prev)->next = timer->next; 209 timer->next = timer->prev = timer; 210 rb_erase_timer(timer); 211 } 212 213 /** 214 * Insert a tasklet in the list of tasklets to be processed. 215 * 216 * rt_insert_tasklet insert a tasklet in the list of tasklets to be processed. 217 * 218 * @param tasklet is the pointer to the tasklet structure to be used to manage 219 * the tasklet at hand. 220 * 221 * @param handler is the tasklet function to be executed. 222 * 223 * @param data is an unsigned long to be passed to the handler. Clearly by an 224 * appropriate type casting one can pass a pointer to whatever data structure 225 * and type is needed. 226 * 227 * @param id is a unique unsigned number to be used to identify the tasklet 228 * tasklet. It is typically required by the kernel space service, interrupt 229 * handler ot task, in charge of executing a user space tasklet. The support 230 * functions nam2num() and num2nam() can be used for setting up id from a six 231 * character string. 232 * 233 * @param pid is an integer that marks a tasklet either as being a kernel or 234 * user space one. Despite its name you need not to know the pid of the tasklet 235 * parent process in user space. Simple use 0 for kernel space and 1 for user 236 * space. 
237 * 238 * @retval 0 on success 239 * @return a negative number to indicate that an invalid handler address has 240 * been passed. 241 * 242 */ 243 244 RTAI_SYSCALL_MODE int rt_insert_tasklet(struct rt_tasklet_struct *tasklet, int priority, void (*handler)(unsigned long), unsigned long data, unsigned long id, int pid) 245 { 246 unsigned long flags; 247 248 // tasklet initialization 249 if (!handler || !id) { 250 return -EINVAL; 251 } 252 tasklet->uses_fpu = 0; 253 tasklet->priority = priority; 254 tasklet->handler = handler; 255 tasklet->data = data; 256 tasklet->id = id; 257 if (!pid) { 258 tasklet->task = 0; 259 } else { 260 (tasklet->task)->priority = priority; 261 rt_copy_to_user(tasklet->usptasklet, tasklet, sizeof(struct rt_usp_tasklet_struct)); 262 } 263 // tasklet insertion tasklets_list 264 flags = rt_spin_lock_irqsave(&tasklets_lock); 265 tasklet->next = &tasklets_list; 266 tasklet->prev = tasklets_list.prev; 267 (tasklets_list.prev)->next = tasklet; 268 tasklets_list.prev = tasklet; 269 rt_spin_unlock_irqrestore(flags, &tasklets_lock); 270 return 0; 271 } 272 273 /** 274 * Remove a tasklet in the list of tasklets to be processed. 275 * 276 * rt_remove_tasklet remove a tasklet from the list of tasklets to be processed. 277 * 278 * @param tasklet is the pointer to the tasklet structure to be used to manage 279 * the tasklet at hand. 280 * 281 */ 282 283 RTAI_SYSCALL_MODE void rt_remove_tasklet(struct rt_tasklet_struct *tasklet) 284 { 285 if (tasklet->next && tasklet->prev && tasklet->next != tasklet && tasklet->prev != tasklet) { 286 unsigned long flags; 287 flags = rt_spin_lock_irqsave(&tasklets_lock); 288 (tasklet->next)->prev = tasklet->prev; 289 (tasklet->prev)->next = tasklet->next; 290 tasklet->next = tasklet->prev = tasklet; 291 rt_spin_unlock_irqrestore(flags, &tasklets_lock); 292 } 293 } 294 295 /** 296 * Find a tasklet identified by its id. 297 * 298 * @param id is the unique unsigned long to be used to identify the tasklet. 
299 * 300 * The support functions nam2num() and num2nam() can be used for setting up id 301 * from a six character string. 302 * 303 * @return the pointer to a tasklet handler on success 304 * @retval 0 to indicate that @a id is not a valid identifier so that the 305 * related tasklet was not found. 306 * 307 */ 308 309 struct rt_tasklet_struct *rt_find_tasklet_by_id(unsigned long id) 310 { 311 struct rt_tasklet_struct *tasklet; 312 313 tasklet = &tasklets_list; 314 while ((tasklet = tasklet->next) != &tasklets_list) { 315 if (id == tasklet->id) { 316 return tasklet; 317 } 318 } 319 return 0; 320 } 321 322 /** 323 * Exec a tasklet. 324 * 325 * rt_exec_tasklet execute a tasklet from the list of tasklets to be processed. 326 * 327 * @param tasklet is the pointer to the tasklet structure to be used to manage 328 * the tasklet @a tasklet. 329 * 330 * Kernel space tasklets addresses are usually available directly and can be 331 * easily be used in calling rt_tasklet_exec. In fact one can call the related 332 * handler directly without using such a support function, which is mainly 333 * supplied for symmetry and to ease the porting of applications from one space 334 * to the other. 335 * 336 * User space tasklets instead must be first found within the tasklet list by 337 * calling rt_find_tasklet_by_id() to get the tasklet address to be used 338 * in rt_tasklet_exec(). 
339 * 340 */ 341 342 RTAI_SYSCALL_MODE int rt_exec_tasklet(struct rt_tasklet_struct *tasklet) 343 { 344 if (tasklet && tasklet->next != tasklet && tasklet->prev != tasklet) { 345 if (!tasklet->task) { 346 tasklet->handler(tasklet->data); 347 } else { 348 rt_task_resume(tasklet->task); 349 } 350 return 0; 351 } 352 return -EINVAL; 353 } 354 355 RTAI_SYSCALL_MODE void rt_set_tasklet_priority(struct rt_tasklet_struct *tasklet, int priority) 356 { 357 tasklet->priority = priority; 358 if (tasklet->task) { 359 (tasklet->task)->priority = priority; 360 } 361 } 362 363 RTAI_SYSCALL_MODE int rt_set_tasklet_handler(struct rt_tasklet_struct *tasklet, void (*handler)(unsigned long)) 364 { 365 if (!handler) { 366 return -EINVAL; 367 } 368 tasklet->handler = handler; 369 if (tasklet->task) { 370 rt_copy_to_user(tasklet->usptasklet, tasklet, sizeof(struct rt_usp_tasklet_struct)); 371 } 372 return 0; 373 } 374 375 RTAI_SYSCALL_MODE void rt_set_tasklet_data(struct rt_tasklet_struct *tasklet, unsigned long data) 376 { 377 tasklet->data = data; 378 if (tasklet->task) { 379 rt_copy_to_user(tasklet->usptasklet, tasklet, sizeof(struct rt_usp_tasklet_struct)); 380 } 381 } 382 383 RTAI_SYSCALL_MODE RT_TASK *rt_tasklet_use_fpu(struct rt_tasklet_struct *tasklet, int use_fpu) 384 { 385 tasklet->uses_fpu = use_fpu ? 
1 : 0; 386 return tasklet->task; 387 } 388 389 static RT_TASK timers_manager[NUM_CPUS]; 390 391 static inline void asgn_min_prio(int cpuid) 392 { 393 // find minimum priority in timers_struct 394 RT_TASK *timer_manager; 395 struct rt_tasklet_struct *timer, *timerl; 396 spinlock_t *lock; 397 unsigned long flags; 398 int priority; 399 400 priority = (timer = (timerl = &timers_list[LIST_CPUID])->next)->priority; 401 flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]); 402 while ((timer = timer->next) != timerl) { 403 if (timer->priority < priority) { 404 priority = timer->priority; 405 } 406 rt_spin_unlock_irqrestore(flags, lock); 407 flags = rt_spin_lock_irqsave(lock); 408 } 409 rt_spin_unlock_irqrestore(flags, lock); 410 flags = rt_global_save_flags_and_cli(); 411 if ((timer_manager = &timers_manager[LIST_CPUID])->priority > priority) { 412 timer_manager->priority = priority; 413 if (timer_manager->state == RT_SCHED_READY) { 414 rem_ready_task(timer_manager); 415 enq_ready_task(timer_manager); 416 } 417 } 418 rt_global_restore_flags(flags); 419 } 420 421 static inline void set_timer_firing_time(struct rt_tasklet_struct *timer, RTIME firing_time) 422 { 423 if (timer->next != timer && timer->prev != timer) { 424 spinlock_t *lock; 425 unsigned long flags; 426 427 timer->firing_time = firing_time; 428 flags = rt_spin_lock_irqsave(lock = &timers_lock[TIMER_CPUID]); 429 rem_timer(timer); 430 enq_timer(timer); 431 rt_spin_unlock_irqrestore(flags, lock); 432 } 433 } 434 435 /** 436 * Insert a timer in the list of timers to be processed. 437 * 438 * rt_insert_timer insert a timer in the list of timers to be processed. Timers 439 * can be either periodic or oneshot. A periodic timer is reloaded at each 440 * expiration so that it executes with the assigned periodicity. A oneshot 441 * timer is fired just once and then removed from the timers list. Timers can be 442 * reinserted or modified within their handlers functions. 
443 * 444 * @param timer is the pointer to the timer structure to be used to manage the 445 * timer at hand. 446 * 447 * @param priority is the priority to be used to execute timers handlers when 448 * more than one timer has to be fired at the same time.It can be assigned any 449 * value such that: 0 < priority < RT_LOWEST_PRIORITY. 450 * 451 * @param firing_time is the time of the first timer expiration. 452 * 453 * @param period is the period of a periodic timer. A periodic timer keeps 454 * calling its handler at firing_time + k*period k = 0, 1. To define a oneshot 455 * timer simply use a null period. 456 * 457 * @param handler is the timer function to be executed at each timer expiration. 458 * 459 * @param data is an unsigned long to be passed to the handler. Clearly by a 460 * appropriate type casting one can pass a pointer to whatever data structure 461 * and type is needed. 462 * 463 * @param pid is an integer that marks a timer either as being a kernel or user 464 * space one. Despite its name you need not to know the pid of the timer parent 465 * process in user space. Simple use 0 for kernel space and 1 for user space. 
466 * 467 * @retval 0 on success 468 * @retval EINVAL if @a handler is an invalid handler address 469 * 470 */ 471 472 RTAI_SYSCALL_MODE int rt_insert_timer(struct rt_tasklet_struct *timer, int priority, RTIME firing_time, RTIME period, void (*handler)(unsigned long), unsigned long data, int pid) 473 { 474 spinlock_t *lock; 475 unsigned long flags, cpuid; 476 RT_TASK *timer_manager; 477 478 // timer initialization 479 timer->uses_fpu = 0; 480 481 if (pid >= 0) { 482 if (!handler) { 483 return -EINVAL; 484 } 485 timer->handler = handler; 486 timer->data = data; 487 } else { 488 if (timer->handler != NULL || timer->handler == (void *)1) { 489 timer->handler = (void *)1; 490 timer->data = data; 491 } 492 } 493 494 timer->priority = priority; 495 REALTIME2COUNT(firing_time) 496 timer->firing_time = firing_time; 497 timer->period = period; 498 499 if (!pid) { 500 timer->task = 0; 501 timer->cpuid = cpuid = NUM_CPUS > 1 ? rtai_cpuid() : 0; 502 } else { 503 timer->cpuid = cpuid = NUM_CPUS > 1 ? 
(timer->task)->runnable_on_cpus : 0; 504 (timer->task)->priority = priority; 505 rt_copy_to_user(timer->usptasklet, timer, sizeof(struct rt_usp_tasklet_struct)); 506 } 507 // timer insertion in timers_list 508 flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]); 509 enq_timer(timer); 510 rt_spin_unlock_irqrestore(flags, lock); 511 // timers_manager priority inheritance 512 if (timer->priority < (timer_manager = &timers_manager[LIST_CPUID])->priority) { 513 timer_manager->priority = timer->priority; 514 } 515 // timers_task deadline inheritance 516 flags = rt_global_save_flags_and_cli(); 517 if (timers_list[LIST_CPUID].next == timer && (timer_manager->state & RT_SCHED_DELAYED) && firing_time < timer_manager->resume_time) { 518 timer_manager->resume_time = firing_time; 519 rem_timed_task(timer_manager); 520 enq_timed_task(timer_manager); 521 rt_schedule(); 522 } 523 rt_global_restore_flags(flags); 524 return 0; 525 } 526 527 /** 528 * Remove a timer in the list of timers to be processed. 529 * 530 * rt_remove_timer remove a timer from the list of the timers to be processed. 531 * 532 * @param timer is the pointer to the timer structure to be used to manage the 533 * timer at hand. 534 * 535 */ 536 537 RTAI_SYSCALL_MODE void rt_remove_timer(struct rt_tasklet_struct *timer) 538 { 539 if (timer->next && timer->prev && timer->next != timer && timer->prev != timer) { 540 spinlock_t *lock; 541 unsigned long flags; 542 flags = rt_spin_lock_irqsave(lock = &timers_lock[TIMER_CPUID]); 543 rem_timer(timer); 544 rt_spin_unlock_irqrestore(flags, lock); 545 asgn_min_prio(TIMER_CPUID); 546 } 547 } 548 549 /** 550 * Change the priority of an existing timer. 551 * 552 * rt_set_timer_priority change the priority of an existing timer. 553 * 554 * @param timer is the pointer to the timer structure to be used to manage the 555 * timer at hand. 
556 * 557 * @param priority is the priority to be used to execute timers handlers when 558 * more than one timer has to be fired at the same time. It can be assigned any 559 * value such that: 0 < priority < RT_LOWEST_PRIORITY. 560 * 561 * This function can be used within the timer handler. 562 * 563 */ 564 565 RTAI_SYSCALL_MODE void rt_set_timer_priority(struct rt_tasklet_struct *timer, int priority) 566 { 567 timer->priority = priority; 568 if (timer->task) { 569 (timer->task)->priority = priority; 570 } 571 asgn_min_prio(TIMER_CPUID); 572 } 573 574 /** 575 * Change the firing time of a timer. 576 * 577 * rt_set_timer_firing_time changes the firing time of a periodic timer 578 * overloading any existing value, so that the timer next shoot will take place 579 * at the new firing time. Note that if a oneshot timer has its firing time 580 * changed after it has already expired this function has no effect. You 581 * should reinsert it in the timer list with the new firing time. 582 * 583 * @param timer is the pointer to the timer structure to be used to manage the 584 * timer at hand. 585 * 586 * @param firing_time is the new time of the first timer expiration. 587 * 588 * This function can be used within the timer handler. 589 * 590 * @retval 0 on success. 591 * 592 */ 593 594 RTAI_SYSCALL_MODE void rt_set_timer_firing_time(struct rt_tasklet_struct *timer, RTIME firing_time) 595 { 596 unsigned long flags; 597 RT_TASK *timer_manager; 598 599 set_timer_firing_time(timer, firing_time); 600 flags = rt_global_save_flags_and_cli(); 601 if (timers_list[TIMER_CPUID].next == timer && ((timer_manager = &timers_manager[TIMER_CPUID])->state & RT_SCHED_DELAYED) && firing_time < timer_manager->resume_time) { 602 timer_manager->resume_time = firing_time; 603 rem_timed_task(timer_manager); 604 enq_timed_task(timer_manager); 605 rt_schedule(); 606 } 607 rt_global_restore_flags(flags); 608 } 609 610 /** 611 * Change the period of a timer. 
612 * 613 * rt_set_timer_period changes the period of a periodic timer. Note that the new 614 * period will be used to pace the timer only after the expiration of the firing 615 * time already in place. Using this function with a period different from zero 616 * for a oneshot timer, that has not expired yet, will transform it into a 617 * periodic timer. 618 * 619 * @param timer is the pointer to the timer structure to be used to manage the 620 * timer at hand. 621 * 622 * @param period is the new period of a periodic timer. 623 * 624 * The macro #rt_fast_set_timer_period can substitute the corresponding 625 * function in kernel space if both the existing timer period and the new one 626 * fit into an 32 bits integer. 627 * 628 * This function an be used within the timer handler. 629 * 630 * @retval 0 on success. 631 * 632 */ 633 634 RTAI_SYSCALL_MODE void rt_set_timer_period(struct rt_tasklet_struct *timer, RTIME period) 635 { 636 spinlock_t *lock; 637 unsigned long flags; 638 flags = rt_spin_lock_irqsave(lock = &timers_lock[TIMER_CPUID]); 639 timer->period = period; 640 rt_spin_unlock_irqrestore(flags, lock); 641 } 642 643 RTAI_SYSCALL_MODE void rt_get_timer_times(struct rt_tasklet_struct *timer, RTIME timer_times[]) 644 { 645 RTIME firing; 646 647 firing = -rt_get_time(); 648 firing += timer->firing_time; 649 650 timer_times[0] = firing > 0 ? 
firing : -1; 651 timer_times[1] = timer->period; 652 } 653 654 RTAI_SYSCALL_MODE RTIME rt_get_timer_overrun(struct rt_tasklet_struct *timer) 655 { 656 return timer->overrun; 657 } 658 659 static int TimersManagerPrio = 0; 660 RTAI_MODULE_PARM(TimersManagerPrio, int); 661 662 // the timers_manager task function 663 664 static void rt_timers_manager(long cpuid) 665 { 666 RTIME now; 667 RT_TASK *timer_manager; 668 struct rt_tasklet_struct *tmr, *timer, *timerl; 669 spinlock_t *lock; 670 unsigned long flags, timer_tol; 671 int priority, used_fpu; 672 673 timer_manager = &timers_manager[LIST_CPUID]; 674 timerl = &timers_list[LIST_CPUID]; 675 lock = &timers_lock[LIST_CPUID]; 676 timer_tol = tuned.timers_tol[LIST_CPUID]; 677 678 while (1) { 679 int retval; 680 retval = rt_sleep_until((timerl->next)->firing_time); 681 // now = timer_manager->resume_time + timer_tol; 682 now = rt_get_time() + timer_tol; 683 // find all the timers to be fired, in priority order 684 while (1) { 685 used_fpu = 0; 686 tmr = timer = timerl; 687 priority = RT_SCHED_LOWEST_PRIORITY; 688 flags = rt_spin_lock_irqsave(lock); 689 while ((tmr = tmr->next)->firing_time <= now) { 690 if (tmr->priority < priority) { 691 priority = (timer = tmr)->priority; 692 } 693 } 694 rt_spin_unlock_irqrestore(flags, lock); 695 if (timer == timerl) { 696 if (timer_manager->priority > TimersManagerPrio) { 697 timer_manager->priority = TimersManagerPrio; 698 } 699 break; 700 } 701 timer_manager->priority = priority; 702 #if 1 703 flags = rt_spin_lock_irqsave(lock); 704 rem_timer(timer); 705 if (timer->period) { 706 timer->firing_time += timer->period; 707 enq_timer(timer); 708 } 709 rt_spin_unlock_irqrestore(flags, lock); 710 #else 711 if (!timer->period) { 712 flags = rt_spin_lock_irqsave(lock); 713 rem_timer(timer); 714 rt_spin_unlock_irqrestore(flags, lock); 715 } else { 716 set_timer_firing_time(timer, timer->firing_time + timer->period); 717 } 718 #endif 719 // if (retval != RTE_TMROVRN) { 720 tmr->overrun = 0; 721 
if (!timer->task) { 722 if (!used_fpu && timer->uses_fpu) { 723 used_fpu = 1; 724 save_fpcr_and_enable_fpu(linux_cr0); 725 save_fpenv(timer_manager->fpu_reg); 726 } 727 timer->handler(timer->data); 728 } else { 729 rt_task_resume(timer->task); 730 } 731 // } else { 732 // tmr->overrun++; 733 // } 734 } 735 if (used_fpu) { 736 restore_fpenv(timer_manager->fpu_reg); 737 restore_fpcr(linux_cr0); 738 } 739 // set next timers_manager priority according to the highest priority timer 740 asgn_min_prio(LIST_CPUID); 741 // if no more timers in timers_struct remove timers_manager from tasks list 742 } 743 } 744 745 /** 746 * Init, in kernel space, a tasklet structure to be used in user space. 747 * 748 * rt_tasklet_init allocate a tasklet structure (struct rt_tasklet_struct) in 749 * kernel space to be used for the management of a user space tasklet. 750 * 751 * This function is to be used only for user space tasklets. In kernel space 752 * it is just an empty macro, as the user can, and must allocate the related 753 * structure directly, either statically or dynamically. 754 * 755 * @return the pointer to the tasklet structure the user space application must 756 * use to access all its related services. 
757 */ 758 struct rt_tasklet_struct *rt_init_tasklet(void) 759 { 760 struct rt_tasklet_struct *tasklet; 761 if ((tasklet = rt_malloc(sizeof(struct rt_tasklet_struct)))) { 762 memset(tasklet, 0, sizeof(struct rt_tasklet_struct)); 763 } 764 return tasklet; 765 } 766 767 RTAI_SYSCALL_MODE void rt_register_task(struct rt_tasklet_struct *tasklet, struct rt_tasklet_struct *usptasklet, RT_TASK *task) 768 { 769 tasklet->task = task; 770 tasklet->usptasklet = usptasklet; 771 rt_copy_to_user(usptasklet, tasklet, sizeof(struct rt_usp_tasklet_struct)); 772 } 773 774 RTAI_SYSCALL_MODE int rt_wait_tasklet_is_hard(struct rt_tasklet_struct *tasklet, long thread) 775 { 776 #define POLLS_PER_SEC 100 777 int i; 778 tasklet->thread = thread; 779 for (i = 0; i < POLLS_PER_SEC/5; i++) { 780 if (!tasklet->task || !((tasklet->task)->state & RT_SCHED_SUSPENDED)) { 781 current->state = TASK_INTERRUPTIBLE; 782 schedule_timeout(HZ/POLLS_PER_SEC); 783 } else { 784 return 0; 785 } 786 } 787 return 1; 788 #undef POLLS_PER_SEC 789 } 790 791 /** 792 * Delete, in kernel space, a tasklet structure to be used in user space. 793 * 794 * rt_tasklet_delete free a tasklet structure (struct rt_tasklet_struct) in 795 * kernel space that was allocated by rt_tasklet_init. 796 * 797 * @param tasklet is the pointer to the tasklet structure (struct 798 * rt_tasklet_struct) returned by rt_tasklet_init. 799 * 800 * This function is to be used only for user space tasklets. In kernel space 801 * it is just an empty macro, as the user can, and must allocate the related 802 * structure directly, either statically or dynamically. 
803 * 804 */ 805 806 RTAI_SYSCALL_MODE int rt_delete_tasklet(struct rt_tasklet_struct *tasklet) 807 { 808 int thread; 809 810 rt_remove_tasklet(tasklet); 811 tasklet->handler = 0; 812 rt_copy_to_user(tasklet->usptasklet, tasklet, sizeof(struct rt_usp_tasklet_struct)); 813 rt_task_resume(tasklet->task); 814 thread = tasklet->thread; 815 rt_free(tasklet); 816 return thread; 817 } 818 819 /* 820 * Posix Timers support function 821 */ 822 823 824 static int PosixTimers = POSIX_TIMERS; 825 RTAI_MODULE_PARM(PosixTimers, int); 826 827 static DEFINE_SPINLOCK(ptimer_lock); 828 static volatile int ptimer_index; 829 struct ptimer_list { int t_indx, p_idx; struct ptimer_list *p_ptr; struct rt_tasklet_struct *timer;} *posix_timer; 830 831 static int init_ptimers(void) 832 { 833 int i; 834 835 if (!(posix_timer = (struct ptimer_list *)kmalloc((PosixTimers)*sizeof(struct ptimer_list), GFP_KERNEL))) { 836 printk("Init MODULE no memory for Posix Timer's list.\n"); 837 return -ENOMEM; 838 } 839 for (i = 0; i < PosixTimers; i++) { 840 posix_timer[i].t_indx = posix_timer[i].p_idx = i; 841 posix_timer[i].p_ptr = posix_timer + i; 842 } 843 return 0; 844 845 } 846 847 static void cleanup_ptimers(void) 848 { 849 kfree(posix_timer); 850 } 851 852 static inline int get_ptimer_indx(struct rt_tasklet_struct *timer) 853 { 854 unsigned long flags; 855 856 flags = rt_spin_lock_irqsave(&ptimer_lock); 857 if (ptimer_index < PosixTimers) { 858 struct ptimer_list *p; 859 p = posix_timer[ptimer_index++].p_ptr; 860 p->timer = timer; 861 rt_spin_unlock_irqrestore(flags, &ptimer_lock); 862 return p->t_indx; 863 } 864 rt_spin_unlock_irqrestore(flags, &ptimer_lock); 865 return 0; 866 } 867 868 static inline int gvb_ptimer_indx(int itimer) 869 { 870 unsigned long flags; 871 872 flags = rt_spin_lock_irqsave(&ptimer_lock); 873 if (itimer < PosixTimers) { 874 struct ptimer_list *tmp_p; 875 int tmp_place; 876 tmp_p = posix_timer[--ptimer_index].p_ptr; 877 tmp_place = posix_timer[itimer].p_idx; 878 
posix_timer[itimer].p_idx = ptimer_index; 879 posix_timer[ptimer_index].p_ptr = &posix_timer[itimer]; 880 tmp_p->p_idx = tmp_place; 881 posix_timer[tmp_place].p_ptr = tmp_p; 882 rt_spin_unlock_irqrestore(flags, &ptimer_lock); 883 return 0; 884 } 885 rt_spin_unlock_irqrestore(flags, &ptimer_lock); 886 return -EINVAL; 887 } 888 889 RTAI_SYSCALL_MODE timer_t rt_ptimer_create(struct rt_tasklet_struct *timer, void (*handler)(unsigned long), unsigned long data, long pid, long thread) 890 { 891 if (thread) { 892 rt_wait_tasklet_is_hard(timer, thread); 893 } 894 timer->next = timer; 895 timer->prev = timer; 896 timer->data = data; 897 timer->handler = handler; 898 return get_ptimer_indx(timer); 899 } 900 EXPORT_SYMBOL(rt_ptimer_create); 901 902 RTAI_SYSCALL_MODE void rt_ptimer_settime(timer_t timer, const struct itimerspec *value, unsigned long data, long flags) 903 { 904 struct rt_tasklet_struct *tasklet; 905 RTIME now; 906 907 tasklet = posix_timer[timer].timer; 908 rt_remove_timer(tasklet); 909 now = rt_get_time(); 910 if (flags == TIMER_ABSTIME) { 911 if (timespec2count(&(value->it_value)) < now) { 912 now -= timespec2count (&(value->it_value)); 913 }else { 914 now = 0; 915 } 916 } 917 if (timespec2count ( &(value->it_value)) > 0) { 918 if (data) { 919 rt_insert_timer(tasklet, 0, now + timespec2count ( &(value->it_value) ), timespec2count ( &(value->it_interval) ), NULL, data, -1); 920 } else { 921 rt_insert_timer(tasklet, 0, now + timespec2count ( &(value->it_value) ), timespec2count ( &(value->it_interval) ), tasklet->handler, tasklet->data, 0); 922 } 923 } 924 } 925 EXPORT_SYMBOL(rt_ptimer_settime); 926 927 RTAI_SYSCALL_MODE int rt_ptimer_overrun(timer_t timer) 928 { 929 return rt_get_timer_overrun(posix_timer[timer].timer); 930 } 931 EXPORT_SYMBOL(rt_ptimer_overrun); 932 933 RTAI_SYSCALL_MODE void rt_ptimer_gettime(timer_t timer, RTIME timer_times[]) 934 { 935 rt_get_timer_times(posix_timer[timer].timer, timer_times); 936 } 937 EXPORT_SYMBOL(rt_ptimer_gettime); 938 
939 RTAI_SYSCALL_MODE int rt_ptimer_delete(timer_t timer, long space) 940 { 941 struct rt_tasklet_struct *tasklet; 942 int rtn = 0; 943 944 tasklet = posix_timer[timer].timer; 945 gvb_ptimer_indx(timer); 946 rt_remove_tasklet(tasklet); 947 if (space) { 948 tasklet->handler = 0; 949 rt_copy_to_user(tasklet->usptasklet, tasklet, sizeof(struct rt_usp_tasklet_struct)); 950 rt_task_resume(tasklet->task); 951 rtn = tasklet->thread; 952 } 953 rt_free(tasklet); 954 return rtn; 955 } 956 EXPORT_SYMBOL(rt_ptimer_delete); 957 958 /* 959 * End Posix timers support function 960 */ 961 962 static int TaskletsStacksize = TASKLET_STACK_SIZE; 963 RTAI_MODULE_PARM(TaskletsStacksize, int); 964 965 int __rtai_tasklets_init(void) 966 { 967 int cpuid; 968 969 if(set_rt_fun_ext_index(rt_tasklet_fun, TASKLETS_IDX)) { 970 printk("Recompile your module with a different index\n"); 971 return -EACCES; 972 } 973 if (init_ptimers()) { 974 return -ENOMEM; 975 } 976 for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { 977 timers_lock[cpuid] = timers_lock[0]; 978 timers_list[cpuid] = timers_list[0]; 979 timers_list[cpuid].cpuid = cpuid; 980 timers_list[cpuid].next = timers_list[cpuid].prev = &timers_list[cpuid]; 981 rt_task_init_cpuid(&timers_manager[cpuid], rt_timers_manager, cpuid, TaskletsStacksize, TimersManagerPrio, 0, 0, cpuid); 982 rt_task_resume(&timers_manager[cpuid]); 983 } 984 printk(KERN_INFO "RTAI[tasklets]: loaded.\n"); 985 return 0; 986 } 987 988 void __rtai_tasklets_exit(void) 989 { 990 int cpuid; 991 reset_rt_fun_ext_index(rt_tasklet_fun, TASKLETS_IDX); 992 cleanup_ptimers(); 993 for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { 994 rt_task_delete(&timers_manager[cpuid]); 995 } 996 printk(KERN_INFO "RTAI[tasklets]: unloaded.\n"); 997 } 998 999 /*@}*/ 1000 1001 #ifndef CONFIG_RTAI_TASKLETS_BUILTIN 1002 module_init(__rtai_tasklets_init); 1003 module_exit(__rtai_tasklets_exit); 1004 #endif /* !CONFIG_RTAI_TASKLETS_BUILTIN */ 1005 1006 #ifdef CONFIG_KBUILD 1007 
EXPORT_SYMBOL(rt_insert_tasklet); 1008 EXPORT_SYMBOL(rt_remove_tasklet); 1009 EXPORT_SYMBOL(rt_find_tasklet_by_id); 1010 EXPORT_SYMBOL(rt_exec_tasklet); 1011 EXPORT_SYMBOL(rt_set_tasklet_priority); 1012 EXPORT_SYMBOL(rt_set_tasklet_handler); 1013 EXPORT_SYMBOL(rt_set_tasklet_data); 1014 EXPORT_SYMBOL(rt_tasklet_use_fpu); 1015 EXPORT_SYMBOL(rt_insert_timer); 1016 EXPORT_SYMBOL(rt_remove_timer); 1017 EXPORT_SYMBOL(rt_set_timer_priority); 1018 EXPORT_SYMBOL(rt_set_timer_firing_time); 1019 EXPORT_SYMBOL(rt_set_timer_period); 1020 EXPORT_SYMBOL(rt_init_tasklet); 1021 EXPORT_SYMBOL(rt_register_task); 1022 EXPORT_SYMBOL(rt_wait_tasklet_is_hard); 1023 EXPORT_SYMBOL(rt_delete_tasklet); 1024 EXPORT_SYMBOL(rt_get_timer_times); 1025 EXPORT_SYMBOL(rt_get_timer_overrun); 1026 #endif /* CONFIG_KBUILD */