/*
 * Copyright (C) 2005-2010 Paolo Mantegazza <mantegazza@aero.polimi.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */


// Wrappers and inlines to avoid editing the RTDM code too heavily.
// The core stuff is just RTAI in disguise.

#ifndef _RTAI_XNSTUFF_H
#define _RTAI_XNSTUFF_H

#include <linux/version.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/mman.h>

#include <rtai_schedcore.h>

#define CONFIG_RTAI_OPT_PERVASIVE

#ifndef CONFIG_RTAI_DEBUG_RTDM
#define CONFIG_RTAI_DEBUG_RTDM  0
#endif

#define RTAI_DEBUG(subsystem)   (CONFIG_RTAI_DEBUG_##subsystem > 0)

#define RTAI_ASSERT(subsystem, cond, action)  do { \
    if (unlikely(CONFIG_RTAI_DEBUG_##subsystem > 0 && !(cond))) { \
        xnlogerr("assertion failed at %s:%d (%s)\n", __FILE__, __LINE__, (#cond)); \
        action; \
    } \
} while (0)

#define RTAI_BUGON(subsystem, cond)  do { /*\
	if (unlikely(CONFIG_RTAI_DEBUG_##subsystem > 0 && (cond))) \
		xnpod_fatal("bug at %s:%d (%s)", __FILE__, __LINE__, (#cond)); */ \
} while (0)
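
/*
 * Usage sketch (illustrative only; the checked pointer and the error
 * return are hypothetical): a debug-time sanity check in an RTDM
 * driver built with CONFIG_RTAI_DEBUG_RTDM > 0.
 *
 *   RTAI_ASSERT(RTDM, context != NULL, return -EINVAL);
 *   RTAI_BUGON(RTDM, rthal_local_irq_disabled());
 */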

/*
 * The macros above enable some assertion diagnostics. Below we keep
 * track of the specific assertions we care about.
 */

#define xnpod_root_p()          (!current->rtai_tskext(TSKEXT0) || !((RT_TASK *)(current->rtai_tskext(TSKEXT0)))->is_hard)
#define xnshadow_thread(t)      ((xnthread_t *)current->rtai_tskext(TSKEXT0))  /* note: evaluates "current", ignores t */
#define rthal_local_irq_test()  (!rtai_save_flags_irqbit())
#define rthal_local_irq_enable  rtai_sti
#define rthal_domain            rtai_domain
#define rthal_local_irq_disabled()                              \
({                                                              \
        unsigned long __flags, __ret;                           \
        local_irq_save_hw_smp(__flags);                         \
        __ret = ipipe_test_pipeline_from(&rthal_domain);        \
        local_irq_restore_hw_smp(__flags);                      \
        __ret;                                                  \
})

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)

#define _MODULE_PARM_STRING_charp "s"
#define compat_module_param_array(name, type, count, perm) \
        static inline void *__check_existence_##name(void) { return &name; } \
        MODULE_PARM(name, "1-" __MODULE_STRING(count) _MODULE_PARM_STRING_##type)

typedef unsigned long phys_addr_t;

#else

#define compat_module_param_array(name, type, count, perm) \
        module_param_array(name, type, NULL, perm)

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
#define trace_mark(ev, fmt, args...)  do { } while (0)
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
#include <linux/marker.h>
#endif
#define trace_mark(ev, fmt, args...)  do { } while (0)
#endif
// Recursive SMP locks: the RTAI global lock machinery under its own name.

#define nklock (*((xnlock_t *)rtai_cpu_lock))

#define XNARCH_LOCK_UNLOCKED  (xnlock_t) { { 0, 0 } }

typedef unsigned long spl_t;
typedef struct { volatile unsigned long lock[2]; } xnlock_t;

#ifndef list_first_entry
#define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)
#endif

#ifndef local_irq_save_hw_smp
#ifdef CONFIG_SMP
#define local_irq_save_hw_smp(flags)    local_irq_save_hw(flags)
#define local_irq_restore_hw_smp(flags) local_irq_restore_hw(flags)
#else /* !CONFIG_SMP */
#define local_irq_save_hw_smp(flags)    do { (void)(flags); } while (0)
#define local_irq_restore_hw_smp(flags) do { } while (0)
#endif /* !CONFIG_SMP */
#endif /* !local_irq_save_hw_smp */

#ifdef CONFIG_SMP

#define DECLARE_XNLOCK(lock)              xnlock_t lock
#define DECLARE_EXTERN_XNLOCK(lock)       extern xnlock_t lock
#define DEFINE_XNLOCK(lock)               xnlock_t lock = XNARCH_LOCK_UNLOCKED
#define DEFINE_PRIVATE_XNLOCK(lock)       static DEFINE_XNLOCK(lock)

static inline void xnlock_init(xnlock_t *lock)
{
        *lock = XNARCH_LOCK_UNLOCKED;
}

static inline void xnlock_get(xnlock_t *lock)
{
        barrier();
        rtai_cli();
        if (!test_and_set_bit(hal_processor_id(), &lock->lock[0])) {
                rtai_spin_glock(&lock->lock[0]);
        }
        barrier();
}

static inline void xnlock_put(xnlock_t *lock)
{
        barrier();
        rtai_cli();
        if (test_and_clear_bit(hal_processor_id(), &lock->lock[0])) {
                rtai_spin_gunlock(&lock->lock[0]);
        }
        barrier();
}

static inline spl_t __xnlock_get_irqsave(xnlock_t *lock)
{
        unsigned long flags;

        barrier();
        flags = rtai_save_flags_irqbit_and_cli();
        if (!test_and_set_bit(hal_processor_id(), &lock->lock[0])) {
                rtai_spin_glock(&lock->lock[0]);
                barrier();
                return flags | 1;
        }
        barrier();
        return flags;
}

#define xnlock_get_irqsave(lock, flags)  \
        do { flags = __xnlock_get_irqsave(lock); } while (0)

static inline void xnlock_put_irqrestore(xnlock_t *lock, spl_t flags)
{
        barrier();
        if (test_and_clear_bit(0, &flags)) {
                xnlock_put(lock);
        } else {
                xnlock_get(lock);
        }
        if (flags) {
                rtai_sti();
        }
        barrier();
}

#else /* !CONFIG_SMP */

#define DECLARE_XNLOCK(lock)
#define DECLARE_EXTERN_XNLOCK(lock)
#define DEFINE_XNLOCK(lock)
#define DEFINE_PRIVATE_XNLOCK(lock)

#define xnlock_init(lock)                   do { } while (0)
#define xnlock_get(lock)                    rtai_cli()
#define xnlock_put(lock)                    rtai_sti()
#define xnlock_get_irqsave(lock, flags)     rtai_save_flags_and_cli(flags)
#define xnlock_put_irqrestore(lock, flags)  rtai_restore_flags(flags)

#endif /* CONFIG_SMP */
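
/*
 * Usage sketch (illustrative only): the canonical critical section on
 * the nucleus lock, as the RTDM core uses it.
 *
 *   spl_t s;
 *
 *   xnlock_get_irqsave(&nklock, s);
 *   ... manipulate data shared with IRQ handlers/other CPUs ...
 *   xnlock_put_irqrestore(&nklock, s);
 *
 * The SMP flavour is recursive per CPU: a nested get on the same CPU
 * does not deadlock, and only the outermost put really drops the
 * global lock and restores the saved IRQ state.
 */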

// memory allocation

#define xnmalloc  rt_malloc
#define xnfree    rt_free
#define xnarch_fault_range(vma)

// in-kernel printing (taken from the RTDM pet system)

#define XNARCH_PROMPT "RTDM: "

#define xnprintf(fmt, args...)  printk(KERN_INFO XNARCH_PROMPT fmt, ##args)
#define xnlogerr(fmt, args...)  printk(KERN_ERR  XNARCH_PROMPT fmt, ##args)
#define xnlogwarn               xnlogerr

// user-space access (taken from Linux)

#define __xn_access_ok(task, type, addr, size) \
	(access_ok(type, addr, size))

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define __xn_copy_from_user(task, dstP, srcP, n) \
	({ long err = __copy_from_user(dstP, srcP, n); err; })

#define __xn_copy_to_user(task, dstP, srcP, n) \
	({ long err = __copy_to_user(dstP, srcP, n); err; })
#else
#define __xn_copy_from_user(task, dstP, srcP, n) \
	({ long err = __copy_from_user_inatomic(dstP, srcP, n); err; })

#define __xn_copy_to_user(task, dstP, srcP, n) \
	({ long err = __copy_to_user_inatomic(dstP, srcP, n); err; })
#endif

#if !defined CONFIG_M68K || defined CONFIG_MMU
#define __xn_strncpy_from_user(task, dstP, srcP, n) \
	({ long err = rt_strncpy_from_user(dstP, srcP, n); err; })
/*	({ long err = __strncpy_from_user(dstP, srcP, n); err; }) */
#else
#define __xn_strncpy_from_user(task, dstP, srcP, n) \
	({ long err = strncpy_from_user(dstP, srcP, n); err; })
#endif /* CONFIG_M68K */
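
/*
 * Usage sketch (illustrative; "u_cfg" and "cfg" are hypothetical): the
 * usual guarded transfer from user space, nonzero return meaning fault.
 *
 *   if (!__xn_access_ok(current, VERIFY_READ, u_cfg, sizeof(cfg)) ||
 *       __xn_copy_from_user(current, &cfg, u_cfg, sizeof(cfg)))
 *           return -EFAULT;
 */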

static inline int xnarch_remap_io_page_range(struct file *filp, struct vm_area_struct *vma, unsigned long from, unsigned long to, unsigned long size, pgprot_t prot)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)

	vma->vm_flags |= VM_RESERVED;
	return remap_page_range(from, to, size, prot);

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)

	return remap_pfn_range(vma, from, (to) >> PAGE_SHIFT, size, prot);

#else /* 2.6.0 <= LINUX_VERSION_CODE < 2.6.10 */

	vma->vm_flags |= VM_RESERVED;
	return remap_page_range(vma, from, to, size, prot);

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) */
}

// 2.4-only wrapper: relies on the pre-2.6 remap_page_range() signature
#define wrap_remap_kmem_page_range(vma, from, to, size, prot) ({ \
    (vma)->vm_flags |= VM_RESERVED; \
    remap_page_range(from, to, size, prot); \
})

static inline int xnarch_remap_kmem_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long to, unsigned long size, pgprot_t prot)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)

	vma->vm_flags |= VM_RESERVED;
	return remap_page_range(from, to, size, prot);

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)

	return remap_pfn_range(vma, from, to >> PAGE_SHIFT, size, prot);

#else /* 2.6.0 <= LINUX_VERSION_CODE < 2.6.10: remap_page_range() takes the vma */

	vma->vm_flags |= VM_RESERVED;
	return remap_page_range(vma, from, to, size, prot);

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) */
}

#include <rtai_shm.h>
#define __va_to_kva(adr)  UVIRT_TO_KVA(adr)

#ifdef CONFIG_MMU

static inline int xnarch_remap_vm_page(struct vm_area_struct *vma, unsigned long from, unsigned long to)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)

	vma->vm_flags |= VM_RESERVED;
	return remap_page_range(from, virt_to_phys((void *)__va_to_kva(to)), PAGE_SIZE, PAGE_SHARED);

#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) */

#ifndef VM_RESERVED
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	vma->vm_flags |= VM_RESERVED;
	return vm_insert_page(vma, from, vmalloc_to_page((void *)to));
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	return remap_pfn_range(vma, from, virt_to_phys((void *)__va_to_kva(to)) >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED);
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) */
	vma->vm_flags |= VM_RESERVED;
	return remap_page_range(vma, from, virt_to_phys((void *)__va_to_kva(to)), PAGE_SIZE, PAGE_SHARED);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) */

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) */
}

#endif /* CONFIG_MMU */
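
/*
 * Usage sketch (illustrative; "DEMO_PHYS_ADDR" is hypothetical): mapping
 * one page of device I/O memory from a driver's mmap() handler.
 *
 *   static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
 *   {
 *           return xnarch_remap_io_page_range(filp, vma, vma->vm_start,
 *                                             DEMO_PHYS_ADDR, PAGE_SIZE,
 *                                             vma->vm_page_prot);
 *   }
 */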

// interrupt setup/management (adopted and/or adapted from the RTDM pet system)

#define RTHAL_NR_IRQS  IPIPE_NR_XIRQS

#define XN_ISR_NONE       0x1
#define XN_ISR_HANDLED    0x2

#define XN_ISR_PROPAGATE  0x100
#define XN_ISR_NOENABLE   0x200
#define XN_ISR_BITMASK    (~0xff)

#define XN_ISR_SHARED     0x1
#define XN_ISR_EDGE       0x2

#define XN_ISR_ATTACHED   0x10000

#if !defined(CONFIG_PPC) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,32) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)))

#define rthal_virtualize_irq(dom, irq, isr, cookie, ackfn, mode) \
	ipipe_virtualize_irq(dom, irq, isr, ackfn, mode)

#else

#define rthal_virtualize_irq(dom, irq, isr, cookie, ackfn, mode) \
	ipipe_virtualize_irq(dom, irq, isr, cookie, ackfn, mode)

#endif

struct xnintr;

typedef int (*xnisr_t)(struct xnintr *intr);

typedef int (*xniack_t)(unsigned irq);

typedef unsigned long xnflags_t;

typedef atomic_t atomic_counter_t;

typedef RTIME xnticks_t;

typedef struct xnstat_exectime {
        xnticks_t start;
        xnticks_t total;
} xnstat_exectime_t;

typedef struct xnstat_counter {
        int counter;
} xnstat_counter_t;
#define xnstat_counter_inc(c)  ((c)->counter++)

typedef struct xnintr {
#ifdef CONFIG_RTAI_RTDM_SHIRQ
    struct xnintr *next;
#endif /* CONFIG_RTAI_RTDM_SHIRQ */
    unsigned unhandled;
    xnisr_t isr;
    void *cookie;
    xnflags_t flags;
    unsigned irq;
    xniack_t iack;
    const char *name;
    struct {
        xnstat_counter_t  hits;
        xnstat_exectime_t account;
        xnstat_exectime_t sum;
    } stat[RTAI_NR_CPUS];
} xnintr_t;

#define xnsched_cpu(sched)  rtai_cpuid()

int xnintr_shirq_attach(xnintr_t *intr, void *cookie);
int xnintr_shirq_detach(xnintr_t *intr);
int xnintr_init(xnintr_t *intr, const char *name, unsigned irq, xnisr_t isr, xniack_t iack, xnflags_t flags);
int xnintr_destroy(xnintr_t *intr);
int xnintr_attach(xnintr_t *intr, void *cookie);
int xnintr_detach(xnintr_t *intr);
int xnintr_enable(xnintr_t *intr);
int xnintr_disable(xnintr_t *intr);
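
/*
 * Usage sketch (illustrative; the ISR, IRQ number and device struct are
 * hypothetical): hooking a real-time handler through the xnintr layer.
 *
 *   static int demo_isr(struct xnintr *intr)
 *   {
 *           struct demo_dev *dev = intr->cookie;
 *           ... acknowledge the device ...
 *           return XN_ISR_HANDLED;
 *   }
 *
 *   xnintr_init(&intr, "demo", 7, demo_isr, NULL, 0);
 *   xnintr_attach(&intr, &dev);
 *   xnintr_enable(&intr);
 */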

/* Atomic operations are already serializing on x86 */
#define xnarch_before_atomic_dec()  smp_mb__before_atomic_dec()
#define xnarch_after_atomic_dec()   smp_mb__after_atomic_dec()
#define xnarch_before_atomic_inc()  smp_mb__before_atomic_inc()
#define xnarch_after_atomic_inc()   smp_mb__after_atomic_inc()

#define xnarch_memory_barrier()      smp_mb()
#define xnarch_atomic_get(pcounter)  atomic_read(pcounter)
#define xnarch_atomic_inc(pcounter)  atomic_inc(pcounter)
#define xnarch_atomic_dec(pcounter)  atomic_dec(pcounter)

#define   testbits(flags, mask)  ((flags) & (mask))
#define __testbits(flags, mask)  ((flags) & (mask))
#define __setbits(flags, mask)   do { (flags) |= (mask);  } while (0)
#define __clrbits(flags, mask)   do { (flags) &= ~(mask); } while (0)

#define xnarch_chain_irq   rt_pend_linux_irq
#define xnarch_end_irq     rt_enable_irq

/* no trailing semicolons in these macros: call sites supply their own */
#define xnarch_hook_irq(irq, handler, iack, intr) \
	rt_request_irq_wack(irq, (void *)handler, intr, 0, (void *)iack)
#define xnarch_release_irq(irq) \
	rt_release_irq(irq)

extern struct rtai_realtime_irq_s rtai_realtime_irq[];
//#define xnarch_get_irq_cookie(irq)  (rtai_realtime_irq[irq].cookie)
#define xnarch_get_irq_cookie(irq)  (rtai_domain.irqs[irq].cookie)

extern unsigned long IsolCpusMask;
#define xnarch_set_irq_affinity(irq, nkaffinity) \
	rt_assign_irq_to_cpu(irq, IsolCpusMask)

// support for RTDM timers

struct rtdm_timer_struct {
        struct rtdm_timer_struct *next, *prev;
        int priority, cpuid;
        RTIME firing_time, period;
        void (*handler)(unsigned long);
        unsigned long data;
#ifdef  CONFIG_RTAI_LONG_TIMED_LIST
        rb_root_t rbr;
        rb_node_t rbn;
#endif
};

RTAI_SYSCALL_MODE void rt_timer_remove(struct rtdm_timer_struct *timer);

RTAI_SYSCALL_MODE int rt_timer_insert(struct rtdm_timer_struct *timer, int priority, RTIME firing_time, RTIME period, void (*handler)(unsigned long), unsigned long data);

typedef struct rtdm_timer_struct xntimer_t;

#define XN_INFINITE  (0)

/* Timer modes */
typedef enum xntmode {
        XN_RELATIVE,
        XN_ABSOLUTE,
        XN_REALTIME
} xntmode_t;

#define xntbase_ns2ticks(rtdm_tbase, expiry)  nano2count(expiry)

static inline void xntimer_init(xntimer_t *timer, void (*handler)(xntimer_t *))
{
        memset(timer, 0, sizeof(struct rtdm_timer_struct));
        timer->handler = (void *)handler;
        timer->data    = (unsigned long)timer;
        timer->next    = timer->prev = timer;
}

#define xntimer_set_name(timer, name)

static inline int xntimer_start(xntimer_t *timer, xnticks_t value, xnticks_t interval, int mode)
{
        /* note: "mode" is ignored; "value" is forwarded as the firing time */
        return rt_timer_insert(timer, 0, value, interval, timer->handler, (unsigned long)timer);
}

static inline void xntimer_destroy(xntimer_t *timer)
{
        rt_timer_remove(timer);
}

static inline void xntimer_stop(xntimer_t *timer)
{
        rt_timer_remove(timer);
}
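
/*
 * Usage sketch (illustrative; the handler and firing time are
 * hypothetical). As noted above, xntimer_start() hands "value" to
 * rt_timer_insert() unchanged and ignores "mode".
 *
 *   static void demo_tick(xntimer_t *timer) { ... }
 *
 *   xntimer_init(&tmr, demo_tick);
 *   xntimer_start(&tmr, firing_time, XN_INFINITE, XN_RELATIVE);
 *   ...
 *   xntimer_stop(&tmr);
 */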

// support for the RTDM usage tests found in the RTAI showroom CVS

static inline unsigned long long xnarch_ulldiv(unsigned long long ull, unsigned long uld, unsigned long *r)
{
        unsigned long rem = do_div(ull, uld);
        if (r) {
                *r = rem;
        }
        return ull;
}
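
/*
 * Usage sketch: splitting a nanosecond count into whole seconds plus
 * the nanosecond remainder.
 *
 *   unsigned long rem;
 *   unsigned long long sec = xnarch_ulldiv(ns, 1000000000, &rem);
 */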

// support for RTDM select

typedef struct xnholder {
	struct xnholder *next;
	struct xnholder *prev;
} xnholder_t;

typedef xnholder_t xnqueue_t;

/* xnqueue_t is the holder itself, so a single brace level initializes it */
#define DEFINE_XNQUEUE(q) xnqueue_t q = { &(q), &(q) }

#define inith(holder) \
	do { *(holder) = (xnholder_t) { (holder), (holder) }; } while (0)

#define initq(queue) \
	do { inith(queue); } while (0)

#define appendq(queue, holder) \
do { \
	(holder)->prev = (queue); \
	((holder)->next = (queue)->next)->prev = (holder); \
	(queue)->next = (holder); \
} while (0)

#define removeq(queue, holder) \
do { \
	(holder)->prev->next = (holder)->next; \
	(holder)->next->prev = (holder)->prev; \
} while (0)

static inline xnholder_t *getheadq(xnqueue_t *queue)
{
	xnholder_t *holder = queue->next;
	return holder == queue ? NULL : holder;
}

static inline xnholder_t *getq(xnqueue_t *queue)
{
	xnholder_t *holder;
	if ((holder = getheadq(queue))) {
		removeq(queue, holder);
	}
	return holder;
}

static inline xnholder_t *nextq(xnqueue_t *queue, xnholder_t *holder)
{
	xnholder_t *nextholder = holder->next;
	return nextholder == queue ? NULL : nextholder;
}

static inline int emptyq_p(xnqueue_t *queue)
{
	return queue->next == queue;
}
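
/*
 * Usage sketch (illustrative; "struct item" is hypothetical): records
 * are queued through an embedded holder and walked with getheadq/nextq.
 *
 *   struct item { xnholder_t link; int val; };
 *   DEFINE_XNQUEUE(q);
 *   struct item it = { .val = 1 };
 *   xnholder_t *h;
 *
 *   inith(&it.link);
 *   appendq(&q, &it.link);
 *   for (h = getheadq(&q); h; h = nextq(&q, h))
 *           ... list_entry(h, struct item, link) ...
 */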

#include "rtai_taskq.h"

#define xnpod_schedule  rt_schedule_readied

#define xnthread_t            RT_TASK
#define xnpod_current_thread  _rt_whoami
#define xnthread_test_info    rt_task_test_taskq_retval

#define xnsynch_t                   TASKQ
#define xnsynch_init(s, f, p)       rt_taskq_init(s, f)  /* the PIP argument "p" is ignored */
#define xnsynch_destroy             rt_taskq_delete
#define xnsynch_wakeup_one_sleeper  rt_taskq_ready_one
#define xnsynch_flush               rt_taskq_ready_all
static inline void xnsynch_sleep_on(void *synch, xnticks_t timeout, xntmode_t timeout_mode)
{
	if (timeout == XN_INFINITE) {
		rt_taskq_wait(synch);
	} else {
		rt_taskq_wait_until(synch, timeout_mode == XN_RELATIVE ? rt_get_time() + timeout : timeout);
	}
}

#define XNSYNCH_NOPIP    0
#define XNSYNCH_PRIO     TASKQ_PRIO
#define XNSYNCH_FIFO     TASKQ_FIFO
#define XNSYNCH_RESCHED  1
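
/*
 * Usage sketch (illustrative only): a priority-ordered wait queue,
 * with the waiter and waker running in different RTAI tasks; the
 * explicit xnpod_schedule() after the wakeup mirrors the pattern
 * SELECT_SIGNAL uses below.
 *
 *   xnsynch_t waitq;
 *
 *   xnsynch_init(&waitq, XNSYNCH_PRIO, NULL);
 *
 *   xnsynch_sleep_on(&waitq, XN_INFINITE, XN_RELATIVE);  // waiter
 *
 *   xnsynch_wakeup_one_sleeper(&waitq);                  // waker
 *   xnpod_schedule();
 *
 *   xnsynch_destroy(&waitq);
 */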

#define rthal_apc_alloc(name, handler, cookie) \
	rt_request_srq(nam2num(name), (void *)(handler), NULL)

#define rthal_apc_free(apc) \
	rt_free_srq((apc))

#define __rthal_apc_schedule(apc) \
	hal_pend_uncond(apc, rtai_cpuid())

#define rthal_apc_schedule(apc) \
	rt_pend_linux_srq((apc))

#ifdef CONFIG_RTAI_RTDM_SELECT

#define SELECT_SIGNAL(select_block, state) \
do { \
	spl_t flags; \
	xnlock_get_irqsave(&nklock, flags); \
	if (xnselect_signal(select_block, state) && state) { \
		xnpod_schedule(); \
	} \
	xnlock_put_irqrestore(&nklock, flags); \
} while (0)

#else

#define SELECT_SIGNAL(select_block, state)  do { } while (0)

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)

#define __WORK_INITIALIZER(n,f,d) {                             \
        .list   = { &(n).list, &(n).list },                     \
        .sync = 0,                                              \
        .routine = (f),                                         \
        .data = (d),                                            \
}

#define DECLARE_WORK(n,f,d)             struct tq_struct n = __WORK_INITIALIZER(n, f, d)
#define DECLARE_WORK_NODATA(n, f)       DECLARE_WORK(n, f, NULL)
#define DECLARE_WORK_FUNC(f)            void f(void *cookie)
#define DECLARE_DELAYED_WORK_NODATA(n, f) DECLARE_WORK(n, f, NULL)

#define schedule_delayed_work(work, delay) do {                 \
	if (delay) {                                            \
		set_current_state(TASK_UNINTERRUPTIBLE);        \
		schedule_timeout(delay);                        \
	}                                                       \
	schedule_task(work);                                    \
} while (0)

#else

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
#define DECLARE_WORK_NODATA(n, f)       DECLARE_WORK(n, f, NULL)
#define DECLARE_WORK_FUNC(f)            void f(void *cookie)
#define DECLARE_DELAYED_WORK_NODATA(n, f) DECLARE_DELAYED_WORK(n, f, NULL)
#else /* >= 2.6.20 */
#define DECLARE_WORK_NODATA(n, f)       DECLARE_WORK(n, f)
#define DECLARE_WORK_FUNC(f)            void f(struct work_struct *work)
#define DECLARE_DELAYED_WORK_NODATA(n, f) DECLARE_DELAYED_WORK(n, f)
#endif /* >= 2.6.20 */

#endif
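
/*
 * Usage sketch (illustrative; "demo_work"/"demo_handler" are
 * hypothetical): the compat macros above let the same deferred-work
 * code build against both 2.4 (task queues) and 2.6 (workqueues).
 *
 *   static DECLARE_WORK_FUNC(demo_handler)
 *   {
 *           ... runs in Linux process context ...
 *   }
 *
 *   static DECLARE_DELAYED_WORK_NODATA(demo_work, demo_handler);
 *   ...
 *   schedule_delayed_work(&demo_work, HZ);
 */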

#endif /* !_RTAI_XNSTUFF_H */