/*!\file malloc.c
 * \brief Dynamic memory allocation services.
 *
 * Copyright (C) 2007 Paolo Mantegazza <mantegazza@aero.polimi.it>.
 * Specific parts that follow are copyrighted/licensed by their authors.
 *
 * RTAI is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * RTAI is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with RTAI; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * \ingroup shm
 */

/*!
 * @addtogroup shm
 *@{*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/slab.h>

#include <rtai_config.h>
#include <asm/rtai.h>
#include <rtai_malloc.h>
#include <rtai_shm.h>

int rtai_global_heap_size = RTHEAP_GLOBALSZ;
RTAI_MODULE_PARM(rtai_global_heap_size, int);

void *rtai_global_heap_adr = NULL;

rtheap_t rtai_global_heap;	/* Global system heap */
/* Allocate and zero one heap extent: vmalloc'ed and page-reserved when
 * suprt == 0, taken from kernel RAM otherwise, with suprt doubling as the
 * kmalloc/__get_free_pages allocation flags. */
static void *alloc_extent(u_long size, int suprt)
{
	caddr_t p;
	if (!suprt) {
		if ((p = (caddr_t)vmalloc(size))) {
			unsigned long _p = (unsigned long)p;
//			printk("RTAI[malloc]: vmalloced extent %p, size %lu.\n", p, size);
			for (; size > 0; size -= PAGE_SIZE, _p += PAGE_SIZE) {
//				mem_map_reserve(virt_to_page(UVIRT_TO_KVA(_p)));
				SetPageReserved(vmalloc_to_page((void *)_p));
			}
		}
	} else {
		if (size <= KMALLOC_LIMIT) {
			p = kmalloc(size, suprt);
		} else {
			p = (void *)__get_free_pages(suprt, get_order(size));
		}
//		p = (caddr_t)kmalloc(size, suprt);
//		printk("RTAI[malloc]: kmalloced extent %p, size %lu.\n", p, size);
	}
	if (p) {
		memset(p, 0, size);
	}
	return p;
}

/* Release an extent obtained from alloc_extent(), undoing the page
 * reservation in the vmalloc case. */
static void free_extent(void *p, u_long size, int suprt)
{
	if (!suprt) {
		unsigned long _p = (unsigned long)p;

//		printk("RTAI[malloc]: vfreed extent %p, size %lu.\n", p, size);
		for (; size > 0; size -= PAGE_SIZE, _p += PAGE_SIZE) {
//			mem_map_unreserve(virt_to_page(UVIRT_TO_KVA(_p)));
			ClearPageReserved(vmalloc_to_page((void *)_p));
		}
		vfree(p);
	} else {
//		printk("RTAI[malloc]: kfreed extent %p, size %lu.\n", p, size);
//		kfree(p);
		if (size <= KMALLOC_LIMIT) {
			kfree(p);
		} else {
			free_pages((unsigned long)p, get_order(size));
		}
	}
}

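/*
 * A minimal usage sketch (illustration only, not part of the original
 * sources): pairing alloc_extent() with free_extent(). Since a non-zero
 * 'suprt' is passed straight through as the allocation flags, GFP_KERNEL
 * is a natural choice; EXT_SIZE is a hypothetical value.
 *
 *	#define EXT_SIZE (64 * 1024)
 *
 *	void *ext = alloc_extent(EXT_SIZE, GFP_KERNEL);
 *	if (ext) {
 *		// ... carve the zeroed extent up as needed ...
 *		free_extent(ext, EXT_SIZE, GFP_KERNEL);
 *	}
 */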
#ifndef CONFIG_RTAI_USE_TLSF
#define CONFIG_RTAI_USE_TLSF 0
#endif

#if CONFIG_RTAI_USE_TLSF

static size_t init_memory_pool(size_t mem_pool_size, void *mem_pool);
static inline void *malloc_ex(size_t size, void *mem_pool);
static inline void free_ex(void *ptr, void *mem_pool);

static void init_extent(rtheap_t *heap, rtextent_t *extent)
{
	INIT_LIST_HEAD(&extent->link);
	extent->membase = (caddr_t)extent + sizeof(rtextent_t);
	extent->memlim  = (caddr_t)extent + heap->extentsize;
}

int rtheap_init(rtheap_t *heap, void *heapaddr, u_long heapsize, u_long pagesize, int suprt)
{
	rtextent_t *extent;

	INIT_LIST_HEAD(&heap->extents);
	spin_lock_init(&heap->lock);

	heap->extentsize = heapsize;
	if (!heapaddr && suprt) {
		if (heapsize <= KMALLOC_LIMIT || (heapaddr = alloc_extent(heapsize, suprt)) == NULL) {
			heap->extentsize = KMALLOC_LIMIT;
			heapaddr = NULL;
		}
	}

	if (heapaddr) {
		extent = (rtextent_t *)heapaddr;
		init_extent(heap, extent);
		list_add_tail(&extent->link, &heap->extents);
		/* init_memory_pool() returns (size_t)-1 on failure, so the
		   result must be tested as a signed value. */
		return (long)init_memory_pool(heapsize - sizeof(rtextent_t), heapaddr + sizeof(rtextent_t)) < 0 ? RTHEAP_NOMEM : 0;
	} else {
		u_long init_size = 0;
		while (init_size < heapsize) {
			if (!(extent = (rtextent_t *)alloc_extent(heap->extentsize, suprt)) || (long)init_memory_pool(heap->extentsize - sizeof(rtextent_t), (void *)extent + sizeof(rtextent_t)) < 0) {
				struct list_head *holder, *nholder;
				list_for_each_safe(holder, nholder, &heap->extents) {
					extent = list_entry(holder, rtextent_t, link);
					free_extent(extent, heap->extentsize, suprt);
				}
				return RTHEAP_NOMEM;
			}
			init_extent(heap, extent);
			list_add_tail(&extent->link, &heap->extents);
			init_size += heap->extentsize;
		}
	}
	return 0;
}

void rtheap_destroy(rtheap_t *heap, int suprt)
{
	struct list_head *holder, *nholder;
	list_for_each_safe(holder, nholder, &heap->extents) {
		free_extent(list_entry(holder, rtextent_t, link), heap->extentsize, suprt);
	}
}

void *rtheap_alloc(rtheap_t *heap, u_long size, int mode)
{
	void *adr = NULL;
	struct list_head *holder;
	unsigned long flags;

	if (!size) {
		return NULL;
	}

	flags = rt_spin_lock_irqsave(&heap->lock);
	list_for_each(holder, &heap->extents) {
		if ((adr = malloc_ex(size, list_entry(holder, rtextent_t, link)->membase)) != NULL) {
			break;
		}
	}
	rt_spin_unlock_irqrestore(flags, &heap->lock);
	return adr;
}

int rtheap_free(rtheap_t *heap, void *block)
{
	unsigned long flags;
	struct list_head *holder;

	flags = rt_spin_lock_irqsave(&heap->lock);
	list_for_each(holder, &heap->extents) {
		rtextent_t *extent;
		extent = list_entry(holder, rtextent_t, link);
		if ((caddr_t)block < extent->memlim && (caddr_t)block >= extent->membase) {
			free_ex(block, extent->membase);
			break;
		}
	}
	rt_spin_unlock_irqrestore(flags, &heap->lock);
	return 0;
}

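/*
 * Usage sketch (illustration, not from the original sources): a private
 * TLSF-backed heap. The sizes below are hypothetical; note that the
 * pagesize argument is not used by this TLSF flavour of rtheap_init().
 *
 *	static rtheap_t my_heap;
 *
 *	if (!rtheap_init(&my_heap, NULL, 128 * 1024, PAGE_SIZE, 0)) {
 *		void *p = rtheap_alloc(&my_heap, 256, 0);
 *		if (p) {
 *			rtheap_free(&my_heap, p);
 *		}
 *		rtheap_destroy(&my_heap, 0);
 *	}
 */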
/******************************** BEGIN TLSF ********************************/

/*
 * Two Levels Segregate Fit memory allocator (TLSF)
 * Version 2.4.2
 *
 * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
 *
 * Thanks to Ismael Ripoll for his suggestions and reviews
 *
 * Copyright (C) 2008, 2007, 2006, 2005, 2004
 *
 * This code is released using a dual license strategy: GPL/LGPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of the GNU General Public License Version 2.0
 * Released under the terms of the GNU Lesser General Public License Version 2.1
 *
 * Adaptation to RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>,
 * with TLSF downloaded from: http://rtportal.upv.es/rtmalloc/.
 */

/*
 * Code contributions:
 *
 * (Jul 28 2007)  Herman ten Brugge <hermantenbrugge@home.nl>:
 *
 * - Add 64 bit support. It now runs on x86_64 and solaris64.
 * - I also tested this on vxworks/32, solaris/32 and i386/32 processors.
 * - Remove assembly code. I could not measure any performance difference
 *   on my core2 processor. This also makes the code more portable.
 * - Moved defines/typedefs from tlsf.h to tlsf.c
 * - Changed MIN_BLOCK_SIZE to sizeof (free_ptr_t) and BHDR_OVERHEAD to
 *   (sizeof (bhdr_t) - MIN_BLOCK_SIZE). This does not change the fact
 *   that the minimum allocatable size is still sizeof (bhdr_t).
 * - Changed all C++ comment style to C style. (// -> /.* ... *./)
 * - Used ls_bit instead of ffs and ms_bit instead of fls. I did this to
 *   avoid confusion with the standard ffs function which returns
 *   different values.
 * - Created set_bit/clear_bit functions because they are not present
 *   on x86_64.
 * - Added locking support + extra file target.h to show how to use it.
 * - Added get_used_size function (REMOVED in 2.4)
 * - Added rtl_realloc and rtl_calloc function
 * - Implemented realloc clever support.
 * - Added some test code in the example directory.
 *
 * (Oct 23 2006) Adam Scislowicz:
 *
 * - Support for ARMv5 implemented
 */

/*#define USE_SBRK        (0) */
/*#define USE_MMAP        (0) */

#ifndef TLSF_USE_LOCKS
#define	TLSF_USE_LOCKS 	(0)
#endif

#ifndef TLSF_STATISTIC
#define	TLSF_STATISTIC 	(1)
#endif

#ifndef USE_MMAP
#define	USE_MMAP 	(0)
#endif

#ifndef USE_SBRK
#define	USE_SBRK 	(0)
#endif


#if TLSF_USE_LOCKS
#include "target.h"
#else
#define TLSF_CREATE_LOCK(_unused_)   do{}while(0)
#define TLSF_DESTROY_LOCK(_unused_)  do{}while(0)
#define TLSF_ACQUIRE_LOCK(_unused_)  do{}while(0)
#define TLSF_RELEASE_LOCK(_unused_)  do{}while(0)
#endif

#if TLSF_STATISTIC
#define	TLSF_ADD_SIZE(tlsf, b) do {									\
		tlsf->used_size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;	\
		if (tlsf->used_size > tlsf->max_size) {						\
			tlsf->max_size = tlsf->used_size;						\
		}															\
	} while(0)

#define	TLSF_REMOVE_SIZE(tlsf, b) do {								\
		tlsf->used_size -= (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;	\
	} while(0)
#else
#define	TLSF_ADD_SIZE(tlsf, b)	     do{}while(0)
#define	TLSF_REMOVE_SIZE(tlsf, b)    do{}while(0)
#endif

#if USE_MMAP || USE_SBRK
#include <unistd.h>
#endif

#if USE_MMAP
#include <sys/mman.h>
#endif

#if !defined(__GNUC__)
#ifndef __inline__
#define __inline__
#endif
#endif

/* The debug functions can only be used when _DEBUG_TLSF_ is set. */
#ifndef _DEBUG_TLSF_
#define _DEBUG_TLSF_  (0)
#endif

/*************************************************************************/
/* Definition of the structures used by TLSF */


/* Some IMPORTANT TLSF parameters */
/* Unlike in previous TLSF versions, these are now static */
#define BLOCK_ALIGN (sizeof(void *) * 2)

#define MAX_FLI		(30)
#define MAX_LOG2_SLI	(5)
#define MAX_SLI		(1 << MAX_LOG2_SLI)     /* MAX_SLI = 2^MAX_LOG2_SLI */

#define FLI_OFFSET	(6)     /* the TLSF structure will only manage blocks bigger than 128 bytes */
#define SMALL_BLOCK	(128)
#define REAL_FLI	(MAX_FLI - FLI_OFFSET)
#define MIN_BLOCK_SIZE	(sizeof (free_ptr_t))
#define BHDR_OVERHEAD	(sizeof (bhdr_t) - MIN_BLOCK_SIZE)
#define TLSF_SIGNATURE	(0x2A59FA59)

#define	PTR_MASK	(sizeof(void *) - 1)
#undef BLOCK_SIZE
#define BLOCK_SIZE	(0xFFFFFFFF - PTR_MASK)

#define GET_NEXT_BLOCK(_addr, _r) ((bhdr_t *) ((char *) (_addr) + (_r)))
#define	MEM_ALIGN		  ((BLOCK_ALIGN) - 1)
#define ROUNDUP_SIZE(_r)          (((_r) + MEM_ALIGN) & ~MEM_ALIGN)
#define ROUNDDOWN_SIZE(_r)        ((_r) & ~MEM_ALIGN)
#define ROUNDUP(_x, _v)           ((((~(_x)) + 1) & ((_v)-1)) + (_x))
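/*
 * Worked example (sketch): with BLOCK_ALIGN = 16 on a 64-bit build,
 * MEM_ALIGN is 15, so ROUNDUP_SIZE(13) = (13 + 15) & ~15 = 16 and
 * ROUNDDOWN_SIZE(13) = 0. ROUNDUP(_x, _v) rounds up to a multiple of an
 * arbitrary power of two _v: ROUNDUP(13, 16) adds (-13 & 15) = 3,
 * giving 16.
 */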

#define BLOCK_STATE	(0x1)
#define PREV_STATE	(0x2)

/* bit 0 of the block size */
#define FREE_BLOCK	(0x1)
#define USED_BLOCK	(0x0)

/* bit 1 of the block size */
#define PREV_FREE	(0x2)
#define PREV_USED	(0x0)


#define DEFAULT_AREA_SIZE (1024*10)

#if USE_MMAP
#define PAGE_SIZE (getpagesize())
#endif

#define PRINT_MSG(fmt, args...) rt_printk(fmt, ## args)
#define ERROR_MSG(fmt, args...) rt_printk(fmt, ## args)

typedef unsigned int u32_t;     /* NOTE: Make sure that this type is 4 bytes long on your computer */
typedef unsigned char u8_t;     /* NOTE: Make sure that this type is 1 byte on your computer */

typedef struct free_ptr_struct {
    struct bhdr_struct *prev;
    struct bhdr_struct *next;
} free_ptr_t;

typedef struct bhdr_struct {
    /* This pointer is only valid if the first bit of size is set */
    struct bhdr_struct *prev_hdr;
    /* The size is stored in bytes */
    size_t size;                /* bit 0 indicates whether the block is used and */
    /* bit 1 indicates whether the previous block is free */
    union {
        struct free_ptr_struct free_ptr;
        u8_t buffer[1];         /* sizeof(struct free_ptr_struct) */
    } ptr;
} bhdr_t;

/* This structure is embedded at the beginning of each area, giving us
 * enough information to cope with a set of areas */

typedef struct area_info_struct {
    bhdr_t *end;
    struct area_info_struct *next;
} area_info_t;

typedef struct TLSF_struct {
    /* the TLSF's structure signature */
    u32_t tlsf_signature;

#if TLSF_USE_LOCKS
    TLSF_MLOCK_T lock;
#endif

#if TLSF_STATISTIC
    /* These cannot be calculated outside tlsf because we
     * do not know the sizes when freeing/reallocing memory. */
    size_t used_size;
    size_t max_size;
#endif

    /* A linked list holding all the existing areas */
    area_info_t *area_head;

    /* the first-level bitmap */
    /* This array should have a size of REAL_FLI bits */
    u32_t fl_bitmap;

    /* the second-level bitmap */
    u32_t sl_bitmap[REAL_FLI];

    bhdr_t *matrix[REAL_FLI][MAX_SLI];
} tlsf_t;


/******************************************************************/
/**************     Helping functions    **************************/
/******************************************************************/
static __inline__ void tlsf_set_bit(int nr, u32_t * addr);
static __inline__ void tlsf_clear_bit(int nr, u32_t * addr);
static __inline__ int ls_bit(int x);
static __inline__ int ms_bit(int x);
static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl);
static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl);
static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl);
static __inline__ bhdr_t *process_area(void *area, size_t size);
#if USE_SBRK || USE_MMAP
static __inline__ void *get_new_area(size_t * size);
#endif

/* table[i] holds the index of the most significant set bit of the byte
   value i (table[0] = -1 by convention). */
static const int table[] = {
    -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
     5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
};

static __inline__ int ls_bit(int i)
{
    unsigned int a;
    unsigned int x = i & -i;

    a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
    return table[x >> a] + a;
}

static __inline__ int ms_bit(int i)
{
    unsigned int a;
    unsigned int x = (unsigned int) i;

    a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
    return table[x >> a] + a;
}
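/*
 * Worked example (sketch): for i = 0x12 (0b10010), ls_bit() first isolates
 * the lowest set bit (x = i & -i = 0b10) and returns table[2] + 0 = 1,
 * while ms_bit() returns table[0x12] + 0 = 4, the index of the highest set
 * bit. These byte-table scans replace ffs()/fls(), whose return values are
 * offset by one.
 */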

static __inline__ void tlsf_set_bit(int nr, u32_t * addr)
{
    addr[nr >> 5] |= 1 << (nr & 0x1f);
}

static __inline__ void tlsf_clear_bit(int nr, u32_t * addr)
{
    addr[nr >> 5] &= ~(1 << (nr & 0x1f));
}

static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl)
{
    int _t;

    if (*_r < SMALL_BLOCK) {
        *_fl = 0;
        *_sl = *_r / (SMALL_BLOCK / MAX_SLI);
    } else {
        _t = (1 << (ms_bit(*_r) - MAX_LOG2_SLI)) - 1;
        *_r = *_r + _t;
        *_fl = ms_bit(*_r);
        *_sl = (*_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
        *_fl -= FLI_OFFSET;
        /*if ((*_fl -= FLI_OFFSET) < 0) // FL will always be > 0!
         *_fl = *_sl = 0;
         */
        *_r &= ~_t;
    }
}

static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl)
{
    if (_r < SMALL_BLOCK) {
        *_fl = 0;
        *_sl = _r / (SMALL_BLOCK / MAX_SLI);
    } else {
        *_fl = ms_bit(_r);
        *_sl = (_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
        *_fl -= FLI_OFFSET;
    }
}
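/*
 * Worked example (sketch): MAPPING_INSERT(460, &fl, &sl) computes
 * ms_bit(460) = 8, so sl = (460 >> (8 - 5)) - 32 = 57 - 32 = 25 and
 * fl = 8 - 6 = 2: a free 460-byte block is filed under matrix[2][25].
 * For a small request, say 100 < SMALL_BLOCK, fl = 0 and
 * sl = 100 / (128 / 32) = 25. MAPPING_SEARCH() additionally rounds the
 * request up to the next class boundary, so any block found in that class
 * is guaranteed to fit.
 */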


static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl)
{
    u32_t _tmp = _tlsf->sl_bitmap[*_fl] & (~0 << *_sl);
    bhdr_t *_b = NULL;

    if (_tmp) {
        *_sl = ls_bit(_tmp);
        _b = _tlsf->matrix[*_fl][*_sl];
    } else {
        *_fl = ls_bit(_tlsf->fl_bitmap & (~0 << (*_fl + 1)));
        if (*_fl > 0) {         /* likely */
            *_sl = ls_bit(_tlsf->sl_bitmap[*_fl]);
            _b = _tlsf->matrix[*_fl][*_sl];
        }
    }
    return _b;
}


#define EXTRACT_BLOCK_HDR(_b, _tlsf, _fl, _sl) {					\
		_tlsf -> matrix [_fl] [_sl] = _b -> ptr.free_ptr.next;		\
		if (_tlsf -> matrix[_fl][_sl])					\
			_tlsf -> matrix[_fl][_sl] -> ptr.free_ptr.prev = NULL;	\
		else {								\
			tlsf_clear_bit (_sl, &_tlsf -> sl_bitmap [_fl]);	\
			if (!_tlsf -> sl_bitmap [_fl])				\
				tlsf_clear_bit (_fl, &_tlsf -> fl_bitmap);	\
		}								\
		_b -> ptr.free_ptr = (free_ptr_t) {NULL, NULL};			\
	}


#define EXTRACT_BLOCK(_b, _tlsf, _fl, _sl) {						\
		if (_b -> ptr.free_ptr.next)						\
			_b -> ptr.free_ptr.next -> ptr.free_ptr.prev = _b -> ptr.free_ptr.prev; \
		if (_b -> ptr.free_ptr.prev)						\
			_b -> ptr.free_ptr.prev -> ptr.free_ptr.next = _b -> ptr.free_ptr.next; \
		if (_tlsf -> matrix [_fl][_sl] == _b) {					\
			_tlsf -> matrix [_fl][_sl] = _b -> ptr.free_ptr.next;		\
			if (!_tlsf -> matrix [_fl][_sl]) {				\
				tlsf_clear_bit (_sl, &_tlsf -> sl_bitmap[_fl]);		\
				if (!_tlsf -> sl_bitmap [_fl])				\
					tlsf_clear_bit (_fl, &_tlsf -> fl_bitmap);	\
			}								\
		}									\
		_b -> ptr.free_ptr = (free_ptr_t) {NULL, NULL};				\
	}

#define INSERT_BLOCK(_b, _tlsf, _fl, _sl) {						\
		_b -> ptr.free_ptr = (free_ptr_t) {NULL, _tlsf -> matrix [_fl][_sl]};	\
		if (_tlsf -> matrix [_fl][_sl])						\
			_tlsf -> matrix [_fl][_sl] -> ptr.free_ptr.prev = _b;		\
		_tlsf -> matrix [_fl][_sl] = _b;					\
		tlsf_set_bit (_sl, &_tlsf -> sl_bitmap [_fl]);				\
		tlsf_set_bit (_fl, &_tlsf -> fl_bitmap);				\
	}

#if USE_SBRK || USE_MMAP
static __inline__ void *get_new_area(size_t * size)
{
    void *area;

#if USE_SBRK
    area = sbrk(0);
    if (sbrk(*size) != ((void *) ~0))
        return area;
#endif

#if USE_MMAP
    *size = ROUNDUP(*size, PAGE_SIZE);
    if ((area = mmap(0, *size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) != MAP_FAILED)
        return area;
#endif
    return ((void *) ~0);
}
#endif

static __inline__ bhdr_t *process_area(void *area, size_t size)
{
    bhdr_t *b, *lb, *ib;
    area_info_t *ai;

    ib = (bhdr_t *) area;
    ib->size =
        (sizeof(area_info_t) <
         MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(sizeof(area_info_t)) | USED_BLOCK | PREV_USED;
    b = (bhdr_t *) GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
    b->size = ROUNDDOWN_SIZE(size - 3 * BHDR_OVERHEAD - (ib->size & BLOCK_SIZE)) | USED_BLOCK | PREV_USED;
    b->ptr.free_ptr.prev = b->ptr.free_ptr.next = 0;
    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    lb->prev_hdr = b;
    lb->size = 0 | USED_BLOCK | PREV_FREE;
    ai = (area_info_t *) ib->ptr.buffer;
    ai->next = 0;
    ai->end = lb;
    return ib;
}
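/*
 * Resulting layout of a fresh area (sketch):
 *
 *   [ ib: used block holding the area_info_t  ]
 *   [ b:  one large block spanning the rest   ]
 *   [ lb: zero-sized used sentinel, PREV_FREE ]
 *
 * 'b' is created marked used so that the caller can hand it to free_ex(),
 * which files it into the matrix; init_memory_pool() below does exactly
 * that.
 */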

/******************************************************************/
/******************** Begin of the allocator code *****************/
/******************************************************************/

/******************************************************************/
size_t init_memory_pool(size_t mem_pool_size, void *mem_pool)
{
/******************************************************************/
    char *mp = NULL;
    tlsf_t *tlsf;
    bhdr_t *b, *ib;

    if (!mem_pool || !mem_pool_size || mem_pool_size < sizeof(tlsf_t) + BHDR_OVERHEAD * 8) {
        ERROR_MSG("init_memory_pool (): memory_pool invalid\n");
        return -1;
    }

    if (((unsigned long) mem_pool & PTR_MASK)) {
        ERROR_MSG("init_memory_pool (): mem_pool must be aligned to a word\n");
        return -1;
    }
    tlsf = (tlsf_t *) mem_pool;
    /* Check if already initialised */
    if (tlsf->tlsf_signature == TLSF_SIGNATURE) {
        mp = mem_pool;
        b = GET_NEXT_BLOCK(mp, ROUNDUP_SIZE(sizeof(tlsf_t)));
        return b->size & BLOCK_SIZE;
    }

    mp = mem_pool;

    /* Zero the TLSF control structure at the head of the pool */
    memset(mem_pool, 0, sizeof(tlsf_t));

    tlsf->tlsf_signature = TLSF_SIGNATURE;

    TLSF_CREATE_LOCK(&tlsf->lock);

    ib = process_area(GET_NEXT_BLOCK
                      (mem_pool, ROUNDUP_SIZE(sizeof(tlsf_t))), ROUNDDOWN_SIZE(mem_pool_size - sizeof(tlsf_t)));
    b = GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
    free_ex(b->ptr.buffer, tlsf);
    tlsf->area_head = (area_info_t *) ib->ptr.buffer;

#if TLSF_STATISTIC
    tlsf->used_size = mem_pool_size - (b->size & BLOCK_SIZE);
    tlsf->max_size = tlsf->used_size;
#endif

    return (b->size & BLOCK_SIZE);
}

/******************************************************************/
void *malloc_ex(size_t size, void *mem_pool)
{
/******************************************************************/
    tlsf_t *tlsf = (tlsf_t *) mem_pool;
    bhdr_t *b, *b2, *next_b;
    int fl, sl;
    size_t tmp_size;

    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);

    /* Round up the requested size and calculate fl and sl */
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Search for a free block; note that this function changes the values
       of fl and sl, so they are no longer valid when it fails */
    b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
#if USE_MMAP || USE_SBRK
    if (!b) {
        size_t area_size;
        void *area;
        /* Grow the pool when needed */
        area_size = size + BHDR_OVERHEAD * 8;   /* size plus enough room for the required headers. */
        area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
        area = get_new_area(&area_size);        /* Call sbrk or mmap */
        if (area == ((void *) ~0))
            return NULL;        /* Not enough system memory */
        add_new_area(area, area_size, mem_pool);
        /* Round up the requested size and calculate fl and sl */
        MAPPING_SEARCH(&size, &fl, &sl);
        /* Search for a free block */
        b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
    }
#endif
    if (!b)
        return NULL;            /* Not found */

    EXTRACT_BLOCK_HDR(b, tlsf, fl, sl);

    /*-- found: */
    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    /* Should the block be split? */
    tmp_size = (b->size & BLOCK_SIZE) - size;
    if (tmp_size >= sizeof(bhdr_t)) {
        tmp_size -= BHDR_OVERHEAD;
        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
        next_b->prev_hdr = b2;
        MAPPING_INSERT(tmp_size, &fl, &sl);
        INSERT_BLOCK(b2, tlsf, fl, sl);

        b->size = size | (b->size & PREV_STATE);
    } else {
        next_b->size &= (~PREV_FREE);
        b->size &= (~FREE_BLOCK);       /* Now it's used */
    }

    TLSF_ADD_SIZE(tlsf, b);

    return (void *) b->ptr.buffer;
}

/******************************************************************/
void free_ex(void *ptr, void *mem_pool)
{
/******************************************************************/
    tlsf_t *tlsf = (tlsf_t *) mem_pool;
    bhdr_t *b, *tmp_b;
    int fl = 0, sl = 0;

    if (!ptr) {
        return;
    }
    b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
    b->size |= FREE_BLOCK;

    TLSF_REMOVE_SIZE(tlsf, b);

    b->ptr.free_ptr = (free_ptr_t) { NULL, NULL};
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    if (tmp_b->size & FREE_BLOCK) {
        /* Coalesce with the next block when it is free */
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
        b->size += (tmp_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
    }
    if (b->size & PREV_FREE) {
        /* Coalesce with the previous block when it is free */
        tmp_b = b->prev_hdr;
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
        tmp_b->size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
        b = tmp_b;
    }
    MAPPING_INSERT(b->size & BLOCK_SIZE, &fl, &sl);
    INSERT_BLOCK(b, tlsf, fl, sl);

    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    tmp_b->size |= PREV_FREE;
    tmp_b->prev_hdr = b;
}
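/*
 * Usage sketch (illustration only): a TLSF pool can also be run directly
 * on a static, pointer-aligned buffer, bypassing the rtheap_* wrappers.
 * The buffer name and size are hypothetical; init_memory_pool() returns
 * the size of the initial free block.
 *
 *	static char pool[32 * 1024] __attribute__((aligned(sizeof(void *))));
 *
 *	size_t free0 = init_memory_pool(sizeof(pool), pool);
 *	void *p = malloc_ex(100, pool);
 *	free_ex(p, pool);
 */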

unsigned long tlsf_get_used_size(rtheap_t *heap)
{
#if TLSF_STATISTIC
	/* The accounting lives in the pool header of the first extent. */
	struct list_head *holder;
	list_for_each(holder, &heap->extents) { break; }
	return ((tlsf_t *)(list_entry(holder, rtextent_t, link)->membase))->used_size;
#else
	return 0;
#endif
}

/******************************** END TLSF ********************************/

#else /* !CONFIG_RTAI_USE_TLSF */

/***************************** BEGIN BSD GPMA ********************************/

/*!\file malloc.c
 * \brief Dynamic memory allocation services.
 *
 * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
 *
 * RTAI is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * RTAI is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with RTAI; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Dynamic memory allocation services lifted and adapted from the
 * Xenomai nucleus.
 *
 * This file implements the RTAI dynamic memory allocator based on the
 * algorithm described in "Design of a General Purpose Memory
 * Allocator for the 4.3BSD Unix Kernel" by Marshall K. McKusick and
 * Michael J. Karels.
 *
 * \ingroup shm
 */

/*!
 * @addtogroup shm
 *@{*/

static void init_extent (rtheap_t *heap, rtextent_t *extent)
{
	caddr_t freepage;
	int n, lastpgnum;

	INIT_LIST_HEAD(&extent->link);

	/* The page area starts right after the (aligned) header. */
	extent->membase = (caddr_t)extent + heap->hdrsize;
	lastpgnum = heap->npages - 1;

	/* Mark each page as free in the page map. */
	for (n = 0, freepage = extent->membase; n < lastpgnum; n++, freepage += heap->pagesize) {
		*((caddr_t *)freepage) = freepage + heap->pagesize;
		extent->pagemap[n] = RTHEAP_PFREE;
	}
	*((caddr_t *)freepage) = NULL;
	extent->pagemap[lastpgnum] = RTHEAP_PFREE;
	extent->memlim = freepage + heap->pagesize;

	/* The first page starts the free list of a new extent. */
	extent->freelist = extent->membase;
}

/*!
 * \fn int rtheap_init(rtheap_t *heap,
 *                     void *heapaddr,
 *                     u_long heapsize,
 *                     u_long pagesize,
 *                     int suprt);
 * \brief Initialize a memory heap.
 *
 * Initializes a memory heap suitable for dynamic memory allocation
 * requests. The heap manager can operate in two modes: time-bounded,
 * if the heap storage area and size are statically defined at
 * initialization time, or dynamically extendable, at the expense of
 * less deterministic behaviour.
 *
 * @param heap The address of a heap descriptor the memory manager
 * will use to store the allocation data. This descriptor must always
 * be valid while the heap is active, therefore it must be allocated
 * in permanent memory.
 *
 * @param heapaddr The address of a statically-defined heap storage
 * area. If this parameter is non-zero, all allocations will be made
 * from the given area in fully time-bounded mode. In such a case, the
 * heap is non-extendable. If a null address is passed, the heap
 * manager will attempt to extend the heap each time a memory
 * starvation is encountered. In the latter case, the heap manager
 * will request additional chunks of core memory from Linux when
 * needed, voiding the real-time guarantee for the caller.
 *
 * @param heapsize If heapaddr is non-zero, heapsize gives the size in
 * bytes of the statically-defined storage area. Otherwise, heapsize
 * defines the standard length of each extent that will be requested
 * from Linux when a memory starvation is encountered for the heap.
 * heapsize must be a multiple of pagesize and lower than 16
 * Mbytes. Depending on the Linux allocation service used, requests
 * for extent memory might be limited in size. For instance, heapsize
 * must be lower than 128 KB for kmalloc()-based allocations. In the
 * current implementation, heapsize must be large enough to contain an
 * internal header. The following formula gives the size of this
 * header: hdrsize = (sizeof(rtextent_t) + ((heapsize -
 * sizeof(rtextent_t))) / (pagesize + 1) + 15) & ~15;
 *
 * @param pagesize The size in bytes of the fundamental memory page
 * which will be used to subdivide the heap internally. Choosing the
 * right page size is important regarding performance and memory
 * fragmentation issues, so it might be a good idea to take a look at
 * http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf to pick the
 * best one for your needs. In the current implementation, pagesize
 * must be a power of two in the range [8..32768] inclusive.
 *
 * @return 0 is returned upon success, or one of the following
 * error codes:
 * - RTHEAP_PARAM is returned whenever a parameter is invalid.
 * - RTHEAP_NOMEM is returned if no initial extent can be allocated
 * for a dynamically extendable heap (i.e. heapaddr == NULL).
 *
 * Side-effect: This routine does not call the rescheduling procedure.
 *
 * Context: This routine must be called on behalf of a thread context.
 */

int rtheap_init (rtheap_t *heap, void *heapaddr, u_long heapsize, u_long pagesize, int suprt)
{
	u_long hdrsize, pmapsize, shiftsize, pageshift;
	rtextent_t *extent;
	int n;

	/*
	 * Perform some parametrical checks first.
	 * Constraints are:
	 * PAGESIZE must be >= 2 ** MINLOG2.
	 * PAGESIZE must be <= 2 ** MAXLOG2.
	 * PAGESIZE must be a power of 2.
	 * HEAPSIZE must be large enough to contain the static part of an
	 * extent header.
	 * HEAPSIZE must be a multiple of PAGESIZE.
	 * HEAPSIZE must be lower than RTHEAP_MAXEXTSZ.
	 */
	if ((pagesize < (1 << RTHEAP_MINLOG2)) ||
	    (pagesize > (1 << RTHEAP_MAXLOG2)) ||
	    (pagesize & (pagesize - 1)) != 0 ||
	    heapsize <= sizeof(rtextent_t) ||
	    heapsize > RTHEAP_MAXEXTSZ ||
	    (heapsize & (pagesize - 1)) != 0) {
		return RTHEAP_PARAM;
	}

	/* Determine the page map overhead inside the given extent
	   size. We need to reserve a byte in a page map for each page
	   which is addressable into this extent. The page map is itself
	   stored in the extent space, right after the static part of its
	   header, and before the first allocatable page. */
	pmapsize = ((heapsize - sizeof(rtextent_t)) * sizeof(u_char)) / (pagesize + sizeof(u_char));

	/* The overall header size is: static_part + page_map rounded to
	   the minimum alignment size. */
	hdrsize = (sizeof(rtextent_t) + pmapsize + RTHEAP_MINALIGNSZ - 1) & ~(RTHEAP_MINALIGNSZ - 1);

	/* An extent must contain at least two addressable pages to cope
	   with allocation sizes between pagesize and 2 * pagesize. */
	if (hdrsize + 2 * pagesize > heapsize) {
		return RTHEAP_PARAM;
	}

	/* Compute the page shiftmask from the page size (i.e. log2 value). */
	for (pageshift = 0, shiftsize = pagesize; shiftsize > 1; shiftsize >>= 1, pageshift++);

	heap->pagesize   = pagesize;
	heap->pageshift  = pageshift;
	heap->hdrsize    = hdrsize;

	heap->extentsize = heapsize;
	if (!heapaddr && suprt) {
		if (heapsize <= KMALLOC_LIMIT || (heapaddr = alloc_extent(heapsize, suprt)) == NULL) {
			heap->extentsize = KMALLOC_LIMIT;
			heapaddr = NULL;
		}
	}

	heap->npages     = (heap->extentsize - hdrsize) >> pageshift;
	heap->maxcont    = heap->npages*pagesize;
	heap->flags      =
	heap->ubytes     = 0;
	INIT_LIST_HEAD(&heap->extents);
	spin_lock_init(&heap->lock);

	for (n = 0; n < RTHEAP_NBUCKETS; n++) {
		heap->buckets[n] = NULL;
	}

	if (heapaddr) {
		extent = (rtextent_t *)heapaddr;
		init_extent(heap, extent);
		list_add_tail(&extent->link, &heap->extents);
	} else {
		u_long init_size = 0;
		while (init_size < heapsize) {
			if (!(extent = (rtextent_t *)alloc_extent(heap->extentsize, suprt))) {
				struct list_head *holder, *nholder;
				list_for_each_safe(holder, nholder, &heap->extents) {
					extent = list_entry(holder, rtextent_t, link);
					free_extent(extent, heap->extentsize, suprt);
				}
				return RTHEAP_NOMEM;
			}
			init_extent(heap, extent);
			list_add_tail(&extent->link, &heap->extents);
			init_size += heap->extentsize;
		}
	}
	return 0;
}
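/*
 * Usage sketch (illustration only; names and sizes are hypothetical): a
 * small non-extendable heap laid over a static buffer, so every
 * rtheap_alloc() on it stays time-bounded.
 *
 *	static char heapmem[64 * 1024];
 *	static rtheap_t my_heap;
 *
 *	if (rtheap_init(&my_heap, heapmem, sizeof(heapmem), 512, 0)) {
 *		// RTHEAP_PARAM or RTHEAP_NOMEM
 *	}
 */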

/*!
 * \fn void rtheap_destroy(rtheap_t *heap, int suprt);
 * \brief Destroys a memory heap.
 *
 * Destroys a memory heap. Dynamically allocated extents are returned
 * to Linux.
 *
 * @param heap The descriptor address of the destroyed heap.
 *
 * Side-effect: This routine does not call the rescheduling procedure.
 *
 * Context: This routine must be called on behalf of a thread context.
 */

void rtheap_destroy (rtheap_t *heap, int suprt)
{
	struct list_head *holder, *nholder;

	list_for_each_safe(holder, nholder, &heap->extents) {
		free_extent(list_entry(holder, rtextent_t, link), heap->extentsize, suprt);
	}
}

/*
 * get_free_range() -- Obtain a range of contiguous free pages to
 * fulfill an allocation of 2 ** log2size. Each extent is searched,
 * and a new one is allocated if needed, provided the heap is
 * extendable. Must be called with the heap lock set.
 */

static caddr_t get_free_range (rtheap_t *heap,
			       u_long bsize,
			       int log2size,
			       int mode)
{
    caddr_t block, eblock, freepage, lastpage, headpage, freehead = NULL;
    u_long pagenum, pagecont, freecont;
    struct list_head *holder;
    rtextent_t *extent;

    list_for_each(holder,&heap->extents) {

	extent = list_entry(holder,rtextent_t,link);
	freepage = extent->freelist;

	while (freepage != NULL)
	    {
	    headpage = freepage;
	    freecont = 0;

	    /* Search for a range of contiguous pages in the free page
	       list of the current extent. The range must be 'bsize'
	       long. */
	    do
		{
		lastpage = freepage;
		freepage = *((caddr_t *)freepage);
		freecont += heap->pagesize;
		}
	    while (freepage == lastpage + heap->pagesize && freecont < bsize);

	    if (freecont >= bsize)
		{
		/* Ok, got it. Just update the extent's free page
		   list, then proceed to the next step. */

		if (headpage == extent->freelist)
		    extent->freelist = *((caddr_t *)lastpage);
		else
		    *((caddr_t *)freehead) = *((caddr_t *)lastpage);

		goto splitpage;
		}

	    freehead = lastpage;
	    }
    }

    /* No available free range in the existing extents so far. If we
       cannot extend the heap, we have failed and we are done with
       this request. */

    return NULL;

splitpage:

    /* At this point, headpage is valid and points to the first page
       of a range of contiguous free pages larger or equal than
       'bsize'. */

    if (bsize < heap->pagesize)
	{
	/* If the allocation size is smaller than the standard page
	   size, split the page in smaller blocks of this size,
	   building a free list of free blocks. */

	for (block = headpage, eblock = headpage + heap->pagesize - bsize;
	     block < eblock; block += bsize)
	    *((caddr_t *)block) = block + bsize;

	*((caddr_t *)eblock) = NULL;
	}
    else
        *((caddr_t *)headpage) = NULL;

    pagenum = (headpage - extent->membase) >> heap->pageshift;

    /* Update the extent's page map.  If log2size is non-zero
       (i.e. bsize <= 2 * pagesize), store it in the first page's slot
       to record the exact block size (which is a power of
       two). Otherwise, store the special marker RTHEAP_PLIST,
       indicating the start of a block whose size is a multiple of the
       standard page size, but not necessarily a power of two.  In any
       case, the following pages slots are marked as 'continued'
       (PCONT). */

    extent->pagemap[pagenum] = log2size ? log2size : RTHEAP_PLIST;

    for (pagecont = bsize >> heap->pageshift; pagecont > 1; pagecont--)
	extent->pagemap[pagenum + pagecont - 1] = RTHEAP_PCONT;

    return headpage;
}
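/*
 * Page map example (sketch, assuming pagesize = 512): after carving a
 * 3-page range for a 1536-byte request, the map reads
 *
 *   pagemap[n .. n+2] = { RTHEAP_PLIST, RTHEAP_PCONT, RTHEAP_PCONT }
 *
 * while a one-page range split into 64-byte buckets is recorded as
 * pagemap[n] = 6 (the log2 of the block size), letting rtheap_free()
 * recover the block size from the address alone.
 */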

/*!
 * \fn void *rtheap_alloc(rtheap_t *heap, u_long size, int mode);
 * \brief Allocate a memory block from a memory heap.
 *
 * Allocates a contiguous region of memory from an active memory heap.
 * Such allocation is guaranteed to be time-bounded if the heap is
 * non-extendable (see rtheap_init()). Otherwise, it might trigger a
 * dynamic extension of the storage area through an internal request
 * to the Linux allocation service (kmalloc/vmalloc).
 *
 * @param heap The descriptor address of the heap to get memory from.
 *
 * @param size The size in bytes of the requested block. Sizes lower
 * or equal to the page size are rounded either to the minimum
 * allocation size if lower than this value, or to the minimum
 * alignment size if greater or equal to this value. In the current
 * implementation, with MINALLOC = 16 and MINALIGN = 16, a 15 byte
 * request will be rounded to 16 bytes, and a 17 byte request will be
 * rounded to 32.
 *
 * @param mode A set of flags affecting the operation. Unless
 * RTHEAP_EXTEND is passed and the heap is extendable, this service
 * will return NULL without attempting to extend the heap dynamically
 * upon memory starvation.
 *
 * @return The address of the allocated region upon success, or NULL
 * if no memory is available from the specified non-extendable heap,
 * or no memory can be obtained from Linux to extend the heap.
 *
 * Side-effect: This routine does not call the rescheduling procedure.
 *
 * Context: This routine can always be called on behalf of a thread
 * context. It can also be called on behalf of an IST context if the
 * heap storage area has been statically-defined at initialization
 * time (see rtheap_init()).
 */

void *rtheap_alloc (rtheap_t *heap, u_long size, int mode)

{
    u_long bsize, flags;
    caddr_t block;
    int log2size;

    if (size == 0)
	return NULL;

    if (size <= heap->pagesize)
	/* Sizes lower or equal to the page size are rounded either to
	   the minimum allocation size if lower than this value, or to
	   the minimum alignment size if greater or equal to this
	   value. In other words, with MINALLOC = 16 and MINALIGN =
	   16, a 15 bytes request will be rounded to 16 bytes, and a
	   17 bytes request will be rounded to 32. */
	{
	if (size <= RTHEAP_MINALIGNSZ)
	    size = (size + RTHEAP_MINALLOCSZ - 1) & ~(RTHEAP_MINALLOCSZ - 1);
	else
	    size = (size + RTHEAP_MINALIGNSZ - 1) & ~(RTHEAP_MINALIGNSZ - 1);
	}
    else
	/* Sizes greater than the page size are rounded to a multiple
	   of the page size. */
	size = (size + heap->pagesize - 1) & ~(heap->pagesize - 1);

    /* It becomes more space efficient to directly allocate pages from
       the free page list whenever the requested size is greater than
       2 times the page size. Otherwise, use the bucketed memory
       blocks. */

    if (size <= heap->pagesize * 2)
	{
	/* Find the first power of two greater or equal to the rounded
	   size. The log2 value of this size is also computed. */

	for (bsize = (1 << RTHEAP_MINLOG2), log2size = RTHEAP_MINLOG2;
	     bsize < size; bsize <<= 1, log2size++)
	    ; /* Loop */

	flags = rt_spin_lock_irqsave(&heap->lock);

	block = heap->buckets[log2size - RTHEAP_MINLOG2];

	if (block == NULL)
	    {
	    block = get_free_range(heap,bsize,log2size,mode);

	    if (block == NULL)
		goto release_and_exit;
	    }

	heap->buckets[log2size - RTHEAP_MINLOG2] = *((caddr_t *)block);
	heap->ubytes += bsize;
	}
    else
        {
        if (size > heap->maxcont)
            return NULL;

	flags = rt_spin_lock_irqsave(&heap->lock);

	/* Directly request a free page range. */
	block = get_free_range(heap,size,0,mode);

	if (block)
	    heap->ubytes += size;
	}

release_and_exit:

    rt_spin_unlock_irqrestore(flags,&heap->lock);

    return block;
}

/*!
 * \fn int rtheap_free(rtheap_t *heap, void *block);
 * \brief Release a memory block to a memory heap.
 *
 * Releases a memory region to the memory heap it was previously
 * allocated from.
 *
 * @param heap The descriptor address of the heap to release memory
 * to.
 *
 * @param block The address of the region to release, as returned by a
 * previous call to rtheap_alloc().
 *
 * @return 0 is returned upon success, or RTHEAP_PARAM is returned
 * whenever the block is not a valid region of the specified heap.
 *
 * Side-effect: This routine does not call the rescheduling procedure.
 *
 * Context: This routine can be called on behalf of a thread or IST
 * context.
 */

int rtheap_free (rtheap_t *heap, void *block)

{
    u_long pagenum, pagecont, boffset, bsize, flags;
    caddr_t freepage, lastpage, nextpage, tailpage;
    rtextent_t *extent = NULL;
    struct list_head *holder;
    int log2size, npages;

    flags = rt_spin_lock_irqsave(&heap->lock);

    /* Find the extent from which the returned block is
       originating. If the heap is non-extendable, then a single
       extent is scanned at most. */

    list_for_each(holder,&heap->extents) {

        extent = list_entry(holder,rtextent_t,link);

	if ((caddr_t)block >= extent->membase &&
	    (caddr_t)block < extent->memlim)
	    break;
    }

    /* list_for_each() leaves holder pointing at the list head, never
       NULL, when no extent matched. */
    if (holder == &heap->extents)
	goto unlock_and_fail;

    /* Compute the heading page number in the page map. */
    pagenum = ((caddr_t)block - extent->membase) >> heap->pageshift;
    boffset = ((caddr_t)block - (extent->membase + (pagenum << heap->pageshift)));

    switch (extent->pagemap[pagenum])
	{
	case RTHEAP_PFREE: /* Unallocated page? */
	case RTHEAP_PCONT: /* Not a range heading page? */

unlock_and_fail:

	    rt_spin_unlock_irqrestore(flags,&heap->lock);
	    return RTHEAP_PARAM;

	case RTHEAP_PLIST:

	    npages = 1;

	    while (npages < heap->npages &&
		   extent->pagemap[pagenum + npages] == RTHEAP_PCONT)
		npages++;

	    bsize = npages * heap->pagesize;

	    /* Link all freed pages in a single sub-list. */

	    for (freepage = (caddr_t)block,
		     tailpage = (caddr_t)block + bsize - heap->pagesize;
		 freepage < tailpage; freepage += heap->pagesize)
		*((caddr_t *)freepage) = freepage + heap->pagesize;

	    /* Mark the released pages as free in the extent's page map. */

	    for (pagecont = 0; pagecont < npages; pagecont++)
		extent->pagemap[pagenum + pagecont] = RTHEAP_PFREE;

	    /* Return the sub-list to the free page list, keeping
	       an increasing address order to favor coalescence. */

	    for (nextpage = extent->freelist, lastpage = NULL;
		 nextpage != NULL && nextpage < (caddr_t)block;
		 lastpage = nextpage, nextpage = *((caddr_t *)nextpage))
		; /* Loop */

	    *((caddr_t *)tailpage) = nextpage;

	    if (lastpage)
		*((caddr_t *)lastpage) = (caddr_t)block;
	    else
		extent->freelist = (caddr_t)block;

	    break;

	default:

	    log2size = extent->pagemap[pagenum];
	    bsize = (1 << log2size);

	    if ((boffset & (bsize - 1)) != 0) /* Not a block start? */
		goto unlock_and_fail;

	    /* Return the block to the bucketed memory space. */

	    *((caddr_t *)block) = heap->buckets[log2size - RTHEAP_MINLOG2];
	    heap->buckets[log2size - RTHEAP_MINLOG2] = block;

	    break;
	}

    heap->ubytes -= bsize;

    rt_spin_unlock_irqrestore(flags,&heap->lock);

    return 0;
}

/*
 * IMPLEMENTATION NOTES:
 *
 * The implementation follows the algorithm described in a USENIX
 * 1988 paper called "Design of a General Purpose Memory Allocator for
 * the 4.3BSD Unix Kernel" by Marshall K. McKusick and Michael
 * J. Karels. You can find it at various locations on the net,
 * including http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf.
 * A minor variation allows this implementation to have 'extendable'
 * heaps when needed, with multiple memory extents providing autonomous
 * page address spaces. When the non-extendable form is used, the heap
 * management routines show bounded worst-case execution time.
 *
 * The data structures hierarchy is as follows:
 *
 * HEAP {
 *      block_buckets[]
 *      extent_queue -------+
 * }                        |
 *                          V
 *                       EXTENT #1 {
 *                              <static header>
 *                              page_map[npages]
 *                              page_array[npages][pagesize]
 *                       } -+
 *                          |
 *                          |
 *                          V
 *                       EXTENT #n {
 *                              <static header>
 *                              page_map[npages]
 *                              page_array[npages][pagesize]
 *                       }
 */

/*@}*/

/****************************** END BSD GPMA ********************************/

#endif /* CONFIG_RTAI_USE_TLSF */

int __rtai_heap_init (void)
{
	rtai_global_heap_size = (rtai_global_heap_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	if (rtheap_init(&rtai_global_heap, NULL, rtai_global_heap_size, PAGE_SIZE, 0)) {
		printk(KERN_INFO "RTAI[malloc]: failed to initialize the global heap (size=%d bytes).\n", rtai_global_heap_size);
		return 1;
	}
	/* extents.next points at the first extent, whose list link sits at
	   the start of rtextent_t, so it doubles as the heap address. */
	rtai_global_heap_adr = rtai_global_heap.extents.next;
	printk(KERN_INFO "RTAI[malloc]: global heap size = %d bytes, <%s>.\n", rtai_global_heap_size, CONFIG_RTAI_USE_TLSF ? "TLSF" : "BSD");
	return 0;
}

void __rtai_heap_exit (void)
{
	rtheap_destroy(&rtai_global_heap, 0);
	printk(KERN_INFO "RTAI[malloc]: unloaded.\n");
}
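/*
 * Usage sketch (illustration only): once the module is loaded, other
 * kernel code can draw from the global heap through the exported
 * rtheap_* entry points.
 *
 *	void *p = rtheap_alloc(&rtai_global_heap, 128, 0);
 *	if (p) {
 *		rtheap_free(&rtai_global_heap, p);
 *	}
 */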

#ifndef CONFIG_RTAI_MALLOC_BUILTIN
module_init(__rtai_heap_init);
module_exit(__rtai_heap_exit);
#endif /* !CONFIG_RTAI_MALLOC_BUILTIN */

#ifndef CONFIG_KBUILD
#define CONFIG_KBUILD
#endif

#ifdef CONFIG_KBUILD
EXPORT_SYMBOL(rtheap_init);
EXPORT_SYMBOL(rtheap_destroy);
EXPORT_SYMBOL(rtheap_alloc);
EXPORT_SYMBOL(rtheap_free);
EXPORT_SYMBOL(rtai_global_heap);
EXPORT_SYMBOL(rtai_global_heap_adr);
EXPORT_SYMBOL(rtai_global_heap_size);
#endif /* CONFIG_KBUILD */