/* CFStorage.c */
   1  /*
   2   * Copyright (c) 2015 Apple Inc. All rights reserved.
   3   *
   4   * @APPLE_LICENSE_HEADER_START@
   5   *
   6   * This file contains Original Code and/or Modifications of Original Code
   7   * as defined in and that are subject to the Apple Public Source License
   8   * Version 2.0 (the 'License'). You may not use this file except in
   9   * compliance with the License. Please obtain a copy of the License at
  10   * http://www.opensource.apple.com/apsl/ and read it before using this
  11   * file.
  12   *
  13   * The Original Code and all software distributed under the License are
  14   * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  15   * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  16   * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
  17   * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  18   * Please see the License for the specific language governing rights and
  19   * limitations under the License.
  20   *
  21   * @APPLE_LICENSE_HEADER_END@
  22   */
  23  
  24  /*	CFStorage.c
  25   Copyright (c) 1999-2014, Apple Inc. All rights reserved.
  26   Responsibility: Ali Ozer
  27   */
  28  
  29  /*
  30   2-3 tree storing arbitrary sized values.
  31   
  32   ??? Currently elementSize cannot be greater than storage->maxLeafCapacity, which is less than or equal to __CFStorageMaxLeafCapacity
  33   
 CFStorage is thread-safe for multiple readers, but not thread-safe for simultaneous reading and writing.
  35   */
  36  
  37  
  38  /* pammon notes on FrozenStorage
  39   A CFStorage can be frozen, or more specifically, some or all of the nodes can be frozen.  Frozen nodes can be safely shared between CFStorage instances.  CFStorages containing frozen nodes are still considered mutable: frozen nodes are discarded and recreated on demand.  It is a form of copy-on-write.
  40   
  41   Freezing is usually one-way: there is no going back.   However, if a node's reference count is 1, we know that no other CFStorage can reference it; and if we are in a mutating function, we know that it can be safely unfrozen.
  42   
  43   If a node is frozen, all of its descendants should be considered frozen.  
  44   
  45   The root node, which is defined inline within the CFStorage struct itself, can never be frozen.
  46   
  47   */
  48  
  49  #define NO_SHIFTER ((uint32_t)(-1))
  50  
  51  #include <CoreFoundation/CFStorage.h>
  52  #include "CFInternal.h"
  53  #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_WINDOWS
  54  #include <dispatch/dispatch.h>
  55  #endif
  56  
  57  #if DEPLOYMENT_TARGET_WINDOWS
  58  // No C99 support
  59  #define restrict
  60  
  61  // Replace bzero
  62  #define bzero(dst, size)    ZeroMemory(dst, size)
  63  
  64  #endif
  65  
  66  #if !defined(PAGE_SIZE)
  67  #define PAGE_SIZE 4096
  68  #endif
  69  
  70  // Above 15K, malloc switches (??? or used to in early Leopard) to using vm allocates for blocks; it seems best to avoid that.
  71  // Also, tests with StorageTimer.c done in 4/07 indicate that 4096 * 3 is better than smaller or larger node sizes.
  72  #define __CFStorageMaxLeafCapacity (4096 * 3)
  73  
  74  #define COPYMEM(src,dst,n) objc_memmove_collectable((dst), (src), (n))
  75  #define PAGE_LIMIT ((CFIndex)PAGE_SIZE / 2)
  76  
  77  CF_INLINE int32_t roundToPage(int32_t num) {
  78      return (num + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
  79  }
  80  
  81  typedef const struct __CFStorage *ConstCFStorageRef;
  82  
  83  /* Each node in the storage.  isLeaf determines whether the node is a leaf node or a node inside the tree. If latter, number of children are determined by the number of non-NULL entries in child[]. (NULL entries are at the end.)
  84   */
  85  typedef struct __CFStorageNode {
  86      CFIndex numBytes;	/* Number of actual bytes in this node and all its children */
  87      uint32_t refCount;    /* Refcount of the node.  Is always at least 1 for a normal node.  For root nodes, which are stored directly in the CFStorage, this is 0.  CFStorageRetain/ReleaseNode checks for a refcount of 0 and does not retain/release such nodes. */
  88      bool isFrozen;	    /* Indicates that the node is frozen, i.e. may be shared. */
  89      bool isLeaf;
  90      union {
  91          struct {
  92              CFIndex capacityInBytes;	// capacityInBytes is capacity of memory; this is either 0, or >= numBytes
  93              uint8_t *memory;
  94  	    CFRange cachedRange;       //the absolute range of this node, in "value" units.  This is valid only if this node is referenced by storage->cacheNode, and used by the cache.  In general this is not valid, and the offset needs to be passed down from the tree
  95          } leaf;
  96          struct {
  97              struct __CFStorageNode *child[3];
  98          } notLeaf;
  99      } info;
 100  } CFStorageNode;
 101  
 102  
 103  /* A struct used for insertion into frozen nodes, which may need to return a new version of themselves, and a new friend!  The child field is a replacement for the child that was inserted into; if the child does not need to be replaced (i.e. it was unfrozen and there was space to perform the insertion) then the child that was inserted into is returned here.  The sibling field is a new child that should also be inserted (or NULL if none).  */
 104  typedef struct {
 105      CFStorageNode *child;
 106      CFStorageNode *sibling;
 107  } CFStorageDoubleNodeReturn;
 108  
 109  CF_INLINE CFStorageDoubleNodeReturn CFStorageDoubleNodeReturnMake(CFStorageNode *child, CFStorageNode *sibling) {
 110      CFStorageDoubleNodeReturn v;
 111      v.child = child;
 112      v.sibling = sibling;
 113      return v;
 114  }
 115  
 116  /* The CFStorage object.
 117   */
 118  struct __CFStorage {
 119      CFRuntimeBase base;
 120      CFIndex valueSize;
 121      uint32_t byteToValueShifter;
 122      CFLock_t cacheReaderMemoryAllocationLock;
 123      bool alwaysFrozen;
 124      CFStorageNode * volatile cacheNode;
 125      CFIndex maxLeafCapacity;	    // In terms of bytes
 126      CFStorageNode rootNode;
 127      CFOptionFlags nodeHint;	    // __kCFAllocatorGCScannedMemory or 0.
 128  };
 129  
 130  /* Helper function to return the intersection of two ranges */
 131  static inline CFRange intersectionRange(CFRange a, CFRange b) {
 132      CFIndex start = __CFMax(a.location, b.location);
 133      CFIndex end = __CFMin(a.location + a.length, b.location + b.length);
 134      if (end <= start) {
 135  	return CFRangeMake(0, 0);
 136      }
 137      else {
 138  	return CFRangeMake(start, end - start);
 139      }
 140  }
 141  
/* Allocates the memory and initializes the capacity in a leaf.  This locks not for mutations (mutations are not thread-safe in general), but for lazy allocation of storage during reading.
 */
CF_INLINE void __CFStorageAllocLeafNodeMemory(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex cap, bool compact) {
    if (cap > PAGE_LIMIT) {
        /* Large requests get rounded up to whole pages, but never beyond the per-leaf maximum. */
        cap = roundToPage(cap);
	if (cap > storage->maxLeafCapacity) cap = storage->maxLeafCapacity;
    } else {
        /* Small requests get rounded up to a multiple of 64 bytes. */
        cap = (((cap + 63) / 64) * 64);
    }
    /* We must be careful here, because another thread may be trying to allocate this memory at the same time (8203146).  This may happen if two threads both attempt to read from a lazily-allocated node. */
    if ((compact ? (cap != node->info.leaf.capacityInBytes) : (cap > node->info.leaf.capacityInBytes))) {
	__CFLock(&(storage->cacheReaderMemoryAllocationLock));
	/* Check again now that we've acquired the lock.  We know that we can do this because two simultaneous readers will always pass the same capacity.  This is the fix for 8203146.  This probably needs a memory barrier. */
	if ((compact ? (cap != node->info.leaf.capacityInBytes) : (cap > node->info.leaf.capacityInBytes))) {
	    __CFAssignWithWriteBarrier((void **)&node->info.leaf.memory, _CFAllocatorReallocateGC(allocator, node->info.leaf.memory, cap, storage->nodeHint));	// This will free...
	    if (__CFOASafe) __CFSetLastAllocationEventName(node->info.leaf.memory, "CFStorage (node bytes)");
	    node->info.leaf.capacityInBytes = cap;
	}
	__CFUnlock(&(storage->cacheReaderMemoryAllocationLock));	    
    }
}
 163  
 164  #if 0
 165  #define ASSERT(x) do { if (! (x)) { fprintf(stderr, "Assertion %s failed on line %d\n", #x, __LINE__); HALT; } } while (0)
 166  #else
 167  #define ASSERT(x) do { if (0 && ! (x)) { } } while(0)
 168  #endif
 169  
 170  static void __CFStorageCheckIntegrity(CFStorageRef storage);
 171  #define CHECK_INTEGRITY() do { if (0) __CFStorageCheckIntegrity(storage); } while (0)
 172  
 173  static void __CFStorageCheckNodeIntegrity(ConstCFStorageRef storage, const CFStorageNode *node);
 174  #define CHECK_NODE_INTEGRITY(X) do { if (0) __CFStorageCheckNodeIntegrity(storage, X); } while (0)
 175  
 176  #pragma mark Byte <-> Value Functions
 177  
 178  CF_INLINE CFIndex __CFStorageConvertByteToValue(ConstCFStorageRef storage, CFIndex byte) {
 179      if (storage->byteToValueShifter != NO_SHIFTER) {
 180  	return byte >> storage->byteToValueShifter;
 181      }
 182      return byte / storage->valueSize;
 183  }
 184  
 185  CF_INLINE CFRange __CFStorageConvertBytesToValueRange(ConstCFStorageRef storage, CFIndex offset, CFIndex length) {
 186      if (storage->byteToValueShifter != NO_SHIFTER) {
 187  	return CFRangeMake(offset >> storage->byteToValueShifter, length >> storage->byteToValueShifter);
 188      }
 189      return CFRangeMake(offset / storage->valueSize, length / storage->valueSize);
 190  }
 191  
 192  CF_INLINE CFIndex __CFStorageConvertValueToByte(ConstCFStorageRef storage, CFIndex value) {
 193      if (storage->byteToValueShifter != NO_SHIFTER) {
 194  	return value << storage->byteToValueShifter;
 195      }
 196      return value * storage->valueSize;
 197  }
 198  
 199  CF_INLINE CFRange __CFStorageConvertValuesToByteRange(ConstCFStorageRef storage, CFIndex offset, CFIndex length) {
 200      if (storage->byteToValueShifter != NO_SHIFTER) {
 201  	return CFRangeMake(offset << storage->byteToValueShifter, length << storage->byteToValueShifter);
 202      }
 203      return CFRangeMake(offset * storage->valueSize, length * storage->valueSize);
 204  }
 205  
 206  
 207  #pragma mark Node reference counting and freezing
 208  
/* Retains a node by atomically bumping its reference count (concurrent readers may race here).
 Root nodes, which live inline in the CFStorage struct, carry a refCount of 0 and are deliberately left untouched. */
CF_INLINE CFStorageNode *__CFStorageRetainNode(CFStorageNode *node) {
    if (node->refCount > 0) OSAtomicIncrement32((int32_t *)&node->refCount);
    return node;
}
 213  
 214  /* A faster version of __CFStorageRetainNode that is not thread safe.  This can be used from the Unfrozen (aka unshared) calls, or when a node was just allocated and there's no opportunity for it to have escaped yet */
 215  CF_INLINE CFStorageNode *__CFStorageRetainNodeThreadUnsafe(CFStorageNode *node) {
 216      if (node->refCount > 0) node->refCount++;
 217      return node;
 218  }
 219  
 220  static void __CFStorageDeallocateNode(CFStorageRef storage, CFStorageNode *node);
 221  
/* Releases one reference on a node, atomically; deallocates the node (and releases its children) when the count reaches zero.
 Root nodes (refCount == 0) are embedded in the CFStorage and are never released. */
CF_INLINE void __CFStorageReleaseNode(CFStorageRef storage, CFStorageNode *node) {
    if (node->refCount > 0) {
	uint32_t newRefCount = OSAtomicDecrement32((int32_t *)&node->refCount);
	if (newRefCount == 0) {
	    /* Last reference gone; tear down the node. */
	    __CFStorageDeallocateNode(storage, node);
	}
    }
}
 230  
 231  CF_INLINE void __CFStorageReleaseNodeWithNullCheck(CFStorageRef storage, CFStorageNode *node) {
 232      if (node) __CFStorageReleaseNode(storage, node);
 233  }
 234  
 235  static void __CFStorageDeallocateNode(CFStorageRef storage, CFStorageNode *node) {
 236      CFAllocatorRef allocator = CFGetAllocator(storage);
 237      if (node->isLeaf) {
 238  	if (node->info.leaf.memory) _CFAllocatorDeallocateGC(allocator, node->info.leaf.memory);
 239      } else {
 240  	__CFStorageReleaseNodeWithNullCheck(storage, node->info.notLeaf.child[0]);
 241  	__CFStorageReleaseNodeWithNullCheck(storage, node->info.notLeaf.child[1]);
 242  	__CFStorageReleaseNodeWithNullCheck(storage, node->info.notLeaf.child[2]);
 243      }
 244      _CFAllocatorDeallocateGC(allocator, node);
 245  }
 246  
/* Marks a node as frozen, i.e. potentially shared between CFStorages, so it must be treated as immutable.
 Note this only sets the flag on this node; descendants of a frozen node are considered frozen by convention (see the notes at the top of this file). */
static inline void __CFStorageFreezeNode(CFStorageNode *node) {
    node->isFrozen = true;
}
 250  
 251  /* If a node is frozen, but its reference count is 1, then we are the only reference to it and we can unfreeze it.  In general, it's unsafe to do this because it may race with other threads that depend on it being frozen (e.g. we are about to copy the node).  However, we do not support concurrent access while mutating a CFStorage, so if we are in a mutation function, know we have exclusive access and we can unfreeze the node. */
 252  static inline bool __CFStorageThawNodeDuringMutation(CFStorageRef storage, CFStorageNode *node) {
 253      if (node->refCount == 1) {
 254  	node->isFrozen = false;
 255  	return true;
 256      }
 257      return false;
 258  }
 259  
/* Installs newChild as the childIndex'th child of parentNode, through a write barrier for GC.
 Does not retain newChild; reference-count management is the caller's responsibility. */
static inline void __CFStorageSetChild(CFStorageNode *parentNode, CFIndex childIndex, CFStorageNode *newChild) {
    ASSERT(! parentNode->isLeaf);
    ASSERT(childIndex < 3);
    __CFAssignWithWriteBarrier((void **)&parentNode->info.notLeaf.child[childIndex], newChild);
}
 265  
 266  static inline void __CFStorageGetChildren(const CFStorageNode *parent, CFStorageNode ** restrict resultArray, bool shouldRetain, bool shouldFreeze) {
 267      ASSERT(! parent->isLeaf);
 268      CFIndex i;
 269      for (i=0; i < 3; i++) {
 270  	CFStorageNode *node = parent->info.notLeaf.child[i];
 271  	if (node != NULL && shouldRetain) __CFStorageRetainNode(node);
 272  	if (node != NULL && shouldFreeze) __CFStorageFreezeNode(node);
 273  	resultArray[i] = node;
 274      }
 275  }
 276  
 277  #pragma mark Storage cache handling
 278  
 279  
/* Sets the cache to point at the specified leaf node; pass NULL to clear the cache.
 locInBytes is the absolute byte offset of the node's data within the storage; the node's cachedRange (in value units) is recomputed from it, since cachedRange is only meaningful while the node is referenced by storage->cacheNode.
 */
CF_INLINE void __CFStorageSetCache(CFStorageRef storage, CFStorageNode *node, CFIndex locInBytes) {    
    if (node) {
	ASSERT(node->isLeaf);	/* Only leaf nodes may be cached. */
	node->info.leaf.cachedRange = __CFStorageConvertBytesToValueRange(storage, locInBytes, node->numBytes);
    }
    storage->cacheNode = node;
}
 290  
/* Gets the pointer for the specified absolute loc (in value units) from the cached leaf node.
 Returns NULL if the location is not covered by the cache, or if requireUnfrozenNode is set and the cached node is frozen.
 On success, *validConsecutiveValueRange receives the absolute value range held by the cached node.
 */
CF_INLINE uint8_t *__CFStorageGetFromCache(CFStorageRef storage, CFIndex loc, CFRange * restrict validConsecutiveValueRange, bool requireUnfrozenNode) {
    CFStorageNode * const cachedNode = storage->cacheNode; /* It's important we read from this field no more than once, for thread safety with other concurrent reads; that is why the field is marked volatile. */
    if (! cachedNode) return NULL; /* No cache */
    
    /* We only allow caching leaf nodes. */
    ASSERT(cachedNode->isLeaf);
    
    /* If the node is frozen, and we require an unfrozen node, then return NULL */
    if (requireUnfrozenNode && cachedNode->isFrozen) return NULL;
    
    /* If there's no memory allocated yet, then allocate it now (leaf memory is lazily allocated) */
    if (! cachedNode->info.leaf.memory) {
	__CFStorageAllocLeafNodeMemory(CFGetAllocator(storage), storage, cachedNode, cachedNode->numBytes, false);
    }
    
    /* If the node's range starts after loc, or ends before or at loc, return NULL */
    CFIndex nodeOffset = cachedNode->info.leaf.cachedRange.location;
    CFIndex nodeLength = cachedNode->info.leaf.cachedRange.length;
    if (loc < nodeOffset || loc >= nodeOffset + nodeLength) {
	return NULL;
    }
    
    /* The cache is valid, so return a pointer into the leaf's bytes at the requested value */
    validConsecutiveValueRange->location = nodeOffset;
    validConsecutiveValueRange->length = nodeLength;
    uint8_t *result = cachedNode->info.leaf.memory + __CFStorageConvertValueToByte(storage, loc - nodeOffset);
    return result;
}
 322  
 323  
/* Returns the child containing the desired byte; *childNum receives the child's index (0..2) and *relativeByteNum the byte's offset within that child.
 forInsertionOrDeletion = true means that we are looking for the child in which to insert or delete; this changes the behavior when the index is at the end of a child.
 relativeByteNum (not optional, for performance reasons) returns the relative byte number of the specified byte in the child.
 Don't call with leaf nodes!
 */
CF_INLINE CFStorageNode *__CFStorageFindChild(const CFStorageNode * restrict node, CFIndex byteNum, bool forInsertionOrDeletion, CFIndex * restrict childNum, CFIndex * restrict relativeByteNum) {
    if (forInsertionOrDeletion) byteNum--;	/* If for insertion, we do <= checks, not <, so this accomplishes the same thing */
    CFStorageNode *result;
    result = node->info.notLeaf.child[0];
    if (byteNum < result->numBytes) *childNum = 0;
    else {
	/* Not in child 0; make byteNum relative to child 1. */
	byteNum -= result->numBytes;
	result = node->info.notLeaf.child[1];
        if (byteNum < result->numBytes) *childNum = 1;
        else {
            /* Must be in child 2 (the caller guarantees byteNum is within this node). */
            byteNum -= result->numBytes;
            *childNum = 2;
	    result = node->info.notLeaf.child[2];
        }
    }
    if (forInsertionOrDeletion) byteNum++;	/* Undo the earlier decrement so relativeByteNum is correct. */
    *relativeByteNum = byteNum;
    return result;
}
 348  
 349  static CFStorageNode *__CFStorageCopyNode(CFStorageRef storage, const CFStorageNode *node);
 350  
/* Finds the location where the specified byte is stored. If validConsecutiveByteRange is not NULL, returns
 the range of bytes that are consecutive with this one.
 When requireUnfreezing is set, a frozen child on the path down is thawed (when uniquely referenced) or replaced with an unfrozen copy, so the returned memory is safe to mutate.
 !!! Assumes the byteNum is within the range of this node.
 */
static void *__CFStorageFindByte(CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFIndex absoluteByteOffsetOfNode, CFStorageNode **resultNode, CFRange *validConsecutiveByteRange, bool requireUnfreezing) {
    if (node->isLeaf) {
        *validConsecutiveByteRange = CFRangeMake(absoluteByteOffsetOfNode, node->numBytes);
	*resultNode = node;
        /* Leaf memory is lazily allocated; ensure it exists before handing out a pointer into it. */
        __CFStorageAllocLeafNodeMemory(CFGetAllocator(storage), storage, node, node->numBytes, false);
        return node->info.leaf.memory + byteNum;
    } else {
        CFIndex childNum;
        CFIndex relativeByteNum;
        CFStorageNode *child = __CFStorageFindChild(node, byteNum, false, &childNum, &relativeByteNum);
	if (requireUnfreezing && child->isFrozen && ! __CFStorageThawNodeDuringMutation(storage, child)) {
	    /* Replace the child with an unfrozen variant (copy-on-write) */
	    CFStorageNode *unfrozenReplacement = __CFStorageCopyNode(storage, child);
	    /* Release the old node, set the new one */
	    __CFStorageSetChild(node, childNum, unfrozenReplacement);
	    __CFStorageReleaseNode(storage, child);
	    child = unfrozenReplacement;
	}
        /* Recurse; (byteNum - relativeByteNum) is the total size of the children preceding `child`. */
        return __CFStorageFindByte(storage, child, relativeByteNum, absoluteByteOffsetOfNode + (byteNum - relativeByteNum), resultNode, validConsecutiveByteRange, requireUnfreezing);
    }
}
 376  
/* Guts of CFStorageGetValueAtIndex(); note that validConsecutiveValueRange is not optional.
 Consults and updates cache.
 */
CF_INLINE void *__CFStorageGetValueAtIndex(CFStorageRef storage, CFIndex idx, CFRange *validConsecutiveValueRange, bool requireUnfreezing) {
    uint8_t *result;
    /* Fast path: the cached leaf may already cover idx. */
    if (!(result = __CFStorageGetFromCache(storage, idx, validConsecutiveValueRange, requireUnfreezing))) {
	CFStorageNode *resultNode;
	CFRange rangeInBytes;
	/* Cache miss: walk the tree from the root (the root node is never frozen), then cache the leaf we found. */
	result = (uint8_t *)__CFStorageFindByte(storage, &storage->rootNode, __CFStorageConvertValueToByte(storage, idx), 0, &resultNode, &rangeInBytes, requireUnfreezing);
        __CFStorageSetCache(storage, resultNode, rangeInBytes.location);
	*validConsecutiveValueRange = __CFStorageConvertBytesToValueRange(storage, rangeInBytes.location, rangeInBytes.length);
    }
    return result;
}
 391  
/* Copies data in the range srcRange from srcLeaf to index dstLocation in dstLeaf.  Both srcLeaf and dstLeaf must be leaf nodes, and dstLeaf must not be frozen.  If srcRange has a nonzero length, then both must have their memory properly allocated.  This does not modify the numBytes of srcLeaf or dstLeaf.
 */
static void __CFLeafCopyRangeToOffset(const CFStorageNode *srcLeaf, CFRange srcRange, CFStorageNode *dstLeaf, CFIndex dstLocation) {
    ASSERT(srcLeaf->isLeaf);
    ASSERT(dstLeaf->isLeaf);
    if (srcRange.length > 0) {
	ASSERT(srcLeaf->info.leaf.memory != NULL);
	ASSERT(dstLeaf->info.leaf.memory != NULL);
	ASSERT(srcRange.location + srcRange.length <= srcLeaf->numBytes);
	ASSERT(dstLocation + srcRange.length <= dstLeaf->info.leaf.capacityInBytes);
	/* COPYMEM maps to objc_memmove_collectable (GC-aware copy; see the macro near the top of this file). */
	COPYMEM(srcLeaf->info.leaf.memory + srcRange.location, dstLeaf->info.leaf.memory + dstLocation, srcRange.length);
    }
}
 405  
 406  #pragma mark Node creation and destruction
 407  
// Creates a fresh node: returns a node with a refCount of 1, and an auto_zone_retain() under GC.
// Leaf byte storage is NOT allocated here; it is allocated lazily by __CFStorageAllocLeafNodeMemory.
static CFStorageNode *__CFStorageCreateNode(CFAllocatorRef allocator, CFStorageRef storage, bool isLeaf, CFIndex numBytes) {
    CFStorageNode *newNode = (CFStorageNode *)CFAllocatorAllocate(allocator, sizeof(CFStorageNode), __kCFAllocatorGCScannedMemory);
    if (__CFOASafe) __CFSetLastAllocationEventName(newNode, "CFStorage (node)");
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
	auto_zone_release(objc_collectableZone(), newNode); //remove the implicit retain so we can be collected
    }
    newNode->refCount = 1;
    newNode->isFrozen = storage->alwaysFrozen;	// an alwaysFrozen storage creates every node pre-frozen
    newNode->isLeaf = isLeaf;
    newNode->numBytes = numBytes;
    if (isLeaf) {
        newNode->info.leaf.capacityInBytes = 0;
        newNode->info.leaf.memory = NULL;
    } else {
        newNode->info.notLeaf.child[0] = newNode->info.notLeaf.child[1] = newNode->info.notLeaf.child[2] = NULL;
    }
    return newNode;
}
 427  
/* Creates an (unfrozen) copy of the given node.  This is shallow in the sense that it shares children for branches, but deep in that it copies memory for leaves.  */
static CFStorageNode *__CFStorageCopyNode(CFStorageRef storage, const CFStorageNode *node) {
    CFAllocatorRef allocator = CFGetAllocator(storage);
    CFStorageNode *result = __CFStorageCreateNode(allocator, storage, node->isLeaf, node->numBytes);
    if (node->isLeaf) {
	if (node->info.leaf.memory != NULL) {
	    /* Deep-copy the leaf's bytes; a leaf whose memory was never allocated stays unallocated in the copy. */
	    __CFStorageAllocLeafNodeMemory(allocator, storage, result, result->numBytes, false);
	    COPYMEM(node->info.leaf.memory, result->info.leaf.memory, result->numBytes);
	}
    }
    else {
	/* Branch: share the children, taking a reference on each (child[0] always exists; 1 and 2 may be NULL). */
	CFStorageNode *child = node->info.notLeaf.child[0];
	__CFStorageSetChild(result, 0, __CFStorageRetainNode(child));
	if ((child = node->info.notLeaf.child[1])) __CFStorageSetChild(result, 1, __CFStorageRetainNode(child));
	if ((child = node->info.notLeaf.child[2])) __CFStorageSetChild(result, 2, __CFStorageRetainNode(child));
	
	/* If we are copying children from a frozen node to an unfrozen node, we need to freeze the children */
	if (node->isFrozen) {
	    __CFStorageFreezeNode(result->info.notLeaf.child[0]);
	    if ((child = result->info.notLeaf.child[1])) __CFStorageFreezeNode(child);
	    if ((child = result->info.notLeaf.child[2])) __CFStorageFreezeNode(child);
	}
    }
    return result;
}
 453  
 454  
 455  static void __CFStorageDeallocateNode(CFStorageRef storage, CFStorageNode *node);
 456  
 457  
 458  #pragma mark Insertion and Deletion prototypes
 459  /* Prototypes for deletion and insertion.  The *Frozen and *Unfrozen variants should only be called for nodes that we know are frozen or unfrozen.  Frozen nodes may only have frozen children, so it makes sense for the Frozen functions to call other Frozen functions.  Unfrozen nodes may have frozen or unfrozen children, so they should call the non-suffixed variants (which dispatch on whether the node is frozen or not).  
 460   
 461   The only acceptable time to directly call the Unfrozen variant is for the root node of a CFStorage, because root nodes may never be frozen.  The isRootNode parameter determines whether we are in this case.
 462   
 The Insertion functions return two nodes.  As an awful performance hack, if the first node returned from __CFStorageInsert* is the same as the node passed in, that node is *not* retained, so should not be released.  If it is a different node, it is retained.
 464   */
 465  static CFStorageDoubleNodeReturn __CFStorageInsert(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum);
 466  static CFStorageNode *__CFStorageDelete(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFRange range, bool compact);
 467  
 468  static CFStorageDoubleNodeReturn __CFStorageInsertFrozen(CFAllocatorRef allocator, CFStorageRef storage, const CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum);
 469  static CFStorageNode *__CFStorageDeleteFrozen(CFAllocatorRef allocator, CFStorageRef storage, const CFStorageNode *node, CFRange range);
 470  
 471  static CFStorageDoubleNodeReturn __CFStorageInsertUnfrozen(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum);
 472  static CFStorageNode *__CFStorageDeleteUnfrozen(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFRange range, bool compact, bool isRootNode);
 473  
 474  #pragma mark Frozen Deletion
 475  
/* Deletes `range` from a frozen leaf.  The shared original is never modified; instead a fresh leaf holding the surviving bytes is returned (with a +1 reference), or NULL when the whole leaf is deleted. */
static CFStorageNode *__CFStorageDeleteLeafFrozen(CFAllocatorRef allocator, CFStorageRef storage, const CFStorageNode *node, CFRange range) {
    ASSERT(node->isLeaf);
    const CFIndex rangeToDeleteEnd = range.location + range.length;
    ASSERT(rangeToDeleteEnd <= node->numBytes);
    CFIndex remainingBytes = node->numBytes - range.length;
    if (remainingBytes == 0) {
	/* The range to delete encompasses our entire range of bytes.  Return NULL to indicate that we should be deleted. */
	return NULL;
    }
    else {
	/* Need to create a new node */
	CFStorageNode *newNode = __CFStorageCreateNode(allocator, storage, true, remainingBytes);
	if (node->info.leaf.memory) {
	    /* Our node had memory allocated, so copy in the non-deleted portion (the bytes before and after the deleted range) */
	    CFRange nonDeletedPrefix = CFRangeMake(0, range.location);
	    CFRange nonDeletedSuffix = CFRangeMake(rangeToDeleteEnd, node->numBytes - rangeToDeleteEnd);
	    ASSERT(nonDeletedPrefix.length + nonDeletedSuffix.length == remainingBytes);
	    __CFStorageAllocLeafNodeMemory(allocator, storage, newNode, remainingBytes, false);  // no point in compacting since we're freshly allocated
	    __CFLeafCopyRangeToOffset(node, nonDeletedPrefix, newNode, 0);
	    __CFLeafCopyRangeToOffset(node, nonDeletedSuffix, newNode, nonDeletedPrefix.length);
	}
	return newNode;
    }
}
 500  
/* Helper function for both frozen and unfrozen branch deletion.  Walks the children of the node, calling __CFStorageDelete (or __CFStorageDeleteFrozen if childrenAreDefinitelyFrozen is YES), and assigning the results back to newChildren.  Returns the number of new children.  The newChildren nodes all acquire a reference count! 
 */
static inline CFIndex __CFStoragePopulateBranchChildrenAfterDeletion(CFAllocatorRef allocator, CFStorageRef storage, const CFStorageNode *node, CFRange range, CFStorageNode *newChildren[3], bool childrenAreDefinitelyFrozen, bool compact) {
    CFIndex newChildIndex = 0;
    CFIndex childByteOffset = 0; //will track the current start byte of this child
    for (CFIndex existingChildIndex = 0; existingChildIndex < 3; existingChildIndex++) {
	CFStorageNode *existingChild = node->info.notLeaf.child[existingChildIndex];
	if (! existingChild) break; //no more children (NULL entries are always at the end)
	const CFIndex existingChildLength = existingChild->numBytes;
	/* The child's range is {childByteOffset, existingChildLength}.  Figure out what part of the range to delete is intersected by this child's range */
	CFRange deletionRangeIntersectedByChild = intersectionRange(range, CFRangeMake(childByteOffset, existingChildLength));
	if (! deletionRangeIntersectedByChild.length) {
	    /* The range to delete does not overlap this child's range, so preserve the child */
	    newChildren[newChildIndex++] = __CFStorageRetainNode(existingChild); //bump the refcount like we promised we would
	    if (childrenAreDefinitelyFrozen) {
		/* Because we are about to add this child from a frozen node to a possibly unfrozen node, mark the child as frozen */
		__CFStorageFreezeNode(existingChild);
	    }
	}
	else {
	    /* We have something from this child to delete; convert the intersection to child-relative coordinates */
	    CFRange rangeOfChildToDelete = CFRangeMake(deletionRangeIntersectedByChild.location - childByteOffset, deletionRangeIntersectedByChild.length);
	    CFStorageNode *newChild;
	    if (childrenAreDefinitelyFrozen) {
		newChild = __CFStorageDeleteFrozen(allocator, storage, existingChild, rangeOfChildToDelete);
	    }
	    else {
		newChild = __CFStorageDelete(allocator, storage, existingChild, rangeOfChildToDelete, compact);
	    }
	    /* We may get null back if we deleted the entire child */
	    if (newChild != NULL) {
		newChildren[newChildIndex++] = newChild; // Transfers the reference count
	    }
	    
	    /* Sanity-check the deletion result against the amount requested */
	    if (rangeOfChildToDelete.length == existingChildLength) {
		ASSERT(newChild == NULL); //should have deleted everything
	    }
	    else {
		ASSERT(newChild != NULL);
		ASSERT(newChild->numBytes == existingChildLength - rangeOfChildToDelete.length);
	    }
	}
	childByteOffset += existingChildLength;
    }
    return newChildIndex;
}
 547  
/* Deletes `range` from a frozen branch by building a replacement subtree from the surviving children.
 Returns NULL when everything is deleted, the lone surviving child when only one remains (collapsing a level of the tree), or a new branch node otherwise.  The returned node carries a +1 reference. */
static CFStorageNode *__CFStorageDeleteBranchFrozen(CFAllocatorRef allocator, CFStorageRef storage, const CFStorageNode *node, CFRange range) {
    ASSERT(! node->isLeaf);
    ASSERT(range.location + range.length <= node->numBytes);
    if (range.length == node->numBytes) {
	/* They're deleting everything, so return NULL to indicate that this node should be deleted. */
	return NULL;
    }
    
    /* Construct our new children in this array. */
    CFStorageNode *newChildren[3];
    CFIndex newChildIndex = __CFStoragePopulateBranchChildrenAfterDeletion(allocator, storage, node, range, newChildren, true/*childrenAreDefinitelyFrozen*/, false/*compact*/);
    
    /* We do not have to freeze anything in newChildren.  __CFStoragePopulateBranchChildrenAfterDeletion() will properly freeze any existing children, and new children we get should not be marked frozen. */
    
    /* Now we have the children of the new node in newChildren.  We expect to have at least one child (if we got no children, we should have returned NULL up above because they deleted everything. */
    ASSERT(newChildIndex >= 1);
    if (newChildIndex == 1) {
	/* Only one child, so just return it, transferring its retain count */
	return newChildren[0];
    }
    else {
	CFStorageNode *result = __CFStorageCreateNode(allocator, storage, false, 0);
	while (newChildIndex--) {
	    __CFStorageSetChild(result, newChildIndex, newChildren[newChildIndex]); //transfers the reference count
	}
	result->numBytes = node->numBytes - range.length;
	return result;
    }
}
 577  
 578  /* Returns a new node, or NULL if the entire thing was deleted.
 579   */
 580  static CFStorageNode *__CFStorageDeleteFrozen(CFAllocatorRef allocator, CFStorageRef storage, const CFStorageNode *node, CFRange range) {
 581      if (node->isLeaf) {
 582  	return __CFStorageDeleteLeafFrozen(allocator, storage, node, range);
 583      }
 584      else {
 585  	return __CFStorageDeleteBranchFrozen(allocator, storage, node, range);
 586      }
 587  }
 588  
 589  #pragma mark Unfrozen Deletion
 590  
/* Deletion from an unfrozen (mutable) node; the node is modified in place.
   Returns a retained node to stand in for 'node' (often 'node' itself), or
   NULL if everything was deleted.
   The boolean compact indicates whether leaf nodes that get smaller should be realloced.
   (isRootNode is accepted but not consulted in this function's body.)
 */
static CFStorageNode *__CFStorageDeleteUnfrozen(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFRange range, bool compact, bool isRootNode) {
    ASSERT(! node->isFrozen);
    ASSERT(range.location + range.length <= node->numBytes);
    CHECK_NODE_INTEGRITY(node);
    
    if (range.length == node->numBytes) {
	/* We are deleting everything, so return NULL */
	return NULL;
    }
    
    if (node->isLeaf) {
	node->numBytes -= range.length;
	// If this node had memory allocated, readjust the bytes...
	if (node->info.leaf.memory) {
	    COPYMEM(node->info.leaf.memory + range.location + range.length, node->info.leaf.memory + range.location, node->numBytes - range.location);
	    if (compact) __CFStorageAllocLeafNodeMemory(allocator, storage, node, node->numBytes, true);
	}
	CHECK_NODE_INTEGRITY(node);
	return __CFStorageRetainNodeThreadUnsafe(node); //we can use the ThreadUnsafe calls because this is the Unfrozen variant, so we are not shared
    } else {
	CFStorageNode *newChildren[3] = {NULL, NULL, NULL};
	CFIndex newChildIndex = __CFStoragePopulateBranchChildrenAfterDeletion(allocator, storage, node, range, newChildren, false/*childrenAreDefinitelyFrozen*/, compact);
	node->numBytes -= range.length;
	ASSERT(newChildIndex >= 1); //we expect to have at least one child; else we would have deleted everything up above
	
	/* Release all of our existing children.  Either we are about to return a new child in place of us; or we are about to set our children to the new ones */
	__CFStorageReleaseNode(storage, node->info.notLeaf.child[0]);
	__CFStorageReleaseNodeWithNullCheck(storage, node->info.notLeaf.child[1]);
	__CFStorageReleaseNodeWithNullCheck(storage, node->info.notLeaf.child[2]);
	node->info.notLeaf.child[0] = node->info.notLeaf.child[1] = node->info.notLeaf.child[2] = NULL;
	
	if (newChildIndex == 1) {
	    /* We have only one child, so return it, transferring the refcount that __CFStoragePopulate gives it */
	    return newChildren[0];
	}
	else {
	    /* Install the surviving children back into ourselves and return ourselves, retained. */
	    CFIndex i;
	    for (i=0; i < 3; i++) {
		__CFStorageSetChild(node, i, newChildren[i]); //set the new child, transferring the refcount to us (or set NULL)
	    }
	    CHECK_NODE_INTEGRITY(node);
	    return __CFStorageRetainNodeThreadUnsafe(node);
	}
    }
}
 638  
 639  #pragma mark Frozen Insertion
 640  
/* Insertion into a frozen leaf.  We return two nodes, either of which may be 'node', or possibly two new nodes.  This always sets the cache.
   Note: the inserted region of 'size' bytes is left for the caller to fill — only the preserved data around it is copied. */
static CFStorageDoubleNodeReturn __CFStorageInsertLeafFrozen(CFAllocatorRef allocator, CFStorageRef storage, const CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum) {
    /* Inserting into a frozen leaf.  If we can fit the new data along with our existing data into a single node, then do so (i.e. if we can return one node, do it).  Otherwise, all of the data would have to fit into a second node (we are never called with more data than storage->maxLeafCapacity) so just make a new node with the data and return that. */
    CFStorageNode *leftResult, *rightResult;
    CHECK_NODE_INTEGRITY(node);
    ASSERT(byteNum <= node->numBytes);
    CFIndex newTotalSize = size + node->numBytes;
    if (newTotalSize <= storage->maxLeafCapacity) {
	/* We can fit into a single node */
	rightResult = NULL;
	leftResult = __CFStorageCreateNode(allocator, storage, true, newTotalSize);
	if (node->info.leaf.memory != NULL) { // Beware lazy memory allocation
	    __CFStorageAllocLeafNodeMemory(allocator, storage, leftResult, newTotalSize, false);
	    COPYMEM(node->info.leaf.memory, leftResult->info.leaf.memory, byteNum); //copy first byteNum bytes from existing node
	    //middle we don't touch
	    COPYMEM(node->info.leaf.memory + byteNum, leftResult->info.leaf.memory + byteNum + size, node->numBytes - byteNum); //copy last part from existing node
	}
	__CFStorageSetCache(storage, leftResult, absoluteByteNum - byteNum);
    }
    else {
	/* We cannot fit into a single node.  See if we can preserve self (i.e. we're inserting at beginning or end). */
	if (byteNum == node->numBytes) {
	    /* Inserting at end, so left is our existing node and right is the new node.   Do not retain left node, because it is the same as the given node */
	    leftResult = (CFStorageNode *)node;
	    rightResult = __CFStorageCreateNode(allocator, storage, true, size);
	    __CFStorageSetCache(storage, rightResult, absoluteByteNum);
	}
	else if (byteNum == 0) {
	    /* Inserting at beginning, so right is our existing node and left is the new node.  Do retain left node, because it is different than the given node. */
	    rightResult = __CFStorageRetainNode((CFStorageNode *)node);
	    leftResult = __CFStorageCreateNode(allocator, storage, true, size);
	    __CFStorageSetCache(storage, leftResult, absoluteByteNum);
	}
	else {
	    /* Inserting in middle.  We will need to create two nodes because we overflow one.  We could be lazy and only allocate up to byteNum, but since it's likely that they're about to insert into us and we'd have to reallocate, just allocate everything requested up front. */
	    CFIndex leftAmount = storage->maxLeafCapacity, rightAmount = newTotalSize - storage->maxLeafCapacity;
	    leftResult = __CFStorageCreateNode(allocator, storage, true, leftAmount);
	    rightResult = __CFStorageCreateNode(allocator, storage, true, rightAmount);
	    __CFStorageAllocLeafNodeMemory(allocator, storage, leftResult, leftAmount, false);
	    __CFStorageAllocLeafNodeMemory(allocator, storage, rightResult, rightAmount, false);
	    ASSERT(node->info.leaf.capacityInBytes >= node->numBytes);
	    
	    /* The existing node has length node->numBytes, so it has the following range: {0, node->numBytes}
	     
             We are inserting (garbage) data of length 'size' into offset 'byteNum'.  Therefore we end up with the following two logical ranges, expressed as {start, length}:
             {0, byteNum}, {byteNum + size, node->numBytes - byteNum}
	     
             We need to divide these among our new nodes with the following logical ranges:
             {0, leftAmount}, {leftAmount, rightAmount}
	     
             The first range must be fit entirely within the left  node (byteNum <= leftAmount).  The second range may or may be divided between the left and right nodes.     
	     */
	    
	    ASSERT(byteNum <= leftAmount);
	    COPYMEM(node->info.leaf.memory, leftResult->info.leaf.memory, byteNum);
	    
	    /* Copy whatever portion of the preserved (post-insertion-point) data lands in each new node.
	       The '- size' adjusts from post-insertion logical offsets back to offsets in the old node's memory. */
	    const CFRange leftNodeRange = {0, leftAmount}, rightNodeRange = {leftAmount, rightAmount};
	    const CFRange preservedData = {byteNum + size, node->numBytes - byteNum};
	    CFRange overlap;
	    if ((overlap = intersectionRange(leftNodeRange, preservedData)).length > 0) COPYMEM(node->info.leaf.memory + overlap.location - size, leftResult->info.leaf.memory + overlap.location, overlap.length);
	    if ((overlap = intersectionRange(rightNodeRange, preservedData)).length > 0) COPYMEM(node->info.leaf.memory + overlap.location - size, rightResult->info.leaf.memory + overlap.location - leftAmount, overlap.length);
	    __CFStorageSetCache(storage, leftResult, absoluteByteNum - byteNum);
	}
    }
    return CFStorageDoubleNodeReturnMake(leftResult, rightResult);
}
 707  
/* Insertion into a frozen branch node.  Returns a (copy, optional-sibling) pair; the
   given node is never modified.  The recursive child insertion may yield up to four
   children, which are distributed across the copy and (if needed) a new sibling. */
static CFStorageDoubleNodeReturn __CFStorageInsertBranchFrozen(CFAllocatorRef allocator, CFStorageRef storage, const CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum) {
    /* Inserting into a frozen branch.  We definitely will need to make a new copy of us, so make that up front.  We may or may not need to make a new sibling.  Note that in some cases, we may be able to get away with not creating a new copy of us, e.g. inserting at the very end of the tree.  In that case, we could preserve us and make a sibling containing exactly one node.  However, we do not really want to have branches with exactly one child; because then why not just return the child?  And then the whole tree can become unbalanced.  So then instead, always distribute the children equally among our nodes. */
    CHECK_NODE_INTEGRITY(node);
    CFStorageNode *copyOfMe = __CFStorageCreateNode(allocator, storage, false, 0);
    CFStorageNode *siblingOfMe = NULL;
    CFIndex relativeByteNum;
    CFIndex childNum;
    CFStorageNode *child = __CFStorageFindChild(node, byteNum, true, &childNum, &relativeByteNum);
    ASSERT(childNum >= 0 && childNum <= 2);
    CFStorageDoubleNodeReturn childReturn = __CFStorageInsertFrozen(allocator, storage, child, relativeByteNum, size, absoluteByteNum);
    ASSERT(childReturn.child); //we always get at least one back
    
    /* Make a local array of all new children (retained).  We'll then move them to the new nodes. */
    CFStorageNode *newChildren[4] = {NULL};
    __CFStorageGetChildren(node, newChildren, true/*retain*/, true/*freeze*/);
    if (newChildren[childNum] != childReturn.child) {
	__CFStorageReleaseNode(storage, newChildren[childNum]);
	newChildren[childNum] = childReturn.child; // Transfers the retain
    }
    if (childReturn.sibling != NULL) {
	/* Shift later children right by one slot to open a hole after childNum for the new sibling. */
	if (childNum < 2) newChildren[3] = newChildren[2];
	if (childNum < 1) newChildren[2] = newChildren[1];
	newChildren[childNum + 1] = childReturn.sibling; // Transfers the retain
    }
    
    /* First two always go to our clone */
    __CFStorageSetChild(copyOfMe, 0, newChildren[0]);
    __CFStorageSetChild(copyOfMe, 1, newChildren[1]);
    if (newChildren[3] == NULL) {
	/* We have three or fewer children to distribute, i.e. we don't need a sibling.  Put them all into copy of me.  Our clone's byte count is larger than our own by 'size'. */
	__CFStorageSetChild(copyOfMe, 2, newChildren[2]);
	copyOfMe->numBytes = node->numBytes + size;	
    }
    else {
	/* We have four children to distribute.  The first two go to us, the last two go to our new sibling.  */
	siblingOfMe = __CFStorageCreateNode(allocator, storage, false, 0);
	__CFStorageSetChild(siblingOfMe, 0, newChildren[2]);
	__CFStorageSetChild(siblingOfMe, 1, newChildren[3]);
	copyOfMe->numBytes = newChildren[0]->numBytes + newChildren[1]->numBytes;
	siblingOfMe->numBytes = newChildren[2]->numBytes + newChildren[3]->numBytes;
    }
    CHECK_NODE_INTEGRITY(node);
    CHECK_NODE_INTEGRITY(copyOfMe);
    if (siblingOfMe) CHECK_NODE_INTEGRITY(siblingOfMe);
    return CFStorageDoubleNodeReturnMake(copyOfMe, siblingOfMe);
}
 754  
 755  static CFStorageDoubleNodeReturn __CFStorageInsertFrozen(CFAllocatorRef allocator, CFStorageRef storage, const CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum) {
 756      if (node->isLeaf) {
 757  	return __CFStorageInsertLeafFrozen(allocator, storage, node, byteNum, size, absoluteByteNum); 
 758      }
 759      else {
 760  	return __CFStorageInsertBranchFrozen(allocator, storage, node, byteNum, size, absoluteByteNum);
 761      }
 762  }
 763  
 764  
 765  #pragma mark Unfrozen Insertion
 766  
/* Insertion into an unfrozen leaf.  We return two nodes, one of which is 'node'.  This always sets the cache.
   Four cases: everything fits in this leaf; insertion at end; insertion at front (contents are
   swapped into a new node); insertion in the middle (with or without overflow into a new node). */
static CFStorageDoubleNodeReturn __CFStorageInsertLeafUnfrozen(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum) {
    if (size + node->numBytes > storage->maxLeafCapacity) {	// Need to create more child nodes
	CFStorageNode *newNode;
	if (byteNum == node->numBytes) {	// Inserting at end; easy...
	    newNode = __CFStorageCreateNode(allocator, storage, true, size);
	    __CFStorageSetCache(storage, newNode, absoluteByteNum);
	} else if (byteNum == 0) {	// Inserting at front; also easy, but we need to swap node and newNode
	    newNode = __CFStorageCreateNode(allocator, storage, true, 0);
	    
	    /* Transfer our memory to the new node */
	    newNode->numBytes = node->numBytes;
	    newNode->info.leaf.capacityInBytes = node->info.leaf.capacityInBytes;
	    __CFAssignWithWriteBarrier((void **)&newNode->info.leaf.memory, node->info.leaf.memory);
	    
	    /* Stomp on our existing node */
	    node->numBytes = size;
	    node->info.leaf.capacityInBytes = 0;
	    node->info.leaf.memory = NULL;
	    
	    /* Cache our existing node */
	    __CFStorageSetCache(storage, node, absoluteByteNum);
	} else if (byteNum + size <= storage->maxLeafCapacity) {	// Inserting at middle; inserted region will fit into existing child
	    // Create new node to hold the overflow
	    newNode = __CFStorageCreateNode(allocator, storage, true, node->numBytes - byteNum);
	    if (node->info.leaf.memory) {	// We allocate memory lazily...
		__CFStorageAllocLeafNodeMemory(allocator, storage, newNode, node->numBytes - byteNum, false);
		COPYMEM(node->info.leaf.memory + byteNum, newNode->info.leaf.memory, node->numBytes - byteNum);
		__CFStorageAllocLeafNodeMemory(allocator, storage, node, byteNum + size, false);
	    }
	    node->numBytes = byteNum + size;
	    __CFStorageSetCache(storage, node, absoluteByteNum - byteNum);
	} else {	// Inserting some of new into one node, rest into another; remember that the assumption is size <= storage->maxLeafCapacity
	    newNode = __CFStorageCreateNode(allocator, storage, true, node->numBytes + size - storage->maxLeafCapacity);	// New stuff
	    if (node->info.leaf.memory) {	// We allocate memory lazily...
		__CFStorageAllocLeafNodeMemory(allocator, storage, newNode, node->numBytes + size - storage->maxLeafCapacity, false);
		COPYMEM(node->info.leaf.memory + byteNum, newNode->info.leaf.memory + byteNum + size - storage->maxLeafCapacity, node->numBytes - byteNum);
		__CFStorageAllocLeafNodeMemory(allocator, storage, node, storage->maxLeafCapacity, false);
	    }
	    __CFStorageSetCache(storage, node, absoluteByteNum - byteNum);
	    node->numBytes = storage->maxLeafCapacity;
	}
	return CFStorageDoubleNodeReturnMake(node, newNode); // We do not retain 'node' because it is the given node.
    } else {	// No need to create new nodes!
	if (node->info.leaf.memory) {
	    __CFStorageAllocLeafNodeMemory(allocator, storage, node, node->numBytes + size, false);
	    COPYMEM(node->info.leaf.memory + byteNum, node->info.leaf.memory + byteNum + size, node->numBytes - byteNum);
	}
	node->numBytes += size;
	__CFStorageSetCache(storage, node, absoluteByteNum - byteNum);
	return CFStorageDoubleNodeReturnMake(node, NULL); //return the existing node, meaning the parent does not have to do anything.  Do not retain it because it is the given node.
    }
}
 820  
/* Insertion into an unfrozen branch.  Modifies 'node' in place and returns it as the
   result's child.  If the recursive insertion overflows our three child slots, a new
   sibling branch is created and returned in the result's sibling field for the parent
   to handle. */
static CFStorageDoubleNodeReturn __CFStorageInsertBranchUnfrozen(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum) {
    CFIndex relativeByteNum;
    CFIndex childNum; // we will insert after childNum, i.e. if childNum is 0, any new node becomes index 1.  This can have value 0, 1, or 2.
    CFStorageNode *childNode = __CFStorageFindChild(node, byteNum, true, &childNum, &relativeByteNum);
    CFStorageDoubleNodeReturn newNodes = __CFStorageInsert(allocator, storage, childNode, relativeByteNum, size, absoluteByteNum);
    CFStorageDoubleNodeReturn result = {node, NULL}; // the default return value meaning we did all of the work ourselves and our parent does not need to do anything
    ASSERT(childNode != NULL);
    ASSERT(newNodes.child != NULL);
    
    if (newNodes.child != childNode) {
	/* We got back a replacement for the current child, so replace it. */
	__CFStorageReleaseNode(storage, childNode);
	__CFStorageSetChild(node, childNum, newNodes.child);
    }
    
    if (newNodes.sibling == NULL) {
	/* The insertion happened successfully without requiring us to add any new nodes. */
	node->numBytes += size;
    } else {
	/* We got back an additional node to insert. */
	CFStorageNode *newChild = newNodes.sibling;
	if (node->info.notLeaf.child[2] == NULL) {	// There's an empty slot for the new node, cool
	    if (childNum == 0) __CFStorageSetChild(node, 2, node->info.notLeaf.child[1]); // Make room
	    __CFStorageSetChild(node, childNum+1, newChild);
	    node->numBytes += size;
	} else {
	    /* All three slots full: split.  We keep two children; a new sibling takes the other two. */
	    CFStorageNode *anotherNode = __CFStorageCreateNode(allocator, storage, false, 0);	// Create another node
	    if (childNum == 0) {	// Last two children go to new node
		__CFStorageSetChild(anotherNode, 0, node->info.notLeaf.child[1]);
		__CFStorageSetChild(anotherNode, 1, node->info.notLeaf.child[2]);
		__CFStorageSetChild(node, 1, newChild);
		node->info.notLeaf.child[2] = NULL;
	    } else if (childNum == 1) {	// Last child goes to new node
		__CFStorageSetChild(anotherNode, 0, newChild);
		__CFStorageSetChild(anotherNode, 1, node->info.notLeaf.child[2]);
		node->info.notLeaf.child[2] = NULL;
	    } else {	// New node contains the new comers...
		__CFStorageSetChild(anotherNode, 0, node->info.notLeaf.child[2]);
		__CFStorageSetChild(anotherNode, 1, newChild);
		node->info.notLeaf.child[2] = NULL;
	    }
	    node->numBytes = node->info.notLeaf.child[0]->numBytes + node->info.notLeaf.child[1]->numBytes;
	    anotherNode->numBytes = anotherNode->info.notLeaf.child[0]->numBytes + anotherNode->info.notLeaf.child[1]->numBytes;
	    /* node should not be retained because it is the passed in node.  anotherNode was created so we transfer its retain count */
	    result.sibling = anotherNode;
	}
    } 
    return result;
}
 870  
 871  
 872  
 873  /* Returns NULL or additional node to come after this node
 874   Assumption: size is never > storage->maxLeafCapacity
 875   */
 876  static CFStorageDoubleNodeReturn __CFStorageInsertUnfrozen(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum) {
 877      ASSERT(! node->isFrozen);
 878      if (node->isLeaf) {
 879  	return __CFStorageInsertLeafUnfrozen(allocator, storage, node, byteNum, size, absoluteByteNum);
 880      } else {
 881  	return __CFStorageInsertBranchUnfrozen(allocator, storage, node, byteNum, size, absoluteByteNum);
 882      }
 883  }
 884  
 885  #pragma mark Frozen or Unfrozen Dispatch Functions
 886  
 887  static CFStorageDoubleNodeReturn __CFStorageInsert(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum) {
 888      if (node->isFrozen && ! __CFStorageThawNodeDuringMutation(storage, node)) {
 889  	return __CFStorageInsertFrozen(allocator, storage, node, byteNum, size, absoluteByteNum);
 890      }
 891      else {
 892  	return __CFStorageInsertUnfrozen(allocator, storage, node, byteNum, size, absoluteByteNum);
 893      }
 894  }
 895  
 896  static CFStorageNode *__CFStorageDelete(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFRange range, bool compact) {
 897      if (node->isFrozen && ! __CFStorageThawNodeDuringMutation(storage, node)) {
 898  	return __CFStorageDeleteFrozen(allocator, storage, node, range);
 899      }
 900      else {
 901  	return __CFStorageDeleteUnfrozen(allocator, storage, node, range, compact, false/*isRootNode*/);
 902      }
 903  }
 904  
 905  
 906  #pragma mark Utility functions
 907  
/* Number of values in the storage, derived from the root node's total byte count. */
CF_INLINE CFIndex __CFStorageGetCount(CFStorageRef storage) {
    return __CFStorageConvertByteToValue(storage, storage->rootNode.numBytes);
}
 911  
/* Equality callback: two storages are equal iff they have the same count, the same
   value size, and byte-identical contents.  Contents are compared chunkwise: each
   CFStorageGetValueAtIndex call yields a pointer plus the contiguous range it covers,
   and we memcmp the overlap of the two ranges at a time. */
static Boolean __CFStorageEqual(CFTypeRef cf1, CFTypeRef cf2) {
    CFStorageRef storage1 = (CFStorageRef)cf1;
    CFStorageRef storage2 = (CFStorageRef)cf2;
    CFIndex loc, count, valueSize;
    CFRange range1, range2;
    uint8_t *ptr1, *ptr2;
    
    count = __CFStorageGetCount(storage1);
    if (count != __CFStorageGetCount(storage2)) return false;
    
    valueSize = __CFStorageGetValueSize(storage1);
    if (valueSize != __CFStorageGetValueSize(storage2)) return false;
    
    loc = range1.location = range1.length = range2.location = range2.length = 0;
    ptr1 = ptr2 = NULL;
    
    while (loc < count) {
	CFIndex cntThisTime;
	/* Refresh a pointer only when we've walked past the end of its last chunk. */
	if (loc >= range1.location + range1.length) ptr1 = (uint8_t *)CFStorageGetValueAtIndex(storage1, loc, &range1);
	if (loc >= range2.location + range2.length) ptr2 = (uint8_t *)CFStorageGetValueAtIndex(storage2, loc, &range2);
	/* Compare up to the nearer of the two chunk ends. */
	cntThisTime = range1.location + range1.length;
	if (range2.location + range2.length < cntThisTime) cntThisTime = range2.location + range2.length;
	cntThisTime -= loc;
	if (memcmp(ptr1, ptr2, valueSize * cntThisTime) != 0) return false;
	ptr1 += valueSize * cntThisTime;
	ptr2 += valueSize * cntThisTime;
	loc += cntThisTime;
    }
    return true;
}
 942  
 943  static CFHashCode __CFStorageHash(CFTypeRef cf) {
 944      CFStorageRef Storage = (CFStorageRef)cf;
 945      return __CFStorageGetCount(Storage);
 946  }
 947  
/* Debugging helper: append an indented one-line description of 'node' to 'str',
   recursing into children of branch nodes with increased indentation. */
static void __CFStorageDescribeNode(CFStorageNode *node, CFMutableStringRef str, CFIndex level) {
    int cnt;
    for (cnt = 0; cnt < level; cnt++) CFStringAppendCString(str, "  ", CFStringGetSystemEncoding());
    
    if (node->isLeaf) {
        CFStringAppendFormat(str, NULL, CFSTR("Leaf %ld/%ld (%p) refcount: %u frozen: %s\n"), node->numBytes, node->info.leaf.capacityInBytes, node, node->refCount, node->isFrozen ? "yes" : "no");
    } else {
        CFStringAppendFormat(str, NULL, CFSTR("Node %ld (%p) refcount: %u frozen: %s\n"), node->numBytes, node, node->refCount, node->isFrozen ? "yes" : "no");
        for (cnt = 0; cnt < 3; cnt++) if (node->info.notLeaf.child[cnt]) __CFStorageDescribeNode(node->info.notLeaf.child[cnt], str, level+1);
    }
}
 959  
 960  static CFIndex __CFStorageGetNodeCapacity(CFStorageNode *node) {
 961      if (!node) return 0;
 962      if (node->isLeaf) return node->info.leaf.capacityInBytes;
 963      return __CFStorageGetNodeCapacity(node->info.notLeaf.child[0]) + __CFStorageGetNodeCapacity(node->info.notLeaf.child[1]) + __CFStorageGetNodeCapacity(node->info.notLeaf.child[2]);
 964  }
 965  
 966  CFIndex __CFStorageGetCapacity(CFStorageRef storage) {
 967      return __CFStorageConvertByteToValue(storage, __CFStorageGetNodeCapacity(&storage->rootNode));
 968  }
 969  
/* Size in bytes of a single stored value. */
CFIndex __CFStorageGetValueSize(CFStorageRef storage) {
    return storage->valueSize;
}
 973  
/* CopyDescription callback: returns a retained mutable string containing a summary
   line followed by a recursive dump of the node tree. */
static CFStringRef __CFStorageCopyDescription(CFTypeRef cf) {
    CFStorageRef storage = (CFStorageRef)cf;
    CFMutableStringRef result;
    CFAllocatorRef allocator = CFGetAllocator(storage);
    result = CFStringCreateMutable(allocator, 0);
    CFStringAppendFormat(result, NULL, CFSTR("<CFStorage %p [%p]>[count = %lu, capacity = %lu]\n"), storage, allocator, (unsigned long)__CFStorageGetCount(storage), (unsigned long)__CFStorageGetCapacity(storage));
    __CFStorageDescribeNode(&storage->rootNode, result, 0);
    return result;
}
 983  
/* Returns true if enumeration should stop, false if it should continue.
   Walks the subtree under 'node', invoking 'applier' on each leaf's bytes that fall
   within 'range' (a byte range relative to 'node').  'concurrencyToken' budgets how
   many levels may fan out children in parallel via dispatch_apply; it is decremented
   each time a level with multiple overlapping children runs concurrently. */
static bool __CFStorageEnumerateNodesInByteRangeWithBlock(CFStorageRef storage, CFStorageNode *node, CFIndex globalOffsetOfNode, CFRange range, CFIndex concurrencyToken, CFStorageApplierBlock applier) {
    bool stop = false;
    if (node->isLeaf) {
	CFIndex start = range.location;
	CFIndex length = __CFMin(range.length, node->numBytes - start);
	/* Leaf memory is allocated lazily; materialize it before handing bytes to the applier. */
	if (! node->info.leaf.memory) {
	    __CFStorageAllocLeafNodeMemory(CFGetAllocator(storage), storage, node, node->numBytes, false);
	}
	applier(node->info.leaf.memory + start, __CFStorageConvertBytesToValueRange(storage, globalOffsetOfNode + start, length), &stop);
    }
    else {
	/* Compute each child's byte range within this node and its overlap with the requested range. */
	CFStorageNode *children[3] = {node->info.notLeaf.child[0], node->info.notLeaf.child[1], node->info.notLeaf.child[2]};
	const CFIndex lengths[3] = {children[0]->numBytes, children[1] ? children[1]->numBytes : 0, children[2] ? children[2]->numBytes : 0};
	const CFIndex offsets[3] = {0, lengths[0], lengths[0] + lengths[1]};	
	const CFRange overlaps[3] = {intersectionRange(CFRangeMake(offsets[0], lengths[0]), range), intersectionRange(CFRangeMake(offsets[1], lengths[1]), range), intersectionRange(CFRangeMake(offsets[2], lengths[2]), range)};
	CFIndex numOverlappingChildren = (!! overlaps[0].length + !! overlaps[1].length + !! overlaps[2].length);
	if (numOverlappingChildren > 1) concurrencyToken--;
	
	if (concurrencyToken >= 0 && numOverlappingChildren > 1) {
	    CFIndex numChildren = 1 + !!children[1] + !!children[2];
	    const CFRange * overlapsPtr = overlaps; //blocks don't let us reference arrays :(
	    const CFIndex * offsetsPtr = offsets;
	    CFStorageNode ** childrenPtr = children;
#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_WINDOWS
	    __block bool blockStop = false;
	    dispatch_apply(numChildren, __CFDispatchQueueGetGenericMatchingCurrent(), ^(size_t ind) {
		if (! blockStop && overlapsPtr[ind].length > 0) {
		    if (__CFStorageEnumerateNodesInByteRangeWithBlock(storage, childrenPtr[ind], globalOffsetOfNode + offsetsPtr[ind], CFRangeMake(overlapsPtr[ind].location - offsetsPtr[ind], overlapsPtr[ind].length), concurrencyToken, applier)) {
			blockStop = true;
		    }
		}
	    });
	    stop = blockStop;
#else
        for (CFIndex ind = 0; ind < numChildren; ind++) {
            if (overlapsPtr[ind].length > 0) {
                if (__CFStorageEnumerateNodesInByteRangeWithBlock(storage, childrenPtr[ind], globalOffsetOfNode + offsetsPtr[ind], CFRangeMake(overlapsPtr[ind].location - offsetsPtr[ind], overlapsPtr[ind].length), concurrencyToken, applier)) {
                    stop = true;
                    break;
		        }
		    }
	    }
#endif
	} else {
	    /* Serial path: visit overlapping children in order, short-circuiting once stopped. */
	    if (overlaps[0].length > 0) {
		stop = stop || __CFStorageEnumerateNodesInByteRangeWithBlock(storage, children[0], globalOffsetOfNode + offsets[0], CFRangeMake(overlaps[0].location - offsets[0], overlaps[0].length), concurrencyToken, applier);
	    }
	    if (overlaps[1].length > 0) {
		stop = stop || __CFStorageEnumerateNodesInByteRangeWithBlock(storage, children[1], globalOffsetOfNode + offsets[1], CFRangeMake(overlaps[1].location - offsets[1], overlaps[1].length), concurrencyToken, applier);
	    }
	    if (overlaps[2].length > 0) {
		stop = stop || __CFStorageEnumerateNodesInByteRangeWithBlock(storage, children[2], globalOffsetOfNode + offsets[2], CFRangeMake(overlaps[2].location - offsets[2], overlaps[2].length), concurrencyToken, applier);
	    }
	}
    }
    return stop;
}
1042  
/* Finds the smallest node that contains the given byte range ('nodeRange', relative to
   'node').  Descends while exactly one child overlaps the range; stops at a leaf or at
   a branch whose range spans multiple children.  On return, *outGlobalByteRangeOfResult
   holds the result node's byte range in storage-global coordinates. */
static CFStorageNode *_CFStorageFindNodeContainingByteRange(ConstCFStorageRef storage, const CFStorageNode *node, CFRange nodeRange, CFIndex globalOffsetOfNode, CFRange *outGlobalByteRangeOfResult) {
    if (! node->isLeaf) {
	/* See how many children are overlapped by this range.  If it's only 1, call us recursively on that node; otherwise we're it! */
	CFStorageNode *children[3] = {node->info.notLeaf.child[0], node->info.notLeaf.child[1], node->info.notLeaf.child[2]};
	const CFIndex lengths[3] = {children[0]->numBytes, children[1] ? children[1]->numBytes : 0, children[2] ? children[2]->numBytes : 0};
	const CFIndex offsets[3] = {0, lengths[0], lengths[0] + lengths[1]};	
	const CFRange overlaps[3] = {intersectionRange(CFRangeMake(offsets[0], lengths[0]), nodeRange), intersectionRange(CFRangeMake(offsets[1], lengths[1]), nodeRange), intersectionRange(CFRangeMake(offsets[2], lengths[2]), nodeRange)};
	CFIndex numOverlappingChildren = (!! overlaps[0].length + !! overlaps[1].length + !! overlaps[2].length);
	ASSERT(numOverlappingChildren > 0);
	if (numOverlappingChildren == 1) {
	    CFIndex overlappingChild = (overlaps[0].length ? 0 : (overlaps[1].length ? 1 : 2));
	    return _CFStorageFindNodeContainingByteRange(storage, children[overlappingChild], CFRangeMake(nodeRange.location - offsets[overlappingChild], nodeRange.length), globalOffsetOfNode + offsets[overlappingChild], outGlobalByteRangeOfResult);
	}
    }
    
    /* Either we are a leaf, in which case we contain the range, or we are a branch with multiple overlapping children.  Either way, we are the minimum node containing the range in question. */
    *outGlobalByteRangeOfResult = CFRangeMake(globalOffsetOfNode, node->numBytes);
    return (CFStorageNode *)node;
    
    
}
1064  
/* Frees all memory associated with the root node, effectively emptying the CFStorage.
   Afterwards the root is reset to an empty leaf with no backing memory. */
static void __CFStorageClearRootNode(CFStorageRef storage) {
    CFAllocatorRef allocator = CFGetAllocator(storage);
    /* Have to release our children if we are a branch, or free our memory if we are a leaf */
    if (storage->rootNode.isLeaf) {
	_CFAllocatorDeallocateGC(allocator, storage->rootNode.info.leaf.memory);
    }
    else {
	__CFStorageReleaseNodeWithNullCheck(storage, storage->rootNode.info.notLeaf.child[0]);
	__CFStorageReleaseNodeWithNullCheck(storage, storage->rootNode.info.notLeaf.child[1]);
	__CFStorageReleaseNodeWithNullCheck(storage, storage->rootNode.info.notLeaf.child[2]);
    }
    /* Reinitialize the root as an empty lazy leaf. */
    storage->rootNode.isLeaf = true;
    storage->rootNode.numBytes = 0;
    storage->rootNode.info.leaf.capacityInBytes = 0;
    storage->rootNode.info.leaf.memory = NULL;
}
1082  
1083  static void __CFStorageDeallocate(CFTypeRef cf) {
1084      /* CFStorage is used in CFArray.  Under GC, CFArray references us strongly, but not retained.  Thus we may be finalized before the array.  When the array itself is finalized, it will call any custom deallocate callback on all of its contents, which means it has to walk the array.  Thus CFStorage must be careful to not perturb its structure in Deallocate under GC.
1085       
1086       CFStorage nodes have a reference count, and if a node has a reference count of one, and we are in a mutating function, we conclude that this CFStorage has exclusive ownership of the node, and we can treat it as mutable even if it's marked as frozen (see __CFStorageThawNodeDuringMutation).  Therefore it would be nice if we could decrement our nodes' refcounts in Deallocate.  However, if we did so, then another CFStorage might treat a node that we reference as mutable and modify it, which must not happen, because we must not perturb the structure of a CFStorage in Deallocate.  Thus we just "leak" a reference count under GC.  Of course, these reference counts don't actually keep the memory alive in GC, so it's not a real leak.
1087       */
1088      CFStorageRef storage = (CFStorageRef)cf;
1089      if (! CF_IS_COLLECTABLE_ALLOCATOR(CFGetAllocator(storage))) {
1090          __CFStorageClearRootNode(storage);
1091      }
1092  }
1093  
// CFStorage's registered type ID; set exactly once in CFStorageGetTypeID() via _CFRuntimeRegisterClass.
static CFTypeID __kCFStorageTypeID = _kCFRuntimeNotATypeID;

// Runtime class descriptor for CFStorage.  _kCFRuntimeScannedObject requests that the GC scan instances.
static const CFRuntimeClass __CFStorageClass = {
    _kCFRuntimeScannedObject,
    "CFStorage",
    NULL,	// init
    NULL,	// copy
    __CFStorageDeallocate,
    __CFStorageEqual,
    __CFStorageHash,
    NULL,	// copyFormattingDesc (unused)
    __CFStorageCopyDescription
};
1107  
1108  /*** Public API ***/
1109  
1110  CFStorageRef CFStorageCreate(CFAllocatorRef allocator, CFIndex valueSize) {
1111      CFStorageRef storage;
1112      CFIndex size = sizeof(struct __CFStorage) - sizeof(CFRuntimeBase);
1113      storage = (CFStorageRef)_CFRuntimeCreateInstance(allocator, CFStorageGetTypeID(), size, NULL);
1114      if (NULL == storage) {
1115  	return NULL;
1116      }
1117      storage->valueSize = valueSize;
1118      /* if valueSize is a power of 2, then set the shifter to the log base 2 of valueSize.  Otherwise set it to NO_SHIFTER */
1119      if (valueSize > 0 && !(valueSize & (valueSize - 1))) {
1120  #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED
1121  	storage->byteToValueShifter = __builtin_ctzl(valueSize);
1122  #else
1123  	CFIndex tempSize = valueSize;
1124  	storage->byteToValueShifter = 0;
1125  	while (tempSize > 1) {
1126  	    storage->byteToValueShifter++;
1127  	    tempSize >>= 1;
1128  	}
1129  #endif
1130      }
1131      else {
1132  	storage->byteToValueShifter = NO_SHIFTER;
1133      }
1134      
1135      CF_LOCK_INIT_FOR_STRUCTS(storage->cacheReaderMemoryAllocationLock);
1136      storage->maxLeafCapacity = __CFStorageMaxLeafCapacity;
1137      if (valueSize && ((storage->maxLeafCapacity % valueSize) != 0)) {	
1138          storage->maxLeafCapacity = (storage->maxLeafCapacity / valueSize) * valueSize;	// Make it fit perfectly (3406853)
1139      }
1140      memset(&(storage->rootNode), 0, sizeof(CFStorageNode));
1141      storage->rootNode.isLeaf = true;
1142      storage->rootNode.refCount = 0;
1143      if (valueSize >= sizeof(void *)) {
1144  	storage->nodeHint = __kCFAllocatorGCScannedMemory;
1145      }
1146      else {
1147  	// Don't scan nodes if the value size is smaller than a pointer (8198596)
1148  	storage->nodeHint = 0;
1149      }
1150      if (__CFOASafe) __CFSetLastAllocationEventName(storage, "CFStorage");
1151      return storage;    
1152  }
1153  
/* Creates a new CFStorage containing the values of mutStorage in range.
 Where possible this shares (freezes and retains) whole subtrees of the source
 rather than copying bytes, then trims the shared content down to range. */
CFStorageRef CFStorageCreateWithSubrange(CFStorageRef mutStorage, CFRange range) {
    const ConstCFStorageRef storage = mutStorage; //we expect this to never modify the storage, so use a const variable to help enforce that
    CFStorageRef result = CFStorageCreate(CFGetAllocator(storage), storage->valueSize);
    
    if (range.length > 0) {
	/* Start by finding the node that contains the entire range.  Bump the reference count of its children and add them to the root of our new copy. */
	const CFRange byteRange = __CFStorageConvertValuesToByteRange(storage, range.location, range.length);
	CFRange byteRangeOfContainingNode;
	CFStorageNode *nodeContainingEntireRange = _CFStorageFindNodeContainingByteRange(storage, &storage->rootNode, byteRange, 0, &byteRangeOfContainingNode);
	ASSERT(nodeContainingEntireRange != NULL);
	
	/* If the result is a leaf, insert the portion we care about */
	if (nodeContainingEntireRange->isLeaf) {
	    /* Make space in the (empty) result, then copy the bytes in.  Leaf buffers are
	     allocated lazily, so a NULL buffer means there is nothing to copy. */
	    CFStorageInsertValues(result, CFRangeMake(0, range.length));
	    if (nodeContainingEntireRange->info.leaf.memory) {
		CFIndex offsetIntoNode = byteRange.location - byteRangeOfContainingNode.location;
		ASSERT(offsetIntoNode >= 0);
		CFStorageReplaceValues(result, CFRangeMake(0, range.length), nodeContainingEntireRange->info.leaf.memory + offsetIntoNode);
	    }
	}
	else {
	    /* The result is not a leaf.  Insert all of its children into our root. */
	    ASSERT(byteRangeOfContainingNode.length == nodeContainingEntireRange->numBytes);
	    result->rootNode.isLeaf = false;
	    result->rootNode.numBytes = byteRangeOfContainingNode.length;
	    result->rootNode.info.notLeaf.child[0] = result->rootNode.info.notLeaf.child[1] = result->rootNode.info.notLeaf.child[2] = NULL;
	    for (CFIndex i=0; i < 3; i++) {
		CFStorageNode *newNode = nodeContainingEntireRange->info.notLeaf.child[i];
		if (! newNode) break;
		/* Freeze the shared child so neither storage can mutate it in place, then share it by retaining. */
		__CFStorageFreezeNode(newNode);
		__CFStorageSetChild(&result->rootNode, i, __CFStorageRetainNode(newNode));
	    }
	    
	    /* Now delete from the beginning or end to trim this to the right size */
	    CFRange rangeOfContainingNode = __CFStorageConvertBytesToValueRange(result, byteRangeOfContainingNode.location, byteRangeOfContainingNode.length);
	    CFIndex prefixToTrim = range.location - rangeOfContainingNode.location;
	    CFIndex suffixToTrim = (rangeOfContainingNode.location + rangeOfContainingNode.length) - (range.location + range.length);
	    ASSERT(prefixToTrim >= 0);
	    ASSERT(suffixToTrim >= 0);
	    ASSERT(CFStorageGetCount(result) == rangeOfContainingNode.length);
	    /* Trim the suffix first so the prefix trim's indices remain valid. */
	    if (suffixToTrim > 0) CFStorageDeleteValues(result, CFRangeMake(rangeOfContainingNode.length - suffixToTrim, suffixToTrim));
	    if (prefixToTrim > 0) CFStorageDeleteValues(result, CFRangeMake(0, prefixToTrim));
	}
    }
    
    ASSERT(CFStorageGetCount(result) == range.length);
    return result;
}
1202  
/* Lazily registers the CFStorage class with the CF runtime and returns its type ID. */
CFTypeID CFStorageGetTypeID(void) {
    static dispatch_once_t initOnce;
    dispatch_once(&initOnce, ^{ __kCFStorageTypeID = _CFRuntimeRegisterClass(&__CFStorageClass); });
    return __kCFStorageTypeID;
}
1208  
/* Returns the number of values currently stored. */
CFIndex CFStorageGetCount(CFStorageRef storage) {
    return __CFStorageGetCount(storage);
}
1212  
1213  /* Returns pointer to the specified value
1214   index and validConsecutiveValueRange are in terms of values
1215   */
1216  void *CFStorageGetValueAtIndex(CFStorageRef storage, CFIndex idx, CFRange *validConsecutiveValueRange) {
1217      CFRange dummy;
1218      return __CFStorageGetValueAtIndex(storage, idx, validConsecutiveValueRange ? validConsecutiveValueRange : &dummy, true/*requireUnfreezing*/);
1219  }
1220  
1221  const void *CFStorageGetConstValueAtIndex(CFStorageRef storage, CFIndex idx, CFRange *validConsecutiveValueRange) {
1222      CFRange dummy;
1223      return __CFStorageGetValueAtIndex(storage, idx, validConsecutiveValueRange ? validConsecutiveValueRange : &dummy, false/*requireUnfreezing*/);
1224  }
1225  
1226  
/* Makes space for range.length values at location range.location
 This function deepens the tree if necessary...
 Insertion proceeds in chunks of at most maxLeafCapacity bytes, so each pass through
 the loop splits at most one node.
 */
void CFStorageInsertValues(CFStorageRef storage, CFRange range) {
    CFIndex numBytesToInsert = __CFStorageConvertValueToByte(storage, range.length);
    CFIndex byteNum = __CFStorageConvertValueToByte(storage, range.location);
    const CFIndex expectedByteCount = storage->rootNode.numBytes + numBytesToInsert;
    const CFAllocatorRef allocator = CFGetAllocator(storage);
    const CFIndex insertionChunkSize = storage->maxLeafCapacity;
    while (numBytesToInsert > 0) {
	CHECK_INTEGRITY();
        const CFIndex insertThisTime = __CFMin(numBytesToInsert, insertionChunkSize);
        CFStorageDoubleNodeReturn newNodes = __CFStorageInsertUnfrozen(allocator, storage, &storage->rootNode, byteNum, insertThisTime, byteNum); //we don't have to call the frozen variant because the root node is never frozen
	ASSERT(newNodes.child == &storage->rootNode);// unfrozen variant should always give us our node back.  We may have another node to insert in newNodes.sibling
        if (newNodes.sibling != NULL) {
	    /* The root overflowed; the tree must grow one level deeper. */
	    CFStorageNode *newNode = newNodes.sibling;
	    /* Need to create a new root node.  Copy our existing root node's contents to a new heap node. */
	    CFStorageNode *heapRoot = __CFStorageCreateNode(allocator, storage, storage->rootNode.isLeaf, storage->rootNode.numBytes);	// Will copy the (static) rootNode over to this
	    objc_memmove_collectable(&heapRoot->info, &storage->rootNode.info, sizeof heapRoot->info);
	    
	    /* Our root is about to become a branch.  If our root node is currently a leaf, we need to clear the cache, because if the cache points at the root then the cache is about to start pointing at a branch node (which is not allowed) */
	    if (storage->rootNode.isLeaf) {
		__CFStorageSetCache(storage, NULL, 0);
		storage->rootNode.isLeaf = false;
	    }
	    
	    /* Set the new children in our root.  Note that it's important that we overwrite the root node's info, because we wanted to transfer the refcounts of our children (or our allocated memory, if we are a leaf) to the new heap root */
	    __CFStorageSetChild(&storage->rootNode, 0, heapRoot);
	    __CFStorageSetChild(&storage->rootNode, 1, newNode);
            storage->rootNode.info.notLeaf.child[2] = NULL;
            storage->rootNode.numBytes = heapRoot->numBytes + newNode->numBytes;
	}
        numBytesToInsert -= insertThisTime;
        byteNum += insertThisTime;
	ASSERT(storage->rootNode.numBytes + numBytesToInsert == expectedByteCount);
    }
    ASSERT(expectedByteCount == storage->rootNode.numBytes);
    CHECK_INTEGRITY();
}
1266  
/* Deletes the values in the specified range
 This function gets rid of levels if necessary...
 */
void CFStorageDeleteValues(CFStorageRef storage, CFRange range) {
    CHECK_INTEGRITY();
    CFAllocatorRef allocator = CFGetAllocator(storage);
    CFRange byteRange = __CFStorageConvertValuesToByteRange(storage, range.location, range.length);
    const CFIndex expectedByteCount = storage->rootNode.numBytes - byteRange.length;
    
    /* We don't try to maintain the cache across deletion */
    __CFStorageSetCache(storage, NULL, 0);
    
    /* The root node can never be frozen, so it's always OK to modify it */
    ASSERT(! storage->rootNode.isFrozen);    
    CFStorageNode *newRoot = __CFStorageDeleteUnfrozen(allocator, storage, &storage->rootNode, byteRange, true/*compact*/, true/*isRootNode*/);
    
    /* There are three return values we can get:
     NULL -> everything was deleted
     the root node -> something was deleted, but no nodes became empty, so we don't have to replace any children
     a different node -> represents the new root
     */
    if (newRoot == NULL) {
	__CFStorageClearRootNode(storage);
    }
    else if (newRoot == &storage->rootNode) {
	/* No need to replace any children, nothing to do for this case */
    }
    else {
	/* Got a legitimately new root back.  If it is unfrozen, we can just acquire its guts.  If it is frozen, we have more work to do.  Note that we do not have to worry about releasing any existing children of the root, because __CFStorageDeleteUnfrozen already did that.  Also note that if we got a legitimately new root back, we must be a branch node, because if we were a leaf node, we would have been unfrozen and gotten ourself back. */
	storage->rootNode.numBytes = newRoot->numBytes;
	storage->rootNode.isLeaf = newRoot->isLeaf;
	bzero(&storage->rootNode.info, sizeof storage->rootNode.info); //be a little paranoid here
	if (newRoot->isLeaf) {
	    if (! newRoot->isFrozen) {
		/* If the leaf is not frozen, we can just steal its memory (if any)!  If it is frozen, we must copy it. */
		__CFAssignWithWriteBarrier((void **)&storage->rootNode.info.leaf.memory, newRoot->info.leaf.memory);
		/* Clear out the old node, because we stole its memory and we don't want it to deallocate it when the node is destroyed below. */
		bzero(&newRoot->info, sizeof newRoot->info);
	    }
	    else {
		/* The leaf is frozen, so we have to copy its memory.  (A NULL buffer means there is nothing to copy.) */
		if (newRoot->info.leaf.memory) {
		    __CFStorageAllocLeafNodeMemory(allocator, storage, &storage->rootNode, storage->rootNode.numBytes, false);
		    COPYMEM(newRoot->info.leaf.memory, storage->rootNode.info.leaf.memory, newRoot->numBytes);
		}
	    }
	} else {
	    /* New root is a branch. */
	    ASSERT(newRoot->info.notLeaf.child[0] && newRoot->info.notLeaf.child[1]); //never expect to get back a node with only one child
	    __CFStorageSetChild(&storage->rootNode, 0, __CFStorageRetainNode(newRoot->info.notLeaf.child[0]));
	    __CFStorageSetChild(&storage->rootNode, 1, __CFStorageRetainNode(newRoot->info.notLeaf.child[1]));
	    if (newRoot->info.notLeaf.child[2]) __CFStorageSetChild(&storage->rootNode, 2, __CFStorageRetainNode(newRoot->info.notLeaf.child[2]));
	}	
    }
    __CFStorageReleaseNodeWithNullCheck(storage, newRoot); //balance the retain from __CFStorageDeleteUnfrozen
    ASSERT(expectedByteCount == storage->rootNode.numBytes);
    CHECK_INTEGRITY();
}
1325  
1326  void CFStorageGetValues(CFStorageRef storage, CFRange range, void *values) {
1327      CHECK_INTEGRITY();
1328      while (range.length > 0) {
1329          CFRange leafRange;
1330          void *storagePtr = __CFStorageGetValueAtIndex(storage, range.location, &leafRange, false/*requireUnfreezing*/);
1331          CFIndex cntThisTime = __CFMin(range.length, leafRange.length - (range.location - leafRange.location));
1332  	CFIndex byteCntThisTime = __CFStorageConvertValueToByte(storage, cntThisTime);
1333          COPYMEM(storagePtr, values, byteCntThisTime);
1334          values = (uint8_t *)values + byteCntThisTime;
1335          range.location += cntThisTime;
1336          range.length -= cntThisTime;
1337      }
1338  }
1339  
/* Fast-enumeration support (used by CFArray): returns a pointer to, and the count of, the run
 of consecutive values beginning at index state->state.  state->extra[0] caches the total count.
 stackbuffer and count are unused because we hand back interior pointers into our own leaves.
 Since enumeration starts at 0 and advances leaf by leaf, state->state always coincides with
 leafRange.location here, so adding the full leaf length is correct. */
unsigned long _CFStorageFastEnumeration(CFStorageRef storage, struct __objcFastEnumerationStateEquivalent *state, void *stackbuffer, unsigned long count) {
    // without trying to understand the data structure, each time through search for block containing index
    CFRange leafRange;
    if (state->state == 0) { /* first time, get length */
        state->extra[0] = __CFStorageGetCount(storage);
    }
    if (state->state >= state->extra[0]) return 0;
    state->itemsPtr = (unsigned long *)CFStorageGetValueAtIndex(storage, state->state, &leafRange);
    state->state += leafRange.length;
    return leafRange.length;
}
1351  
1352  void CFStorageApplyFunction(CFStorageRef storage, CFRange range, CFStorageApplierFunction applier, void *context) {
1353      CHECK_INTEGRITY();
1354      CFIndex valueSize = storage->valueSize;
1355      CFStorageApplyBlock(storage, range, 0, ^(const void *storagePtr, CFRange subrange, bool *stop){
1356  	while (subrange.length--) {
1357  	    applier(storagePtr, context);
1358  	    storagePtr = valueSize + (const char *)storagePtr;
1359  	}
1360      });
1361  }
1362  
/* Invokes applier on runs of consecutive values covering range (both in value units).
 With kCFStorageEnumerationConcurrent set, ranges of at least 2^20 values may be enumerated concurrently. */
void CFStorageApplyBlock(CFStorageRef storage, CFRange range, CFStorageEnumerationOptionFlags options, CFStorageApplierBlock applier) {
    if (! range.length) return;
    CFRange byteRange = __CFStorageConvertValuesToByteRange(storage, range.location, range.length);
    /* As we descend the tree, if we find we need to go down two or more children, and the concurrency token is not zero, then we decrement the concurrency token and do it concurrently.  Since we have 3 children, a concurrency token of 3 yields up to 3**3 == 27 threads, which is a lot!  Concurrency benefits start to kick in around one million elements */
    CFIndex concurrencyToken = 0;
    if ((options & kCFStorageEnumerationConcurrent) && (range.length >= 1024 * 1024)) {
	concurrencyToken = 3;
    }
    __CFStorageEnumerateNodesInByteRangeWithBlock(storage, &storage->rootNode, 0/*globalOffsetOfNode*/, byteRange, concurrencyToken, applier);
}
1373  
1374  void CFStorageReplaceValues(CFStorageRef storage, CFRange range, const void *values) {
1375      CHECK_INTEGRITY();
1376      while (range.length > 0) {
1377          CFRange leafRange;
1378          void *storagePtr = __CFStorageGetValueAtIndex(storage, range.location, &leafRange, true/*requireUnfreezing*/);
1379  	ASSERT(range.location >= leafRange.location);
1380  	ASSERT(range.location < leafRange.location + leafRange.length);
1381          CFIndex cntThisTime = __CFMin(range.length, leafRange.length - (range.location - leafRange.location));
1382  	CFIndex byteCntThisTime = __CFStorageConvertValueToByte(storage, cntThisTime);
1383          COPYMEM(values, storagePtr, byteCntThisTime);
1384  	values = (const uint8_t *)values + byteCntThisTime;
1385          range.location += cntThisTime;
1386          range.length -= cntThisTime;
1387      }
1388  }
1389  
1390  static void __CFStorageApplyNodeBlockInterior(CFStorageRef storage, CFStorageNode *node, void (^block)(CFStorageRef storage, CFStorageNode *node)) {
1391      block(storage, node);
1392      if (! node->isLeaf) {
1393  	CFStorageNode *childNode;
1394  	if ((childNode = node->info.notLeaf.child[0])) __CFStorageApplyNodeBlockInterior(storage, childNode, block);
1395  	if ((childNode = node->info.notLeaf.child[1])) __CFStorageApplyNodeBlockInterior(storage, childNode, block);
1396  	if ((childNode = node->info.notLeaf.child[2])) __CFStorageApplyNodeBlockInterior(storage, childNode, block);
1397      }
1398  }
1399  
/* Applies block to every node in the tree, root first (pre-order). */
static void __CFStorageApplyNodeBlock(CFStorageRef storage, void (^block)(CFStorageRef storage, CFStorageNode *node)) {
    __CFStorageApplyNodeBlockInterior(storage, &storage->rootNode, block);
}
1403  
/* Debugging aid: estimates the heap bytes consumed by this storage's nodes and their leaf buffers.
 The root node is excluded because it is embedded in the CFStorage allocation itself. */
static CFIndex __CFStorageEstimateTotalAllocatedSize(CFStorageRef storage) __attribute__((unused));
static CFIndex __CFStorageEstimateTotalAllocatedSize(CFStorageRef storage) {
    __block CFIndex nodeResult = 0;      // bytes in heap-allocated CFStorageNode structs
    __block CFIndex contentsResult = 0;  // bytes in leaf value buffers
    __CFStorageApplyNodeBlock(storage, ^(CFStorageRef storage, CFStorageNode *node) {
	if (node != &storage->rootNode) {
	    nodeResult += malloc_size(node);
	    if (node->isLeaf && node->info.leaf.memory != NULL) {
		contentsResult += malloc_size(node->info.leaf.memory);
	    }
	}
    });
    return nodeResult + contentsResult;
}
1418  
/* Sets the storage's alwaysFrozen flag.  NOTE(review): presumably this forces nodes to be
 treated as frozen (copy-on-write; see the freezing notes at the top of the file) — confirm
 against the flag's readers. */
void __CFStorageSetAlwaysFrozen(CFStorageRef storage, bool alwaysFrozen) {
    storage->alwaysFrozen = alwaysFrozen;
}
1422  
1423  static CFIndex __CFStorageCheckNodeCachedLengthIntegrity(ConstCFStorageRef storage, const CFStorageNode *node) {
1424      if (node->isLeaf) {
1425  	ASSERT(node->numBytes > 0 || node == &storage->rootNode);
1426  	return node->numBytes;
1427      } else {
1428  	/* Branch */
1429  	CFStorageNode *childNode;
1430  	CFIndex expectedResult = __CFStorageCheckNodeCachedLengthIntegrity(storage, node->info.notLeaf.child[0]);
1431  	if ((childNode = node->info.notLeaf.child[1])) {
1432  	    expectedResult += __CFStorageCheckNodeCachedLengthIntegrity(storage, childNode);
1433  	    if ((childNode = node->info.notLeaf.child[2])) {
1434  		expectedResult += __CFStorageCheckNodeCachedLengthIntegrity(storage, childNode);
1435  	    }	    
1436  	}
1437  	ASSERT(expectedResult == node->numBytes);
1438  	return expectedResult;
1439      }
1440  }
1441  
1442  static void __CFStorageCheckNodeIntegrity(ConstCFStorageRef storage, const CFStorageNode *node) {
1443      ASSERT(node->isFrozen == 0 || node->isFrozen == 1);
1444      
1445      __CFStorageCheckNodeCachedLengthIntegrity(storage, node);
1446      /* If we are a branch, make sure that we have at least one child, and that there is not a non-NULL child after a NULL child */
1447      if (! node->isLeaf) {
1448  	ASSERT(node->info.notLeaf.child[0] != NULL);
1449  	if (node->info.notLeaf.child[1] == NULL) ASSERT(node->info.notLeaf.child[2] == NULL);	
1450      }
1451  }
1452  
/* Debugging: validates every node in the tree (see __CFStorageCheckNodeIntegrity). */
static void __CFStorageCheckIntegrity(CFStorageRef storage) {
    __CFStorageApplyNodeBlock(storage, ^(CFStorageRef storage, CFStorageNode *node) {
	__CFStorageCheckNodeIntegrity(storage, node);
    });
}
1458  
1459  /* Used by CFArray.c */
1460  
1461  static void __CFStorageNodeSetUnscanned(CFStorageNode *node, auto_zone_t *zone) {
1462      if (node->isLeaf) {
1463          auto_zone_set_unscanned(zone, node->info.leaf.memory);
1464      } else {
1465          CFStorageNode **children = node->info.notLeaf.child;
1466          if (children[0]) __CFStorageNodeSetUnscanned(children[0], zone);
1467          if (children[1]) __CFStorageNodeSetUnscanned(children[1], zone);
1468          if (children[2]) __CFStorageNodeSetUnscanned(children[2], zone);
1469      }
1470  }
1471  
/* Used by CFArray.c for weak-reference arrays: clears nodeHint so future leaf allocations
 are not GC-scanned, and marks all existing leaf buffers unscanned. */
CF_PRIVATE void _CFStorageSetWeak(CFStorageRef storage) {
    storage->nodeHint = 0;
    __CFStorageNodeSetUnscanned(&storage->rootNode, (auto_zone_t *)objc_collectableZone());
}
1476  
1477  #undef COPYMEM
1478  #undef PAGE_LIMIT
1479