// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if TARGET_OS_WIN32
#include <Windows.h>
#include <Psapi.h>
#else
#include <dlfcn.h>
#endif
#if __has_include(<os/assumes.h>)
#include <os/assumes.h>
#else
#include <assert.h>
#endif
#ifndef os_assumes
#define os_assumes(_x) _x
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if !defined(__has_builtin)
#define __has_builtin(builtin) 0
#endif

#if __has_builtin(__sync_bool_compare_and_swap)
#define OSAtomicCompareAndSwapInt(Old, New, Ptr)                               \
  __sync_bool_compare_and_swap(Ptr, Old, New)
#elif TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <Windows.h>
static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi,
                                               int volatile *dst) {
  // fixme barrier is overkill -- see objc-os.h
  int original = InterlockedCompareExchange((LONG volatile *)dst, newi, oldi);
  return (original == oldi);
}
#endif

/***********************
Globals
************************/

static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4;  // logical 2

static bool isGC = false;

/*******************************************************************************
Internal Utilities
********************************************************************************/


static int32_t latching_incr_int(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            return old_value+2;
        }
    }
}

static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if (old_value & BLOCK_DEALLOCATING) {
            // if deallocating we can't do this
            return false;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            // if latched, we're leaking this block, and we succeed
            return true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            // otherwise, we must store a new retained value without the deallocating bit set
            return true;
        }
    }
}


// return should_deallocate?
static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false;   // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        bool result = false;
        if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) {
            new_value = old_value - 1;
            result = true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return result;
        }
    }
}

// hit zero?
static bool latching_decr_int_now_zero(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false;   // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return (new_value & BLOCK_REFCOUNT_MASK) == 0;
        }
    }
}
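
/*
 * Rough sketch of the refcount packing the latching helpers above assume, based on
 * the constants in this tree's Block_private.h: BLOCK_DEALLOCATING is bit 0 and
 * BLOCK_REFCOUNT_MASK covers bits 1..15 of the 32-bit flags word, so a logical
 * retain adds 2 and a logical refcount of 1 is stored as 2.  Illustrative values
 * only:
 *
 *     int32_t flags = BLOCK_NEEDS_FREE | 2;            // freshly copied block, logical refcount 1
 *     latching_incr_int(&flags);                       // -> ... | 4, logical refcount 2
 *     latching_decr_int_should_deallocate(&flags);     // -> ... | 2, returns false
 *     latching_decr_int_should_deallocate(&flags);     // refcount 1 -> 0, sets
 *                                                      // BLOCK_DEALLOCATING, returns true
 *
 * Once the field saturates at BLOCK_REFCOUNT_MASK it latches: further increments
 * and decrements are ignored and the object is intentionally leaked.
 */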


/***********************
GC support stub routines
************************/
#if !TARGET_OS_WIN32
#pragma mark GC Support Routines
#endif


static void *_Block_alloc_default(size_t size, const bool initialCountIsOne, const bool isObject) {
    return malloc(size);
}

static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}

static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}

static void _Block_do_nothing(const void *aBlock) { }

static void _Block_retain_object_default(const void *ptr) {
}

static void _Block_release_object_default(const void *ptr) {
}

static void _Block_assign_weak_default(const void *ptr, void *dest) {
#if !TARGET_OS_WIN32
    *(long *)dest = (long)ptr;
#else
    *(void **)dest = (void *)ptr;
#endif
}

static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}

static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}

static void _Block_destructInstance_default(const void *aBlock) {}

/**************************************************************************
GC support callout functions - initially set to stub routines
***************************************************************************/

static void *(*_Block_allocator)(size_t, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *dest, void *ptr) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default;


/**************************************************************************
GC support SPI functions - called from ObjC runtime and CoreFoundation
***************************************************************************/

// Public SPI
// Called from objc-auto to turn on GC.
// version 3, 4 arg, but changed 1st arg
void _Block_use_GC( void *(*alloc)(size_t, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    // blocks with ctors & dtors need to have the dtor run from a class with a finalizer
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_BYREF_IS_GC;   // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}

// transitional
void _Block_use_GC5( void *(*alloc)(size_t, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *)) {
    // until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}


// Called from objc-auto to alternatively turn on retain/release.
// Prior to this the only "object" support we can provide is for those
// super special objects that live in libSystem, namely dispatch queues.
// Blocks and Block_byrefs have their own special entry points.
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
#if TARGET_OS_WIN32
    HANDLE hProcess = GetCurrentProcess();
    HMODULE hModule[1024];
    DWORD cbNeeded = 0;

    if (!EnumProcessModules(hProcess, hModule, sizeof(hModule), &cbNeeded))
      return;
    if (cbNeeded > sizeof(hModule))
      return;

    for (unsigned I = 0; I < (cbNeeded / sizeof(HMODULE)); ++I) {
      _Block_destructInstance =
          (void (*)(const void *))GetProcAddress(hModule[I],
                                                 "objc_destructInstance");
      if (_Block_destructInstance)
        break;
    }
#else
    _Block_destructInstance = dlsym(RTLD_DEFAULT, "objc_destructInstance");
#endif
}

// Called from CF to indicate MRR. Newer version uses a versioned structure, so we can add more functions
// without defining a new entry point.
void _Block_use_RR2(const Block_callbacks_RR *callbacks) {
    _Block_retain_object = callbacks->retain;
    _Block_release_object = callbacks->release;
    _Block_destructInstance = callbacks->destructInstance;
}
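
/*
 * Illustrative only: a caller-side sketch of _Block_use_RR2, assuming the
 * Block_callbacks_RR layout declared in Block_private.h (a size field followed by
 * the retain/release/destructInstance callbacks read above).  The my_* names are
 * hypothetical:
 *
 *     static const Block_callbacks_RR callbacks = {
 *         sizeof(Block_callbacks_RR), my_retain, my_release, my_destructInstance
 *     };
 *     _Block_use_RR2(&callbacks);
 */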

/****************************************************************************
Accessors for block descriptor fields
*****************************************************************************/
#if 0
static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock)
{
    return aBlock->descriptor;
}
#endif

static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    return (struct Block_descriptor_2 *)desc;
}

static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return (struct Block_descriptor_3 *)desc;
}

static __inline bool _Block_has_layout(struct Block_layout *aBlock) {
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return false;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return ((struct Block_descriptor_3 *)desc)->layout != NULL;
}
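
/*
 * Sketch of the trailing-descriptor layout these accessors walk (per the struct
 * declarations in Block_private.h): every block points at a Block_descriptor_1
 * (reserved word plus size); if BLOCK_HAS_COPY_DISPOSE is set, a Block_descriptor_2
 * (copy and dispose helper pointers) follows immediately; if BLOCK_HAS_SIGNATURE is
 * set, a Block_descriptor_3 (signature and layout strings) comes after that.  That
 * ordering is why _Block_descriptor_3 skips sizeof(struct Block_descriptor_2) only
 * when the copy/dispose flag is present.
 */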

static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->copy)(result, aBlock); // do fixup
}

static void _Block_call_dispose_helper(struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->dispose)(aBlock);
}

/*******************************************************************************
Internal Support routines for copying
********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block.  If really copying, call the copy helper if present.
static void *_Block_copy_internal(const void *arg, const bool wantsOne) {
    struct Block_layout *aBlock;

    if (!arg) return NULL;

    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) {
            // Tell collector to hang on to this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // It's a stack block.  Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 2;  // logical refcount 1
        result->isa = _NSConcreteMallocBlock;
        _Block_call_copy_helper(result, aBlock);
        return result;
    }
    else {
        // Under GC we want an allocation with refcount 1, so we pass "true" when wantsOne.
        // This allows the copy helper routines to make non-refcounted block copies under GC.
        int32_t flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock));
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 2;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        _Block_call_copy_helper(result, aBlock);
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}
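
/*
 * Worked example (illustrative values only) for the non-GC stack-copy path above:
 * a stack block whose flags carry just BLOCK_HAS_SIGNATURE comes back as a heap
 * copy whose flags are BLOCK_HAS_SIGNATURE | BLOCK_NEEDS_FREE | 2, i.e. a logical
 * refcount of 1 encoded in the low bits, with isa rewritten to
 * _NSConcreteMallocBlock so Objective-C treats it as a malloc block.
 */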


// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
// Otherwise we need to copy it and update the stack forwarding pointer.
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    if (src->forwarding->flags & BLOCK_BYREF_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if it's weak, ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
            struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
            copy2->byref_keep = src2->byref_keep;
            copy2->byref_destroy = src2->byref_destroy;

            if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
                struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1);
                struct Block_byref_3 *copy3 = (struct Block_byref_3*)(copy2+1);
                copy3->layout = src3->layout;
            }

            (*src2->byref_keep)(copy, src);
        }
        else {
            // just bits.  Blast 'em using _Block_memmove in case they're __strong.
            // This copy includes Block_byref_3, if any.
            _Block_memmove(copy+1, src+1,
                           src->size - sizeof(struct Block_byref));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}
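
/*
 * Sketch of the __block ("byref") record this routine copies, following the struct
 * declarations in Block_private.h: a Block_byref header (isa, forwarding, flags,
 * size) is followed by an optional Block_byref_2 (byref_keep/byref_destroy helper
 * pointers, present when BLOCK_BYREF_HAS_COPY_DISPOSE is set), an optional
 * Block_byref_3 (layout string, present when BLOCK_BYREF_LAYOUT_EXTENDED is set),
 * and finally the captured variable itself.  After the copy, both the stack
 * record's and the heap record's forwarding pointers refer to the heap copy, so
 * code that reads the variable through x.forwarding sees one shared value no
 * matter which record it starts from.
 */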

// Old compiler SPI
static void _Block_byref_release(const void *arg) {
    struct Block_byref *byref = (struct Block_byref *)arg;
    int32_t refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    byref = byref->forwarding;

    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((byref->flags & BLOCK_BYREF_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = byref->flags & BLOCK_REFCOUNT_MASK;
    os_assert(refcount);
    if (latching_decr_int_should_deallocate(&byref->flags)) {
        if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1);
            (*byref2->byref_destroy)(byref);
        }
        _Block_deallocator((struct Block_layout *)byref);
    }
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif

void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, true);
}


// API entry point to release a copied Block
void _Block_release(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    if (!aBlock
        || (aBlock->flags & BLOCK_IS_GLOBAL)
        || ((aBlock->flags & (BLOCK_IS_GC|BLOCK_NEEDS_FREE)) == 0)
        ) return;
    if (aBlock->flags & BLOCK_IS_GC) {
        if (latching_decr_int_now_zero(&aBlock->flags)) {
            // Tell GC we no longer have our own refcounts.  GC will decr its refcount
            // and unless someone has done a CFRetain or marked it uncollectable it will
            // now be subject to GC reclamation.
            _Block_setHasRefcount(aBlock, false);
        }
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        if (latching_decr_int_should_deallocate(&aBlock->flags)) {
            _Block_call_dispose_helper(aBlock);
            _Block_destructInstance(aBlock);
            _Block_deallocator(aBlock);
        }
    }
}
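
/*
 * Illustrative client-side usage (not part of the runtime): user code normally
 * reaches _Block_copy/_Block_release through the Block_copy()/Block_release()
 * macros in <Block.h>, e.g.
 *
 *     void (^onHeap)(void) = Block_copy(^{ do_work(); });   // stack block -> heap, logical refcount 1
 *     onHeap();
 *     Block_release(onHeap);                                 // refcount hits 0: dispose helper runs, memory freed
 *
 * Copying a block that is already on the heap just bumps the latching refcount,
 * and copying a global block returns it unchanged.  do_work is a hypothetical
 * function.
 */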

bool _Block_tryRetain(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return latching_incr_int_not_deallocating(&aBlock->flags);
}

bool _Block_isDeallocating(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}

// Old compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}


/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

// SPI, also internal.  Called from NSAutoBlock only under GC
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, false);
}


// SPI
size_t Block_size(void *aBlock) {
    return ((struct Block_layout *)aBlock)->descriptor->size;
}

bool _Block_use_stret(void *aBlock) {
    struct Block_layout *layout = (struct Block_layout *)aBlock;

    int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
    return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool _Block_has_signature(void *aBlock) {
    return _Block_signature(aBlock) ? true : false;
}

const char * _Block_signature(void *aBlock)
{
    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->signature;
}

const char * _Block_layout(void *aBlock)
{
    // Don't return extended layout to callers expecting GC layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->layout;
}

const char * _Block_extended_layout(void *aBlock)
{
    // Don't return GC layout to callers expecting extended layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    // Return empty string (all non-object bytes) instead of NULL
    // so callers can distinguish "empty layout" from "no layout".
    if (!desc3->layout) return "";
    else return desc3->layout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif


/*******************************************************

Entry points used by the compiler - the real API!


A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers.  The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign.  The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
    * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C object,
    * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
    * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions.  Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor.  And similarly the helpers will call into the same two support functions with the same values for objects and Blocks, with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and always 128 ORed in, for the following set of possibilities:
    __block id                   128+3       (0x83)
    __block (^Block)             128+7       (0x87)
    __weak __block id            128+3+16    (0x93)
    __weak __block (^Block)      128+7+16    (0x97)

********************************************************/
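
/*
 * Illustrative sketch (hand-written here, not actual compiler output) of the copy
 * and dispose helpers described above, for a block that captures a single
 * Objective-C object pointer.  The struct and function names are hypothetical;
 * only _Block_object_assign, _Block_object_dispose, and BLOCK_FIELD_IS_OBJECT come
 * from this runtime:
 *
 *     struct my_block_literal {
 *         void *isa;
 *         int flags;
 *         int reserved;
 *         void (*invoke)(struct my_block_literal *);
 *         struct Block_descriptor_1 *descriptor;
 *         void *captured_object;             // the captured id, laid out after the header
 *     };
 *
 *     static void my_block_copy(void *dst, const void *src) {
 *         _Block_object_assign(&((struct my_block_literal *)dst)->captured_object,
 *                              ((const struct my_block_literal *)src)->captured_object,
 *                              BLOCK_FIELD_IS_OBJECT);       // flags value 3
 *     }
 *
 *     static void my_block_dispose(const void *src) {
 *         _Block_object_dispose(((const struct my_block_literal *)src)->captured_object,
 *                               BLOCK_FIELD_IS_OBJECT);
 *     }
 */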

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_OBJECT:
        /*******
        id object = ...;
        [^{ object; } copy];
        ********/

        _Block_retain_object(object);
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_FIELD_IS_BLOCK:
        /*******
        void (^object)(void) = ...;
        [^{ object; } copy];
        ********/

        _Block_assign(_Block_copy_internal(object, false), destAddr);
        break;

      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        /*******
         // copy the onstack __block container to the heap
         __block ... x;
         __weak __block ... x;
         [^{ x; } copy];
         ********/

        _Block_byref_assign_copy(destAddr, object, flags);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
        /*******
         // copy the actual field held in the __block container
         __block id object;
         __block void (^object)(void);
         [^{ object; } copy];
         ********/

        // under manual retain release __block object/block variables are dangling
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
        /*******
         // copy the actual field held in the __block container
         __weak __block id object;
         __weak __block void (^object)(void);
         [^{ object; } copy];
         ********/

        _Block_assign_weak(object, destAddr);
        break;

      default:
        break;
    }
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
// Used initially only for __attribute__((NSObject)) marked pointers.
void _Block_object_dispose(const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
        break;
      case BLOCK_FIELD_IS_BLOCK:
        _Block_destroy(object);
        break;
      case BLOCK_FIELD_IS_OBJECT:
        _Block_release_object(object);
        break;
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
        break;
      default:
        break;
    }
}