/* lib/BlocksRuntime/runtime.c */
  1  /*
  2   * runtime.c
  3   *
  4   * Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
  5   * to any person obtaining a copy of this software and associated documentation
  6   * files (the "Software"), to deal in the Software without restriction,
  7   * including without limitation the rights to use, copy, modify, merge, publish,
  8   * distribute, sublicense, and/or sell copies of the Software, and to permit
  9   * persons to whom the Software is furnished to do so, subject to the following
 10   * conditions:
 11   * 
 12   * The above copyright notice and this permission notice shall be included in
 13   * all copies or substantial portions of the Software.
 14   * 
 15   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 18   * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19   * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 20   * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 21   * SOFTWARE.
 22   *
 23   */
 24  
 25  #include "Block_private.h"
 26  #include <stdio.h>
 27  #include <stdlib.h>
 28  #include <string.h>
 29  #include <stdint.h>
 30  
 31  #include "config.h"
 32  
 33  #ifdef HAVE_AVAILABILITY_MACROS_H
 34  #include <AvailabilityMacros.h>
 35  #endif /* HAVE_AVAILABILITY_MACROS_H */
 36  
 37  #ifdef HAVE_TARGET_CONDITIONALS_H
 38  #include <TargetConditionals.h>
 39  #endif /* HAVE_TARGET_CONDITIONALS_H */
 40  
#if defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_INT) && defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG)

/* Darwin: the OSAtomic compare-and-swap primitives are available natively. */
#ifdef HAVE_LIBKERN_OSATOMIC_H
#include <libkern/OSAtomic.h>
#endif /* HAVE_LIBKERN_OSATOMIC_H */

#elif defined(__WIN32__) || defined(_WIN32)
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>

/* Emulate OSAtomicCompareAndSwapLong on Win32 via the Interlocked API.
 * Returns true iff *dst held oldl and was replaced with newl. */
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
    /* fixme barrier is overkill -- see objc-os.h */
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

/* Int-wide emulation of the same primitive.
 * NOTE(review): InterlockedCompareExchange is declared for LONG volatile *;
 * passing int volatile * relies on int and LONG both being 32 bits on
 * Windows -- confirm the supported toolchains accept this without a cast. */
static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
    /* fixme barrier is overkill -- see objc-os.h */
    int original = InterlockedCompareExchange(dst, newi, oldi);
    return (original == oldi);
}

/*
 * Check to see if the GCC atomic built-ins are available.  If we're on
 * a 64-bit system, make sure we have an 8-byte atomic function
 * available.
 *
 */

#elif defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_INT) && defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_LONG)

/* GCC/Clang __sync built-ins: return true iff the swap took place. */
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
  return __sync_bool_compare_and_swap(dst, oldl, newl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
  return __sync_bool_compare_and_swap(dst, oldi, newi);
}

#else
#error unknown atomic compare-and-swap primitive
#endif /* HAVE_OSATOMIC_COMPARE_AND_SWAP_INT && HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG */
 83  
 84  
 85  /*
 86   * Globals:
 87   */
 88  
/* isa values installed on heap copies; _Block_use_GC retargets both for GC.
 * NOTE(review): _Block_copy_class, _Block_copy_finalizing_class, and
 * _Block_copy_flag are written (here and in _Block_use_GC) but never read
 * in this file -- presumably consumed elsewhere or vestigial; confirm. */
static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
/* Initial flags for a heap byref: needs-free plus refcount 2
 * ("one for caller, one for stack" -- see _Block_byref_assign_copy). */
static int _Byref_flag_initial_value = BLOCK_NEEDS_FREE | 2;

/* Internal flag for _Block_copy_internal: give a fresh copy an initial
 * local refcount of one. */
static const int WANTS_ONE = (1 << 16);

/* True once _Block_use_GC has switched the runtime to collected mode. */
static bool isGC = false;
 97  
 98  /*
 99   * Internal Utilities:
100   */
101  
#if 0
/* Long-width counterpart of latching_incr_int; compiled out, kept for
 * reference only.  Saturates ("latches") at BLOCK_REFCOUNT_MASK. */
static unsigned long int latching_incr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value+1, (volatile long int *)where)) {
            return old_value+1;
        }
    }
}
#endif /* if 0 */
115  
116  static int latching_incr_int(int *where) {
117      while (1) {
118          int old_value = *(volatile int *)where;
119          if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
120              return BLOCK_REFCOUNT_MASK;
121          }
122          if (OSAtomicCompareAndSwapInt(old_value, old_value+1, (volatile int *)where)) {
123              return old_value+1;
124          }
125      }
126  }
127  
#if 0
/* Long-width counterpart of latching_decr_int; compiled out, kept for
 * reference only.  Latching semantics: a saturated (all-ones) count stays
 * saturated, an already-zero count stays zero, otherwise decrement once.
 * Fixed: the old code read *where through a (volatile int *) cast, which
 * truncates the load on LP64 targets; read it at its declared width.
 * NOTE(review): the int return type still narrows the unsigned long
 * result -- acceptable only while this code stays disabled. */
static int latching_decr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return 0;
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value-1, (volatile long int *)where)) {
            return old_value-1;
        }
    }
}
#endif /* if 0 */
144  
145  static int latching_decr_int(int *where) {
146      while (1) {
147          int old_value = *(volatile int *)where;
148          if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
149              return BLOCK_REFCOUNT_MASK;
150          }
151          if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
152              return 0;
153          }
154          if (OSAtomicCompareAndSwapInt(old_value, old_value-1, (volatile int *)where)) {
155              return old_value-1;
156          }
157      }
158  }
159  
160  
161  /*
162   * GC support stub routines:
163   */
164  #if 0
165  #pragma mark GC Support Routines
166  #endif /* if 0 */
167  
168  
/* Default (non-GC) Block allocator: plain malloc.  The refcount/object
 * hints only matter to a collecting allocator, so they are ignored here. */
static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    (void)initialCountIsOne;
    (void)isObject;
    return malloc(size);
}
172  
/* Default pointer assignment: a raw store with no write barrier. */
static void _Block_assign_default(void *value, void **destptr) {
    destptr[0] = value;
}
176  
/* Default refcount notification: only meaningful under GC, so a no-op. */
static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}
179  
180  static void _Block_do_nothing(const void *aBlock) { }
181  
/* Default object retain: no ObjC runtime is wired in yet, so do nothing.
 * (Real retain/release callouts are installed by _Block_use_RR.) */
static void _Block_retain_object_default(const void *ptr) {
    if (ptr == NULL) {
        return;
    }
}
185  
/* Default object release: counterpart of the retain stub above; no-op. */
static void _Block_release_object_default(const void *ptr) {
    if (ptr == NULL) {
        return;
    }
}
189  
/* Default weak assignment: without GC there are no weak references, so
 * this is just a raw pointer store into *dest. */
static void _Block_assign_weak_default(const void *ptr, void *dest) {
    void **slot = (void **)dest;
    *slot = (void *)ptr;
}
193  
/* Default bulk copy: defer to the C library (handles overlap). */
static void _Block_memmove_default(void *dest, void *source, unsigned long size) {
    memmove(dest, source, (size_t)size);
}
197  
/* Transitional GC memmove used until objc installs the real one via
 * _Block_use_GC.  "Broken" because it copies pointer-by-pointer with a
 * plain store and no write barrier, and assumes pointer alignment.
 * Fixed: the loop now stops when fewer than sizeof(void *) bytes remain;
 * the old "while (size)" underflowed the unsigned counter and spun
 * forever whenever size was not a multiple of the pointer size.  (Any
 * trailing tail bytes are still not copied, exactly as before.) */
static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size >= sizeof(void *)) {
        *destp = *srcp;   /* raw store, no barrier -- hence "broken" */
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}
208  
209  /*
210   * GC support callout functions - initially set to stub routines:
211   */
212  
/* Allocator for heap Blocks/byrefs; malloc until GC supplies its own. */
static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
/* Deallocator paired with the allocator; becomes a no-op under GC. */
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
/* Pointer assignment; a write-barriered store once GC is enabled. */
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
/* Tells the collector when a Block gains/loses its local refcounts. */
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
/* Retain/release for captured objects; wired up by _Block_use_RR. */
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
/* Weak-reference assignment under GC.
 * NOTE(review): the parameter names here read (dest, ptr) but the default
 * implementation and every call site pass (source, dest); the names are
 * misleading, though the types line up so behavior is unaffected. */
static void (*_Block_assign_weak)(const void *dest, void *ptr) = _Block_assign_weak_default;
/* Bulk copy that honors __strong fields under GC. */
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
221  
222  
223  /*
224   * GC support SPI functions - called from ObjC runtime and CoreFoundation:
225   */
226  
227  /* Public SPI
228   * Called from objc-auto to turn on GC.
229   * version 3, 4 arg, but changed 1st arg
230   */
/* Switch the Blocks runtime into garbage-collected mode.  Called from the
 * ObjC runtime (objc-auto); redirects allocation, assignment, and
 * refcount bookkeeping to the supplied collector callouts, and turns the
 * retain/release and deallocate paths into no-ops (the collector owns
 * object lifetime from here on). */
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;   // collector reclaims memory
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    /* blocks with ctors & dtors need to have the dtor run from a class with a finalizer */
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_IS_GC;   // no refcount
    _Block_retain_object = _Block_do_nothing;  // GC governs lifetime now
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}
252  
253  /* transitional */
/* Transitional variant of _Block_use_GC for runtimes that do not yet
 * supply a GC-aware memmove; substitutes the known-broken internal one
 * until _Block_use_GC is called with the real thing. */
void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *)) {
    /* until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then */
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}
261  
262   
263  /*
264   * Called from objc-auto to alternatively turn on retain/release.
265   * Prior to this the only "object" support we can provide is for those
266   * super special objects that live in libSystem, namely dispatch queues.
267   * Blocks and Block_byrefs have their own special entry points.
268   *
269   */
/* Install retain/release callouts for captured objects (non-GC mode).
 * Replaces the default no-op stubs set at startup. */
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
}
275  
276  /*
277   * Internal Support routines for copying:
278   */
279  
280  #if 0
281  #pragma mark Copy/Release support
282  #endif /* if 0 */
283  
/* Copy, or bump refcount, of a block.  If really copying, call the copy helper if present. */
/* WANTS_ONE in `flags` requests an initial local refcount of 1 on a fresh
 * copy; the GC copy-collectable path passes 0 so copy helpers can make
 * non-refcounted copies.  Returns NULL on NULL input or allocation failure. */
static void *_Block_copy_internal(const void *arg, const int flags) {
    struct Block_layout *aBlock;
    const bool wantsOne = (WANTS_ONE & flags) == WANTS_ONE;

    //printf("_Block_copy_internal(%p, %x)\n", arg, flags);	
    if (!arg) return NULL;
    
    
    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // Already a malloc heap Block: just bump the refcount.
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 1)) {
            // Tell collector to hang on this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        // Global Blocks are immortal; nothing to copy or count.
        return aBlock;
    }

    // Its a stack block.  Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 1;      // heap copy starts at refcount 1
        result->isa = _NSConcreteMallocBlock;
        if (result->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper %p(%p, %p)...\n", aBlock->descriptor->copy, result, aBlock);
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
        // This allows the copy helper routines to make non-refcounted block copies under GC
        unsigned long int flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 1;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        if (flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper...\n");
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        // C++ dtors must run, so route through a finalizing class.
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}
356  
357  
358  /*
359   * Runtime entry points for maintaining the sharing knowledge of byref data blocks.
360   *
361   * A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data
362   * Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
363   * We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
364   * Otherwise we need to copy it and update the stack forwarding pointer
365   * XXX We need to account for weak/nonretained read-write barriers.
366   */
367  
/* Fix up a copied Block's reference to a __block (byref) variable.
 * `dest` is the slot in the new heap Block; `arg` is the byref as seen by
 * the stack Block.  Judged through the forwarding pointer, three cases:
 * GC-owned (nothing to do), still on the stack (copy to the heap and
 * repoint both forwarding pointers), or already on the heap (bump its
 * refcount).  Finally store the canonical heap pointer into `dest`. */
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;
        
    //printf("_Block_byref_assign_copy called, byref destp %p, src %p, flags %x\n", destp, src, flags);
    //printf("src dump: %s\n", _Block_byref_dump(src));
    if (src->forwarding->flags & BLOCK_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("making copy\n");
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if its weak ask for an object (only matters under GC)
        // NOTE(review): the allocator result is not checked for NULL;
        // an allocation failure would crash on the next line -- confirm
        // that is the intended policy here.
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            copy->byref_keep = src->byref_keep;
            copy->byref_destroy = src->byref_destroy;
            (*src->byref_keep)(copy, src);
        }
        else {
            // just bits.  Blast 'em using _Block_memmove in case they're __strong
            _Block_memmove(
                (void *)&copy->byref_keep,
                (void *)&src->byref_keep,
                src->size - sizeof(struct Block_byref_header));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_NEEDS_FREE) == BLOCK_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}
412  
413  // Old compiler SPI
/* Old compiler SPI: balance one reference to a byref data block taken by
 * _Block_byref_assign_copy.  When the last reference drops, run the
 * dispose helper (if any) and free the heap copy. */
static void _Block_byref_release(const void *arg) {
    struct Block_byref *shared_struct = (struct Block_byref *)arg;
    int refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    shared_struct = shared_struct->forwarding;
    
    //printf("_Block_byref_release %p called, flags are %x\n", shared_struct, shared_struct->flags);
    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((shared_struct->flags & BLOCK_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = shared_struct->flags & BLOCK_REFCOUNT_MASK;
    if (refcount <= 0) {
        // over-release: report it but do not free (avoid double-free)
        printf("_Block_byref_release: Block byref data structure at %p underflowed\n", arg);
    }
    else if ((latching_decr_int(&shared_struct->flags) & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("disposing of heap based byref block\n");
        if (shared_struct->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling out to helper\n");
            (*shared_struct->byref_destroy)(shared_struct);
        }
        _Block_deallocator((struct Block_layout *)shared_struct);
    }
}
440  
441  
442  /*
443   *
444   * API supporting SPI
445   * _Block_copy, _Block_release, and (old) _Block_destroy
446   *
447   */
448  
449  #if 0
450  #pragma mark SPI/API
451  #endif /* if 0 */
452  
/* API entry point (Block_copy): copy or retain a Block, requesting an
 * initial refcount of one on a fresh heap copy. */
void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, WANTS_ONE);
}
456  
457  
458  // API entry point to release a copied Block
// API entry point to release a copied Block
/* Drops one reference; on the transition to zero either notifies the
 * collector (GC Blocks) or runs the dispose helper and frees (malloc
 * Blocks).  NULL, global, and stack Blocks are safe no-ops. */
void _Block_release(void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    int32_t newCount;
    if (!aBlock) return;
    newCount = latching_decr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK;
    if (newCount > 0) return;   // still referenced elsewhere
    // Hit zero
    if (aBlock->flags & BLOCK_IS_GC) {
        // Tell GC we no longer have our own refcounts.  GC will decr its refcount
        // and unless someone has done a CFRetain or marked it uncollectable it will
        // now be subject to GC reclamation.
        _Block_setHasRefcount(aBlock, false);
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // run the compiler-generated dispose helper before freeing
        if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)(*aBlock->descriptor->dispose)(aBlock);
        _Block_deallocator(aBlock);
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        ;   // global Blocks are never freed
    }
    else {
        printf("Block_release called upon a stack Block: %p, ignored\n", (void *)aBlock);
    }
}
483  
484  
485  
486  // Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
/* Under GC the call arrives from a destructor path and must be ignored;
 * otherwise it is equivalent to _Block_release. */
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}
497  
498  
499  
500  /*
501   *
502   * SPI used by other layers
503   *
504   */
505  
506  // SPI, also internal.  Called from NSAutoBlock only under GC
// SPI, also internal.  Called from NSAutoBlock only under GC
/* Same as _Block_copy but without WANTS_ONE: the resulting copy carries
 * no initial local refcount (collector-managed). */
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, 0);
}
510  
511  
512  // SPI
513  unsigned long int Block_size(void *arg) {
514      return ((struct Block_layout *)arg)->descriptor->size;
515  }
516  
517  
518  #if 0
519  #pragma mark Compiler SPI entry points
520  #endif /* if 0 */
521  
522      
523  /*******************************************************
524  
525  Entry points used by the compiler - the real API!
526  
527  
528  A Block can reference four different kinds of things that require help when the Block is copied to the heap.
529  1) C++ stack based objects
530  2) References to Objective-C objects
531  3) Other Blocks
532  4) __block variables
533  
534  In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers.  The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign.  The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.
535  
536  The flags parameter of _Block_object_assign and _Block_object_dispose is set to
537  	* BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
538  	* BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
539  	* BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
540  If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16).
541  
542  So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
543  
544  When  a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions.  Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor.  And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
545  
546  So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities:
547  	__block id                   128+3
548          __weak block id              128+3+16
549  	__block (^Block)             128+7
550  	__weak __block (^Block)      128+7+16
551          
552  The implementation of the two routines would be improved by switch statements enumerating the eight cases.
553  
554  ********************************************************/
555  
556  /*
557   * When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
558   * to do the assignment.
559   */
/* Compiler copy-helper entry point: store `object` into `destAddr` inside
 * a Block/byref being copied to the heap, dispatching on `flags`:
 *   BLOCK_BYREF_CALLER set -> called from a byref helper: plain (or weak)
 *                             assignment, never retain or copy;
 *   BLOCK_FIELD_IS_BYREF   -> a __block variable: copy/retain its byref;
 *   BLOCK_FIELD_IS_BLOCK   -> a captured Block: copy it;
 *   BLOCK_FIELD_IS_OBJECT  -> a captured object: retain it.
 * The BLOCK/OBJECT test ordering matters because the BLOCK bits are a
 * superset of the OBJECT bits (7 vs 3). */
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    //printf("_Block_object_assign(*%p, %p, %x)\n", destAddr, object, flags);
    if ((flags & BLOCK_BYREF_CALLER) == BLOCK_BYREF_CALLER) {
        if ((flags & BLOCK_FIELD_IS_WEAK) == BLOCK_FIELD_IS_WEAK) {
            _Block_assign_weak(object, destAddr);
        }
        else {
            // do *not* retain or *copy* __block variables whatever they are
            _Block_assign((void *)object, destAddr);
        }
    }
    else if ((flags & BLOCK_FIELD_IS_BYREF) == BLOCK_FIELD_IS_BYREF)  {
        // copying a __block reference from the stack Block to the heap
        // flags will indicate if it holds a __weak reference and needs a special isa
        _Block_byref_assign_copy(destAddr, object, flags);
    }
    // (this test must be before next one)
    else if ((flags & BLOCK_FIELD_IS_BLOCK) == BLOCK_FIELD_IS_BLOCK) {
        // copying a Block declared variable from the stack Block to the heap
        _Block_assign(_Block_copy_internal(object, flags), destAddr);
    }
    // (this test must be after previous one)
    else if ((flags & BLOCK_FIELD_IS_OBJECT) == BLOCK_FIELD_IS_OBJECT) {
        //printf("retaining object at %p\n", object);
        _Block_retain_object(object);
        //printf("done retaining object at %p\n", object);
        _Block_assign((void *)object, destAddr);
    }
}
589  
590  // When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
591  // to help dispose of the contents
592  // Used initially only for __attribute__((NSObject)) marked pointers.
// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
// Used initially only for __attribute__((NSObject)) marked pointers.
/* Mirror of _Block_object_assign: release the byref, Block, or object
 * reference identified by `flags`.  The BYREF_CALLER masks make __block
 * Block/object variables a no-op (the compiler never needs help there). */
void _Block_object_dispose(const void *object, const int flags) {
    //printf("_Block_object_dispose(%p, %x)\n", object, flags);
    if (flags & BLOCK_FIELD_IS_BYREF)  {
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_BLOCK) {
        // get rid of a referenced Block held by this Block
        // (ignore __block Block variables, compiler doesn't need to call us)
        _Block_destroy(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_WEAK|BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_OBJECT) {
        // get rid of a referenced object held by this Block
        // (ignore __block object variables, compiler doesn't need to call us)
        _Block_release_object(object);
    }
}
610  
611  
612  /*
613   * Debugging support:
614   */
615  #if 0
616  #pragma mark Debugging
617  #endif /* if 0 */
618  
619  
/* Debug helper: render a Block's isa, flags, refcount, and descriptor
 * into a human-readable string.
 * NOTE(review): returns a pointer into a static buffer, so this is not
 * thread-safe and each call clobbers the previous result; the sprintf
 * calls into the fixed 512-byte buffer are unbounded.  Debug-only use. */
const char *_Block_dump(const void *block) {
    struct Block_layout *closure = (struct Block_layout *)block;
    static char buffer[512];
    char *cp = buffer;
    if (closure == NULL) {
        sprintf(cp, "NULL passed to _Block_dump\n");
        return buffer;
    }
    if (! (closure->flags & BLOCK_HAS_DESCRIPTOR)) {
        // pre-descriptor layouts cannot be decoded; hard-exit by design
        printf("Block compiled by obsolete compiler, please recompile source for this Block\n");
        exit(1);
    }
    cp += sprintf(cp, "^%p (new layout) =\n", (void *)closure);
    if (closure->isa == NULL) {
        cp += sprintf(cp, "isa: NULL\n");
    }
    else if (closure->isa == _NSConcreteStackBlock) {
        cp += sprintf(cp, "isa: stack Block\n");
    }
    else if (closure->isa == _NSConcreteMallocBlock) {
        cp += sprintf(cp, "isa: malloc heap Block\n");
    }
    else if (closure->isa == _NSConcreteAutoBlock) {
        cp += sprintf(cp, "isa: GC heap Block\n");
    }
    else if (closure->isa == _NSConcreteGlobalBlock) {
        cp += sprintf(cp, "isa: global Block\n");
    }
    else if (closure->isa == _NSConcreteFinalizingBlock) {
        cp += sprintf(cp, "isa: finalizing Block\n");
    }
    else {
        cp += sprintf(cp, "isa?: %p\n", (void *)closure->isa);
    }
    cp += sprintf(cp, "flags:");
    if (closure->flags & BLOCK_HAS_DESCRIPTOR) {
        cp += sprintf(cp, " HASDESCRIPTOR");
    }
    if (closure->flags & BLOCK_NEEDS_FREE) {
        cp += sprintf(cp, " FREEME");
    }
    if (closure->flags & BLOCK_IS_GC) {
        cp += sprintf(cp, " ISGC");
    }
    if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, " HASHELP");
    }
    if (closure->flags & BLOCK_HAS_CTOR) {
        cp += sprintf(cp, " HASCTOR");
    }
    cp += sprintf(cp, "\nrefcount: %u\n", closure->flags & BLOCK_REFCOUNT_MASK);
    cp += sprintf(cp, "invoke: %p\n", (void *)(uintptr_t)closure->invoke);
    {
        struct Block_descriptor *dp = closure->descriptor;
        cp += sprintf(cp, "descriptor: %p\n", (void *)dp);
        cp += sprintf(cp, "descriptor->reserved: %lu\n", dp->reserved);
        cp += sprintf(cp, "descriptor->size: %lu\n", dp->size);

        if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
            cp += sprintf(cp, "descriptor->copy helper: %p\n", (void *)(uintptr_t)dp->copy);
            cp += sprintf(cp, "descriptor->dispose helper: %p\n", (void *)(uintptr_t)dp->dispose);
        }
    }
    return buffer;
}
685  
686  
/* Debug helper: render a byref data block's fields into a readable string.
 * NOTE(review): same static-buffer caveats as _Block_dump -- not
 * thread-safe, each call clobbers the previous result.  Debug-only use. */
const char *_Block_byref_dump(struct Block_byref *src) {
    static char buffer[256];
    char *cp = buffer;
    cp += sprintf(cp, "byref data block %p contents:\n", (void *)src);
    cp += sprintf(cp, "  forwarding: %p\n", (void *)src->forwarding);
    cp += sprintf(cp, "  flags: 0x%x\n", src->flags);
    cp += sprintf(cp, "  size: %d\n", src->size);
    if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, "  copy helper: %p\n", (void *)(uintptr_t)src->byref_keep);
        cp += sprintf(cp, "  dispose helper: %p\n", (void *)(uintptr_t)src->byref_destroy);
    }
    return buffer;
}
700