@@ -83,8 +83,8 @@ virtual void virtfunc(void) {
 	return arg;
 }
 
-static volatile int32_t global;
-static volatile int64_t w_global;
+static atomic_int global;
+static _Atomic(int64_t) w_global;
 
 #if TARGET_OS_EMBEDDED
 static const size_t cnt = 5000000;
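
A minimal standalone sketch (not part of the patch) of the two C11 declaration forms the hunk above switches to, the `atomic_int` typedef and the `_Atomic()` specifier:

```c
// Sketch only: both declarations give sequentially consistent atomics
// when accessed through the <stdatomic.h> generic functions.
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_int global;          // same as _Atomic(int)
static _Atomic(int64_t) w_global;  // _Atomic() works for any scalar type

int main(void)
{
	atomic_store(&global, 1);
	atomic_fetch_add(&w_global, 2);
	printf("%d %lld\n", atomic_load(&global),
			(long long)atomic_load(&w_global));
	return 0;
}
```
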
@@ -191,7 +191,7 @@ static void __attribute__((noinline))
 main(void)
 {
 	pthread_mutex_t plock = PTHREAD_MUTEX_INITIALIZER;
-	OSSpinLock slock = OS_SPINLOCK_INIT;
+	os_unfair_lock slock = OS_UNFAIR_LOCK_INIT;
 	BasicObject *bo;
 	BasicClass *bc;
 	pthread_t pthr_pause;
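
`OSSpinLock` is deprecated because a spinning waiter can live-lock against a lower-priority lock holder; `os_unfair_lock` (macOS 10.12+, `<os/lock.h>`) blocks in the kernel and lets it donate priority to the owner. A minimal usage sketch, not taken from the patch:

```c
// Sketch of the os_unfair_lock API adopted above.
#include <os/lock.h>

static os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
static int counter;

static void increment(void)
{
	os_unfair_lock_lock(&lock);    // blocks instead of spinning
	counter++;
	os_unfair_lock_unlock(&lock);  // must be called by the locking thread
}
```
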
@@ -219,8 +219,7 @@ static void __attribute__((noinline))
 	cycles_per_nanosecond = (long double)freq / (long double)NSEC_PER_SEC;
 
 #if BENCH_SLOW
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-	assert(pool);
+	@autoreleasepool {
 #endif
 
 	/* Malloc has different logic for threaded apps. */
@@ -371,9 +370,7 @@ static void __attribute__((noinline))
 	}
 	print_result2(s, "\"description\" ObjC call:");
 
-	[pool release];
-
-	pool = NULL;
+	} // For the autorelease pool
 #endif
 
 	s = mach_absolute_time();
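
These two hunks trade a manually managed `NSAutoreleasePool` for the `@autoreleasepool` statement, which drains at the closing brace even on early exit from the block. A minimal sketch of the pattern (not from the patch):

```objc
// Sketch: objects autoreleased inside the block are released
// when control leaves the block, however it leaves.
#import <Foundation/Foundation.h>

int main(void)
{
	@autoreleasepool {
		NSString *s = [NSString stringWithFormat:@"%d", 42];
		NSLog(@"%@", s);
	}	// pool drained here
	return 0;
}
```
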
@@ -554,30 +551,31 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		__sync_lock_test_and_set(&global, 0);
+		atomic_exchange(&global, 0);
 	}
 	print_result(s, "Atomic xchg:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		__sync_val_compare_and_swap(&global, 1, 0);
+		int expected = 1;
+		atomic_compare_exchange_strong(&global, &expected, 0);
 	}
 	print_result(s, "Atomic cmpxchg:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		__sync_fetch_and_add(&global, 1);
+		atomic_fetch_add(&global, 1);
 	}
 	print_result(s, "Atomic increment:");
 
 	{
-		global = 0;
-		volatile int32_t *g = &global;
+		atomic_store(&global, 0);
+		atomic_int *g = &global;
 
 		s = mach_absolute_time();
 		for (i = cnt; i; i--) {
 			uint32_t result;
-			__sync_and_and_fetch(g, 1);
+			atomic_fetch_and(g, 1);
 			result = *g;
 			if (result) {
 				abort();
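
One behavioral note on the cmpxchg hunk: `__sync_val_compare_and_swap` returns the old value, while `atomic_compare_exchange_strong` returns a success flag and writes the value it actually observed back through the `expected` pointer, which is why `expected` is re-declared on every iteration above. A standalone sketch (function name hypothetical):

```c
// Sketch: 'expected' is an in/out parameter; on failure it holds the
// value currently stored, so it must be reset before any retry.
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int val;

static bool try_claim(void)
{
	int expected = 0;	// what we believe 'val' currently holds
	return atomic_compare_exchange_strong(&val, &expected, 1);
}
```
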
@@ -587,57 +585,59 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
 	}
 
 	{
-		global = 0;
-		volatile int32_t *g = &global;
+		atomic_store(&global, 0);
+		atomic_int *g = &global;
 
 		s = mach_absolute_time();
 		for (i = cnt; i; i--) {
 			uint32_t result;
-			result = __sync_and_and_fetch(g, 1);
+			result = atomic_fetch_and(g, 1);
 			if (result) {
 				abort();
 			}
 		}
 		print_result(s, "Atomic and-and-fetch, using result:");
 	}
 
-	global = 0;
+	atomic_store(&global, 0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSAtomicIncrement32Barrier(&global);
+		atomic_fetch_add_explicit(&global, 1, memory_order_seq_cst);
 	}
-	print_result(s, "OSAtomicIncrement32Barrier:");
+	print_result(s, "atomic_fetch_add with memory_order_seq_cst barrier:");
 
-	global = 0;
+	atomic_store(&global, 0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSAtomicIncrement32(&global);
+		atomic_fetch_add_explicit(&global, 1, memory_order_relaxed);
 	}
-	print_result(s, "OSAtomicIncrement32:");
+	print_result(s, "atomic_fetch_add with memory_order_relaxed barrier:");
 
-	w_global = 0;
+	atomic_store(&w_global, 0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSAtomicIncrement64Barrier(&w_global);
+		atomic_fetch_add_explicit(&w_global, 1, memory_order_seq_cst);
 	}
-	print_result(s, "OSAtomicIncrement64Barrier:");
+	print_result(s, "64-bit atomic_fetch_add with memory_order_seq_cst barrier:");
 
-	w_global = 0;
+	atomic_store(&w_global, 0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSAtomicIncrement64(&w_global);
+		atomic_fetch_add_explicit(&w_global, 1, memory_order_relaxed);
 	}
-	print_result(s, "OSAtomicIncrement64:");
+	print_result(s, "64-bit atomic_fetch_add with memory_order_relaxed barrier:");
 
-	global = 0;
+	atomic_store(&global, 0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		while (!__sync_bool_compare_and_swap(&global, 0, 1)) {
+		int zero = 0;
+		while (!atomic_compare_exchange_weak(&global, &zero, 1)) {
+			zero = 0;
 			do {
 #if defined(__i386__) || defined(__x86_64__)
 				__asm__ __volatile__ ("pause");
@@ -646,16 +646,16 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
 #endif
 			} while (global);
 		}
-		global = 0;
+		atomic_store(&global, 0);
 	}
 	print_result(s, "Inlined spin lock/unlock:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSSpinLockLock(&slock);
-		OSSpinLockUnlock(&slock);
+		os_unfair_lock_lock(&slock);
+		os_unfair_lock_unlock(&slock);
 	}
-	print_result(s, "OSSpinLock/Unlock:");
+	print_result(s, "os_unfair_lock_lock/unlock:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
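
The OSAtomic `…Barrier`/plain pairs map onto explicit C11 memory orders, which is what the rewritten fetch-add benchmarks compare. A standalone sketch of the two orderings (not from the patch):

```c
// Sketch: relaxed guarantees only atomicity of the increment itself;
// seq_cst additionally participates in the single total order of all
// sequentially consistent operations, which costs a barrier on most CPUs.
#include <stdatomic.h>
#include <stdint.h>

static _Atomic(int64_t) counter;

static void bump_seq_cst(void)
{
	atomic_fetch_add_explicit(&counter, 1, memory_order_seq_cst);
}

static void bump_relaxed(void)
{
	atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);
}
```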