@@ -409,112 +409,27 @@ void CAtomicsThreadFence(enum MemoryOrder order)
   atomic_thread_fence(order);
 }

-// define struct + functions for nearly-atomic handling of Swift.Unmanaged
-
-#define __OPAQUE_UNMANAGED_LOCKED (uintptr_t)0x7
-#define __OPAQUE_UNMANAGED_SPINMASK (char)0xc0
+// define struct + functions for handling of Swift.Unmanaged

 CLANG_ATOMICS_STRUCT(OpaqueUnmanagedHelper, atomic_uintptr_t, a, _Alignof(atomic_uintptr_t))
 CLANG_ATOMICS_IS_LOCK_FREE(OpaqueUnmanagedHelper)
 CLANG_ATOMICS_POINTER_INITIALIZE(OpaqueUnmanagedHelper, const void*, _Nullable)
-
-// this should only be used for unlocking
-CLANG_ATOMICS_POINTER_STORE(OpaqueUnmanagedHelper, const void*, _Nullable)
+CLANG_ATOMICS_POINTER_SWAP(OpaqueUnmanagedHelper, const void*, _Nullable)

 // this should only be used for debugging and testing
 CLANG_ATOMICS_POINTER_LOAD(OpaqueUnmanagedHelper, const void*, _Nullable)

-static __inline__ __attribute__((__always_inline__)) \
-const void* _Nullable CAtomicsUnmanagedLockAndLoad(OpaqueUnmanagedHelper* _Nonnull atomic)
-{ // load the pointer value, and leave the pointer either LOCKED or NULL; spin for the lock if necessary
-#ifndef __SSE2__
-  char c;
-  c = 0;
-#endif
-  uintptr_t pointer;
-  pointer = atomic_load_explicit(&(atomic->a), __ATOMIC_ACQUIRE);
-  do { // don't fruitlessly invalidate the cache line if the value is locked
-    while (pointer == __OPAQUE_UNMANAGED_LOCKED)
-    {
-#ifdef __SSE2__
-      _mm_pause();
-#else
-      c += 1;
-      if ((c & __OPAQUE_UNMANAGED_SPINMASK) != 0) { sched_yield(); }
-#endif
-      pointer = atomic_load_explicit(&(atomic->a), __ATOMIC_ACQUIRE);
-    }
-    // return immediately if pointer is NULL (importantly: without locking)
-    if (pointer == (uintptr_t)NULL) { return NULL; }
-  } while (!atomic_compare_exchange_weak_explicit(&(atomic->a), &pointer, __OPAQUE_UNMANAGED_LOCKED, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE));
-
-  return (void*) pointer;
-}
-
-static __inline__ __attribute__((__always_inline__)) \
-__attribute__((overloadable)) \
-const void* _Nullable CAtomicsExchange(OpaqueUnmanagedHelper* _Nonnull atomic,
-                                       const void* _Nullable value, enum MemoryOrder order)
-{ // swap the pointer with `value`, spinning until the lock becomes unlocked if necessary
-#ifndef __SSE2__
-  char c;
-  c = 0;
-#endif
-  uintptr_t pointer;
-  pointer = atomic_load_explicit(&(atomic->a), order);
-  do { // don't fruitlessly invalidate the cache line if the value is locked
-    while (pointer == __OPAQUE_UNMANAGED_LOCKED)
-    {
-#ifdef __SSE2__
-      _mm_pause();
-#else
-      c += 1;
-      if ((c & __OPAQUE_UNMANAGED_SPINMASK) != 0) { sched_yield(); }
-#endif
-      pointer = atomic_load_explicit(&(atomic->a), order);
-    }
-  } while (!atomic_compare_exchange_weak_explicit(&(atomic->a), &pointer, (uintptr_t)value, order, order));
-
-  return (void*) pointer;
-}
-
 static __inline__ __attribute__((__always_inline__)) \
 __attribute__((overloadable)) \
 _Bool CAtomicsCompareAndExchange(OpaqueUnmanagedHelper* _Nonnull atomic,
                                  const void* _Nullable current, const void* _Nullable future,
                                  enum CASType type, enum MemoryOrder order)
 {
+  uintptr_t pointer = (uintptr_t) current;
   if (type == __ATOMIC_CAS_TYPE_WEAK)
-  {
-    uintptr_t pointer = (uintptr_t) current;
     return atomic_compare_exchange_weak_explicit(&(atomic->a), &pointer, (uintptr_t)future, order, memory_order_relaxed);
-  }
   else
-  { // we should consider that __OPAQUE_UNMANAGED_LOCKED is a spurious value
-#ifndef __SSE2__
-    char c;
-    c = 0;
-#endif
-    _Bool success;
-    while (true)
-    {
-      uintptr_t pointer = (uintptr_t) current;
-      success = atomic_compare_exchange_strong_explicit(&(atomic->a), &pointer, (uintptr_t)future, order, memory_order_relaxed);
-      if (pointer != __OPAQUE_UNMANAGED_LOCKED) { break; }
-
-      while (pointer == __OPAQUE_UNMANAGED_LOCKED)
-      { // don't fruitlessly invalidate the cache line if the value is locked
-#ifdef __SSE2__
-        _mm_pause();
-#else
-        c += 1;
-        if ((c & __OPAQUE_UNMANAGED_SPINMASK) != 0) { sched_yield(); }
-#endif
-        pointer = atomic_load_explicit(&(atomic->a), __ATOMIC_RELAXED);
-      }
-    }
-    return success;
-  }
+    return atomic_compare_exchange_strong_explicit(&(atomic->a), &pointer, (uintptr_t)future, order, memory_order_relaxed);
 }

 #endif
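
For readers following the change: the removed CAtomicsUnmanagedLockAndLoad and the hand-written CAtomicsExchange overload implemented a spin-until-unlocked protocol around the stored pointer, using __OPAQUE_UNMANAGED_LOCKED as a sentinel; after this change the header only exposes plain initialize/swap/compare-and-exchange primitives for OpaqueUnmanagedHelper. Below is a minimal standalone sketch of the resulting usage pattern, written against C11 <stdatomic.h>; the names unmanaged_slot, slot_store and slot_take are illustrative stand-ins, not declarations from this header.

#include <stdatomic.h>
#include <stddef.h>

/* Stand-in for OpaqueUnmanagedHelper: a single atomic word holding a pointer. */
typedef struct { atomic_uintptr_t a; } unmanaged_slot;

/* Publish a pointer (e.g. the bit pattern of a retained Swift.Unmanaged reference). */
static void slot_store(unmanaged_slot *s, const void *p)
{
  atomic_store_explicit(&s->a, (uintptr_t)p, memory_order_release);
}

/* Atomically take the pointer, leaving NULL behind; returns NULL if the slot was empty.
 * A single unconditional exchange covers the case the removed lock-and-load sequence
 * (load, spin while locked, CAS to the locked sentinel, store to unlock) used to handle. */
static const void *slot_take(unmanaged_slot *s)
{
  return (const void *)atomic_exchange_explicit(&s->a, (uintptr_t)NULL, memory_order_acquire);
}

In this sketch, ownership of a retained reference transfers through one atomic exchange, so no locked sentinel value and no spin loop are needed; that is the kind of simplification the switch to CLANG_ATOMICS_POINTER_SWAP appears to be aiming for.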