{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
AssertPointerAlignment(ptr, 8);
- AssertPointerAlignment(expected, 8);
#endif
return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
}
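Side note for callers of the wrapper above: on failure the implementations store the value they actually observed back into *expected (visible in the __sync-based variant at the end of this diff), so retry loops do not need to re-read the variable. A minimal, hypothetical caller-side sketch, assuming only the pg_atomic_read_u64 and pg_atomic_compare_exchange_u64 wrappers from this header; the function and variable names are invented for illustration:

static inline uint64
set_flag_bit(volatile pg_atomic_uint64 *flags, uint64 bit)
{
	uint64		oldval = pg_atomic_read_u64(flags);

	while (!pg_atomic_compare_exchange_u64(flags, &oldval, oldval | bit))
	{
		/*
		 * On failure, oldval has been overwritten with the value some
		 * other backend installed; the next iteration retries from it.
		 */
	}

	return oldval | bit;
}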
* Full barrier semantics (even when value is unchanged).
*/
static inline uint64
-pg_atomic_monotonic_advance_u64(volatile pg_atomic_uint64 *ptr, uint64 target_)
+pg_atomic_monotonic_advance_u64(volatile pg_atomic_uint64 *ptr, uint64 target)
{
uint64 currval;

#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
AssertPointerAlignment(ptr, 8);
#endif

currval = pg_atomic_read_u64_impl(ptr);
- if (currval >= target_)
+ if (currval >= target)
{
pg_memory_barrier();
return currval;
}
-#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
- AssertPointerAlignment(&currval, 8);
-#endif
-
- while (currval < target_)
+ while (currval < target)
{
- if (pg_atomic_compare_exchange_u64_impl(ptr, &currval, target_))
- break;
+ if (pg_atomic_compare_exchange_u64(ptr, &currval, target))
+ return target;
}
- return Max(target_, currval);
+ return currval;
}
#undef INSIDE_ATOMICS_H
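For context, the intended call pattern for pg_atomic_monotonic_advance_u64 is several processes racing to push a shared 64-bit position forward without ever moving it backwards. A small hypothetical sketch (the function and parameter names are invented; the pointer is assumed to target a pg_atomic_uint64 in shared memory that was set up with pg_atomic_init_u64):

static void
note_position_reached(pg_atomic_uint64 *highest_pos, uint64 pos)
{
	uint64		seen;

	/*
	 * Advance highest_pos to at least pos.  The return value is the
	 * latest value observed, which with the coding above is always
	 * greater than or equal to pos.
	 */
	seen = pg_atomic_monotonic_advance_u64(highest_pos, pos);
	Assert(seen >= pos);
}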
uint32 condition_register;
bool ret;
+ AssertPointerAlignment(expected, 8);
+
/* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/; s/cmpw/cmpd/ */
#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
if (__builtin_constant_p(*expected) &&
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
+ AssertPointerAlignment(expected, 8);
return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
{
bool ret;
uint64 current;
+
+ AssertPointerAlignment(expected, 8);
current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
ret = current == *expected;
*expected = current;