- the InterlockedXxx and InterlockedXxx64 families of Windows APIs
- the OSAtomicXxx family of macOS APIs
- GCC's __sync builtins: __sync_val_compare_and_swap, __sync_val_compare_and_swap_8, ...
- x86/x86_64 `lock`-prefixed assembly instructions
- tbox's cross-platform atomic interface
Using the tbox interface:
// atomically: b = (*a)++  (fetch the old value, then add)
tb_atomic_t a = 0;
tb_long_t b = tb_atomic_fetch_and_add(&a, 1);

// atomically: b = ++(*a)  (add first, then fetch the new value)
tb_atomic_t a = 0;
tb_long_t b = tb_atomic_add_and_fetch(&a, 1);

// convenience forms of the above for an increment of 1
tb_long_t b = tb_atomic_fetch_and_inc(&a);
tb_long_t b = tb_atomic_inc_and_fetch(&a);
Wrapping the Windows interface:
static __tb_inline__ tb_long_t tb_atomic_fetch_and_add_windows(tb_atomic_t* a, tb_long_t v) { return (tb_long_t)InterlockedExchangeAdd((LONG __tb_volatile__*)a, v); } static __tb_inline__ tb_long_t tb_atomic_inc_and_fetch_windows(tb_atomic_t* a) { return (tb_long_t)InterlockedIncrement((LONG __tb_volatile__*)a); }
Wrapping the GCC interface:
static __tb_inline__ tb_long_t tb_atomic_fetch_and_add_sync(tb_atomic_t* a, tb_long_t v) { return __sync_fetch_and_add(a, v); }
x86/x86_64 assembly implementation:
static __tb_inline__ tb_long_t tb_atomic_fetch_and_add_x86(tb_atomic_t* a, tb_long_t v) { /* * xaddl v, [a]: * * o = [a] * [a] += v; * v = o; * * cf, ef, of, sf, zf, pf... maybe changed */ __tb_asm__ __tb_volatile__ ( #if TB_CPU_BITSIZE == 64 "lock xaddq %0, %1 \n" //!< xaddq v, [a] #else "lock xaddl %0, %1 \n" //!< xaddl v, [a] #endif : "+r" (v) : "m" (*a) : "cc", "memory" ); return v; }
- implementing spin locks
- implementing lock-free queues
- state synchronization between threads
- implementing singletons
Spin lock implementation:
static __tb_inline_force__ tb_bool_t tb_spinlock_init(tb_spinlock_ref_t lock) { // init *lock = 0; // ok return tb_true; } static __tb_inline_force__ tb_void_t tb_spinlock_exit(tb_spinlock_ref_t lock) { // exit *lock = 0; } static __tb_inline_force__ tb_void_t tb_spinlock_enter(tb_spinlock_ref_t lock) { /*Try to read the status value of the lock. If the lock (status 0) has not been obtained, obtain it (set to 1) *If the other thread has obtained the lock (status 1), it will wait circularly to try to obtain it again * *Note: The whole state reading and setting are atomic and cannot be interrupted */ tb_size_t tryn = 5; while (tb_atomic_fetch_and_pset((tb_atomic_t*)lock, 0, 1)) { //If the lock is not obtained, after five attempts, it is not successful, then let the CPU switch to other threads to run, and then try to obtain the lock again if (! tryn--) { // yield tb_sched_yield(); // reset tryn tryn = 5; } } } static __tb_inline_force__ tb_void_t tb_spinlock_leave(tb_spinlock_ref_t lock) { //Release the lock. No atom is needed here. Set it to half broken. The value position is 0. The other thread is still waiting. No effect is received *((tb_atomic_t*)lock) = 0; }
//Get lock tb_spinlock_enter(&lock); //Some synchronization operations // .. //Release lock tb_spinlock_leave(&lock);
A pthread_once-style implementation:
//Initialization function can only be called once static tb_void_t tb_once_func(tb_cpointer_t priv) { //Initialize some singleton objects and global variables //Or execute some initialization calls } //Thread function static tb_int_t tb_thread_func(tb_cpointer_t priv) { //Global storage lock, initialized to 0 static tb_atomic_t lock = 0; if (tb_thread_once(&lock, tb_once_func, "user data")) { // ok } }
tb_bool_t tb_thread_once(tb_atomic_t* lock, tb_bool_t (*func)(tb_cpointer_t), tb_cpointer_t priv) { // check tb_check_return_val(lock && func, tb_false); /*The atom obtains the lock status * *0: func has not been called *1: Lock has been obtained, func is being called by other threads *2: func has been called, and func returns OK *- 2: func has been called, and func returns failed */ tb_atomic_t called = tb_atomic_fetch_and_pset(lock, 0, 1); //Func has been called by other threads? Direct return if (called && called != 1) { return called == 2; } //Func has not been called yet? Then call it else if (! called) { //Call function tb_bool_t ok = func(priv); //Set Return Status tb_atomic_set(lock, ok? 2 : -1); // ok? return ok; } //The lock is being obtained by other threads, and func is being called, which has not been completed yet? Try to wait for lock else { //Here, I simply do some sleep loop waiting until the other thread finishes the func execution tb_size_t tryn = 50; while ((1 == tb_atomic_get(lock)) && tryn--) { // wait some time tb_msleep(100); } } /*Retrieve the lock status to determine whether it is successful * *Success: 2 *Timeout: 1 *Failed: - 2 * *If it is not 2 here, it is a failure */ return tb_atomic_get(lock) == 2; }
64-bit atomic operations:

- in tbox the type is tb_atomic64_t, and the interface becomes the tb_atomic64_xxxx series
- in GCC the type is volatile long long, and the interface becomes the __sync_xxxx_8 series
- on Windows, the InterlockedXxx64 APIs are used