explicit atomics

flysand7 2023-09-10 17:53:22 +11:00
parent 7c65447a9a
commit dc2ec9eb13
7 changed files with 35 additions and 27 deletions

View File

@@ -27,8 +27,8 @@ struct _LD_Thread_Block typedef _LD_Thread_Block;
struct _LD_Thread_Block {
/* +0x00 */ u64 thread_id;
/* +0x08 */ u64 parent_id;
- /* +0x10 */ u32 thread_behaviour; /* One of the CIA_THREAD_BEHAVIOR_* constants */
- /* +0x14 */ u32 thread_finished;
+ /* +0x10 */ _Atomic(u32) thread_behaviour; /* One of the CIA_THREAD_BEHAVIOR_* constants */
+ /* +0x14 */ _Atomic(i32) thread_finished;
/* +0x18 */ u32 pad0;
/* +0x1c */ i32 exit_code;
/* +0x20 */ u64 pad1;
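
The commented offsets remain valid only if _Atomic(u32) and _Atomic(i32) keep the size and alignment of the plain types, which is the case on the usual x86-64 psABI. A small compile-time check, not part of this commit, could pin that down:

    /* Sketch only: assert the documented TCB offsets after the _Atomic change. */
    #include <stddef.h>
    _Static_assert(offsetof(struct _LD_Thread_Block, thread_behaviour) == 0x10, "TCB layout");
    _Static_assert(offsetof(struct _LD_Thread_Block, thread_finished) == 0x14, "TCB layout");
    _Static_assert(offsetof(struct _LD_Thread_Block, exit_code) == 0x1c, "TCB layout");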

View File

@@ -9,7 +9,7 @@ int cia_wake_all(u32 *addr, u32 *n_woken);
struct Cia_Mutex typedef Cia_Mutex;
struct Cia_Mutex {
- u32 tag;
+ _Atomic(u32) tag;
};
void cia_mutex_init(Cia_Mutex *mutex);

View File

@@ -88,6 +88,6 @@ static _RT_Status _rt_mem_alloc(void *optional_desired_addr, u64 size, void **ou
static _RT_Status _rt_mem_free(void *ptr, u64 size);
// Synchronization API
- static _RT_Status _rt_sync_wait(u32 *addr, u32 compare_with, u64 time);
- static _RT_Status _rt_sync_wake_one(u32 *addr, u32 *n_woken);
- static _RT_Status _rt_sync_wake_all(u32 *addr, u32 *n_woken);
+ static _RT_Status _rt_sync_wait(void *addr, u32 compare_with, u64 time);
+ static _RT_Status _rt_sync_wake_one(void *addr, u32 *n_woken);
+ static _RT_Status _rt_sync_wake_all(void *addr, u32 *n_woken);
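
Widening addr from u32 * to void * lets callers hand differently typed futex words to the same API without casts. A minimal sketch of the intent, using a hypothetical helper that is not part of this commit:

    /* Hypothetical helper: both calls compile without casts because addr is void *. */
    static void example_wake_and_wait(_LD_Thread_Block *tcb, Cia_Mutex *mu) {
        _rt_sync_wait(&tcb->thread_finished, 0, 0); /* _Atomic(i32) futex word */
        _rt_sync_wake_one(&mu->tag, NULL);          /* _Atomic(u32) futex word */
    }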

View File

@@ -5,6 +5,7 @@ static u64 cia_stack_size;
static u64 cia_tls_image_size;
static void *cia_tls_image_base;
+ #include <stdatomic.h>
#include <cia-ld/tcb.h>
#include <linux/futex.h>

View File

@@ -33,7 +33,7 @@ static _RT_Status _rt_thread_create(_RT_Thread *thread, int (*thread_fn)(void *c
// Initialize the _RT_Thread handle, which would point to
// the TCB
thread->handle = tcb;
- tcb->thread_finished = 0;
+ atomic_store_explicit(&tcb->thread_finished, 0, memory_order_relaxed);
// Create the new thread
u64 flags = 0;
// flags |= CLONE_CHILD_CLEARTID;
@@ -58,20 +58,19 @@ static _RT_Status _rt_thread_create(_RT_Thread *thread, int (*thread_fn)(void *c
void _rt_thread_finish(int exit_code) {
_LD_Thread_Block *tcb = (void *)((u64)__builtin_frame_address(0) & ~(cia_stack_size - 1));
// Wait until the main thread decides what to do with the child thread
- while(tcb->thread_behaviour == _LD_THREAD_BEHAVIOUR_NOT_SET) {
+ u32 thread_behaviour = atomic_load_explicit(&tcb->thread_behaviour, memory_order_relaxed);
+ while(thread_behaviour == _LD_THREAD_BEHAVIOUR_NOT_SET) {
syscall(SYS_futex, &tcb->thread_behaviour, FUTEX_WAIT, _LD_THREAD_BEHAVIOUR_NOT_SET, NULL, 0, 0);
+ thread_behaviour = atomic_load_explicit(&tcb->thread_behaviour, memory_order_relaxed);
}
- if(tcb->thread_behaviour == _LD_THREAD_BEHAVIOUR_JOIN) {
+ // If main thread set this thread to be joined, we signal it that we're done here
+ if(thread_behaviour == _LD_THREAD_BEHAVIOUR_JOIN) {
tcb->exit_code = exit_code;
- // Idk if a memory barrier should be here, because we don't want the compiler
- // to reorder these two lines. If that happens, and we get a spurious wake up
- // after the thread_finished is set and before exit_code is set, the main thread
- // will proceed to fetch an invalid exit code from the thread.
- tcb->thread_finished = 1;
+ atomic_store_explicit(&tcb->thread_finished, 1, memory_order_release);
syscall(SYS_futex, &tcb->thread_finished, FUTEX_WAKE, 0, NULL, 0, 0);
sys_exit(exit_code);
}
- else if(tcb->thread_behaviour == _LD_THREAD_BEHAVIOUR_DETACH) {
+ else if(thread_behaviour == _LD_THREAD_BEHAVIOUR_DETACH) {
// TODO: clean up the thread resources
sys_exit(exit_code);
}
@@ -80,10 +79,10 @@ void _rt_thread_finish(int exit_code) {
static _RT_Status _rt_thread_join(_RT_Thread *thread, int *out_exit_code) {
_LD_Thread_Block *tcb = thread->handle;
// Signal the thread that we want it to be joined
- tcb->thread_behaviour = _LD_THREAD_BEHAVIOUR_JOIN;
+ atomic_store_explicit(&tcb->thread_behaviour, _LD_THREAD_BEHAVIOUR_JOIN, memory_order_relaxed);
syscall(SYS_futex, &tcb->thread_behaviour, FUTEX_WAKE, 0, NULL, 0, 0);
// Wait until the thread signals that it has completed the execution
- while(tcb->thread_finished != 1) {
+ while(atomic_load_explicit(&tcb->thread_finished, memory_order_acquire) != 1) {
syscall(SYS_futex, &tcb->thread_finished, FUTEX_WAIT, 0, NULL, 0, 0);
}
// Set the exit code
@@ -93,7 +92,7 @@ static _RT_Status _rt_thread_join(_RT_Thread *thread, int *out_exit_code) {
static _RT_Status _rt_thread_detach(_RT_Thread *thread) {
_LD_Thread_Block *tcb = thread->handle;
- tcb->thread_behaviour = _LD_THREAD_BEHAVIOUR_DETACH;
+ atomic_store_explicit(&tcb->thread_behaviour, _LD_THREAD_BEHAVIOUR_DETACH, memory_order_relaxed);
return _RT_STATUS_OK;
}
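
The release store of thread_finished, paired with the acquire load in _rt_thread_join, resolves the question raised in the removed comment: exit_code is written before the release store, so a joiner whose acquire load observes thread_finished == 1 is guaranteed to see the stored exit code. A condensed sketch of the handshake, using names from this commit with the futex calls elided:

    /* Finishing thread: publish the exit code, then release the flag. */
    tcb->exit_code = exit_code;
    atomic_store_explicit(&tcb->thread_finished, 1, memory_order_release);

    /* Joining thread: acquire the flag; once it reads 1, exit_code is visible. */
    while(atomic_load_explicit(&tcb->thread_finished, memory_order_acquire) != 1) {
        /* futex wait on &tcb->thread_finished */
    }
    int code = tcb->exit_code; /* sees the value stored before the release */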

View File

@@ -86,7 +86,7 @@ static _RT_Status _rt_mem_free(void *ptr, u64 len) {
return _RT_STATUS_OK;
}
- static _RT_Status _rt_sync_wait(u32 *addr, u32 compare_with, u64 sleep) {
+ static _RT_Status _rt_sync_wait(void *addr, u32 compare_with, u64 sleep) {
i64 result = syscall(SYS_futex, addr, FUTEX_WAIT_PRIVATE, compare_with, NULL, 0, 0);
if(-result == EAGAIN) {
return _RT_STATUS_OK;
@@ -97,7 +97,7 @@ static _RT_Status _rt_sync_wait(u32 *addr, u32 compare_with, u64 sleep) {
return _RT_STATUS_OK;
}
- static _RT_Status _rt_sync_wake_one(u32 *addr, u32 *n_woken) {
+ static _RT_Status _rt_sync_wake_one(void *addr, u32 *n_woken) {
i64 result = syscall(SYS_futex, addr, FUTEX_WAKE_PRIVATE, 1, NULL, 0, 0);
if(result < 0) {
return _RT_ERROR_GENERIC;
@@ -108,7 +108,7 @@ static _RT_Status _rt_sync_wake_one(u32 *addr, u32 *n_woken) {
return _RT_STATUS_OK;
}
- static _RT_Status _rt_sync_wake_all(u32 *addr, u32 *n_woken) {
+ static _RT_Status _rt_sync_wake_all(void *addr, u32 *n_woken) {
i64 result = syscall(SYS_futex, addr, FUTEX_WAKE_PRIVATE, 0x7fffffff, NULL, 0, 0);
if(result < 0) {
return _RT_ERROR_GENERIC;
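
Returning _RT_STATUS_OK on EAGAIN follows the futex contract: EAGAIN means *addr no longer equals compare_with, so there is nothing to sleep on and the caller's own re-check loop simply goes around again. A minimal sketch of that caller-side pattern, assumed usage rather than code from this commit:

    /* Sleep until an _Atomic(u32) word becomes nonzero; re-checking before every
       wait makes spurious wakeups and EAGAIN returns harmless. */
    static void wait_until_nonzero(_Atomic(u32) *word) {
        while(atomic_load_explicit(word, memory_order_acquire) == 0) {
            _rt_sync_wait(word, 0, 0); /* sleep argument assumed 0 = no timeout */
        }
    }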

View File

@@ -4,15 +4,23 @@
#define _CIA_MUTEX_CONT 2
void cia_mutex_init(Cia_Mutex *mutex) {
- mutex->tag = _CIA_MUTEX_FREE;
+ atomic_store_explicit(&mutex->tag, _CIA_MUTEX_FREE, memory_order_relaxed);
}
void cia_mutex_lock(Cia_Mutex *mutex) {
- u64 prev_tag;
+ u32 p_tag;
for(;;) {
- prev_tag = __sync_val_compare_and_swap(&mutex->tag, _CIA_MUTEX_FREE, _CIA_MUTEX_LOCK);
+ //p_tag = __sync_val_compare_and_swap(&mutex->tag, _CIA_MUTEX_FREE, _CIA_MUTEX_LOCK);
+ p_tag = _CIA_MUTEX_FREE;
+ atomic_compare_exchange_strong_explicit(
+     &mutex->tag
+     , &p_tag
+     , _CIA_MUTEX_LOCK
+     , memory_order_relaxed
+     , memory_order_relaxed
+ );
// We got the mutex, let's bail
- if(prev_tag == _CIA_MUTEX_FREE) {
+ if(p_tag == _CIA_MUTEX_FREE) {
break;
}
#if 0
@@ -20,7 +28,7 @@ void cia_mutex_lock(Cia_Mutex *mutex) {
// (1) the mutex is contested
// (2)
bool should_wait = 0;
- should_wait |= (prev_tag == _CIA_MUTEX_CONT);
+ should_wait |= (p_tag == _CIA_MUTEX_CONT);
should_wait |= (__sync_val_compare_and_swap(&mutex->tag, _CIA_MUTEX_LOCK, _CIA_MUTEX_CONT) != _CIA_MUTEX_FREE);
// We wait while its contested
if(should_wait) {
@@ -35,6 +43,6 @@ void cia_mutex_lock(Cia_Mutex *mutex) {
void cia_mutex_unlock(Cia_Mutex *mutex) {
// TODO: add error when we unlock a free mutex
// TODO: support recursive mutexes
- mutex->tag = _CIA_MUTEX_FREE;
+ atomic_store_explicit(&mutex->tag, _CIA_MUTEX_FREE, memory_order_relaxed);
_rt_sync_wake_one(&mutex->tag, NULL);
}
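
For reference, a minimal usage sketch of the resulting mutex API, with a hypothetical caller that is not part of this commit:

    /* Hypothetical caller protecting a shared counter with Cia_Mutex. */
    static Cia_Mutex g_lock;   /* call cia_mutex_init(&g_lock) before first use */
    static u64 g_counter;

    static void counter_increment(void) {
        cia_mutex_lock(&g_lock);
        g_counter += 1;
        cia_mutex_unlock(&g_lock);
    }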