[event system] allocate events and additional payload (e.g. file paths) directly in ring buffer, and copy them to user-supplied arena in mp_next_event()
parent ab150f94f2
commit bb6b68ad73
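The change in a nutshell: mp_queue_event() now serializes the event and any variable-sized payload (the dropped file paths) into the application's ring buffer, and mp_next_event() copies them back out into an arena supplied by the caller. Below is a minimal sketch of the intended caller-side loop, assuming the scratch-arena helpers that appear later in this diff; the frame() wrapper itself is hypothetical, not part of the commit.

// Hypothetical per-frame loop illustrating the new API; mp_pump_events(),
// mp_next_event(), ui_process_event() and the scratch-arena helpers are the
// ones changed or used in this commit, frame() itself is illustrative.
void frame(void)
{
    mem_arena* scratch = mem_scratch();
    mem_arena_marker mark = mem_arena_mark(scratch);

    mp_pump_events(0);

    mp_event* event = 0;
    while((event = mp_next_event(scratch)) != 0)
    {
        if(event->type == MP_EVENT_PATHDROP)
        {
            // the paths (and the event itself) were copied into 'scratch'
            for_list(&event->paths.list, elt, str8_elt, listElt)
            {
                str8 path = elt->string;
                // ...handle dropped path...
                (void)path;
            }
        }
        ui_process_event(event);
    }

    // events returned this frame become invalid once the arena is reset
    mem_arena_clear_to(scratch, mark);
}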
@@ -116,51 +116,51 @@ void mp_input_next_frame(mp_input_state* state)
     state->frameCounter++;
 }
 
-void mp_input_process_event(mp_input_state* state, mp_event event)
+void mp_input_process_event(mp_input_state* state, mp_event* event)
 {
-    switch(event.type)
+    switch(event->type)
     {
         case MP_EVENT_KEYBOARD_KEY:
         {
-            mp_key_state* key = &state->keyboard.keys[event.key.code];
-            mp_update_key_state(state, key, event.key.action);
-            mp_update_key_mods(state, event.key.mods);
+            mp_key_state* key = &state->keyboard.keys[event->key.code];
+            mp_update_key_state(state, key, event->key.action);
+            mp_update_key_mods(state, event->key.mods);
         } break;
 
         case MP_EVENT_KEYBOARD_CHAR:
-            mp_update_text(state, event.character.codepoint);
+            mp_update_text(state, event->character.codepoint);
             break;
 
         case MP_EVENT_KEYBOARD_MODS:
-            mp_update_key_mods(state, event.key.mods);
+            mp_update_key_mods(state, event->key.mods);
             break;
 
         case MP_EVENT_MOUSE_MOVE:
-            mp_update_mouse_move(state, event.move.x, event.move.y, event.move.deltaX, event.move.deltaY);
+            mp_update_mouse_move(state, event->move.x, event->move.y, event->move.deltaX, event->move.deltaY);
             break;
 
         case MP_EVENT_MOUSE_WHEEL:
-            mp_update_mouse_wheel(state, event.move.deltaX, event.move.deltaY);
+            mp_update_mouse_wheel(state, event->move.deltaX, event->move.deltaY);
             break;
 
         case MP_EVENT_MOUSE_BUTTON:
         {
-            mp_key_state* key = &state->mouse.buttons[event.key.code];
-            mp_update_key_state(state, key, event.key.action);
+            mp_key_state* key = &state->mouse.buttons[event->key.code];
+            mp_update_key_state(state, key, event->key.action);
 
-            if(event.key.action == MP_KEY_PRESS)
+            if(event->key.action == MP_KEY_PRESS)
             {
-                if(event.key.clickCount >= 1)
+                if(event->key.clickCount >= 1)
                 {
                     key->sysClicked = true;
                 }
-                if(event.key.clickCount >= 2)
+                if(event->key.clickCount >= 2)
                 {
                     key->sysDoubleClicked = true;
                 }
             }
 
-            mp_update_key_mods(state, event.key.mods);
+            mp_update_key_mods(state, event->key.mods);
         } break;
 
         default:
@@ -70,7 +70,7 @@ typedef struct mp_input_state
     mp_text_state text;
 } mp_input_state;
 
-MP_API void mp_input_process_event(mp_input_state* state, mp_event event);
+MP_API void mp_input_process_event(mp_input_state* state, mp_event* event);
 MP_API void mp_input_next_frame(mp_input_state* state);
 
 MP_API bool mp_key_down(mp_input_state* state, mp_key_code key);
src/mp_app.c | 83
@@ -97,28 +97,85 @@ static void mp_terminate_common()
 
 void mp_queue_event(mp_event* event)
 {
-    if(ringbuffer_write_available(&__mpApp.eventQueue) < sizeof(mp_event))
+    ringbuffer* queue = &__mpApp.eventQueue;
+
+    if(ringbuffer_write_available(queue) < sizeof(mp_event))
     {
         log_error("event queue full\n");
     }
     else
     {
-        u32 written = ringbuffer_write(&__mpApp.eventQueue, sizeof(mp_event), (u8*)event);
-        DEBUG_ASSERT(written == sizeof(mp_event));
-    }
-}
-
-bool mp_next_event(mp_event* event)
-{
-    //NOTE pop and return event from queue
-    if(ringbuffer_read_available(&__mpApp.eventQueue) >= sizeof(mp_event))
-    {
-        u64 read = ringbuffer_read(&__mpApp.eventQueue, sizeof(mp_event), (u8*)event);
-        DEBUG_ASSERT(read == sizeof(mp_event));
-        return(true);
-    }
-    else
-    {
-        return(false);
-    }
-}
+        bool error = false;
+        ringbuffer_reserve(queue, sizeof(mp_event), (u8*)event);
+
+        if(event->type == MP_EVENT_PATHDROP)
+        {
+            for_list(&event->paths.list, elt, str8_elt, listElt)
+            {
+                str8* path = &elt->string;
+                if(ringbuffer_write_available(queue) < (sizeof(u64) + path->len))
+                {
+                    log_error("event queue full\n");
+                    error = true;
+                    break;
+                }
+                else
+                {
+                    ringbuffer_reserve(queue, sizeof(u64), (u8*)&path->len);
+                    ringbuffer_reserve(queue, path->len, (u8*)path->ptr);
+                }
+            }
+        }
+        if(error)
+        {
+            ringbuffer_rewind(queue);
+        }
+        else
+        {
+            ringbuffer_commit(queue);
+        }
+    }
+}
+
+mp_event* mp_next_event(mem_arena* arena)
+{
+    //NOTE: pop and return event from queue
+    mp_event* event = 0;
+    ringbuffer* queue = &__mpApp.eventQueue;
+
+    if(ringbuffer_read_available(queue) >= sizeof(mp_event))
+    {
+        event = mem_arena_alloc_type(arena, mp_event);
+        u64 read = ringbuffer_read(queue, sizeof(mp_event), (u8*)event);
+        DEBUG_ASSERT(read == sizeof(mp_event));
+
+        if(event->type == MP_EVENT_PATHDROP)
+        {
+            u64 pathCount = event->paths.eltCount;
+            event->paths = (str8_list){0};
+
+            for(int i=0; i<pathCount; i++)
+            {
+                if(ringbuffer_read_available(queue) < sizeof(u64))
+                {
+                    log_error("malformed path payload: no string size\n");
+                    break;
+                }
+
+                u64 len = 0;
+                ringbuffer_read(queue, sizeof(u64), (u8*)&len);
+                if(ringbuffer_read_available(queue) < len)
+                {
+                    log_error("malformed path payload: string shorter than expected\n");
+                    break;
+                }
+
+                char* buffer = mem_arena_alloc_array(arena, char, len);
+                ringbuffer_read(queue, len, (u8*)buffer);
+
+                str8_list_push(arena, &event->paths, str8_from_buffer(len, buffer));
+            }
+        }
+    }
+    return(event);
+}
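To summarize the write side above: a path-drop record is the fixed-size mp_event followed by one length-prefixed string per path, all reserved in one transaction and committed (or rewound on overflow) at the end; mp_next_event() consumes the record in the same order. The helper below is a hypothetical illustration of the extra space such an event needs; it is not part of the commit.

// Ring buffer layout of one MP_EVENT_PATHDROP record, as written above:
//   [ mp_event ]                   sizeof(mp_event) bytes, paths.eltCount = N
//   [ u64 len ][ len path bytes ]  repeated N times, one per dropped path
//
// Hypothetical helper: payload size needed on top of sizeof(mp_event).
static u64 mp_pathdrop_payload_size(str8_list* paths)
{
    u64 size = 0;
    for_list(&paths->list, elt, str8_elt, listElt)
    {
        size += sizeof(u64) + elt->string.len;
    }
    return(size);
}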
@@ -12,6 +12,7 @@
 #include"typedefs.h"
 #include"utf8.h"
 #include"lists.h"
+#include"memory.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -237,7 +238,6 @@ typedef struct mp_frame_event // window resize / move
 typedef struct mp_event
 {
-    //TODO clipboard and path drop
 
     mp_window window;
     mp_event_type type;
 
@@ -247,11 +247,9 @@ typedef struct mp_event
         mp_char_event character;
         mp_move_event move;
         mp_frame_event frame;
-        str8 path;
+        str8_list paths;
     };
 
-    //TODO(martin): chain externally ?
-    list_elt list;
 } mp_event;
 
 //--------------------------------------------------------------------
@@ -272,7 +270,7 @@ MP_API void mp_set_cursor(mp_mouse_cursor cursor);
 //--------------------------------------------------------------------
 
 MP_API void mp_pump_events(f64 timeout);
-MP_API bool mp_next_event(mp_event* event);
+MP_API mp_event* mp_next_event(mem_arena* arena);
 
 typedef void(*mp_live_resize_callback)(mp_event event, void* data);
 MP_API void mp_set_live_resize_callback(mp_live_resize_callback callback, void* data);
@@ -475,12 +475,20 @@ void mp_install_keyboard_layout_listener()
 
 - (BOOL)application:(NSApplication *)application openFile:(NSString *)filename
 {
-    mp_event event = {};
+    mp_event event = {0};
     event.window = (mp_window){0};
     event.type = MP_EVENT_PATHDROP;
-    event.path = str8_push_cstring(&__mpApp.eventArena, [filename UTF8String]);
+
+    mem_arena* scratch = mem_scratch();
+    mem_arena_marker mark = mem_arena_mark(scratch);
+
+    str8 path = str8_push_cstring(scratch, [filename UTF8String]);
+    str8_list_push(scratch, &event.paths, path);
 
     mp_queue_event(&event);
 
+    mem_arena_clear_to(scratch, mark);
+
     return(YES);
 }
 
@@ -491,11 +499,20 @@ void mp_install_keyboard_layout_listener()
     mp_event event = {};
     event.window = (mp_window){0};
     event.type = MP_EVENT_PATHDROP;
-    event.path = str8_push_cstring(&__mpApp.eventArena, [nsPath UTF8String]);
+
+    mem_arena* scratch = mem_scratch();
+    mem_arena_marker mark = mem_arena_mark(scratch);
+
+    str8 path = str8_push_cstring(scratch, [nsPath UTF8String]);
+    str8_list_push(scratch, &event.paths, path);
 
     mp_queue_event(&event);
+
+    mem_arena_clear_to(scratch, mark);
 }
 
 //TODO: drag and drop paths
 
 @end // @implementation MPAppDelegate
 
 //---------------------------------------------------------------
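Both macOS handlers above follow the same pattern: build the path list on the scratch arena, queue the event (mp_queue_event() copies the struct and every path into the ring buffer), then release the scratch memory. A hedged sketch of how another backend could queue several dropped paths at once; queue_dropped_paths() and its parameters are purely illustrative, not part of this commit.

// Illustrative backend helper: queue one MP_EVENT_PATHDROP carrying
// 'count' dropped paths, following the pattern of the handlers above.
static void queue_dropped_paths(int count, const char** pathStrings)
{
    mp_event event = {0};
    event.window = (mp_window){0};
    event.type = MP_EVENT_PATHDROP;

    mem_arena* scratch = mem_scratch();
    mem_arena_marker mark = mem_arena_mark(scratch);

    for(int i=0; i<count; i++)
    {
        str8 path = str8_push_cstring(scratch, pathStrings[i]);
        str8_list_push(scratch, &event.paths, path);
    }

    // mp_queue_event() deep-copies the event and each path into the ring
    // buffer, so the scratch-backed strings can be released immediately.
    mp_queue_event(&event);

    mem_arena_clear_to(scratch, mark);
}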
src/ui.c | 2
@@ -327,7 +327,7 @@ void ui_style_box_after(ui_box* box, ui_pattern pattern, ui_style* style, ui_sty
 // input
 //-----------------------------------------------------------------------------
 
-void ui_process_event(mp_event event)
+void ui_process_event(mp_event* event)
 {
     ui_context* ui = ui_get_context();
     mp_input_process_event(&ui->input, event);
src/ui.h | 2
@@ -395,7 +395,7 @@ MP_API void ui_init(ui_context* context);
 MP_API ui_context* ui_get_context(void);
 MP_API void ui_set_context(ui_context* context);
 
-MP_API void ui_process_event(mp_event event);
+MP_API void ui_process_event(mp_event* event);
 MP_API void ui_begin_frame(ui_style* defaultStyle, ui_style_mask mask);
 MP_API void ui_end_frame(void);
 MP_API void ui_draw(void);
@@ -11,9 +11,10 @@
 
 void ringbuffer_init(ringbuffer* ring, u8 capExp)
 {
-    u32 cap = 1<<capExp;
+    u64 cap = 1<<capExp;
     ring->mask = cap - 1;
     ring->readIndex = 0;
+    ring->reserveIndex = 0;
     ring->writeIndex = 0;
     ring->buffer = (u8*)malloc(cap);
 }
@@ -23,54 +24,24 @@ void ringbuffer_cleanup(ringbuffer* ring)
     free(ring->buffer);
 }
 
-u32 ringbuffer_read_available(ringbuffer* ring)
+u64 ringbuffer_read_available(ringbuffer* ring)
 {
     return((ring->writeIndex - ring->readIndex) & ring->mask);
 }
 
-u32 ringbuffer_write_available(ringbuffer* ring)
+u64 ringbuffer_write_available(ringbuffer* ring)
 {
     //NOTE(martin): we keep one sentinel byte between write index and read index,
     // when the buffer is full, to avoid overrunning read index.
-    // Hence, available write space is size - 1 - available read space.
-    return(ring->mask - ringbuffer_read_available(ring));
+    return(((ring->readIndex - ring->reserveIndex) & ring->mask) - 1);
 }
 
-u32 ringbuffer_write(ringbuffer* ring, u32 size, u8* data)
+u64 ringbuffer_read(ringbuffer* ring, u64 size, u8* data)
 {
-    u32 read = ring->readIndex;
-    u32 write = ring->writeIndex;
+    u64 read = ring->readIndex;
+    u64 write = ring->writeIndex;
 
-    u32 writeAvailable = ringbuffer_write_available(ring);
-    if(size > writeAvailable)
-    {
-        DEBUG_ASSERT("not enough space available");
-        size = writeAvailable;
-    }
-
-    if(read <= write)
-    {
-        u32 copyCount = minimum(size, ring->mask + 1 - write);
-        memcpy(ring->buffer + write, data, copyCount);
-
-        data += copyCount;
-        copyCount = size - copyCount;
-        memcpy(ring->buffer, data, copyCount);
-    }
-    else
-    {
-        memcpy(ring->buffer + write, data, size);
-    }
-    ring->writeIndex = (write + size) & ring->mask;
-    return(size);
-}
-
-u32 ringbuffer_read(ringbuffer* ring, u32 size, u8* data)
-{
-    u32 read = ring->readIndex;
-    u32 write = ring->writeIndex;
-
-    u32 readAvailable = ringbuffer_read_available(ring);
+    u64 readAvailable = ringbuffer_read_available(ring);
     if(size > readAvailable)
     {
         size = readAvailable;
@@ -82,7 +53,7 @@ u32 ringbuffer_read(ringbuffer* ring, u32 size, u8* data)
     }
     else
     {
-        u32 copyCount = minimum(size, ring->mask + 1 - read);
+        u64 copyCount = minimum(size, ring->mask + 1 - read);
         memcpy(data, ring->buffer + read, copyCount);
 
         data += copyCount;
@@ -92,3 +63,50 @@ u32 ringbuffer_read(ringbuffer* ring, u32 size, u8* data)
     ring->readIndex = (read + size) & ring->mask;
     return(size);
 }
+
+u64 ringbuffer_reserve(ringbuffer* ring, u64 size, u8* data)
+{
+    u64 read = ring->readIndex;
+    u64 reserve = ring->reserveIndex;
+
+    u64 writeAvailable = ringbuffer_write_available(ring);
+    if(size > writeAvailable)
+    {
+        DEBUG_ASSERT("not enough space available");
+        size = writeAvailable;
+    }
+
+    if(read <= reserve)
+    {
+        u64 copyCount = minimum(size, ring->mask + 1 - reserve);
+        memcpy(ring->buffer + reserve, data, copyCount);
+
+        data += copyCount;
+        copyCount = size - copyCount;
+        memcpy(ring->buffer, data, copyCount);
+    }
+    else
+    {
+        memcpy(ring->buffer + reserve, data, size);
+    }
+    ring->reserveIndex = (reserve + size) & ring->mask;
+    return(size);
+}
+
+u64 ringbuffer_write(ringbuffer* ring, u64 size, u8* data)
+{
+    ringbuffer_commit(ring);
+    u64 res = ringbuffer_reserve(ring, size, data);
+    ringbuffer_commit(ring);
+    return(res);
+}
+
+void ringbuffer_commit(ringbuffer* ring)
+{
+    ring->writeIndex = ring->reserveIndex;
+}
+
+void ringbuffer_rewind(ringbuffer* ring)
+{
+    ring->reserveIndex = ring->writeIndex;
+}
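The new reserve/commit/rewind trio turns a queued record into a small transaction: ringbuffer_reserve() stages bytes past writeIndex using reserveIndex, ringbuffer_commit() publishes them to the reader by advancing writeIndex, and ringbuffer_rewind() discards everything staged since the last commit. This is how mp_queue_event() writes an event plus its path payload. A condensed usage sketch follows, with my_record as a placeholder type that is not part of the commit.

// Illustrative producer: stage a fixed header plus a variable payload and
// publish both at once. 'my_record' and queue_my_record() are placeholders.
typedef struct my_record { u64 payloadLen; } my_record;

bool queue_my_record(ringbuffer* ring, my_record* record, u8* payload)
{
    if(ringbuffer_write_available(ring) < sizeof(my_record) + record->payloadLen)
    {
        return(false); // nothing reserved yet, nothing to undo
    }
    ringbuffer_reserve(ring, sizeof(my_record), (u8*)record);
    ringbuffer_reserve(ring, record->payloadLen, payload);

    // If a later check failed we could call ringbuffer_rewind(ring) here to
    // drop the staged bytes; otherwise commit makes them visible to the reader.
    ringbuffer_commit(ring);
    return(true);
}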
@@ -18,9 +18,10 @@ extern "C" {
 
 typedef struct ringbuffer
 {
-    u32 mask;
-    _Atomic(u32) readIndex;
-    _Atomic(u32) writeIndex;
+    u64 mask;
+    _Atomic(u64) readIndex;
+    _Atomic(u64) writeIndex;
+    u64 reserveIndex;
 
     u8* buffer;
 
@@ -28,10 +29,13 @@ typedef struct ringbuffer
 
 void ringbuffer_init(ringbuffer* ring, u8 capExp);
 void ringbuffer_cleanup(ringbuffer* ring);
-u32 ringbuffer_read_available(ringbuffer* ring);
-u32 ringbuffer_write_available(ringbuffer* ring);
-u32 ringbuffer_write(ringbuffer* ring, u32 size, u8* data);
-u32 ringbuffer_read(ringbuffer* ring, u32 size, u8* data);
+u64 ringbuffer_read_available(ringbuffer* ring);
+u64 ringbuffer_write_available(ringbuffer* ring);
+u64 ringbuffer_read(ringbuffer* ring, u64 size, u8* data);
+u64 ringbuffer_write(ringbuffer* ring, u64 size, u8* data);
+u64 ringbuffer_reserve(ringbuffer* ring, u64 size, u8* data);
+void ringbuffer_commit(ringbuffer* ring);
+void ringbuffer_rewind(ringbuffer* ring);
 
 #ifdef __cplusplus
 } // extern "C"