diff --git a/async2/async2.c b/async2/async2.c index e2b3d13..8d33ebc 100644 --- a/async2/async2.c +++ b/async2/async2.c @@ -20,455 +20,396 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "async2.h" +#include <assert.h> /* assert */ #include <stdarg.h> /* va_start, va_end, va_arg, va_list */ #include <stdlib.h> /* ma|re|calloc, free */ #include <string.h> /* memset, memmove */ -#include <time.h> /* clock_t, CLOCKS_PER_SEC */ +#include <time.h> /* clock_t, CLOCKS_PER_SEC */ + +#define ignored_ (void) + +#ifdef NDEBUG + #define ON_DEBUG(statement) \ + (void) 0 +#else + #define ON_DEBUG(statement) \ + do { statement } while (0) +#endif /* * event loop member functions declaration */ -static struct astate *async_loop_add_task_(struct astate *state); +static struct astate *async_loop_create_task_(struct astate *state); -static struct astate **async_loop_add_tasks_(size_t n, struct astate **states); +static struct astate **async_loop_create_tasks_(size_t n, struct astate * const states[]); static void async_loop_init_(void); static void async_loop_run_forever_(void); -static void async_loop_run_until_complete_(struct astate *main); +static void async_loop_run_until_complete_(struct astate *amain); -static void async_loop_destroy_(void); +static void async_loop_close_(void); -/* array is inspired by rxi's vec: https://github.com/rxi/vec */ -static int async_arr_expand_(char **data, const size_t *len, size_t *capacity, size_t memsz, size_t n_memb) { - void *mem; - size_t n, needed; +static void async_loop_stop_(void); - needed = *len + n_memb; - if (needed > *capacity) { - n = (*capacity == 0) ? 1 : *capacity << 1; - while (needed > n) { /* Calculate power of 2 for new capacity */ - n <<= 1; - } - mem = realloc(*data, n * memsz); - if (mem == NULL) return 0; - *data = mem; - *capacity = n; - } - return 1; +static int async_all_(size_t n, struct astate * const states[]); + +/* todo: optionally switch to arena allocator? */ + +static void async_loop_free_(void *ptr){ + free(ptr); } -static void async_arr_splice_( - char **data, const size_t *len, const size_t *capacity, - size_t memsz, size_t start, size_t count) { - (void) capacity; - memmove(*data + start * memsz, - *data + (start + count) * memsz, - (*len - start - count) * memsz); +static void *async_loop_malloc_(size_t size){ + return malloc(size); } +static void *async_loop_realloc_(void *block, size_t size){ + return realloc(block, size); +} -#define async_arr_init(arr) \ memset((arr), 0, sizeof(*(arr))) +static void *async_loop_calloc_(size_t count, size_t size){ + return calloc(count, size); +} -#define async_arr_destroy(arr) \ ( \ free((arr)->data), \ async_arr_init(arr)\ ) +#define STD_EVENT_LOOP_INIT \ + async_loop_init_, \ + async_loop_stop_, \ + async_loop_close_, \ + async_loop_create_task_, \ + async_loop_create_tasks_, \ + async_loop_run_forever_, \ + async_loop_run_until_complete_, \ + async_loop_malloc_, \ + async_loop_realloc_, \ + async_loop_calloc_, \ + async_loop_free_, \ + NULL, NULL, \ + 0, \ + 0 -#define async_arr_push(arr, val) \ ( \ async_arr_expand_(async_arr_unpack_(arr), 1) \ ? ((arr)->data[(arr)->length++] = (val), 1) \ : 0 \ ) +/* Init default event loop, a custom event loop should create its own initializer instead.
*/ +static struct async_event_loop async_default_event_loop_ = { + STD_EVENT_LOOP_INIT +}; -#define async_arr_reserve(arr, n) async_arr_expand_(async_arr_unpack_(arr), n) +static const struct async_event_loop async_default_event_loop_copy_ = { + STD_EVENT_LOOP_INIT +}; -#define async_arr_unpack_(arr) \ (char **) &(arr)->data, &(arr)->length, &(arr)->capacity, sizeof(*(arr)->data) +static struct async_event_loop *event_loop = &async_default_event_loop_; -#define async_arr_splice(arr, start, count) \ ( \ async_arr_splice_(async_arr_unpack_(arr), start, count), \ (arr)->length -= (count) \ ) +const struct async_event_loop * const * const async_loop_ptr = (const struct async_event_loop * const *) &event_loop; -#define async_arr_pop(arr) \ (arr)->data[--(arr)->length] -static int async_all_(size_t n, struct astate **states) { /* Returns false if at least one state is NULL */ +static int async_all_(size_t n, struct astate * const states[]) { /* Returns false if at least one state is NULL */ while (n--) { if (states[n] == NULL) { return 0; } } return 1; } -/* Init default event loop, custom event loop should create own initializer instead. */ -static struct async_event_loop async_standard_event_loop_ = { - async_loop_init_, - async_loop_destroy_, - async_loop_add_task_, - async_loop_add_tasks_, - async_loop_run_forever_, - async_loop_run_until_complete_, - {0, 0, 0}, - {0, 0, 0}, /* fill array structs with zeros */ -}; - -struct async_event_loop *async_default_event_loop = &async_standard_event_loop_; - -static struct async_event_loop *event_loop = &async_standard_event_loop_; +/* + * this union gives the header the alignment of the most-aligned basic data type, so anything stored right after it is suitably aligned; + * it's not guaranteed to work with extension types (like 128 bit integers), nor to be the same as alignof(max_align_t) + */ +typedef struct memblock_header { + union { +/* some embedded compilers, e.g. cc65, don't support floats */ +#if !defined ASYNC_NO_FLOATS + long double a; +#endif +#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L + long long int b; /* long in modern compilers should be the same size and alignment as long long, but if we have c99, include long long just in case */ +#endif + long int c; + size_t d; + void *e; + void (*f)(void); + } Align; + + struct memblock_header *prev, *next; + void *ref; +} memblock_header; + + +#define MEMBLOCK_FREED 0xF3EE +#define MEMBLOCK_ALLOC 0xBEEF +#define MEMBLOCK_FREELATER 0xBAEE + +#define MEMBLOCK_HEADER_FREE(header) \ + do { \ + /* this assertion is unlikely to fire, because modern allocators tend to reuse or unmap freed memory */ \ + assert((ignored_"Double free of async_alloc memory", (header)->Align.c != MEMBLOCK_FREED)); \ + assert((ignored_"Given memory address wasn't allocated with async_alloc", \ + (header)->Align.c == MEMBLOCK_ALLOC || (header)->Align.c == MEMBLOCK_FREELATER)); \ + ON_DEBUG({ \ + (header)->Align.c = MEMBLOCK_FREED; \ + }); \ + free((header)->ref); \ + event_loop->free(header); \ + } while (0) /* Free astate, its allocs and invalidate it completely */ -#define STATE_FREE(state) \ - { \ - while ((state)->_allocs.length--) { \ - free((state)->_allocs.data[(state)->_allocs.length]); \ - } \ - async_arr_destroy(&(state)->_allocs); \ - free(state); \ - } (void) 0 - - -#define ASYNC_LOOP_HEAD \ - size_t i; \ - struct astate *state \ -#define ASYNC_LOOP_RUNNER_BLOCK_NOREFS \ - if (state->_refcnt == 0) { \ - if (!async_done(state) && state->_cancel != NULL) { \ - state->_cancel(state); \ - } \ - STATE_FREE(state); \ - if
(async_arr_push(&event_loop->vacant_queue, i)) { \ - event_loop->events_queue.data[i] = NULL; \ - } else { \ - async_arr_splice(&event_loop->events_queue, i, 1); \ - i--; \ - } \ +#define ASTATE_FREE(state) \ + do { \ + memblock_header *_header; \ + if ((state)->_runner->destr) { \ + (state)->_runner->destr(state); \ + } \ + _header = (state)->_allocs; \ + while (_header) { \ + memblock_header *_hnext = _header->next; \ + MEMBLOCK_HEADER_FREE(_header); \ + _header = _hnext; \ + } \ + event_loop->free(state); \ + } while (0) + +/* tasks are prepended so that a task scheduled from a running task will not execute in the same loop iteration it was added in */ +static void async_loop_prepend_(struct astate *state){ + struct async_event_loop *loop = event_loop; + if (loop->n_tasks == 0) { + loop->head = loop->tail = state; + } else { + loop->head->_prev = state; + state->_next = loop->head; + loop->head = state; } + loop->n_tasks++; +} -/* - * This block is faster than runner because it knows that you don't need event loop anymore, - * so it can just free and NULL all states without references inside events queue - */ -#define ASYNC_LOOP_DESTRUCTOR_BLOCK_NOREFS \ - if (state->_refcnt == 0) { \ - if (!async_done(state) && state->_cancel != NULL) { \ - state->_cancel(state); \ - } \ - STATE_FREE(state); \ - event_loop->events_queue.data[i] = NULL; \ - event_loop->vacant_queue.length++; \ - } -#define ASYNC_LOOP_RUNNER_BLOCK_CANCELLED \ - else if (state->err != ASYNC_ECANCELED && async_cancelled(state)){ \ - if (!async_done(state)) { \ - ASYNC_DECREF(state); \ - if (state->_cancel != NULL) { \ - state->_cancel(state); \ - } \ - } \ - if (state->_next) { \ - ASYNC_DECREF(state->_next); \ - async_cancel(state->_next); \ - } \ - state->err = ASYNC_ECANCELED; \ - state->_async_k = ASYNC_DONE; \ - } +static void async_loop_remove_(struct astate *state){ + struct async_event_loop *loop = event_loop; + assert((ignored_"Loop is already empty, state was already removed or never added", loop->n_tasks != 0)); + assert(state != NULL); -#define ASYNC_LOOP_BODY_BEGIN \ - for (i = 0; i < event_loop->events_queue.length; i++) { \ - state = event_loop->events_queue.data[i]; \ - if (state == NULL) { \ - continue; \ - } + if (state == loop->tail) + loop->tail = loop->tail->_prev; + else if (state->_next) + state->_next->_prev = state->_prev; + if (state == loop->head) + loop->head = loop->head->_next; + else if (state->_prev) + state->_prev->_next = state->_next; -/* Parent while loop breaks if all array elements are vacant (NULL'ed) */ -#define ASYNC_LOOP_BODY_END \ - }(void)0 - -#define ASYNC_LOOP_RUNNER_BODY \ - ASYNC_LOOP_BODY_BEGIN \ - ASYNC_LOOP_RUNNER_BLOCK_NOREFS \ - ASYNC_LOOP_RUNNER_BLOCK_CANCELLED \ - else if (!async_done(state) && (!state->_next || async_done(state->_next))) { \ - /* Nothing special to do with this function, let it run */ \ - state->_func(state); \ - } \ - ASYNC_LOOP_BODY_END - - -#define ASYNC_LOOP_DESTRUCTOR_BODY \ - ASYNC_LOOP_BODY_BEGIN \ - ASYNC_LOOP_DESTRUCTOR_BLOCK_NOREFS \ - ASYNC_LOOP_RUNNER_BLOCK_CANCELLED \ - else if (!async_cancelled(state)) { \ - /* Nothing special to do with this function, cancel it */ \ - async_cancel(state); \ - i--; \ - } \ - ASYNC_LOOP_BODY_END + state->_prev = state->_next = NULL; + loop->n_tasks--; +} -static void async_loop_run_forever_(void) { - ASYNC_LOOP_HEAD; - while (event_loop->events_queue.length > 0 && event_loop->events_queue.length > event_loop->vacant_queue.length) { - ASYNC_LOOP_RUNNER_BODY; +static void async_loop_once_(void) { + struct astate *state = event_loop->head; +
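+ /* walk the task list, caching each task's successor first: a task whose refcnt has dropped to 0 is unlinked and freed during the walk */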
while (state) { + struct astate *next = state->_next; + + if (state->_refcnt == 0) { + async_loop_remove_(state); + ASTATE_FREE(state); + } else if (!async_is_done(state) && (!state->_child || async_is_done(state->_child))) { + /* no awaited child or child is done, run parent */ + state->_runner->coro(state); + } + state = next; } } +/* todo: run forever loop */ +static void async_loop_run_forever_(void) { + async_set_flag_(event_loop, ASYNC_LOOP_FLAG_RUNNING); -static void async_loop_run_until_complete_(struct astate *main) { - ASYNC_LOOP_HEAD; - if (main == NULL) { - return; - } - while (main->_func(main) != ASYNC_DONE) { - ASYNC_LOOP_RUNNER_BODY; + async_unset_flag_(event_loop, ASYNC_LOOP_FLAG_RUNNING); +} + +static void async_loop_run_until_complete_(struct astate *amain) { + if (amain == NULL) { return; } + + async_set_flag_(event_loop, ASYNC_LOOP_FLAG_RUNNING); + + ASYNC_INCREF(amain); + event_loop->create_task(amain); + + while (!async_is_done(amain)) { + async_loop_once_(); } - if (main->_refcnt == 0) { - STATE_FREE(main); + + ASYNC_DECREF(amain); + + async_unset_flag_(event_loop, ASYNC_LOOP_FLAG_RUNNING); + if (amain->_refcnt == 0) { + async_loop_remove_(amain); + ASTATE_FREE(amain); } } static void async_loop_init_(void) { - async_arr_init(&event_loop->events_queue); - async_arr_init(&event_loop->vacant_queue); + /* todo: loop constructor */ } -static void async_loop_destroy_(void) { - ASYNC_LOOP_HEAD; - while (event_loop->events_queue.length > 0 && event_loop->events_queue.length > event_loop->vacant_queue.length) { - ASYNC_LOOP_DESTRUCTOR_BODY; - } - async_arr_destroy(&event_loop->events_queue); - async_arr_destroy(&event_loop->vacant_queue); +static void async_loop_close_(void) { + /* todo: loop destructor */ } -#define async_set_sheduled(state) ((state)->_flags |= _ASYNC_FLAG_SCHEDULED) +static void async_loop_stop_(void){ + async_unset_flag_(event_loop, ASYNC_LOOP_FLAG_RUNNING); +} -#define async_sheduled(state) (!!((state)->_flags & _ASYNC_FLAG_SCHEDULED)) +#define async_set_scheduled(task) (async_set_flag_((task), ASYNC_TASK_FLAG_SCHEDULED)) -static struct astate *async_loop_add_task_(struct astate *state) { - size_t i; - if (state == NULL) return NULL; - - if (!async_sheduled(state)) { - if (event_loop->vacant_queue.length > 0) { - i = async_arr_pop(&event_loop->vacant_queue); - event_loop->events_queue.data[i] = state; - } else { - if (!async_arr_push(&event_loop->events_queue, state)) { - STATE_FREE(state); - return NULL; - } - } - async_set_sheduled(state); +#define async_is_scheduled(task) (async_get_flag_(task, ASYNC_TASK_FLAG_SCHEDULED)) + +static struct astate *async_loop_create_task_(struct astate *state) { + if (state == NULL) { return NULL; } + + if (!async_is_scheduled(state)) { + async_loop_prepend_(state); + async_set_scheduled(state); } return state; } -static struct astate **async_loop_add_tasks_(size_t n, struct astate **states) { + +static struct astate **async_loop_create_tasks_(size_t n, struct astate * const states[]) { size_t i; - if (states == NULL || !async_all_(n, states) || !async_arr_reserve(&event_loop->events_queue, n)) { return NULL; } + if (!(states && async_all_(n, states))) { + return NULL; + } + for (i = 0; i < n; i++) { - if (!async_sheduled(states[i])) { - /* push would never fail here as we've reserved enough memory already, no need to check the return value */ - async_arr_push(&event_loop->events_queue, states[i]); - async_set_sheduled(states[i]); + if (!async_is_scheduled(states[i])) { + async_loop_prepend_(states[i]); + 
async_set_scheduled(states[i]); } } - return states; + return (struct astate **)states; } -struct astate *async_new_coro_(AsyncCallback child_f, void *args, size_t stack_size, size_t stack_offset) { - struct astate *state; - size_t padding; - - padding = stack_offset - sizeof(*state); - state = calloc(1, sizeof(*state) + padding + stack_size); +struct astate *async_new_task_(const async_runner *runner, void *args) { + struct astate *state = event_loop->calloc(1, sizeof(*state) + runner->sizes.task_addsize); if (state == NULL) { return NULL; } - state->locals = ((char *) state) + stack_offset; - state->args = args; - state->_func = child_f; + +#ifdef ASYNC_DEBUG + /* in debug builds a runner that declares no stack gets a NULL stack pointer, so misuse fails fast */ + state->stack = (runner->sizes.stack_offset) + ? ((char *) state) + runner->sizes.stack_offset + : NULL; +#else + state->stack = ((char *) state) + runner->sizes.stack_offset; +#endif + + state->args = (runner->sizes.args_size) + ? ( + assert((ignored_"Pointer to args must be non-NULL", args)), + memcpy(((char *) state) + runner->sizes.args_offset, args, runner->sizes.args_size) + ) + : args; + + state->_runner = runner; state->_refcnt = 1; /* State has 1 reference set as function "owns" itself until exited or cancelled */ /* state->_async_k = ASYNC_INIT; state is already ASYNC_INIT because calloc */ + +#if !defined ASYNC_NULL_ZERO_BITS + /* pointer variables aren't guaranteed to be represented with all zero bits, assign them explicitly */ + state->_prev = state->_next = NULL; + state->_child = NULL; + state->_allocs = NULL; +#endif return state; } -void async_free_coro_(struct astate *state) { +void async_free_task_(struct astate *state) { if (state != NULL) { - STATE_FREE(state); + ASTATE_FREE(state); } } -void async_free_coros_(size_t n, struct astate **states) { +void async_free_tasks_(size_t n, struct astate * const states[]) { while (n--) { - if (states[n]) STATE_FREE(states[n]); + if (states[n]) { ASTATE_FREE(states[n]); } } } -static async async_yielder(struct astate *state) { - async_begin(state); - async_yield; - async_end; -} +struct gather_args { + struct astate **states; + size_t n; +}; -typedef struct { - async_arr_t(struct astate *) arr_coros; -} gathered_stack; -static void async_gatherer_cancel(struct astate *state) { - gathered_stack *locals = state->locals; - size_t i; - for (i = 0; i < locals->arr_coros.length; i++) { - if (!locals->arr_coros.data[i]) continue; - ASYNC_DECREF(locals->arr_coros.data[i]); - async_cancel(locals->arr_coros.data[i]); +static int gather_cancel(struct astate *st){ + struct gather_args *arg = st->args; + size_t n = arg->n; + + /* cancel the remaining child tasks and drop our ownership of them; n has wrapped to (size_t)-1 once gather_coro has awaited every child */ + if(n != (size_t)-1){ + while(n--){ + async_cancel(arg->states[n]); + ASYNC_XDECREF(arg->states[n]); + } } + return 1; } -static async async_gatherer(struct astate *state) { - gathered_stack *locals = state->locals; - size_t i; - struct astate *child; - async_begin(state); - while (1) { - for (i = 0; i < locals->arr_coros.length; i++) { - child = locals->arr_coros.data[i]; - if (!child) continue; - if (!async_done(child)) { - goto cont; - } else { /* NULL coroutine in the list of tracked coros */ - ASYNC_DECREF(child); - locals->arr_coros.data[i] = NULL; - } - } - break; - cont : - { - async_yield; - } - } +static async gather_coro(struct astate *st) { + struct gather_args *arg = st->args; + async_begin(st); + while (arg->n--) { + await(arg->states[arg->n]); + async_errno = ASYNC_OK; + ASYNC_XDECREF(arg->states[arg->n]); + } async_end; } -struct astate *async_vgather(size_t n, ...)
{ - va_list v_args; - gathered_stack *stack; - struct astate *state; - size_t i; - - ASYNC_PREPARE_NOARGS(async_gatherer, state, gathered_stack, async_gatherer_cancel, fail); - - stack = state->locals; - async_arr_init(&stack->arr_coros); - if (!async_arr_reserve(&stack->arr_coros, n) || !async_free_later_(state, stack->arr_coros.data)) { - goto fail; - } - - va_start(v_args, n); - for (i = 0; i < n; i++) { - stack->arr_coros.data[i] = va_arg(v_args, struct astate *); - } - va_end(v_args); - stack->arr_coros.length = n; - if (!async_create_tasks(n, stack->arr_coros.data)) { - goto fail; - } - for (i = 0; i < n; i++) { - ASYNC_INCREF(stack->arr_coros.data[i]); - } - return state; - - fail: - if (state) { - async_arr_destroy(&stack->arr_coros); - STATE_FREE(state); - } - va_start(v_args, n); - for (i = 0; i < n; i++) { - state = va_arg(v_args, struct astate *); - if (state) STATE_FREE(state); +struct astate *async_gather(size_t n, struct astate * const states[]) { + static const async_runner gather_runner = { + gather_coro, NULL, gather_cancel, + ASYNC_RUNNER_ARGS_INIT(struct gather_args)}; + struct gather_args args; + args.states = (struct astate **) states; + args.n = n; + while (n--) { + async_create_task(states[n]); + ASYNC_XINCREF(states[n]); } - va_end(v_args); - return NULL; + return async_create_task(async_new(&gather_runner, &args)); } -struct astate *async_gather(size_t n, struct astate **states) { - struct astate *state; - gathered_stack *stack; - size_t i; - - ASYNC_PREPARE_NOARGS(async_gatherer, state, gathered_stack, async_gatherer_cancel, fail); - stack = state->locals; - stack->arr_coros.capacity = n; - stack->arr_coros.length = n; - stack->arr_coros.data = states; - if (!async_create_tasks(n, states)) { - STATE_FREE(state); - return NULL; - } - for (i = 0; i < n; i++) { - ASYNC_INCREF(stack->arr_coros.data[i]); - } - return state; - fail: - return NULL; +static void vgather_destr(struct astate *st) { + struct gather_args *arg = st->args; + event_loop->free(arg->states); } -typedef struct { - double sec; - clock_t start; -} sleeper_stack; - +struct astate *async_vgather(size_t n, ...) { + static const async_runner vgather_runner = { + gather_coro, vgather_destr, gather_cancel, + ASYNC_RUNNER_ARGS_INIT(struct gather_args)}; + struct gather_args args; + va_list va_args; -static async async_sleeper(struct astate *state) { - sleeper_stack *locals = state->locals; - async_begin(state); - locals->start = clock(); - await_while((double) (clock() - locals->start) / CLOCKS_PER_SEC < locals->sec); - async_end; -} + args.states = event_loop->malloc(sizeof(*args.states) * n); + if (!args.states) return NULL; + args.n = n; -struct astate *async_sleep(double delay) { - struct astate *state; - sleeper_stack *stack; - if (delay == 0) { - ASYNC_PREPARE_NOARGS(async_yielder, state, ASYNC_NONE, NULL, fail); - } else { - ASYNC_PREPARE_NOARGS(async_sleeper, state, sleeper_stack, NULL, fail); - stack = state->locals; /* Yet another predefined locals trick for mere optimisation, use async_alloc_ in real adapter functions instead. 
*/ - stack->sec = delay; + va_start(va_args, n); + while (n--) { + args.states[n] = async_create_task(va_arg(va_args, struct astate *)); + ASYNC_XINCREF(args.states[n]); } - return state; - fail: - return NULL; + va_end(va_args); + + { + struct astate *st = async_new(&vgather_runner, &args); + if (st == NULL) { /* don't leak on allocation failure: drop our ownership of the children and free the array */ + while (args.n--) { ASYNC_XDECREF(args.states[args.n]); } + event_loop->free(args.states); + return NULL; + } + return async_create_task(st); + } } +#if 0 +/* todo: any time-related operations require a separate heap queue in order to be efficient */ typedef struct { double sec; clock_t start; } waiter_stack; -static void async_waiter_cancel(struct astate *state) { - struct astate *child = state->args; - if (child == NULL) return; - if (async_create_task(child)) { - if (!async_done(child)) { - async_cancel(child); - } - ASYNC_DECREF(child); - } -} - static async async_waiter(struct astate *state) { waiter_stack *locals = state->locals; struct astate *child = state->args; @@ -499,49 +440,94 @@ struct astate *async_wait_for(struct astate *child, double timeout) { ASYNC_INCREF(child); return state; fail: - STATE_FREE(child); + STATE_FREE(child); return NULL; } -void *async_alloc_(struct astate *state, size_t size) { - void *mem; - if (state == NULL) { return NULL; } - mem = malloc(size); - if (mem == NULL) { return NULL; } - if (!async_arr_push(&state->_allocs, mem)) { - free(mem); - return NULL; +#endif + + + +static void async_allocs_prepend_(struct astate *state, memblock_header *header){ + header->prev = NULL; + header->next = state->_allocs; + + if(state->_allocs){ + memblock_header *head = state->_allocs; + head->prev = header; } - return mem; + + state->_allocs = header; } -int async_free_(struct astate *state, void *mem) { - size_t i; - void *obj; - - i = state->_allocs.length; - while (i--) { - obj = state->_allocs.data[i]; - if (obj == mem) { - free(obj); - async_arr_splice(&state->_allocs, i, 1); - return 1; - } +static void async_allocs_remove_(struct astate *state, memblock_header *header) { + if(state->_allocs == header){ + state->_allocs = header->next; + } else { + header->prev->next = header->next; + } + + if(header->next){ + header->next->prev = header->prev; } - return 0; } -int async_free_later_(struct astate *state, void *mem) { - if (mem == NULL || !async_arr_push(&state->_allocs, mem)) return 0; +void *async_alloc_(struct astate *state, size_t size) { + memblock_header *header; + assert(state != NULL); + + header = event_loop->malloc(sizeof(*header) + size); + if (header == NULL) { return NULL; } + + header->ref = NULL; + ON_DEBUG({ + header->Align.c = MEMBLOCK_ALLOC; + }); + + async_allocs_prepend_(state, header); + return header + 1; } + +void async_free_(struct astate *state, void *ptr) { + memblock_header *header; + if (ptr == NULL) { return; } + + header = (memblock_header *) ptr - 1; /* the pointer handed out by async_alloc_ sits immediately after its header */ + + assert((ignored_"List of allocs for this state is empty", state->_allocs != NULL)); + assert((ignored_"Double free of async_alloc memory", header->Align.c != MEMBLOCK_FREED)); + + async_allocs_remove_(state, header); + + MEMBLOCK_HEADER_FREE(header); +} + +int async_free_later_(struct astate *state, void *ptr) { + memblock_header *header; + if (ptr == NULL) { return 0; } + + header = event_loop->malloc(sizeof(*header)); + if (header == NULL) { return 0; } + + header->ref = ptr; + ON_DEBUG({ + header->Align.c = MEMBLOCK_FREELATER; + }); + + async_allocs_prepend_(state, header); return 1; } -struct async_event_loop *async_get_event_loop(void) { +const struct async_event_loop *async_get_event_loop(void) { return event_loop; } void async_set_event_loop(struct async_event_loop *loop) { -
event_loop = loop; + /* the current loop must not be replaced while it is running */ + assert(!event_loop || !async_get_flag_(event_loop, ASYNC_LOOP_FLAG_RUNNING)); + if(loop != NULL){ + event_loop = loop; + } else { + memcpy(&async_default_event_loop_, &async_default_event_loop_copy_, sizeof(async_default_event_loop_)); + event_loop = &async_default_event_loop_; + } } const char *async_strerror(async_error err) { @@ -549,12 +535,63 @@ const char *async_strerror(async_error err) { case ASYNC_OK: return "OK"; case ASYNC_ENOMEM: - return "MEMORY ALLOCATION ERROR"; - case ASYNC_ECANCELED: - return "COROUTINE WAS CANCELLED"; + return "MEMORY ALLOCATION FAILED"; + case ASYNC_ECANCELLED: + return "TASK WAS CANCELLED"; case ASYNC_EINVAL_STATE: - return "INVALID STATE WAS PASSED TO COROUTINE"; + return "INVALID STATE WAS PASSED TO RUNNER'S COROUTINE"; default: return "UNKNOWN ERROR"; } } + +/* convenience destructor for runners whose args is a caller-malloc'd pointer rather than a copy stored inside the task */ +void async_args_destructor(struct astate *state) { + free(state->args); +} + +void async_run(struct astate *amain) { + event_loop->init(); + event_loop->run_until_complete(amain); + event_loop->close(); +} + +#define ASYNC_CANCEL_SINGLE(state) \ + ( \ + ASYNC_DECREF(state), /* decref its ownership of self */ \ + ((state)->_runner->cancel && !(state)->_runner->cancel(state)) \ + ? ( \ + /* refused to cancel, restore refcnt */ \ + ASYNC_INCREF(state), \ + 0 \ + ) \ + : ( \ + (state)->err = ASYNC_ECANCELLED, \ + (state)->_async_k = ASYNC_DONE, \ + async_set_flag_(state, ASYNC_TASK_FLAG_CANCELLED), \ + 1 \ + ) \ + ) + +int async_cancel_(struct astate *state) { + /* these are basic requirements so you shouldn't test for these in custom cancel code */ + /* already cancelled */ + if(async_get_flag_(state, ASYNC_TASK_FLAG_CANCELLED)) return 1; + /* can't cancel finished task */ + if(async_is_done(state)) return 0; + + if (ASYNC_CANCEL_SINGLE(state)) { + while (state->_child) { + /* decref parent's ownership of child */ + ASYNC_DECREF(state->_child); + /* stop if the child is done, has owners other than itself, or refuses to cancel */ + if (async_is_done(state->_child) || state->_child->_refcnt != 1 || !ASYNC_CANCEL_SINGLE(state->_child)) { + break; + } + /* the child was cancelled as well, so descend the chain and try its child next */ + state = state->_child; + } + return 1; + } else { + return 0; + } } diff --git a/async2/async2.h b/async2/async2.h index be76662..a271f9d 100644 --- a/async2/async2.h +++ b/async2/async2.h @@ -26,7 +26,7 @@ SOFTWARE. * = Stackful Async Subroutines = * * Taking inspiration from protothreads, async.h, coroutines.h and async/await as found in python - * this is an async/await/fawait/event loop implementation for C based on Duff's device. + * this is an async/await/event loop implementation for C based on Duff's device. * * Features: * @@ -50,35 +50,40 @@
#include <stddef.h> /* NULL, offsetof */ -#ifdef _MSC_VER - /* silent MSVC's warning on unnamed type definition in parentheses for _ASYNC_COMPUTE_OFFSET because it's valid C */ - #pragma warning(disable : 4116) -#endif - #ifdef ASYNC_DEBUG #include <stdio.h> /* fprintf, stderr */ + #define ASYNC_ZUTIL_ON_DEBUG_(expr) (void)(expr) +#else + #define ASYNC_ZUTIL_ON_DEBUG_(expr) (void)0 #endif /* * The async computation status */ -typedef enum ASYNC_EVT { +typedef enum { ASYNC_INIT, ASYNC_CONT, ASYNC_DONE } async; -typedef enum ASYNC_ERR { - ASYNC_OK = 0, ASYNC_ENOMEM = 12, ASYNC_ECANCELED = 42, ASYNC_EINVAL_STATE +typedef enum { + ASYNC_OK = 0, ASYNC_ENOMEM = 12, + ASYNC_ECANCELLED = 42, ASYNC_EINVAL_STATE } async_error; -#define _ASYNC_FLAG_SCHEDULED 0x1 /* 0b1 */ -#define _ASYNC_FLAG_MUST_CANCEL 0x2 /* 0b10 */ +#define async_set_flag_(obj, flag) (void) ((obj)->_flags |= (flag)) +#define async_unset_flag_(obj, flag) (void) ((obj)->_flags &= ~(flag)) +#define async_get_flag_(obj, flag) ((obj)->_flags & (flag)) -/* - * Core async type to imply empty locals when creating new coro - */ -typedef char ASYNC_NONE; +typedef enum { + ASYNC_TASK_FLAG_SCHEDULED = 1u << 0, + ASYNC_TASK_FLAG_CANCELLED = 1u << 1 +} async_task_flags; + +typedef enum { + ASYNC_LOOP_FLAG_RUNNING = 1u << 0, + ASYNC_LOOP_FLAG_CLOSED = 1u << 1 +} async_loop_flags; -typedef struct astate *s_astate; +typedef struct astate *p_astate; /* @@ -86,118 +91,180 @@ typedef struct astate *s_astate; */ typedef async (*AsyncCallback)(struct astate *); -typedef void (*AsyncCancelCallback)(struct astate *); +typedef void (*AsyncDestructorCallback)(struct astate *); + +typedef int (*AsyncCancelCallback)(struct astate *); + +#ifdef _MSC_VER + /* silence MSVC's warning on unnamed type definition in parentheses because it's valid C */ + #pragma warning(disable : 4116) +#endif + +#define ASYNC_ZUTIL_SPACK_a_(T_a) struct { struct astate st; T_a a; } + +#define ASYNC_ZUTIL_SPACK_ab_(T_a, T_b) struct { struct astate st; T_a a; T_b b; } + /* - * Figures out proper offset from struct beginning to T_b - * in order to allocate struct capable storing both Types a and b in one go - * and prevent unaligned memory access + * Initializers for async_runner::sizes */ -#define _ASYNC_COMPUTE_OFFSET(T_a, T_b)\ - offsetof(struct{T_a a; T_b b;}, b) - -#define async_arr_t(T)\ - struct { T *data; size_t length, capacity; } +#define ASYNC_RUNNER_PURE_INIT() \ + { \ + 0, \ + 0, \ + 0, \ + 0 \ + } + +#define ASYNC_RUNNER_STACK_INIT(T_stack) \ + { \ + offsetof(ASYNC_ZUTIL_SPACK_a_(T_stack), a), \ + 0, \ + 0, \ + sizeof(ASYNC_ZUTIL_SPACK_a_(T_stack)) - sizeof(struct astate) \ + } + +#define ASYNC_RUNNER_ARGS_INIT(T_args) \ + { \ + 0, \ + sizeof(T_args), \ + offsetof(ASYNC_ZUTIL_SPACK_a_(T_args), a), \ + sizeof(ASYNC_ZUTIL_SPACK_a_(T_args)) - sizeof(struct astate) \ + } + +#define ASYNC_RUNNER_FULL_INIT(T_stack, T_args) \ + { \ + offsetof(ASYNC_ZUTIL_SPACK_ab_(T_stack, T_args), a), \ + sizeof(T_args), \ + offsetof(ASYNC_ZUTIL_SPACK_ab_(T_stack, T_args), b), \ + sizeof(ASYNC_ZUTIL_SPACK_ab_(T_stack, T_args)) - sizeof(struct astate) \ + } + +typedef struct { + AsyncCallback coro; + AsyncDestructorCallback destr; + AsyncCancelCallback cancel; + struct { + size_t stack_offset; /* offset from the base of a struct to stack */ + size_t args_size; /* size of args without padding */ + size_t args_offset; /* offset from the base of a struct to args */ + size_t task_addsize; /* additional task size for optional stack and args, includes padding, might be 0 + * computed as [stack_pad + stack + args_pre_pad +
args + args_post_pad] */ + } sizes; +} async_runner; struct astate { - /* user-accessible values: */ + /* public: */ void *args; /* args to be passed along with state to the async function */ - void *locals; /* function's stack pointer (locals_t) to be passed with state to the async function */ + void *stack; /* function's stack pointer (T_stack) to be passed with state to the async function */ async_error err; /* ASYNC_OK(0) if state has no errors, other async_error otherwise, also might be a custom error code defined by function that sets errno itself */ - /* internal numeric values: */ - size_t _refcnt; /* reference count number of functions still using this state. 1 by default, because coroutine owns itself too. If number of references is 0, the state becomes invalid and will be freed by the event loop soon */ + + /* protected: */ + async_task_flags _flags; /* default event loop functions use the first 2 bit flags, ASYNC_TASK_FLAG_SCHEDULED and ASYNC_TASK_FLAG_CANCELLED, a custom event loop might support more */ unsigned int _async_k; /* current execution state. ASYNC_EVT if <= ASYNC_DONE and number of line in the function otherwise (means that state (or its function) is still running) */ - unsigned char _flags; /* default event loop functions use first 2 bit flags: FLAG_SHEDULED and FLAG_MUST_CANCEL, custom event loop might support more */ + unsigned int _refcnt; /* reference count number of functions still using this state. 1 by default, because coroutine owns itself too. If number of references is 0, the state becomes invalid and will be freed by the event loop soon */ /* containers: */ - AsyncCallback _func; /* function to be called by the event loop */ - AsyncCancelCallback _cancel; /* function to be called in case of cancelling state, can be NULL */ - s_astate _next; /* child state used by fawait */ - - async_arr_t(void*) _allocs; /* array of memory blocks allocated by async_alloc and managed by the event loop */ - + const async_runner *_runner; + struct astate *_child; /* child state used by await */ + struct astate *_prev, *_next; + void *_allocs; #ifdef ASYNC_DEBUG - const char *debug_taskname; /* must never be explicitly initialized */ + const char *_debug_taskname_; /* must never be explicitly used */ #endif }; -struct async_event_loop { +typedef struct async_event_loop { void (*init)(void); - void (*destroy)(void); + void (*stop)(void); - struct astate *(*add_task)(struct astate *state); + void (*close)(void); - struct astate **(*add_tasks)(size_t n, struct astate **states); + struct astate *(*create_task)(struct astate *state); + + struct astate **(*create_tasks)(size_t n, struct astate * const states[]); void (*run_forever)(void); void (*run_until_complete)(struct astate *main_state); + void *(*malloc)(size_t size); + + void *(*realloc)(void *ptr, size_t size); + + void *(*calloc)(size_t count, size_t size); + + void (*free)(void *ptr); + /* Main tasks queue */ - async_arr_t(struct astate *) events_queue; - /* Helper stack to keep track of vacant indices, allows to avoid slow array - * slicing when there's a lot of tasks with a cost of bigger memory footprint */ - async_arr_t(size_t) vacant_queue; -}; + struct astate *head, *tail; + size_t n_tasks; -extern struct async_event_loop *async_default_event_loop; + async_loop_flags _flags; +} async_event_loop; -#define ASYNC_INCREF(coro) coro->_refcnt++ +extern const struct async_event_loop * const * const async_loop_ptr; + +#ifdef ASYNC_DIRECT_LOOP + #undef ASYNC_DIRECT_LOOP + #define ASYNC_DIRECT_LOOP (*async_loop_ptr) +#else + #define
ASYNC_DIRECT_LOOP (async_get_event_loop()) +#endif -#define ASYNC_DECREF(coro) coro->_refcnt-- +/* manual ownership control */ +#define ASYNC_INCREF(coro) (void) ((coro)->_refcnt++) -#define ASYNC_XINCREF(coro) if(coro) ASYNC_INCREF(coro) +#define ASYNC_DECREF(coro) (void) ((coro)->_refcnt--) -#define ASYNC_XDECREF(coro) if(coro) ASYNC_DECREF(coro) +#define ASYNC_XINCREF(coro) (void) ((coro) ? ASYNC_INCREF(coro) : (void)0) +#define ASYNC_XDECREF(coro) (void) (((coro) && (coro)->_refcnt != 0) ? ASYNC_DECREF(coro) : (void)0) /* * Mark the start of an async subroutine * Unknown continuation values now set async_errno to ASYNC_EINVAL_STATE. */ -#ifdef ASYNC_DEBUG -#define async_begin(k) \ - struct astate *_async_p = k; \ - fprintf(stderr, " Entered '%s'\n", __func__); \ - switch(_async_p->_async_k) { \ - case ASYNC_INIT: \ - fprintf(stderr, " Begin '%s'\n", __func__);\ - _async_p->debug_taskname = __func__ -#else -#define async_begin(st) \ - struct astate *_async_p = st; \ - switch(_async_p->_async_k) { \ - case ASYNC_INIT: (void)0 -#endif + +#define async_begin(st) \ + { /* open a block in case the user has statements before the async body */ \ + struct astate *_async_ctx_ = st; \ + ASYNC_ZUTIL_ON_DEBUG_(fprintf(stderr, " Entered '%s'\n", __func__)); \ + switch (_async_ctx_->_async_k) { \ + case ASYNC_INIT: \ + ASYNC_ZUTIL_ON_DEBUG_( \ + (_async_ctx_->_debug_taskname_ = __func__, fprintf(stderr, " Begin '%s'\n", __func__)) \ + ); /* * Mark the end of a async subroutine */ -#ifdef ASYNC_DEBUG -#define async_end \ - _async_p->_async_k=ASYNC_DONE; \ - ASYNC_DECREF(_async_p); \ - fprintf(stderr, " Ended '%s'\n", __func__); \ - /* fall through */ \ - case ASYNC_DONE: \ - return ASYNC_DONE; \ - default: \ - async_errno = ASYNC_EINVAL_STATE; \ - fprintf(stderr, " WARNING: %s: %s(%d)\n", async_strerror(async_errno), __FILE__, __LINE__);\ - return ASYNC_DONE;} (void) 0 -#else -#define async_end \ - _async_p->_async_k = ASYNC_DONE; \ - ASYNC_DECREF(_async_p); \ - /* fall through */ \ - case ASYNC_DONE: \ - return ASYNC_DONE; \ - default: \ - async_errno = ASYNC_EINVAL_STATE; \ - return ASYNC_DONE; \ - } (void)0 -#endif +#define async_end \ + async_exit; \ + /* fall through */ \ + case ASYNC_DONE: \ + ASYNC_ZUTIL_ON_DEBUG_( \ + fprintf( \ + stderr, " WARNING: task is already done, but its coro was called: %s(%d)\n", \ + __FILE__, __LINE__ \ + ) \ + ); \ + return ASYNC_DONE; \ + default: \ + async_errno = ASYNC_EINVAL_STATE; \ + ASYNC_ZUTIL_ON_DEBUG_( \ + fprintf(stderr, " WARNING: %s: %s(%d)\n", async_strerror(async_errno), __FILE__, __LINE__) \ + ); \ + return ASYNC_DONE; \ + } /* close async_begin's switch */ \ + } /* close async body block */ \ + (void)0 + +/* + * todo: do-while bodies might be made optional in case a compiler can't optimise empty loops. + * They exist only to prevent code like "if(cond) async_macro() else ..." from breaking with a cryptic error. + * */ /* * Wait while the condition succeeds (optional) @@ -205,127 +272,114 @@ extern struct async_event_loop *async_default_event_loop; * Continuation state is now callee-saved like protothreads which avoids * duplicate writes from the caller-saved design.
*/ -#ifdef ASYNC_DEBUG -#define await_while(cond) \ - _async_p->_async_k = __LINE__; /* fall through */ case __LINE__: \ - if (cond) return (fprintf(stderr, " Awaited in '%s' %s(%d)\n", __func__, __FILE__, __LINE__), ASYNC_CONT) -#else -#define await_while(cond) \ - _async_p->_async_k = __LINE__; /* fall through */ case __LINE__: \ - if (cond) return ASYNC_CONT -#endif +#define await_while(cond) \ + do { \ + while (cond) { \ + ASYNC_ZUTIL_ON_DEBUG_( \ + fprintf(stderr, " Awaited in '%s' %s(%d)\n", __func__, __FILE__, __LINE__) \ + ); \ + async_yield; \ + } \ + } while (0) + /* * Wait until the condition succeeds */ -#define await(cond) await_while(!(cond)) +#define await_until(cond) await_while(!(cond)) /* * Yield execution */ -#ifdef ASYNC_DEBUG -#define async_yield _async_p->_async_k = __LINE__; fprintf(stderr, " Yielded in '%s' %s(%d)\n", __func__, __FILE__, __LINE__); return ASYNC_CONT; /* fall through */ case __LINE__: (void)0 -#else -#define async_yield _async_p->_async_k = __LINE__; return ASYNC_CONT; /* fall through */ case __LINE__: (void)0 -#endif +#define async_yield \ + do { \ + ASYNC_ZUTIL_ON_DEBUG_( \ + fprintf(stderr, " Yielded in '%s' %s(%d)\n", __func__, __FILE__, __LINE__) \ + ); \ + _async_ctx_->_async_k = __LINE__; return ASYNC_CONT; /* fall through */ case __LINE__: \ + (void)0; \ + } while (0) + /* * Exit the current async subroutine */ -#ifdef ASYNC_DEBUG -#define async_exit _async_p->_async_k = ASYNC_DONE; ASYNC_DECREF(_async_p); fprintf(stderr, " Exited from '%s' %s(%d)\n", __func__, __FILE__, __LINE__); return ASYNC_DONE -#else -#define async_exit _async_p->_async_k = ASYNC_DONE; ASYNC_DECREF(_async_p); return ASYNC_DONE -#endif +#define async_exit \ + return ( \ + _async_ctx_->_async_k = ASYNC_DONE, \ + ASYNC_DECREF(_async_ctx_), \ + ASYNC_ZUTIL_ON_DEBUG_( \ + fprintf(stderr, " Exited from '%s' %s(%d)\n", __func__, __FILE__, __LINE__) \ + ), \ + ASYNC_DONE \ + ) + /* - * Cancels running coroutine + * Cancels a running task, recursively cancelling its children while the task is their only owner */ -#define async_cancel(coro) ((coro)->_flags |= _ASYNC_FLAG_MUST_CANCEL) +#define async_cancel(task) async_cancel_(task) /* * returns 1 if function was cancelled */ -#define async_cancelled(coro) (!!((coro)->_flags & _ASYNC_FLAG_MUST_CANCEL)) +#define async_is_cancelled(task) ((task)->err == ASYNC_ECANCELLED) /* * Check if async subroutine is done */ -#define async_done(coro) ((coro)->_async_k==ASYNC_DONE) +#define async_is_done(task) ((task)->_async_k == ASYNC_DONE) /* - * Create a new coro + * Create a new task */ -#define async_new(call_func, args, T_locals)\ - async_new_coro_((call_func), (args), sizeof(T_locals), _ASYNC_COMPUTE_OFFSET(struct astate, T_locals)) +#define async_new(arunner, args) async_new_task_((arunner), (args)) /* - * Create task from coro + * Schedule task from new state object */ -#define async_create_task(coro) (async_get_event_loop()->add_task(coro)) + +#define async_create_task(task) ((ASYNC_DIRECT_LOOP)->create_task(task)) + /* * Create tasks from array of states */ -#define async_create_tasks(n, coros) (async_get_event_loop()->add_tasks(n, coros)) -/* - * Get async_error code for current execution state. Can be used to check for errors after fawait() - */ -#define async_errno (_async_p->err) +#define async_create_tasks(n, tasks) ((ASYNC_DIRECT_LOOP)->create_tasks(n, tasks)) /* - * Create task and wait until the coro succeeds. Resets async_errno and sets it. + * Get async_error code for current execution state.
Can be used to check for errors after await() */ -#define fawait(coro) \ - if ((_async_p->_next = async_create_task(coro))) { \ - ASYNC_INCREF(_async_p->_next); \ - await(async_done(_async_p->_next)); \ - ASYNC_DECREF(_async_p->_next); \ - async_errno = _async_p->_next->err; \ - _async_p->_next = NULL; \ - } else { async_errno = ASYNC_ENOMEM; } \ - if(async_errno != ASYNC_OK) +#define async_errno (_async_ctx_->err) /* - * Initial preparation for adapter functions like async_sleep + * Create a task and wait until it completes. Resets async_errno, then sets it from the awaited task's err. */ -#define ASYNC_PREPARE_NOARGS(async_callback, state, T_locals, cancel_f, err_label) \ - (state) = async_new(async_callback, NULL, T_locals); \ - if (!(state)) goto err_label; \ - async_set_on_cancel(state, cancel_f) -#define ASYNC_PREPARE(async_callback, state, args_size, T_locals, cancel_f, err_label) \ - ASYNC_PREPARE_NOARGS(async_callback, state, T_locals, cancel_f); \ - if (args_size) { \ - (state)->args = async_alloc_((state), args_size); \ - if (!state->args) { \ - async_free_coro_(state); \ - goto err_label; \ - } \ - }(void) 0 - +#define await(task) \ + do { \ + if ((_async_ctx_->_child = async_create_task(task)) != NULL) { \ + ASYNC_INCREF(_async_ctx_->_child); \ + if(!async_is_done(_async_ctx_->_child)){ \ + async_yield; \ + } \ + \ + async_errno = _async_ctx_->_child->err; \ + ASYNC_DECREF(_async_ctx_->_child); \ + _async_ctx_->_child = NULL; \ + } else { \ + async_errno = ASYNC_ENOMEM; \ + } \ + } while (0) /* * Allocate memory that'll be freed automatically after async function ends. - * Allows to avoid async_cancel callback. + * Avoids the need to set a separate destructor. */ -#define async_alloc(size) async_alloc_(_async_p, size) +#define async_alloc(size) async_alloc_(_async_ctx_, size) -#define async_free(ptr) async_free_(_async_p, ptr) +#define async_free(ptr) async_free_(_async_ctx_, ptr) -#define async_free_later(ptr) async_free_later_(_async_p, ptr) +#define async_free_later(ptr) async_free_later_(_async_ctx_, ptr) -/* - * Set function to be executed on function cancellation once. Can be used to free memory and finish some tasks. - */ -#define async_set_on_cancel(coro, cancel_func) (coro->_cancel=cancel_func) -/* - * Set function to be executed on function cancellation once. This version can be used inside the async function. - * In this case cancel_func will be called only if async function has reached async_on_cancel statement - * before async_cancel() was called on current state. - */ -#define async_on_cancel(cancel_func) async_set_on_cancel(_async_p, cancel_func) /* * Run few variadic tasks in parallel @@ -334,10 +388,9 @@ struct astate *async_vgather(size_t n, ...); /* * Does the same, but takes array and number of array elements. - * Arr must not be freed before this coro is done or cancelled. - * arr will be modified inside the task, so pass a copy if you need original array to be unchanged. + * The array must not be freed before this task is done. */ -struct astate *async_gather(size_t n, struct astate **states); +struct astate *async_gather(size_t n, struct astate * const states[]); /* * Block for `delay` seconds @@ -349,24 +402,30 @@ struct astate *async_sleep(double delay); */ struct astate *async_wait_for(struct astate *child, double timeout); -struct async_event_loop *async_get_event_loop(void); +const struct async_event_loop *async_get_event_loop(void); void async_set_event_loop(struct async_event_loop *); +void async_run(struct astate *amain); + /* * Internal functions, use with caution!
(At least read the code) */ -struct astate *async_new_coro_(AsyncCallback child_f, void *args, size_t stack_size, size_t stack_offset); +struct astate *async_new_task_(const async_runner *runner, void *args); + +void async_args_destructor(struct astate *state); + +void async_free_task_(struct astate *state); -void async_free_coro_(struct astate *state); +void async_free_tasks_(size_t n, struct astate * const states[]); -void async_free_coros_(size_t n, struct astate **states); +int async_cancel_(struct astate *state); void *async_alloc_(struct astate *state, size_t size); -int async_free_(struct astate *state, void *mem); +void async_free_(struct astate *state, void *ptr); -int async_free_later_(struct astate *state, void *mem); +int async_free_later_(struct astate *state, void *ptr); const char *async_strerror(async_error err);
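A minimal usage sketch of the reworked runner API, for reviewers (it assumes the post-patch header shown above; the counter_* names are illustrative and not part of the patch):

#include "async2.h"
#include <stdio.h>

struct counter_stack { int i; };

/* coroutine locals live in the task-owned stack area, so they survive yields */
static async counter_coro(struct astate *st) {
    struct counter_stack *stack = st->stack;
    async_begin(st);
    for (stack->i = 0; stack->i < 3; stack->i++) {
        printf("tick %d\n", stack->i);
        async_yield;
    }
    async_end;
}

static const async_runner counter_runner = {
    counter_coro, NULL, NULL, /* coro, destr, cancel */
    ASYNC_RUNNER_STACK_INIT(struct counter_stack)
};

int main(void) {
    /* async_run() wraps init(), run_until_complete() and close() on the current event loop */
    async_run(async_new(&counter_runner, NULL));
    return 0;
}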