#pragma once

#include <rt/context.h>
#include <rt/list.h>
#include <rt/mpu.h>
#include <rt/stack.h>
#include <rt/tls.h>

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef RT_TASK_READY_CTZ_ENABLE
#if defined(__ARM_FEATURE_CLZ) || defined(__riscv_zbb) || defined(__x86_64__)
#define RT_TASK_READY_CTZ_ENABLE 1
#else // architecture detection
#define RT_TASK_READY_CTZ_ENABLE 0
#endif
#endif // !defined(RT_TASK_READY_CTZ_ENABLE)

#define RT_TASK_PRIORITY_MIN UINT32_C(0)
#define RT_TASK_PRIORITY_MAX UINT32_C(31)
#define RT_TASK_PRIORITY_IDLE RT_TASK_PRIORITY_MAX

#ifndef RT_TASK_CYCLE_ENABLE
#define RT_TASK_CYCLE_ENABLE 0
#endif

#if RT_TASK_CYCLE_ENABLE && !RT_CYCLE_ENABLE
#error "To use task cycle counts, the cycle counter must be enabled."
#endif

/* Yield the core to another task of the same priority. If the current task is
 * still the highest priority, it will continue executing. */
void rt_task_yield(void);

// Sleep the current task for a given number of ticks.
void rt_task_sleep(unsigned long ticks);

/* Sleep the current task until *last_wake_tick + period. *last_wake_tick will
 * be set to the next wakeup tick. (A usage sketch appears at the end of this
 * header.) */
void rt_task_sleep_periodic(unsigned long *last_wake_tick,
                            unsigned long period);

// Exit from the current task. Returning from a task function also exits.
__attribute__((noreturn)) void rt_task_exit(void);

// Get the name of the current task.
const char *rt_task_name(void);

// Get the current task.
struct rt_task *rt_task_self(void);

// Make the current task unprivileged. (This is a no-op on some architectures.)
void rt_task_drop_privilege(void);

extern struct rt_list rt_global_task_list;

enum rt_task_state
{
    RT_TASK_STATE_RUNNING,
    RT_TASK_STATE_READY,
    RT_TASK_STATE_BLOCKED_ON_SEM_WAIT,
    RT_TASK_STATE_BLOCKED_ON_SEM_TIMEDWAIT,
    RT_TASK_STATE_BLOCKED_ON_MUTEX_LOCK,
    RT_TASK_STATE_BLOCKED_ON_MUTEX_TIMEDLOCK,
    RT_TASK_STATE_BLOCKED_ON_EVENT_WAIT,
    RT_TASK_STATE_BLOCKED_ON_EVENT_TIMEDWAIT,
    RT_TASK_STATE_ASLEEP,
    RT_TASK_STATE_EXITED,
};

struct rt_task
{
    struct rt_list list;
    struct rt_list sleep_list;
    void *ctx;
    uint32_t priority;
    uint32_t base_priority;
    unsigned long wake_tick;
#if RT_MPU_TASK_REGIONS_ENABLE
    struct rt_mpu_config mpu_config;
#endif // RT_MPU_TASK_REGIONS_ENABLE
#if RT_TASK_LOCAL_STORAGE_ENABLE
    void *tls;
#endif // RT_TASK_LOCAL_STORAGE_ENABLE
    enum rt_task_state state;
    union
    {
        struct rt_sem *sem;
        struct rt_mutex *mutex;
        struct rt_event *event;
    } blocker;
    union
    {
        bool *success;
        uint32_t *bits;
    } syscall_return;
    struct rt_list mutex_list;
#if RT_TASK_CYCLE_ENABLE
    uint64_t total_cycles;
    uint32_t start_cycle;
#endif
    const char *name;
    struct rt_list global_task_list;
};

#if RT_MPU_TASK_REGIONS_ENABLE
rt_static_assert(offsetof(struct rt_task, mpu_config) ==
                     RT_MPU_TASK_CONFIG_OFFSET,
                 "RT_MPU_TASK_CONFIG_OFFSET is incorrect");
#endif // RT_MPU_TASK_REGIONS_ENABLE

/* Add a task to the ready list. This function may only be called in the system
 * call handler or before rt_start. */
void rt_task_ready(struct rt_task *task);

#define RT_TASK_INIT(name_, name_str, priority_)                               \
    {                                                                          \
        .list = RT_LIST_INIT(name_.list),                                      \
        .sleep_list = RT_LIST_INIT(name_.sleep_list),                          \
        .priority = (priority_),                                               \
        .base_priority = (priority_),                                          \
        .state = RT_TASK_STATE_READY,                                          \
        .mutex_list = RT_LIST_INIT(name_.mutex_list),                          \
        .name = (name_str),                                                    \
        .global_task_list = RT_LIST_INIT(name_.global_task_list),              \
    }

#define RT_CAT_(a, b) a##b
#define RT_CAT(a, b) RT_CAT_(a, b)

#if RT_MPU_TASK_REGIONS_ENABLE
#define RT_TASK_STACK_MPU_REGION(fn)                                           \
    RT_MPU_REGION(fn##_task_stack, sizeof fn##_task_stack, RT_MPU_ATTR_STACK)
#define RT_TASK_MPU_CONFIG_INIT(fn, ...)                                       \
    RT_MPU_CONFIG_INIT(&fn##_task.mpu_config, __VA_ARGS__);
#else // !RT_MPU_TASK_REGIONS_ENABLE
#define RT_TASK_STACK_MPU_REGION(fn)
#define RT_TASK_MPU_CONFIG_INIT(fn, ...)
#endif // RT_MPU_TASK_REGIONS_ENABLE

#if RT_TASK_LOCAL_STORAGE_ENABLE
/* Don't wrap in a do {} while (0) so the MPU region config can access the
 * definition of fn##_task_local_storage. */
#define RT_TASK_LOCAL_STORAGE_INIT(fn)                                         \
    static RT_TLS(fn##_task_local_storage, RT_TASK_LOCAL_STORAGE_SIZE);        \
    rt_tls_init(fn##_task_local_storage, sizeof fn##_task_local_storage);      \
    fn##_task.tls = fn##_task_local_storage

/* Use a trailing comma so the no-tls case can be empty and not take up another
 * MPU region. */
#define RT_TASK_LOCAL_STORAGE_MPU_REGION(fn)                                   \
    RT_MPU_REGION(fn##_task_local_storage, sizeof fn##_task_local_storage,     \
                  RT_MPU_ATTR_STACK),
#else // !RT_TASK_LOCAL_STORAGE_ENABLE
#define RT_TASK_LOCAL_STORAGE_INIT(fn)                                         \
    do                                                                         \
    {                                                                          \
    } while (0)
#define RT_TASK_LOCAL_STORAGE_MPU_REGION(fn)
#endif // RT_TASK_LOCAL_STORAGE_ENABLE

#define RT_TASK_COMMON(fn, arg, stack_size, priority, name, ...)               \
    __attribute__((constructor, used)) static void RT_CAT(fn##_task_init_,     \
                                                          __COUNTER__)(void)   \
    {                                                                          \
        static RT_STACK(fn##_task_stack, stack_size);                          \
        RT_MPU_PRIV_DATA(fn##_task)                                            \
        static struct rt_task fn##_task =                                      \
            RT_TASK_INIT(fn##_task, name, priority);                           \
        RT_TASK_LOCAL_STORAGE_INIT(fn);                                        \
        RT_TASK_MPU_CONFIG_INIT(fn, RT_TASK_STACK_MPU_REGION(fn),              \
                                RT_TASK_LOCAL_STORAGE_MPU_REGION(fn)           \
                                    __VA_ARGS__);                              \
        fn##_task.ctx =                                                        \
            rt_context_init((uintptr_t)(fn), (arg), fn##_task_stack,           \
                            sizeof fn##_task_stack);                           \
        rt_list_push_back(&rt_global_task_list, &fn##_task.global_task_list);  \
        rt_task_ready(&fn##_task);                                             \
    }                                                                          \
    rt_static_assert((priority) <= RT_TASK_PRIORITY_MAX,                       \
                     "the priority of task \"" name "\", " #priority           \
                     ", exceeds the maximum value")

/* Create a task that runs fn on a stack of at least stack_size, with the given
 * priority. Additional arguments are MPU regions that will be active while the
 * task is running. */
#define RT_TASK(fn, stack_size, priority, ...)                                 \
    RT_TASK_COMMON(fn, 0, stack_size, priority, #fn, __VA_ARGS__)

#define RT_TASK_ARG(fn, arg, stack_size, priority, ...)                        \
    RT_TASK_COMMON(fn, arg, stack_size, priority, #fn "(" #arg ")",            \
                   __VA_ARGS__)

extern struct rt_task *rt_active_task;

#ifdef __cplusplus
}
#endif
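
/* A minimal usage sketch (illustrative only, not part of this header): a
 * periodic task created with RT_TASK. toggle_led() is hypothetical, the task
 * function is assumed here to take no arguments, and the stack size (512) and
 * priority (1) are arbitrary example values. rt_task_sleep_periodic anchors
 * each wakeup to *last_wake_tick rather than to the end of the loop body, so
 * the period does not drift with the task's execution time.
 *
 *     static void blink(void)
 *     {
 *         // The task starts near tick 0, so 0 is a reasonable initial value.
 *         unsigned long last_wake_tick = 0;
 *         for (;;)
 *         {
 *             toggle_led();
 *             // Sleep until last_wake_tick + 100; last_wake_tick is advanced
 *             // to the next wakeup tick.
 *             rt_task_sleep_periodic(&last_wake_tick, 100);
 *         }
 *     }
 *
 *     RT_TASK(blink, 512, 1);
 */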
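
/* A sketch of RT_TASK_ARG under the same caveats: the extra argument is
 * forwarded to the task function through rt_context_init, so the function is
 * assumed here to take a single uintptr_t parameter. uart_write_id() is
 * hypothetical. Because RT_TASK_COMMON declares its statics inside a
 * constructor named with __COUNTER__, each invocation gets its own stack and
 * task struct, and the task names become "echo(0)" and "echo(1)" via
 * #fn "(" #arg ")".
 *
 *     static void echo(uintptr_t id)
 *     {
 *         for (;;)
 *         {
 *             uart_write_id(id);
 *             rt_task_sleep(10);
 *         }
 *     }
 *
 *     RT_TASK_ARG(echo, 0, 512, 2);
 *     RT_TASK_ARG(echo, 1, 512, 2);
 */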