/* NOTE: The original fifteen #include directives lost their header names in
 * extraction. The standard headers below are the ones this code visibly
 * requires; the remaining project-specific headers, which must declare
 * struct context, enum rt_syscall, the rt_atomic_* wrappers, the MSTATUS_*
 * bit definitions, and msi_pend, are not recoverable from this excerpt. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

__attribute__((noreturn)) void rt_task_entry(void);

void *rt_context_init(uintptr_t fn, uintptr_t arg, void *stack,
                      size_t stack_size)
{
    void *const sp = (char *)stack + stack_size;
    struct context *ctx = sp;
    --ctx;
    ctx->a0 = arg;
    ctx->a1 = fn;
    ctx->pc = (uintptr_t)rt_task_entry;
    /* Tasks start in machine mode with machine interrupts enabled.
     * NOTE: mstatus.MIE is not set here because this is the value of mstatus
     * when an exception is active on top of a task, not while the task is
     * running. The syscall handling code additionally relies on the context
     * restore sequence to disable interrupts prior to doing an mret, which
     * will re-enable them. */
    ctx->mstatus = MSTATUS_MPP_MACHINE | MSTATUS_MPIE;
    return ctx;
}

__attribute__((noreturn, weak)) void rt_idle(void)
{
    /* NOTE: wfi can cause illegal instruction exceptions in non-privileged
     * modes, depending on the implementation and mstatus.TW. */
    for (;;)
    {
        __asm__("wfi" :::);
    }
}

__attribute__((noreturn, weak)) void rt_abort(void)
{
    for (;;)
    {
        __asm__("unimp" :::);
    }
}

__attribute__((noreturn, weak)) void rt_trap(void)
{
    for (;;)
    {
        __asm__("ebreak" :::);
    }
}

void rt_task_drop_privilege(void)
{
    /* There isn't a direct way to change the privilege mode, but mstatus.MPP
     * will be set to the least-privileged supported mode after the mret that
     * enters the task context, and executing another mret will change to that
     * mode. mret will also set pc to mepc, so we first need to set mepc to
     * the instruction following the mret. Because mepc is overwritten
     * whenever an exception is taken, we also need to clear mstatus.MIE to
     * prevent exceptions from occurring in between the write to mepc and the
     * mret. The mret will re-enable interrupts because mstatus.MPIE is still
     * set. A memory barrier prevents any accesses that are made with the
     * higher privilege level from occurring after the privilege level is
     * dropped. */
    uintptr_t mepc;
    __asm__ __volatile__("la %[mepc], 0f;"
                         "fence;"
                         "csrc mstatus, %[mstatus_mie];"
                         "csrw mepc, %[mepc];"
                         "mret;"
                         "0:;"
                         : [mepc] "=&r"(mepc)
                         : [mstatus_mie] "i"(MSTATUS_MIE)
                         : "memory");
}

/* The syscall number is passed in a0 and the arguments in a1-a3; ecall traps
 * into the machine-mode exception handler. */
void rt_syscall_0(enum rt_syscall syscall)
{
    register enum rt_syscall s __asm__("a0") = syscall;
    __asm__("ecall" : : "r"(s) : "memory");
}

void rt_syscall_1(enum rt_syscall syscall, uintptr_t arg0)
{
    register enum rt_syscall s __asm__("a0") = syscall;
    register uintptr_t a0 __asm__("a1") = arg0;
    __asm__("ecall" : : "r"(s), "r"(a0) : "memory");
}

void rt_syscall_2(enum rt_syscall syscall, uintptr_t arg0, uintptr_t arg1)
{
    register enum rt_syscall s __asm__("a0") = syscall;
    register uintptr_t a0 __asm__("a1") = arg0, a1 __asm__("a2") = arg1;
    __asm__("ecall" : : "r"(s), "r"(a0), "r"(a1) : "memory");
}

void rt_syscall_3(enum rt_syscall syscall, uintptr_t arg0, uintptr_t arg1,
                  uintptr_t arg2)
{
    register enum rt_syscall s __asm__("a0") = syscall;
    register uintptr_t a0 __asm__("a1") = arg0, a1 __asm__("a2") = arg1,
                       a2 __asm__("a3") = arg2;
    __asm__("ecall" : : "r"(s), "r"(a0), "r"(a1), "r"(a2) : "memory");
}

void rt_syscall_pend(void)
{
    msi_pend();
}

rt_atomic_int rt_trap_level = -1;

bool rt_interrupt_is_active(void)
{
    return rt_atomic_load(&rt_trap_level, RT_ATOMIC_RELAXED) >= 0;
}

void rt_tls_set(void *tls)
{
    __asm__("mv tp, %0" : : "r"(tls));
}
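/* Usage sketch (illustrative, not part of the original port): how a task's
 * initial context might be built with rt_context_init. Everything inside the
 * guard, including the RT_RISCV_CONTEXT_EXAMPLE macro itself and the
 * example_* names, is hypothetical. */
#ifdef RT_RISCV_CONTEXT_EXAMPLE
/* Hypothetical task body; rt_context_init stores its address in the saved a1
 * and its argument in the saved a0, and rt_task_entry dispatches to it. */
static void example_task_fn(uintptr_t arg)
{
    (void)arg;
    for (;;)
    {
    }
}

static char example_stack[512] __attribute__((aligned(16)));

__attribute__((unused)) static void *example_context_create(void)
{
    /* The returned pointer is the initial saved context at the top of the
     * stack; restoring it performs an mret into example_task_fn with an
     * argument of 0. */
    return rt_context_init((uintptr_t)example_task_fn, 0, example_stack,
                           sizeof example_stack);
}
#endif /* RT_RISCV_CONTEXT_EXAMPLE */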
/* Some cores don't support the cycle CSR but might have other cycle counters
 * available, so define rt_cycle and rt_cycle_init as weak. */
__attribute__((weak)) void rt_cycle_init(void)
{
}

__attribute__((weak)) uint32_t rt_cycle(void)
{
    uint32_t x;
    __asm__ __volatile__("rdcycle %0" : "=r"(x));
    return x;
}
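/* Override sketch (illustrative, not part of the original port): a core that
 * traps rdcycle may still implement the M-mode mcycle CSR, so a port could
 * use a body like the one below as its strong rt_cycle override. The
 * RT_RISCV_MCYCLE_EXAMPLE guard macro and example_mcycle_read name are
 * hypothetical; the sketch assumes RV32 machine-mode execution with mcycle
 * already running. */
#ifdef RT_RISCV_MCYCLE_EXAMPLE
__attribute__((unused)) static uint32_t example_mcycle_read(void)
{
    uint32_t x;
    /* mcycle is the machine-mode cycle counter; on RV32 this reads its low
     * 32 bits. */
    __asm__ __volatile__("csrr %0, mcycle" : "=r"(x));
    return x;
}
#endif /* RT_RISCV_MCYCLE_EXAMPLE */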