/* RISC-V (RV32) trap, system-call, and context-switch entry points.
 * Volatile-context stack frame layout used throughout (offsets from frame base):
 *   0:t0 4:t1 8:t2 12:mepc 16:mstatus 20..48:a0-a7 52..64:t3-t6 68:ra
 * (so VOLATILE_CONTEXT_SIZE is evidently 72 bytes — TODO confirm against the
 * header that defines it). Callee-saved s0-s11 are stored separately, below
 * the volatile frame, in a NONVOLATILE_CONTEXT_SIZE-byte area. */

/* NOTE(review): the five #include directives below have lost their header
 * names (the "<...>" targets appear stripped by text extraction) — restore
 * them from the original source. The macros/constants referenced here
 * (RT_MPU_*, MSTATUS_MIE, MIx_MSI, MSIP_BASE, *_CONTEXT_SIZE) presumably
 * come from those headers. */
#include
#include
#include
#include
#include

/* Separate saving t0-t2 so they can happen first and interrupts can be re-enabled
 * before all of the volatile context is saved. */
.macro save_t0t1t2
    sw t0, 0(sp)
    sw t1, 4(sp)
    sw t2, 8(sp)
.endm

/* Program the PMP with the active task's memory-protection regions.
 * Clobbers t0-t4 (via pmpaddrset/pmpcfgset). Compiles to nothing when
 * RT_MPU_TASK_REGIONS_ENABLE is 0. */
.macro mpuconfigure
#if RT_MPU_TASK_REGIONS_ENABLE
    // t0 = rt_active_task (pointer to the task whose PMP values we load).
    lui t0, %hi(rt_active_task)
    lw t0, %lo(rt_active_task)(t0)
    pmpaddrset RT_MPU_TASK_REGION_START_ID, RT_MPU_NUM_TASK_REGIONS
    pmpcfgset RT_MPU_TASK_REGION_START_ID, RT_MPU_NUM_TASK_REGIONS
    // Ensure the new PMP configuration is observed before continuing.
    fence
#endif
.endm

/* Recursively write \n pmpaddr CSRs starting at region index \r, four per
 * expansion, from the task's saved pmpaddr array at
 * RT_MPU_TASK_PMPADDR_OFFSET(t0). Expects t0 = active task pointer;
 * clobbers t1-t4. \n is assumed to be a multiple of 4 — TODO confirm. */
.macro pmpaddrset r, n
    .if \n == 0
    .exitm
    .endif
    lw t1, (RT_MPU_TASK_PMPADDR_OFFSET + 4*\r)(t0)
    lw t2, (RT_MPU_TASK_PMPADDR_OFFSET + 4*(\r + 1))(t0)
    lw t3, (RT_MPU_TASK_PMPADDR_OFFSET + 4*(\r + 2))(t0)
    lw t4, (RT_MPU_TASK_PMPADDR_OFFSET + 4*(\r + 3))(t0)
    csrw RT_MPU_CSR_PMPADDR0 + \r, t1
    csrw RT_MPU_CSR_PMPADDR0 + \r + 1, t2
    csrw RT_MPU_CSR_PMPADDR0 + \r + 2, t3
    csrw RT_MPU_CSR_PMPADDR0 + \r + 3, t4
    pmpaddrset (\r + 4), (\n - 4)
.endm

/* Recursively write the pmpcfg CSRs covering \n regions starting at region
 * index \r. Each 32-bit pmpcfg register packs the config bytes for 4 regions,
 * hence one lw/csrw per 4 regions and the (\r / 4) CSR index. Expects
 * t0 = active task pointer; clobbers t1. */
.macro pmpcfgset r, n
    .if \n == 0
    .exitm
    .endif
    lw t1, (RT_MPU_TASK_PMPCFG_OFFSET + \r)(t0)
    csrw RT_MPU_CSR_PMPCFG0 + (\r / 4), t1
    pmpcfgset (\r + 4), (\n - 4)
.endm

/* Common body for synchronous (ecall) and asynchronous (software-interrupt)
 * system calls. On entry the volatile frame has been allocated on the task
 * stack and t0-t2 already saved into it (see save_t0t1t2).
 *   \mepc_adjust — bytes to add to the saved mepc (4 for ecall, to skip the
 *                  trapping instruction; 0 for interrupts).
 *   \syscall_run — C function to call on the main stack; returns in a0 the
 *                  context to resume, or 0 to resume the interrupted task
 *                  without a context switch. */
.macro syscall_handler mepc_adjust, syscall_run
    // Trap level 0 = handling a system call (set to -1 again on exit below).
    lui t1, %hi(rt_trap_level)
    sw zero, %lo(rt_trap_level)(t1)
    // t0 = frame base on the task stack; switch to the kernel's main stack.
    mv t0, sp
    la sp, rt_main_sp
    // Mask the machine software interrupt so re-enabling interrupts below
    // cannot re-enter this handler.
    csrc mie, MIx_MSI
    // Capture return address, then atomically re-enable global interrupts
    // while reading the pre-trap mstatus into t2.
    csrr t1, mepc
    csrrs t2, mstatus, MSTATUS_MIE
    .if \mepc_adjust != 0
    add t1, t1, \mepc_adjust
    .endif
    // Save mepc, mstatus, and the remaining volatile registers into the frame.
    sw t1, 12(t0)
    sw t2, 16(t0)
    sw a0, 20(t0)
    sw a1, 24(t0)
    sw a2, 28(t0)
    sw a3, 32(t0)
    sw a4, 36(t0)
    sw a5, 40(t0)
    sw a6, 44(t0)
    sw a7, 48(t0)
    sw t3, 52(t0)
    sw t4, 56(t0)
    sw t5, 60(t0)
    sw t6, 64(t0)
    sw ra, 68(t0)
    // Keep the frame pointer in mscratch across the C call (t0 is volatile).
    csrw mscratch, t0
    jal \syscall_run
    csrr t0, mscratch
    // a0 == 0: no context switch; skip callee-saved save/restore entirely.
    // (\@ is the gas macro-invocation counter, giving a unique numeric label.)
    beqz a0, \@f
    // Context switch: save s0-s11 below the volatile frame.
    add t0, t0, -NONVOLATILE_CONTEXT_SIZE
    sw s0, 0(t0)
    sw s1, 4(t0)
    sw s2, 8(t0)
    sw s3, 12(t0)
    sw s4, 16(t0)
    sw s5, 20(t0)
    sw s6, 24(t0)
    sw s7, 28(t0)
    sw s8, 32(t0)
    sw s9, 36(t0)
    sw s10, 40(t0)
    sw s11, 44(t0)
    // Store the stack pointer with the saved context.
    lui t1, %hi(rt_context_prev)
    lw t1, %lo(rt_context_prev)(t1)
    sw t0, 0(t1)
    // Load the incoming task's PMP regions before touching its memory context.
    mpuconfigure
    /* Holding a memory reservation across a synchronous system call is
     * technically possible, but rt_syscall's inline assembly has a memory clobber
     * on it, so this shouldn't happen in real code. Therefore, don't bother
     * clearing the reservation.
     * NOTE: during an asynchronous system call, the memory reservation of the
     * current task needs to be cleared. Depending on the core's implementation of
     * Zalrsc, this may happen automatically when the exception occurs, or may
     * happen with any store. If the reservation can only be cleared with another
     * sc, then we need to do one here. */
    // Restore the incoming task's callee-saved registers (a0 = its context).
    lw s0, 0(a0)
    lw s1, 4(a0)
    lw s2, 8(a0)
    lw s3, 12(a0)
    lw s4, 16(a0)
    lw s5, 20(a0)
    lw s6, 24(a0)
    lw s7, 28(a0)
    lw s8, 32(a0)
    lw s9, 36(a0)
    lw s10, 40(a0)
    lw s11, 44(a0)
    // Switch to the new task stack.
    add t0, a0, NONVOLATILE_CONTEXT_SIZE
\@:
    // Restore the volatile registers from the frame at t0.
    lw a0, 20(t0)
    lw a1, 24(t0)
    lw a2, 28(t0)
    lw a3, 32(t0)
    lw a4, 36(t0)
    lw a5, 40(t0)
    lw a6, 44(t0)
    lw a7, 48(t0)
    lw t3, 52(t0)
    lw t4, 56(t0)
    lw t5, 60(t0)
    lw t6, 64(t0)
    lw ra, 68(t0)
    /* Restore mstatus before mepc so that interrupts are disabled first.
     * Otherwise mepc could be clobbered after it is restored. */
    lw t1, 16(t0)
    csrw mstatus, t1
    lw t2, 12(t0)
    // Unmask the software interrupt again and restore the return address.
    csrs mie, MIx_MSI
    csrw mepc, t2
    // Leaving trap context: trap level back to -1.
    li t2, -1
    lui t1, %hi(rt_trap_level)
    sw t2, %lo(rt_trap_level)(t1)
    /* Adjust the stack pointer as it is set and before t0-t2 are restored.
     * This is safe because interrupts are disabled here.
     */
    add sp, t0, VOLATILE_CONTEXT_SIZE
    lw t0, -VOLATILE_CONTEXT_SIZE(sp)
    lw t1, -VOLATILE_CONTEXT_SIZE+4(sp)
    lw t2, -VOLATILE_CONTEXT_SIZE+8(sp)
    mret
.endm

/* Synchronous system-call entry (ecall). mepc is advanced by 4 so the task
 * resumes after the ecall instruction. NOTE(review): unlike rt_msi_handler,
 * no frame allocation/save_t0t1t2 is visible here — presumably done by a
 * shared vector stub before dispatch (cf. rt_trap_handler); confirm. */
.section .text.rt_ecall_handler, "ax", %progbits
.global rt_ecall_handler
.type rt_ecall_handler, %function
rt_ecall_handler:
    syscall_handler 4, rt_syscall_run

/* Machine software-interrupt body: acknowledge the interrupt by clearing the
 * MSIP memory-mapped bit, then run any pending system calls. mepc needs no
 * adjustment (interrupts resume at the interrupted instruction). */
.macro msi_handler
    lui t1, %hi(MSIP_BASE)
    sw zero, %lo(MSIP_BASE)(t1)
    syscall_handler 0, rt_syscall_run_pending
.endm

/* MSI entry for direct vectoring: allocates the volatile frame and saves
 * t0-t2 itself before the common body. */
.section .text.rt_msi_handler, "ax", %progbits
.global rt_msi_handler
.type rt_msi_handler, %function
rt_msi_handler:
    add sp, sp, -VOLATILE_CONTEXT_SIZE
    save_t0t1t2
    msi_handler

/* MSI entry reached via rt_trap_handler, which has already allocated the
 * frame and saved t0-t2. */
.section .text.rt_msi_trap_handler, "ax", %progbits
.global rt_msi_trap_handler
.type rt_msi_trap_handler, %function
rt_msi_trap_handler:
    msi_handler

/* Start the scheduler: rt_start_context returns (in a0) the first task's
 * context; load its PMP regions, adopt its stack, and mret into it. */
.section .text.rt_start, "ax", %progbits
.global rt_start
.type rt_start, %function
rt_start:
    jal rt_start_context
    mpuconfigure
    // Switch to the first task's stack and go to the top of it.
    add sp, a0, CONTEXT_SIZE
    // Load mepc, mstatus, and the arguments to rt_task_entry.
    lw t1, -VOLATILE_CONTEXT_SIZE+12(sp)
    lw t2, -VOLATILE_CONTEXT_SIZE+16(sp)
    lw a0, -VOLATILE_CONTEXT_SIZE+20(sp)
    lw a1, -VOLATILE_CONTEXT_SIZE+24(sp)
    // Enable the software interrupt, then enter the task via mret.
    csrs mie, MIx_MSI
    csrw mepc, t1
    csrw mstatus, t2
    mret
.size rt_start, .-rt_start

/* Common task trampoline: a1 holds the task function (a0 its argument per
 * the loads in rt_start); when it returns, exit the task. */
.section .text.rt_task_entry, "ax", %progbits
.global rt_task_entry
.type rt_task_entry, %function
rt_task_entry:
    jalr a1
    j rt_task_exit
.size rt_task_entry, .-rt_task_entry

/* Generic trap entry: save t0-t2, read-and-clear mcause, and tail-dispatch
 * through rt_trap_vector (a table of 4-byte handler pointers indexed by
 * cause; hence the shift-left-by-2). The target handler inherits the
 * already-allocated frame with t0-t2 saved. */
.section .text.rt_trap_handler, "ax", %progbits
.global rt_trap_handler
.type rt_trap_handler, %function
rt_trap_handler:
    add sp, sp, -VOLATILE_CONTEXT_SIZE
    save_t0t1t2
    csrrw t2, mcause, zero
    sll t0, t2, 2
    lui t1, %hi(rt_trap_vector)
    add t0, t0, t1
    lw t0, %lo(rt_trap_vector)(t0)
    jr t0
.size rt_trap_handler, .-rt_trap_handler