.intel_syntax noprefix

/*
 * x86-64 (System V AMD64) context-switch and syscall trampolines.
 *
 * A saved context is just a stack pointer: push_nonvolatile lays out the
 * callee-saved state on the task's own stack, and the resulting rsp is
 * stored through *rt_context_prev. Resuming a context is therefore
 * "mov rsp, <saved>" followed by pop_nonvolatile.
 */

/*
 * Save the callee-saved integer registers plus the x87 control word and
 * SSE MXCSR, and record whether this context was saved from inside a
 * pending-syscall signal handler (\in_signal: 0 or 1).
 *
 * Stack layout created (relative to rsp after the macro):
 *   [rsp + 6]  in_signal flag (1 byte)
 *   [rsp + 4]  x87 control word (fstcw stores 2 bytes)
 *   [rsp + 0]  MXCSR (stmxcsr stores 4 bytes)
 *   then the five pushed registers above that.
 *
 * NOTE(review): fstcw is the waiting form and will raise any pending x87
 * exception before storing; fnstcw would not — confirm which is intended.
 */
.macro push_nonvolatile in_signal
        push    r15
        push    r14
        push    r13
        push    r12
        push    rbx
        sub     rsp, 8                  /* room for flag + FCW + MXCSR */
        mov     byte ptr [rsp + 6], \in_signal
        fstcw   [rsp + 4]
        stmxcsr [rsp]
.endm

/*
 * Restore everything push_nonvolatile saved. On exit, ZF is set iff the
 * restored context was NOT saved from a signal handler (its in_signal
 * byte was 0); sysreturn's je consumes that flag.
 */
.macro pop_nonvolatile
        ldmxcsr [rsp]
        fldcw   [rsp + 4]
        cmp     byte ptr [rsp + 6], 0
        /* Use lea instead of add because it doesn't update the condition
         * flags. The cmp result's ZF flag will be used by sysreturn.
         * (pop does not modify flags either, so ZF survives the pops.) */
        lea     rsp, [rsp + 8]
        pop     rbx
        pop     r12
        pop     r13
        pop     r14
        pop     r15
.endm

/*
 * Save the current context, publish its stack pointer through
 * *rt_context_prev (accessed via the GOT for PIC), then switch to the
 * context whose saved stack pointer is in rax and restore it.
 * Leaves ZF per pop_nonvolatile for the following sysreturn.
 */
.macro swapcontext in_signal
        push_nonvolatile \in_signal
        // Store the stack pointer with the saved context.
        mov     rdi, [rip + rt_context_prev@GOTPCREL]
        mov     rdi, [rdi]
        mov     [rdi], rsp
        // Switch to the new task stack.
        mov     rsp, rax
        pop_nonvolatile
.endm

.macro sysreturn
        /* If returning into a task that isn't in a pending syscall handler,
         * we need to unblock pending syscalls on return. Tasks that are in
         * a pending syscall handler will do this automatically during the
         * signal return. */
        pop     rbp
        je      rt_unblock_pending_syscalls   /* ZF from pop_nonvolatile */
        ret
.endm

.text

/*
 * Syscall entry. All arities share one body: the C callers have already
 * placed their arguments in the SysV argument registers, which are
 * preserved across rt_block_pending_syscalls below and then handed to
 * rt_syscall_run unchanged.
 */
.global rt_syscall_0, rt_syscall_1, rt_syscall_2, rt_syscall_3
rt_syscall_0:
rt_syscall_1:
rt_syscall_2:
rt_syscall_3:
rt_syscall:
        push    rbp
        mov     rbp, rsp
        /* Save the argument registers around the blocking call; the four
         * pushes also keep rsp 16-byte aligned at each call site. */
        push    rcx
        push    rdx
        push    rsi
        push    rdi
        call    rt_block_pending_syscalls
        pop     rdi
        pop     rsi
        pop     rdx
        pop     rcx
        call    rt_syscall_run
        /* rax != 0 means rt_syscall_run returned a new context to resume;
         * rax == 0 means stay on the current task. */
        test    rax, rax
        jne     0f
        pop     rbp
        jmp     rt_unblock_pending_syscalls   /* tail call */
0:
        swapcontext 0                   /* not called from a signal handler */
        sysreturn

/*
 * Invoked from the pending-syscall signal handler. Same shape as
 * rt_syscall, but syscalls are already blocked here, and the context is
 * saved with in_signal = 1 so sysreturn skips the explicit unblock
 * (signal return restores the mask).
 */
.global rt_pending_syscall_handler
rt_pending_syscall_handler:
        push    rbp
        mov     rbp, rsp
        call    rt_syscall_run_pending
        test    rax, rax
        jne     0f
        pop     rbp
        ret
0:
        swapcontext 1
        sysreturn

/*
 * Kernel start: fetch the first task's saved stack pointer from
 * rt_start_context, adopt it, restore its context, and enter the task
 * with pending syscalls unblocked.
 */
.global rt_start
rt_start:
        push    rbp
        mov     rbp, rsp
        call    rt_start_context
        mov     rsp, rax
        pop_nonvolatile
        pop     rbp
        jmp     rt_unblock_pending_syscalls   /* tail call into the task */

/*
 * First code a new task runs. The initial saved context places the task
 * function in rbx and its argument in r12 (callee-saved, so they survive
 * the restore that got us here). When the function returns, fall into
 * rt_task_exit.
 */
.global rt_task_entry
rt_task_entry:
        mov     rdi, r12
        /* On entry, the stack pointer is 16-byte aligned because we enter by
         * returning, so push a zero return address to the stack to create a
         * correctly-formatted initial stack frame. */
        xor     rax, rax
        push    rax
        push    rbp
        mov     rbp, rsp
        call    rbx
        pop     rbp
        jmp     rt_task_exit            /* tail call; never returns here */