# Cargo.toml

[package]
name = "startpe"
description = "Runner for Packed Executables"
version = "1.0.4"
license = "BSD-2-Clause"
authors = ["Christian Sdunek "]
edition = "2021"
rust-version = "1.77.2"
publish = false
repository = "https://github.com/Systemcluster/wrappe"

[[bin]]
name = "startpe"
path = "src/main.rs"

[features]
default = ["prefetch", "once"]
prefetch = []
once = ["dep:procfs"]

[profile.release]
codegen-units = 1
debug = false
debug-assertions = false
incremental = false
lto = "fat"
opt-level = 's'
panic = "abort"
strip = "symbols"

[dependencies]
dirs = "5.0.1"
filetime = "0.2.25"
fslock-guard = "0.2.0"
memchr = "2.7.4"
memmap2 = "0.9.5"
rayon = "1.10.0"
twox-hash = { version = "1.6.3", default-features = false }
zerocopy = { version = "0.8.10", features = ["derive"] }
zstd = { version = "0.13.2", default-features = false, features = [] }

[target.'cfg(windows)'.dependencies]
windows-sys = { version = "0.59.0", features = [
    "Win32_Foundation",
    "Win32_System_Console",
    "Win32_System_LibraryLoader",
    "Win32_System_Threading",
    "Win32_System_Diagnostics",
    "Win32_System_Diagnostics_ToolHelp",
    "Win32_System_ProcessStatus",
    "Win32_UI_WindowsAndMessaging",
] }

[target.'cfg(target_os = "linux")'.dependencies]
procfs = { version = "0.17.0", default-features = false, optional = true }

# Cross.toml

[build.env]
passthrough = [
    "RUSTFLAGS",
    "GIT_HASH",
    "MACOSX_DEPLOYMENT_TARGET",
]

// src/versioning.rs

use std::{
    fs::{read_to_string, write},
    path::Path,
};

const VERSION_FILE: &str = "._wrappe_uid_";

/// Read the version marker previously written to `target`, or "0" if none exists.
pub fn get_version(target: &Path) -> String {
    read_to_string(target.join(VERSION_FILE)).unwrap_or_else(|_| "0".to_string())
}

/// Write the version marker for the payload unpacked into `target`.
pub fn set_version(target: &Path, version: &str) {
    write(target.join(VERSION_FILE), version).unwrap()
}
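These two helpers back the version check in main.rs: the runner compares the stored marker against the payload uid and re-extracts when they differ. A minimal sketch of that pattern, written as if it were a helper inside this crate (the function name and version string are made up for illustration):

// Illustrative sketch, not part of the crate sources: pairs get_version and
// set_version around an unpack step, the way main.rs and decompress.rs do.
use std::path::Path;

use crate::versioning::{get_version, set_version};

fn unpack_if_needed(unpack_dir: &Path, payload_version: &str) {
    // get_version falls back to "0" when no marker file has been written yet.
    if get_version(unpack_dir) != payload_version {
        // ... unpack the payload into unpack_dir here ...
        set_version(unpack_dir, payload_version);
    }
}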
// src/prefetch.rs

use std::{io::Result, thread::JoinHandle};

#[cfg(windows)]
pub fn prefetch_memory(mmap: &[u8], offset: usize) -> Option<JoinHandle<Result<()>>> {
    use core::{ffi::c_void, ptr::null_mut};
    use windows_sys::{
        Win32::{
            Foundation::HANDLE,
            System::{
                LibraryLoader::{GetProcAddress, LOAD_LIBRARY_SEARCH_SYSTEM32, LoadLibraryExA},
                Threading::GetCurrentProcess,
            },
        },
        core::PCSTR,
    };

    let virtual_address = mmap.as_ptr() as usize + offset;
    let number_of_bytes = mmap.len() - offset;

    Some(std::thread::spawn(move || {
        fn get_function(library: PCSTR, function: PCSTR) -> Result<*const c_void> {
            let module =
                unsafe { LoadLibraryExA(library, null_mut(), LOAD_LIBRARY_SEARCH_SYSTEM32) };
            if module.is_null() {
                Err(std::io::Error::last_os_error())?;
            }
            let address = unsafe { GetProcAddress(module, function) };
            if address.is_none() {
                Err(std::io::Error::last_os_error())?;
            }
            Ok(address.unwrap() as *const _)
        }

        type PrefetchVirtualMemory = unsafe extern "system" fn(
            hProcess: HANDLE,
            NumberOfEntries: usize,
            VirtualAddresses: *mut WIN32_MEMORY_RANGE_ENTRY,
            Flags: u32,
        ) -> u32;

        #[repr(C)]
        #[allow(non_camel_case_types, non_snake_case)]
        struct WIN32_MEMORY_RANGE_ENTRY {
            VirtualAddress: *const c_void,
            NumberOfBytes: usize,
        }

        // Dynamically load PrefetchVirtualMemory since it is only available on Windows 8 and later
        let prefetch_fn = unsafe {
            #[allow(clippy::manual_c_str_literals)]
            match get_function(
                b"kernel32.dll\0".as_ptr() as _,
                b"PrefetchVirtualMemory\0".as_ptr() as _,
            ) {
                Err(e) => return Err(e),
                Ok(f) => std::mem::transmute::<*const _, PrefetchVirtualMemory>(f),
            }
        };

        let mut memory = WIN32_MEMORY_RANGE_ENTRY {
            VirtualAddress: virtual_address as *mut _,
            NumberOfBytes: number_of_bytes as _,
        };
        let process = unsafe { GetCurrentProcess() };
        if process.is_null() {
            Err(std::io::Error::last_os_error())?;
        }
        let result = unsafe { prefetch_fn(process, 1, &mut memory as *mut _, 0) };
        if result == 0 {
            Err(std::io::Error::last_os_error())?;
        }
        Ok(())
    }))
}

#[cfg(not(windows))]
#[inline(always)]
pub fn prefetch_memory(_: &[u8], _: usize) -> Option<JoinHandle<Result<()>>> {
    None
}
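decompress.rs below only starts this prefetch for payloads larger than 512 MB and joins the thread once unpacking is done. A condensed sketch of that call pattern, assuming it runs inside this crate with the prefetch feature enabled (`mmap` and `files_start` stand for the mapped executable and the offset of the compressed file contents):

// Illustrative sketch, not part of the crate sources: mirrors how decompress.rs
// drives prefetch_memory.
fn prefetch_example(mmap: &[u8], files_start: usize) {
    let handle = crate::prefetch::prefetch_memory(mmap, files_start);
    // ... unpack files from mmap[files_start..] here ...
    if let Some(handle) = handle {
        // On Windows the spawned thread reports errors from PrefetchVirtualMemory;
        // on other platforms prefetch_memory returns None and nothing is joined.
        if let Ok(Err(e)) = handle.join() {
            eprintln!("failed to prefetch memory: {}", e);
        }
    }
}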
"linux")))] #[inline(always)] pub fn check_instance(_: &Path) -> Result { Ok(false) } src/main.rs0000644000175100001770000002720514716371575012532 0ustar runnerdockeruse std::{ env::{current_exe, var_os}, fs::{File, create_dir_all, read_link, remove_dir, remove_dir_all}, io::Write, mem::size_of, panic::set_hook, process::Command, time::SystemTime, }; #[cfg(windows)] use std::os::windows::fs::OpenOptionsExt; #[cfg(any(unix, target_os = "redox"))] use std::os::unix::process::CommandExt; #[cfg(not(any(unix, target_os = "redox")))] use std::process::Stdio; #[cfg(windows)] use windows_sys::Win32::System::Console::{ATTACH_PARENT_PROCESS, AttachConsole}; use fslock_guard::LockFileGuard; use memchr::memmem; use memmap2::MmapOptions; use zerocopy::Ref; mod types; use types::*; mod decompress; use decompress::*; mod permissions; use permissions::*; mod versioning; use versioning::*; #[cfg(feature = "prefetch")] mod prefetch; #[cfg(feature = "once")] mod once; fn main() { set_hook(Box::<_>::new(move |panic| { if let Some(message) = panic.payload().downcast_ref::<&str>() { eprintln!("error: {}", message); } else if let Some(message) = panic.payload().downcast_ref::() { eprintln!("error: {}", message); } else { eprintln!("error: {}", panic); } #[cfg(windows)] { use std::sync::atomic::{AtomicBool, Ordering}; static WRITTEN: AtomicBool = AtomicBool::new(false); if WRITTEN.swap(true, Ordering::Relaxed) { return; } let now = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap_or_default(); if let Ok(mut file) = File::create(format!( "error-{}-{}.txt", now.as_secs(), now.subsec_millis() )) { let _ = writeln!(file, "An error occurred while starting the application."); let _ = writeln!(file, "Please report this error to the developers."); let _ = writeln!(file); let _ = writeln!(file, "{}", panic); } } })); let mut exe = current_exe().expect("couldn't get handle to current executable"); while let Ok(link) = read_link(&exe) { exe = link; } #[cfg(windows)] let file = File::options() .read(true) .custom_flags(0x10000000) // FILE_FLAG_RANDOM_ACCESS .open(&exe) .expect("couldn't open current executable"); #[cfg(not(windows))] let file = File::options() .read(true) .open(&exe) .expect("couldn't open current executable"); let mmap = unsafe { MmapOptions::new() .map(&file) .expect("couldn't memory map current executable") }; let end = mmap.len(); if end < size_of::() { panic!("file is too small ({} < {})", end, size_of::()) } let mut signature = Vec::with_capacity(8); signature.extend_from_slice(&WRAPPE_SIGNATURE_1[..4]); signature.extend_from_slice(&WRAPPE_SIGNATURE_2[..4]); let mut info_start = end - size_of::(); if mmap[info_start..info_start + 8] != signature[..] { if let Some(pos) = memmem::rfind(&mmap[..], &signature) { info_start = pos; } else { panic!("couldn't find starter info") } } if info_start + size_of::() > end { panic!( "starter info is too small ({} < {})", end - info_start, size_of::() ) } let info = Ref::into_ref( Ref::<_, StarterInfo>::from_bytes(&mmap[info_start..info_start + size_of::()]) .expect("couldn't read starter info"), ); if info.signature != signature[..] 
// src/main.rs

use std::{
    env::{current_exe, var_os},
    fs::{File, create_dir_all, read_link, remove_dir, remove_dir_all},
    io::Write,
    mem::size_of,
    panic::set_hook,
    process::Command,
    time::SystemTime,
};

#[cfg(windows)]
use std::os::windows::fs::OpenOptionsExt;

#[cfg(any(unix, target_os = "redox"))]
use std::os::unix::process::CommandExt;
#[cfg(not(any(unix, target_os = "redox")))]
use std::process::Stdio;

#[cfg(windows)]
use windows_sys::Win32::System::Console::{ATTACH_PARENT_PROCESS, AttachConsole};

use fslock_guard::LockFileGuard;
use memchr::memmem;
use memmap2::MmapOptions;
use zerocopy::Ref;

mod types;
use types::*;
mod decompress;
use decompress::*;
mod permissions;
use permissions::*;
mod versioning;
use versioning::*;

#[cfg(feature = "prefetch")]
mod prefetch;

#[cfg(feature = "once")]
mod once;

fn main() {
    set_hook(Box::<_>::new(move |panic| {
        if let Some(message) = panic.payload().downcast_ref::<&str>() {
            eprintln!("error: {}", message);
        } else if let Some(message) = panic.payload().downcast_ref::<String>() {
            eprintln!("error: {}", message);
        } else {
            eprintln!("error: {}", panic);
        }
        #[cfg(windows)]
        {
            use std::sync::atomic::{AtomicBool, Ordering};
            static WRITTEN: AtomicBool = AtomicBool::new(false);
            if WRITTEN.swap(true, Ordering::Relaxed) {
                return;
            }
            let now = SystemTime::now()
                .duration_since(SystemTime::UNIX_EPOCH)
                .unwrap_or_default();
            if let Ok(mut file) = File::create(format!(
                "error-{}-{}.txt",
                now.as_secs(),
                now.subsec_millis()
            )) {
                let _ = writeln!(file, "An error occurred while starting the application.");
                let _ = writeln!(file, "Please report this error to the developers.");
                let _ = writeln!(file);
                let _ = writeln!(file, "{}", panic);
            }
        }
    }));

    let mut exe = current_exe().expect("couldn't get handle to current executable");
    while let Ok(link) = read_link(&exe) {
        exe = link;
    }

    #[cfg(windows)]
    let file = File::options()
        .read(true)
        .custom_flags(0x10000000) // FILE_FLAG_RANDOM_ACCESS
        .open(&exe)
        .expect("couldn't open current executable");
    #[cfg(not(windows))]
    let file = File::options()
        .read(true)
        .open(&exe)
        .expect("couldn't open current executable");
    let mmap = unsafe {
        MmapOptions::new()
            .map(&file)
            .expect("couldn't memory map current executable")
    };

    let end = mmap.len();
    if end < size_of::<StarterInfo>() {
        panic!("file is too small ({} < {})", end, size_of::<StarterInfo>())
    }

    let mut signature = Vec::with_capacity(8);
    signature.extend_from_slice(&WRAPPE_SIGNATURE_1[..4]);
    signature.extend_from_slice(&WRAPPE_SIGNATURE_2[..4]);
    let mut info_start = end - size_of::<StarterInfo>();
    if mmap[info_start..info_start + 8] != signature[..] {
        if let Some(pos) = memmem::rfind(&mmap[..], &signature) {
            info_start = pos;
        } else {
            panic!("couldn't find starter info")
        }
    }
    if info_start + size_of::<StarterInfo>() > end {
        panic!(
            "starter info is too small ({} < {})",
            end - info_start,
            size_of::<StarterInfo>()
        )
    }
    let info = Ref::into_ref(
        Ref::<_, StarterInfo>::from_bytes(&mmap[info_start..info_start + size_of::<StarterInfo>()])
            .expect("couldn't read starter info"),
    );
    if info.signature != signature[..] {
        panic!("file signature is invalid")
    }
    if info.wrappe_format != WRAPPE_FORMAT {
        panic!(
            "runner version ({}) differs from wrapper version ({})",
            WRAPPE_FORMAT, info.wrappe_format
        );
    }

    let mut show_information = info.show_information;
    let show_console = info.show_console;
    if show_information < 2 && var_os("STARTPE_FORCE_VERBOSE").is_some() {
        show_information = 2;
    }

    #[cfg(not(windows))]
    let console_attached = false;
    #[cfg(windows)]
    let mut console_attached = false;
    #[cfg(windows)]
    if show_console == 2 || (show_console == 0 && show_information == 2) {
        console_attached = unsafe { AttachConsole(ATTACH_PARENT_PROCESS) != 0 };
    }

    if show_information >= 1 {
        println!(
            "{} {}{}",
            env!("CARGO_PKG_NAME"),
            env!("CARGO_PKG_VERSION"),
            option_env!("GIT_HASH")
                .map(|hash| format!(" ({})", hash))
                .unwrap_or_default()
        );
    }

    if info.unpack_directory.is_empty() {
        panic!("empty unpack directory name")
    }
    let unpack_dir_name = std::str::from_utf8(
        &info.unpack_directory[0..(info
            .unpack_directory
            .iter()
            .position(|&c| c == b'\0')
            .unwrap_or(info.unpack_directory.len()))],
    )
    .unwrap();
    if show_information >= 1 {
        println!("{}", unpack_dir_name);
    }
    let version = std::str::from_utf8(
        &info.uid[0..(info
            .uid
            .iter()
            .position(|&c| c == b'\0')
            .unwrap_or(info.uid.len()))],
    )
    .unwrap();
    if show_information >= 2 {
        println!();
        println!("version: {}", version);
        println!(
            "show console: {} (attached: {})",
            show_console, console_attached
        );
    }

    let mut unpack_root = match info.unpack_target {
        0 => std::env::temp_dir(),
        1 => dirs::data_local_dir().unwrap(),
        2 => std::env::current_dir().unwrap(),
        _ => panic!("invalid unpack target"),
    };
    unpack_root = unpack_root.join(unpack_dir_name);
    let mut unpack_dir = unpack_root.clone();
    if info.versioning == 0 {
        unpack_dir = unpack_dir.join(version);
    }
    if show_information >= 2 {
        println!("target directory: {}", unpack_dir.display());
    }

    let command_name = std::str::from_utf8(
        &info.command[0..(info
            .command
            .iter()
            .position(|&c| c == b'\0')
            .unwrap_or(info.command.len()))],
    )
    .unwrap();
    let run_path = &unpack_dir.join(command_name);
    if show_information >= 2 {
        println!("runpath: {}", run_path.display());
    }

    create_dir_all(&unpack_dir)
        .unwrap_or_else(|e| panic!("couldn't create directory {}: {}", unpack_dir.display(), e));

    let lockfile = if info.once == 1 {
        let lockfile = LockFileGuard::try_lock(unpack_dir.join(LOCK_FILE))
            .unwrap_or_else(|e| panic!("couldn't lock file: {}", e));
        if lockfile.is_none() {
            println!("another instance is already unpacking, exiting...");
            return;
        }
        lockfile.unwrap()
    } else {
        LockFileGuard::lock(unpack_dir.join(LOCK_FILE)).unwrap_or_else(|e| {
            panic!("couldn't lock file: {}", e);
        })
    };

    #[cfg(feature = "once")]
    if info.once == 1 {
        if show_information >= 2 {
            println!("checking for running processes...");
        }
        let running = once::check_instance(run_path).unwrap();
        if running {
            println!("another instance is already running, exiting...");
            return;
        }
    }

    let cleanup: bool;
    if let Some(var) = var_os("STARTPE_CLEANUP") {
        cleanup = var == "1"
    } else {
        cleanup = info.cleanup == 1
    }

    let should_extract = match info.versioning {
        0 => get_version(&unpack_dir) != version,
        1 => get_version(&unpack_dir) != version,
        _ => true,
    };
    let verification = if !should_extract { info.verification } else { 0 };
    if show_information >= 2 {
        println!("should verify: {}", verification);
        println!("should extract: {}", should_extract);
        println!("should cleanup: {}", cleanup);
    }

    if should_extract || verification > 0 {
        let now = SystemTime::now();
        let extracted = decompress(
            &mmap[..info_start],
            &unpack_dir,
            verification,
            should_extract,
            version,
            show_information,
        );
        if extracted {
            if show_information >= 2 {
                println!(
                    "decompressed in {}ms",
                    now.elapsed().unwrap_or_default().as_millis()
                );
            }
            set_executable_permissions(run_path);
        }
    }

    drop(lockfile);
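    // Extraction and verification are finished at this point; the remaining code
    // only assembles the command line and launches the unpacked executable.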
    let baked_arguments = std::str::from_utf8(
        &info.arguments[0..(info
            .arguments
            .iter()
            .position(|&c| c == b'\0')
            .unwrap_or(info.arguments.len()))],
    )
    .expect("couldn't parse baked arguments");
    let baked_arguments = baked_arguments
        .split('\u{1f}')
        .map(|arg| arg.trim().to_string())
        .filter(|arg| !arg.is_empty())
        .collect::<Vec<_>>();
    if show_information >= 2 && !baked_arguments.is_empty() {
        println!("baked arguments: {:?}", baked_arguments);
    }

    let forwarded_arguments = std::env::args().skip(1).collect::<Vec<_>>();
    if show_information >= 2 && !forwarded_arguments.is_empty() {
        println!("forwarded arguments: {:?}", forwarded_arguments);
    }

    let launch_dir = std::env::current_dir().unwrap();
    let current_dir = match info.current_dir {
        0 => &launch_dir,
        1 => &unpack_dir,
        2 => exe.parent().unwrap(),
        3 => run_path.parent().unwrap(),
        _ => panic!("invalid current directory"),
    };
    if show_information >= 2 {
        println!("current dir: {}", current_dir.display());
    }

    drop(mmap);
    drop(file);

    if show_information >= 2 {
        println!("running...");
    }
    if console_attached && show_console == 0 {
        let _ = std::io::stdout().flush();
    }

    let mut command = Command::new(run_path);
    command.args(baked_arguments);
    command.args(forwarded_arguments);
    command.env("WRAPPE_UNPACK_DIR", unpack_dir.as_os_str());
    command.env("WRAPPE_LAUNCH_DIR", launch_dir.as_os_str());
    command.current_dir(current_dir);
    #[cfg(not(any(unix, target_os = "redox")))]
    {
        if show_console == 0 || (show_console == 2 && !console_attached) {
            command.stdout(Stdio::null());
            command.stderr(Stdio::null());
            command.stdin(Stdio::null());
        }
    }

    if cleanup {
        let mut child = command
            .spawn()
            .unwrap_or_else(|e| panic!("failed to run {}: {}", run_path.display(), e));
        let status = child
            .wait()
            .unwrap_or_else(|e| panic!("failed to run {}: {}", run_path.display(), e));
        let _ = remove_dir_all(unpack_dir);
        let _ = remove_dir(unpack_root);
        std::process::exit(status.code().unwrap_or(1))
    } else {
        #[cfg(any(unix, target_os = "redox"))]
        {
            let e = command.exec();
            panic!("failed to run {}: {}", run_path.display(), e);
        }
        #[cfg(not(any(unix, target_os = "redox")))]
        {
            #[allow(clippy::zombie_processes)]
            let mut child = command
                .spawn()
                .unwrap_or_else(|e| panic!("failed to run {}: {}", run_path.display(), e));
            if show_console == 1 || (show_console == 2 && console_attached) {
                let status = child
                    .wait()
                    .unwrap_or_else(|e| panic!("failed to run {}: {}", run_path.display(), e));
                std::process::exit(status.code().unwrap_or(1))
            }
        }
    }
}
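startpe hands the unpack and launch directories to the child process through the WRAPPE_UNPACK_DIR and WRAPPE_LAUNCH_DIR environment variables set above. A minimal sketch of how a packaged application might read them at runtime (purely illustrative, not part of this crate):

// Illustrative sketch: a packaged application reading the environment variables
// that startpe sets before spawning it.
use std::{env, path::PathBuf};

fn wrappe_dirs() -> (Option<PathBuf>, Option<PathBuf>) {
    let unpack_dir = env::var_os("WRAPPE_UNPACK_DIR").map(PathBuf::from);
    let launch_dir = env::var_os("WRAPPE_LAUNCH_DIR").map(PathBuf::from);
    (unpack_dir, launch_dir)
}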
// src/decompress.rs

use std::{
    fs::{File, create_dir_all, read_link, remove_dir, remove_file},
    hash::Hasher,
    io::{BufReader, BufWriter, Read, Result, copy, sink},
    mem::size_of,
    path::{Path, PathBuf},
    thread::sleep,
    time::Duration,
};

#[cfg(windows)]
use std::os::windows::fs::OpenOptionsExt;

use filetime::{FileTime, set_file_times, set_symlink_file_times};
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use twox_hash::XxHash64;
use zerocopy::Ref;
use zstd::{Decoder, dict::DecoderDictionary, zstd_safe::DCtx};

use crate::{types::*, versioning::*};

pub const HASH_SEED: u64 = 1246736989840;
pub const LOCK_FILE: &str = "._wrappe_lock_";

pub struct HashReader<R: Read, H: Hasher> {
    reader: R,
    hasher: H,
}

impl<R: Read, H: Hasher> HashReader<R, H> {
    pub fn new(reader: R, hasher: H) -> Self {
        HashReader { reader, hasher }
    }

    pub fn finish(self) -> u64 {
        self.hasher.finish()
    }
}

impl<R: Read, H: Hasher> Read for HashReader<R, H> {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        let bytes = self.reader.read(buf)?;
        if bytes > 0 {
            self.hasher.write(&buf[0..bytes]);
        }
        Ok(bytes)
    }
}

/// Decompress the payload and section data in `mmap` into `unpack_dir`.
/// The data is expected to be in the following order at the end of `mmap`:
/// - compressed file contents
/// - compression dictionary
/// - compressed sections
/// - directory sections
/// - file section headers
/// - symlink sections
/// - payload section header
pub fn decompress(
    mmap: &[u8],
    unpack_dir: &Path,
    verification: u8,
    mut should_extract: bool,
    version: &str,
    show_information: u8,
) -> bool {
    // read payload header sections
    let payload_header_start = mmap.len() - size_of::<PayloadHeader>();
    let payload_header = Ref::into_ref(
        Ref::<_, PayloadHeader>::from_bytes(&mmap[payload_header_start..])
            .expect("couldn't read payload header"),
    );
    let directory_sections = payload_header.directory_sections as usize;
    let file_sections = payload_header.file_sections as usize;
    let symlink_sections = payload_header.symlink_sections as usize;
    let dictionary_size = payload_header.dictionary_size as usize;
    let payload_size = payload_header.payload_size as usize;
    let sections_size = payload_header.sections_size as usize;
    if show_information >= 2 {
        println!(
            "payload: {} directories, {} files, {} symlinks ({} total)",
            directory_sections,
            file_sections,
            symlink_sections,
            payload_header.len()
        );
        println!("dictionary size: {}", dictionary_size);
        println!("payload size: {}", payload_size);
    }

    let mut sections = Vec::with_capacity(sections_size);
    let mut reader = BufReader::with_capacity(
        DCtx::in_size(),
        &mmap[payload_header_start - sections_size..payload_header_start],
    );
    let mut decoder = Decoder::new(&mut reader).unwrap();
    copy(&mut decoder, &mut sections)
        .unwrap_or_else(|e| panic!("couldn't decompress payload sections: {}", e));

    let directory_sections_start = 0;
    let file_sections_start =
        directory_sections_start + directory_sections * size_of::<DirectorySection>();
    let symlink_sections_start =
        file_sections_start + file_sections * size_of::<FileSectionHeader>();
    let dictionary_start = payload_header_start - sections_size - dictionary_size;
    let files_start = dictionary_start - payload_size;

    let mut section_hasher = XxHash64::with_seed(HASH_SEED);

    if show_information >= 2 {
        println!("reading sections...");
    }

    let dictionary = if dictionary_size > 0 {
        Some(DecoderDictionary::copy(
            &mmap[dictionary_start..payload_header_start - sections_size],
        ))
    } else {
        None
    };

    let directories = sections[directory_sections_start..file_sections_start]
        .chunks(size_of::<DirectorySection>())
        .enumerate()
        .fold(
            // start with the unpack directory as parent 0
            Vec::<PathBuf>::from([PathBuf::from("")]),
            |mut directories, (i, section)| {
                let section_start = directory_sections_start + i * size_of::<DirectorySection>();
                section_hasher.write(section);
                let section = Ref::into_ref(
                    Ref::<_, DirectorySection>::from_bytes(
                        &sections[section_start..section_start + size_of::<DirectorySection>()],
                    )
                    .expect("couldn't read payload header"),
                );
                directories.push(
                    directories[section.parent as usize].join(
                        std::str::from_utf8(
                            &section.name[0..(section
                                .name
                                .iter()
                                .position(|&c| c == b'\0')
                                .unwrap_or(section.name.len()))],
                        )
                        .unwrap(),
                    ),
                );
                directories
            },
        );

    // The directory table is now resolved to paths relative to the unpack
    // directory; file and symlink sections reference entries in `directories`
    // by index through their `parent` field.
    let files = sections[file_sections_start..symlink_sections_start]
        .chunks(size_of::<FileSectionHeader>())
        .enumerate()
        .map(|(i, section)| {
            let section_start = file_sections_start + i * size_of::<FileSectionHeader>();
            section_hasher.write(section);
            let section = Ref::into_ref(
                Ref::<_, FileSectionHeader>::from_bytes(
                    &sections[section_start..section_start + size_of::<FileSectionHeader>()],
                )
                .expect("couldn't read payload header"),
            );
            (
                section,
                std::str::from_utf8(
                    &section.name[0..(section
                        .name
                        .iter()
                        .position(|&c| c == b'\0')
                        .unwrap_or(section.name.len()))],
                )
                .unwrap(),
            )
        })
        .collect::<Vec<_>>();

    let symlinks = sections[symlink_sections_start..]
        .chunks(size_of::<SymlinkSection>())
        .enumerate()
        .map(|(i, section)| {
            let section_start = symlink_sections_start + i * size_of::<SymlinkSection>();
            section_hasher.write(section);
            let section = Ref::into_ref(
                Ref::<_, SymlinkSection>::from_bytes(
                    &sections[section_start..section_start + size_of::<SymlinkSection>()],
                )
                .expect("couldn't read payload header"),
            );
            (
                section,
                std::str::from_utf8(
                    &section.name[0..(section
                        .name
                        .iter()
                        .position(|&c| c == b'\0')
                        .unwrap_or(section.name.len()))],
                )
                .unwrap(),
            )
        })
        .collect::<Vec<_>>();

    let section_hash = section_hasher.finish();
    if section_hash != payload_header.section_hash {
        let expected = payload_header.section_hash;
        panic!(
            "section hash ({}) differs from expected section hash ({})",
            section_hash, expected
        );
    }

    // verify files
    if verification > 0 && !should_extract && file_sections > 0 {
        if show_information >= 2 {
            println!("verifying files...");
        }
        should_extract = !files.par_iter().all(|(file, file_name)| {
            let path = unpack_dir
                .join(&directories[file.parent as usize])
                .join(file_name);
            if !path.is_file() {
                eprintln!("verification failed: not a file: {}", path.display());
                return false;
            }
            if verification == 2 {
                // verify checksums
                #[cfg(windows)]
                let target = File::options()
                    .read(true)
                    .custom_flags(0x08000000) // FILE_FLAG_SEQUENTIAL_SCAN
                    .open(&path);
                #[cfg(not(windows))]
                let target = File::options().read(true).open(&path);
                if target.is_err() {
                    eprintln!(
                        "verification failed: couldn't open file: {}",
                        path.display()
                    );
                    return false;
                }
                let target = target.unwrap();
                let mut hasher = XxHash64::with_seed(HASH_SEED);
                let mut reader = HashReader::new(&target, &mut hasher);
                if copy(&mut reader, &mut sink()).is_err() {
                    eprintln!(
                        "verification failed: couldn't read file: {}",
                        path.display()
                    );
                    return false;
                };
                let file_hash = hasher.finish();
                if file_hash != file.file_hash {
                    let expected = file.file_hash;
                    eprintln!(
                        "verification failed: file hash ({}) differs from expected file hash ({}): {}",
                        file_hash,
                        expected,
                        path.display()
                    );
                    return false;
                }
            }
            true
        });
    }

    // verify symlinks
    if verification > 0 && !should_extract && symlink_sections > 0 {
        if show_information >= 2 {
            println!("verifying symlinks...");
        }
        should_extract = !symlinks.par_iter().all(|(symlink, symlink_name)| {
            let path = unpack_dir
                .join(&directories[symlink.parent as usize])
                .join(symlink_name);
            let link = read_link(&path);
            if link.is_err() {
                eprintln!(
                    "verification failed: not a valid symlink: {}",
                    path.display()
                );
                return false;
            }
            let link = link.unwrap();
            if !link.starts_with(unpack_dir) {
                eprintln!(
                    "verification failed: symlink points to target outside the target directory: {}",
                    path.display()
                );
                return false;
            }
            // directory symlink
            if symlink.kind == 0 {
                let target = unpack_dir.join(&directories[symlink.target as usize]);
                if link != target {
                    eprintln!(
                        "verification failed: symlink points to wrong target: {} (expected: {})",
                        link.display(),
                        target.display(),
                    );
                    return false;
                }
            }
            // file symlink
            if symlink.kind == 1 {
                let (file, file_name) = files[symlink.target as usize];
                let target = unpack_dir
                    .join(&directories[file.parent as usize])
                    .join(file_name);
                if target != link {
                    eprintln!(
                        "verification failed: symlink points to wrong target: {} (expected: {})",
                        link.display(),
                        target.display(),
                    );
                    return false;
                }
            }
            true
        });
    }
    if should_extract {
        #[cfg(feature = "prefetch")]
        let mut prefetch_handle = None;
        #[cfg(feature = "prefetch")]
        // prefetch memory mapped data if it is larger than 512 MB
        if mmap.len() - files_start > 512 * 1024 * 1024 {
            if show_information >= 2 {
                println!("prefetching memory...");
            }
            prefetch_handle = crate::prefetch::prefetch_memory(mmap, files_start);
        }

        // create directories
        if show_information >= 2 {
            println!("creating directories...");
        }
        directories.iter().for_each(|directory| {
            let path = unpack_dir.join(directory);
            create_dir_all(&path).unwrap_or_else(|e| {
                panic!("couldn't create directory {}: {}", path.display(), e);
            });
        });

        // unpack files
        if show_information >= 2 {
            println!("unpacking...");
        }
        files.par_iter().for_each(|(file, file_name)| {
            let path = unpack_dir
                .join(&directories[file.parent as usize])
                .join(file_name);
            let content = &mmap[files_start + file.position as usize
                ..files_start + (file.position + file.size) as usize];
            let mut reader = HashReader::new(content, XxHash64::with_seed(HASH_SEED));
            {
                let mut reader = BufReader::with_capacity(DCtx::in_size(), &mut reader);
                let output = File::options()
                    .write(true)
                    .create(true)
                    .truncate(true)
                    .open(&path)
                    .unwrap_or_else(|e| panic!("failed to create file {}: {}", path.display(), e));
                let mut output = BufWriter::with_capacity(DCtx::out_size(), output);
                let decoder = if let Some(dict) = &dictionary {
                    Decoder::with_prepared_dictionary(&mut reader, dict)
                } else {
                    Decoder::with_buffer(&mut reader)
                };
                let mut decoder = decoder.unwrap_or_else(|e| {
                    panic!("failed to create decoder for {}: {}", path.display(), e)
                });
                copy(&mut decoder, &mut output)
                    .unwrap_or_else(|e| panic!("failed to unpack file {}: {}", path.display(), e));
            }
            let compressed_hash = reader.finish();
            if file.compressed_hash != compressed_hash {
                let expected = file.compressed_hash;
                panic!(
                    "compressed file hash ({}) differs from expected hash ({}) for {}",
                    compressed_hash,
                    expected,
                    path.display()
                );
            }
            #[cfg(windows)]
            {
                use ::std::fs::{metadata, set_permissions};
                let meta = metadata(&path);
                if let Ok(ref meta) = meta {
                    let read = file.readonly != 0;
                    let mut perm = meta.permissions();
                    perm.set_readonly(read);
                    set_permissions(&path, perm).unwrap_or_else(|e| {
                        eprintln!("failed to set permissions for {}: {}", path.display(), e)
                    });
                }
            }
            #[cfg(any(unix, target_os = "redox"))]
            {
                use ::std::{
                    fs::{Permissions, set_permissions},
                    os::unix::prelude::*,
                };
                let mode = file.mode;
                let mut perm: Permissions = PermissionsExt::from_mode(mode);
                let read = file.readonly != 0;
                perm.set_readonly(read);
                set_permissions(&path, perm).unwrap_or_else(|e| {
                    eprintln!("failed to set permissions for {}: {}", path.display(), e)
                });
            }
            set_file_times(
                &path,
                FileTime::from_unix_time(
                    file.time_accessed_seconds as i64,
                    file.time_accessed_nanos,
                ),
                FileTime::from_unix_time(
                    file.time_modified_seconds as i64,
                    file.time_modified_nanos,
                ),
            )
            .unwrap_or_else(|e| println!("failed to set file times for {}: {}", path.display(), e));
        });
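        // All regular files are written at this point; symlinks are created last
        // so that their targets already exist.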
        // create symlinks
        #[cfg(not(any(windows, unix, target_os = "redox")))]
        {
            eprintln!("skipping symlink creation on unsupported platform");
        }
        #[cfg(any(windows, unix, target_os = "redox"))]
        {
            if show_information >= 2 {
                println!("creating symlinks...");
            }
            symlinks.par_iter().for_each(|(symlink, symlink_name)| {
                let path = unpack_dir
                    .join(&directories[symlink.parent as usize])
                    .join(symlink_name);
                // directory symlink
                if symlink.kind == 0 {
                    if path.exists() {
                        remove_dir(&path).unwrap_or_else(|e| {
                            panic!(
                                "failed to remove existing symlink {}: {}",
                                path.display(),
                                e
                            )
                        });
                    }
                    while path.exists() {
                        sleep(Duration::from_millis(20));
                    }
                    let target = unpack_dir.join(&directories[symlink.target as usize]);
                    #[cfg(windows)]
                    {
                        use ::std::os::windows::fs::symlink_dir;
                        symlink_dir(target, &path).unwrap_or_else(|e| {
                            panic!("failed to create symlink {}: {}", path.display(), e)
                        });
                    }
                    #[cfg(any(unix, target_os = "redox"))]
                    {
                        use ::std::os::unix::fs::symlink;
                        symlink(target, &path).unwrap_or_else(|e| {
                            panic!("failed to create symlink {}: {}", path.display(), e)
                        });
                    }
                }
                // file symlink
                if symlink.kind == 1 {
                    if path.exists() {
                        remove_file(&path).unwrap_or_else(|e| {
                            panic!(
                                "failed to remove existing symlink {}: {}",
                                path.display(),
                                e
                            )
                        });
                    }
                    while path.exists() {
                        sleep(Duration::from_millis(20));
                    }
                    let (file, file_name) = files[symlink.target as usize];
                    let target = unpack_dir
                        .join(&directories[file.parent as usize])
                        .join(file_name);
                    #[cfg(windows)]
                    {
                        use ::std::os::windows::fs::symlink_file;
                        symlink_file(target, &path).unwrap_or_else(|e| {
                            panic!("failed to create symlink {}: {}", path.display(), e)
                        });
                    }
                    #[cfg(any(unix, target_os = "redox"))]
                    {
                        use ::std::os::unix::fs::symlink;
                        symlink(target, &path).unwrap_or_else(|e| {
                            panic!("failed to create symlink {}: {}", path.display(), e)
                        });
                    }
                    set_symlink_file_times(
                        &path,
                        FileTime::from_unix_time(
                            symlink.time_accessed_seconds as i64,
                            symlink.time_accessed_nanos,
                        ),
                        FileTime::from_unix_time(
                            symlink.time_modified_seconds as i64,
                            symlink.time_modified_nanos,
                        ),
                    )
                    .unwrap_or_else(|e| {
                        eprintln!("failed to set file times for {}: {}", path.display(), e)
                    });
                }
            });
        }

        set_version(unpack_dir, version);

        #[cfg(feature = "prefetch")]
        if let Some(prefetch_result) = prefetch_handle {
            let _ = prefetch_result
                .join()
                .map_err(|e| eprintln!("failed to join prefetch thread: {:?}", e))
                .map(|r| r.map_err(|e| eprintln!("failed to prefetch memory: {}", e)));
        }
    }

    should_extract
}
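The offsets computed near the top of decompress follow the layout described in its doc comment: file contents, dictionary, compressed sections, and the payload header are stacked back to front at the end of the data. A rough sketch of that arithmetic in isolation, assuming it lives inside this crate (`data` stands for the slice handed to decompress, i.e. everything before the StarterInfo trailer):

// Illustrative sketch, not part of the crate sources: how the trailing payload
// layout maps to offsets, matching the computations inside decompress().
use std::mem::size_of;

use crate::types::PayloadHeader;

fn payload_offsets(data: &[u8], header: &PayloadHeader) -> (usize, usize, usize, usize) {
    let payload_header_start = data.len() - size_of::<PayloadHeader>();
    let sections_start = payload_header_start - header.sections_size as usize;
    let dictionary_start = sections_start - header.dictionary_size as usize;
    let files_start = dictionary_start - header.payload_size as usize;
    (files_start, dictionary_start, sections_start, payload_header_start)
}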
// src/permissions.rs

use std::path::Path;

#[cfg(any(unix, target_os = "redox"))]
pub fn set_executable_permissions(path: &Path) {
    use ::std::{
        fs::{Permissions, metadata, set_permissions},
        os::unix::prelude::*,
    };
    let meta = metadata(path);
    if let Ok(ref meta) = meta {
        let mut perm: Permissions = meta.permissions();
        perm.set_mode(perm.mode() | 0o110);
        set_permissions(path, perm).unwrap_or_else(|e| {
            eprintln!(
                "failed to set executable permissions for {}: {}",
                path.display(),
                e
            )
        });
    }
}

#[cfg(not(any(unix, target_os = "redox")))]
pub fn set_executable_permissions(_: &Path) {}

// src/types.rs

pub use zerocopy::{FromBytes, Immutable, KnownLayout};

pub const WRAPPE_FORMAT: u8 = 204;
pub const WRAPPE_SIGNATURE_1: [u8; 6] = [0x50, 0x45, 0x33, 0x44, 0x00, 0x00];
pub const WRAPPE_SIGNATURE_2: [u8; 4] = [0x41, 0x54, 0x41, 0x00];

pub const NAME_SIZE: usize = 128;
pub const ARGS_SIZE: usize = 512;

/// Trailer appended to the packed executable; describes how to unpack and
/// launch the payload.
#[repr(C, packed)]
#[derive(FromBytes, Immutable, KnownLayout)]
pub struct StarterInfo {
    pub signature: [u8; 8],
    pub show_console: u8,
    pub current_dir: u8,
    pub verification: u8,
    pub show_information: u8,
    pub uid: [u8; 16],
    pub unpack_target: u8,
    pub versioning: u8,
    pub once: u8,
    pub cleanup: u8,
    pub wrappe_format: u8,
    pub unpack_directory: [u8; NAME_SIZE],
    pub command: [u8; NAME_SIZE],
    pub arguments: [u8; ARGS_SIZE],
}

/// Header describing the sizes and layout of the compressed payload data.
#[repr(C, packed)]
#[derive(FromBytes, Immutable, KnownLayout)]
pub struct PayloadHeader {
    pub directory_sections: u64,
    pub file_sections: u64,
    pub symlink_sections: u64,
    pub dictionary_size: u64,
    pub section_hash: u64,
    pub payload_size: u64,
    pub sections_size: u64,
    pub kind: u8,
}

impl PayloadHeader {
    pub fn len(&self) -> u64 {
        self.directory_sections + self.file_sections + self.symlink_sections
    }
}

/// A directory entry; `parent` indexes into the directory table.
#[repr(C, packed)]
#[derive(FromBytes, Immutable, KnownLayout)]
pub struct DirectorySection {
    pub name: [u8; NAME_SIZE],
    pub parent: u32,
}

/// A file entry with its position and size within the compressed payload.
#[repr(C, packed)]
#[derive(FromBytes, Immutable, KnownLayout)]
pub struct FileSectionHeader {
    pub position: u64,
    pub size: u64,
    pub name: [u8; NAME_SIZE],
    pub file_hash: u64,
    pub compressed_hash: u64,
    pub time_accessed_seconds: u64,
    pub time_modified_seconds: u64,
    pub parent: u32,
    pub mode: u32,
    pub time_accessed_nanos: u32,
    pub time_modified_nanos: u32,
    pub readonly: u8,
}

/// A symlink entry; `target` indexes into the directory or file table
/// depending on `kind`.
#[repr(C, packed)]
#[derive(FromBytes, Immutable, KnownLayout)]
pub struct SymlinkSection {
    pub name: [u8; NAME_SIZE],
    pub parent: u32,
    pub target: u32,
    pub time_accessed_seconds: u64,
    pub time_modified_seconds: u64,
    pub time_accessed_nanos: u32,
    pub time_modified_nanos: u32,
    pub mode: u32,
    pub kind: u8,
    pub readonly: u8,
}
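As a closing illustration, the signature constants and the StarterInfo struct above are what main.rs looks for at the end of a packed executable. A condensed sketch of that lookup, assuming the items above are in scope and with error handling reduced to Option:

// Illustrative sketch, not part of the crate sources: condensed from the
// signature search in main.rs.
use std::mem::size_of;

fn find_starter_info(mmap: &[u8]) -> Option<usize> {
    let mut signature = Vec::with_capacity(8);
    signature.extend_from_slice(&WRAPPE_SIGNATURE_1[..4]);
    signature.extend_from_slice(&WRAPPE_SIGNATURE_2[..4]);
    // The trailer normally sits at the very end of the file...
    let mut info_start = mmap.len().checked_sub(size_of::<StarterInfo>())?;
    if mmap[info_start..info_start + 8] != signature[..] {
        // ...otherwise fall back to searching backwards for the signature.
        info_start = memchr::memmem::rfind(mmap, &signature)?;
    }
    Some(info_start)
}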