diff --git a/.gitignore b/.gitignore index ef6ca6f..b4b901e 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,6 @@ Thumbs.db # --- Logs --- *.log + +# --- Cached Images --- +cached_images/ \ No newline at end of file diff --git a/Justfile b/Justfile new file mode 100644 index 0000000..a5ba941 --- /dev/null +++ b/Justfile @@ -0,0 +1,37 @@ +# Justfile +# https://github.com/casey/just + +default: + @just --list + +build: + cargo build --release + +fmt: + cargo fmt + cargo clippy --all-targets --all-features -- -D warnings + # cargo shear --fix # first install shear: cargo install shear + +check: + cargo fmt --check + cargo clippy --all-targets --all-features -- -D warnings + +install-hook: + #!/usr/bin/env bash + cat > .git/hooks/pre-commit << 'EOF' + #!/bin/sh + set -e + echo "Running pre-commit quality checks..." + just check + EOF + chmod +x .git/hooks/pre-commit + echo "Pre-commit hook installation confirmed." + +remove-hook: + rm .git/hooks/pre-commit + echo "Pre-commit hook uninstallation confirmed." + +# Run unit tests +test: fmt + cargo test + diff --git a/pull_image.py b/pull_image.py index ac9ec48..c7f4ed0 100644 --- a/pull_image.py +++ b/pull_image.py @@ -1,5 +1,5 @@ +import subprocess import os -import tarfile import urllib.request import shutil import sys @@ -11,39 +11,47 @@ "debian": "https://github.com/debuerreotype/docker-debian-artifacts/raw/dist-amd64/bookworm/rootfs.tar.xz" } +CACHE_DIR = "./cached_images" + def setup_rootfs(distro_name, target_dir="./rootfs"): if distro_name not in DISTROS: print(f"Error: Distro '{distro_name}' not supported. Choose from: {list(DISTROS.keys())}") return + # Create cache dir if it doesn't exist + if not os.path.exists(CACHE_DIR): + os.makedirs(CACHE_DIR) + # 1. Cleanup old rootfs if os.path.exists(target_dir): print(f"Cleaning up old {target_dir}...") shutil.rmtree(target_dir) os.makedirs(target_dir) - # 2. Download + # 2. 
Check Cache or Download url = DISTROS[distro_name] - file_name = f"base_image.tar.gz" - print(f"Downloading {distro_name} from {url}...") - - try: - urllib.request.urlretrieve(url, file_name) - print("Download complete.") + # Simple extension detection + ext = ".tar.gz" if ".tar.gz" in url else ".tar.xz" + cache_path = os.path.join(CACHE_DIR, f"{distro_name}{ext}") - # 3. Extract + if os.path.exists(cache_path): + print(f"Using cached image: {cache_path}") + else: + print(f"Downloading {distro_name} from {url}...") + try: + urllib.request.urlretrieve(url, cache_path) + print("Download complete.") + except Exception as e: + print(f"Error downloading image: {e}") + return + + # 3. Extract + try: print(f"Extracting to {target_dir}...") - with tarfile.open(file_name) as tar: - tar.extractall(path=target_dir) - + subprocess.run(["tar", "-xf", cache_path, "-C", target_dir], check=True) print(f"Success! {distro_name} is ready in {target_dir}") - except Exception as e: print(f"Error: {e}") - finally: - # 4. 
Cleanup the tar file - if os.path.exists(file_name): - os.remove(file_name) if __name__ == "__main__": name = sys.argv[1] if len(sys.argv) > 1 else "alpine" diff --git a/src/args.rs b/src/args.rs index 883732b..3fcc4e1 100644 --- a/src/args.rs +++ b/src/args.rs @@ -1,7 +1,11 @@ use clap::Parser; #[derive(Parser, Debug, Clone)] -#[command(author, version, about = "Nucleus: High-performance Rust Container Engine")] +#[command( + author, + version, + about = "Nucleus: High-performance Rust Container Engine" +)] pub struct OxideArgs { /// Unique name for the container instance #[arg(short, long)] diff --git a/src/container.rs b/src/container.rs index f99c5c8..f869735 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,23 +1,29 @@ +use crate::args::OxideArgs; use anyhow::{Context, Result}; use caps::{CapSet, Capability}; -use nix::mount::{mount, MsFlags, umount2, MntFlags}; -use nix::sched::{unshare, CloneFlags}; -use nix::sys::wait::{waitpid, WaitStatus}; -use nix::unistd::{chdir, execvp, read, sethostname, pivot_root, fork, ForkResult}; +use nix::mount::{MntFlags, MsFlags, mount, umount2}; +use nix::sched::{CloneFlags, unshare}; +use nix::sys::wait::{WaitStatus, waitpid}; +use nix::unistd::{ForkResult, chdir, execvp, fork, pivot_root, read, sethostname}; use std::ffi::CString; use std::fs; use std::os::unix::io::RawFd; use std::path::Path; -use crate::args::OxideArgs; /// Child Context: Isolates itself and prepares the container environment. pub fn run_container_child(args: OxideArgs) -> Result<()> { // 1. Isolate BEFORE doing anything else - unshare(CloneFlags::CLONE_NEWNS | CloneFlags::CLONE_NEWUTS | CloneFlags::CLONE_NEWPID | CloneFlags::CLONE_NEWNET | CloneFlags::CLONE_NEWCGROUP) - .context("Failed to isolate child namespaces")?; + unshare( + CloneFlags::CLONE_NEWNS + | CloneFlags::CLONE_NEWUTS + | CloneFlags::CLONE_NEWPID + | CloneFlags::CLONE_NEWNET + | CloneFlags::CLONE_NEWCGROUP, + ) + .context("Failed to isolate child namespaces")?; // 2. 
Fork into the new PID namespace - // In Linux, the process that calls unshare(CLONE_NEWPID) doesn't enter the namespace, + // In Linux, the process that calls unshare(CLONE_NEWPID) doesn't enter the namespace, // but its next child becomes PID 1. match unsafe { fork() }.context("Failed to fork after unshare")? { ForkResult::Parent { child } => { @@ -38,8 +44,14 @@ pub fn run_container_child(args: OxideArgs) -> Result<()> { fn setup_container_env(args: OxideArgs) -> Result<()> { // Fix pivot_root EINVAL: Ensure our mount namespace is private - mount(None::<&str>, "/", None::<&str>, MsFlags::MS_REC | MsFlags::MS_PRIVATE, None::<&str>) - .context("Failed to set mount propagation to private")?; + mount( + None::<&str>, + "/", + None::<&str>, + MsFlags::MS_REC | MsFlags::MS_PRIVATE, + None::<&str>, + ) + .context("Failed to set mount propagation to private")?; // Sync with Parent: Wait for host-side networking to be ready let pipe_fd = args.pipe_fd.context("Missing pipe handle")?; @@ -51,22 +63,35 @@ fn setup_container_env(args: OxideArgs) -> Result<()> { // Layered Filesystem (OverlayFS) let root_base = format!("./temp/{}", args.name); + let _ = fs::remove_dir_all(&root_base); let upper = format!("{}/upper", root_base); let work = format!("{}/work", root_base); let merged = format!("{}/merged", root_base); - + fs::create_dir_all(&upper).ok(); fs::create_dir_all(&work).ok(); fs::create_dir_all(&merged).ok(); let overlay_opts = format!("lowerdir=./rootfs,upperdir={},workdir={}", upper, work); - mount(Some("overlay"), merged.as_str(), Some("overlay"), MsFlags::empty(), Some(overlay_opts.as_str())) - .context("Failed to mount OverlayFS")?; + mount( + Some("overlay"), + merged.as_str(), + Some("overlay"), + MsFlags::empty(), + Some(overlay_opts.as_str()), + ) + .context("Failed to mount OverlayFS")?; // Pivot Root - mount(Some(merged.as_str()), merged.as_str(), None::<&str>, MsFlags::MS_BIND | MsFlags::MS_REC, None::<&str>) - .context("Failed to bind mount root for 
pivot_root")?; - + mount( + Some(merged.as_str()), + merged.as_str(), + None::<&str>, + MsFlags::MS_BIND | MsFlags::MS_REC, + None::<&str>, + ) + .context("Failed to bind mount root for pivot_root")?; + let old_root_name = ".old_root"; let old_root_path = Path::new(&merged).join(old_root_name); fs::create_dir_all(&old_root_path).context("Failed to create old_root dir")?; @@ -75,23 +100,53 @@ fn setup_container_env(args: OxideArgs) -> Result<()> { chdir("/").context("Failed to chdir to new root")?; let old_root_path_in_container = format!("/{}", old_root_name); - umount2(old_root_path_in_container.as_str(), MntFlags::MNT_DETACH).context("Failed to unmount old root")?; + umount2(old_root_path_in_container.as_str(), MntFlags::MNT_DETACH) + .context("Failed to unmount old root")?; fs::remove_dir(old_root_path_in_container.as_str()).ok(); // System Mounts (Procfs, Sysfs, DNS, Volumes) - mount(Some("proc"), "/proc", Some("proc"), MsFlags::empty(), None::<&str>).context("Failed to mount proc")?; - + fs::create_dir_all("/proc").ok(); + mount( + Some("proc"), + "/proc", + Some("proc"), + MsFlags::empty(), + None::<&str>, + ) + .context("Failed to mount proc")?; + fs::create_dir_all("/etc").ok(); + fs::create_dir_all("/sys").ok(); - mount(Some("sysfs"), "/sys", Some("sysfs"), MsFlags::empty(), None::<&str>).context("Failed to mount sysfs")?; + mount( + Some("sysfs"), + "/sys", + Some("sysfs"), + MsFlags::empty(), + None::<&str>, + ) + .context("Failed to mount sysfs")?; fs::create_dir_all("/sys/fs/cgroup").ok(); - mount(Some("cgroup2"), "/sys/fs/cgroup", Some("cgroup2"), MsFlags::empty(), None::<&str>).context("Failed to mount cgroup2")?; - + mount( + Some("cgroup2"), + "/sys/fs/cgroup", + Some("cgroup2"), + MsFlags::empty(), + None::<&str>, + ) + .context("Failed to mount cgroup2")?; + let resolv_conf = "/etc/resolv.conf"; if Path::new(resolv_conf).exists() { - fs::File::create(resolv_conf).ok(); - mount(Some(resolv_conf), resolv_conf, None::<&str>, MsFlags::MS_BIND | 
MsFlags::MS_RDONLY, None::<&str>) - .context("Failed to bind mount resolv.conf")?; + fs::File::create(resolv_conf).ok(); + mount( + Some(resolv_conf), + resolv_conf, + None::<&str>, + MsFlags::MS_BIND | MsFlags::MS_RDONLY, + None::<&str>, + ) + .context("Failed to bind mount resolv.conf")?; } // Bind User Volumes: -v /host:/container @@ -104,23 +159,45 @@ fn setup_container_env(args: OxideArgs) -> Result<()> { } else { format!("/{}", parts[1]) }; - + fs::create_dir_all(&container_path).ok(); - mount(Some(host_path), container_path.as_str(), None::<&str>, MsFlags::MS_BIND | MsFlags::MS_REC, None::<&str>) - .context(format!("Failed to bind mount volume: {}", vol))?; + mount( + Some(host_path), + container_path.as_str(), + None::<&str>, + MsFlags::MS_BIND | MsFlags::MS_REC, + None::<&str>, + ) + .context(format!("Failed to bind mount volume: {}", vol))?; } } // Security: Drop dangerous capabilities drop_capabilities()?; + // Setup Environment Variables + unsafe { + std::env::set_var( + "PATH", + "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + ); + std::env::set_var("HOME", "/root"); + std::env::set_var("USER", "root"); + std::env::remove_var("PS1"); + std::env::remove_var("PROMPT"); + } + // Execute Target Command println!("[Container] Entering {}...", args.command[0]); let cmd = CString::new(args.command[0].as_str()).unwrap(); - let c_args: Vec<CString> = args.command.iter().map(|s| CString::new(s.as_str()).unwrap()).collect(); - + let c_args: Vec<CString> = args + .command + .iter() + .map(|s| CString::new(s.as_str()).unwrap()) + .collect(); + execvp(&cmd, &c_args).context("Failed to execute inner command")?; - + Ok(()) } diff --git a/src/main.rs b/src/main.rs index a266531..5ff93d2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,18 +1,20 @@ mod args; -mod orchestrator; mod container; +mod orchestrator; mod utils; +use crate::args::OxideArgs; use anyhow::Result; use clap::Parser; use nix::unistd::getuid; -use crate::args::OxideArgs; fn main() -> Result<()> { // 1. 
Startup Verification: Check for root // Nucleus needs root for namespaces, mounts, and networking if !getuid().is_root() { - return Err(anyhow::anyhow!("Nucleus must be run as root to manage namespaces and networking.")); + return Err(anyhow::anyhow!( + "Nucleus must be run as root to manage namespaces and networking." + )); } // 2. Parse CLI Arguments @@ -26,6 +28,6 @@ fn main() -> Result<()> { // Otherwise, we are the host-side orchestrator orchestrator::run_parent_orchestrator(args)?; } - + Ok(()) } diff --git a/src/orchestrator.rs b/src/orchestrator.rs index c44a70d..5b0b083 100644 --- a/src/orchestrator.rs +++ b/src/orchestrator.rs @@ -1,38 +1,58 @@ +use crate::args::OxideArgs; +use crate::utils::{parse_memory, run_command}; use anyhow::{Context, Result}; use nix::unistd::{pipe, write}; use std::fs; use std::process::{Command, Stdio}; -use crate::args::OxideArgs; -use crate::utils::{run_command, parse_memory}; /// Parent Orchestrator: Sets up host networking, resource limits, and manages the child process. pub fn run_parent_orchestrator(args: OxideArgs) -> Result<()> { - println!("[Nucleus] Initializing orchestration for '{}'...", args.name); + println!( + "[Nucleus] Initializing orchestration for '{}'...", + args.name + ); // 1. 
Setup Host Networking (Bridge) - let _ = Command::new("ip").args(&["link", "add", "br0", "type", "bridge"]).stdout(Stdio::null()).stderr(Stdio::null()).status(); - let _ = Command::new("ip").args(&["addr", "add", "10.0.0.1/24", "dev", "br0"]).stdout(Stdio::null()).stderr(Stdio::null()).status(); - let _ = Command::new("ip").args(&["link", "set", "br0", "up"]).stdout(Stdio::null()).stderr(Stdio::null()).status(); + let _ = Command::new("ip") + .args(["link", "add", "br0", "type", "bridge"]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status(); + let _ = Command::new("ip") + .args(["addr", "add", "10.0.0.1/24", "dev", "br0"]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status(); + let _ = Command::new("ip") + .args(["link", "set", "br0", "up"]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status(); // 2. Sync Pipe let (reader, writer) = pipe().context("Failed to create sync pipe")?; // 3. Spawn Child let mut child_cmd = Command::new("/proc/self/exe"); - child_cmd.arg("--internal-child") - .arg("--name").arg(&args.name) - .arg("--ip").arg(&args.ip) - .arg("--pipe-fd").arg(&reader.to_string()) - .arg("--memory").arg(&args.memory); - + child_cmd + .arg("--internal-child") + .arg("--name") + .arg(&args.name) + .arg("--ip") + .arg(&args.ip) + .arg("--pipe-fd") + .arg(reader.to_string()) + .arg("--memory") + .arg(&args.memory); + for vol in &args.volumes { child_cmd.arg("--volumes").arg(vol); } - + for port in &args.ports { child_cmd.arg("--ports").arg(port); } - + let mut child = child_cmd .args(&args.command) .stdin(Stdio::inherit()) @@ -42,13 +62,25 @@ pub fn run_parent_orchestrator(args: OxideArgs) -> Result<()> { .context("Failed to spawn child process")?; let pid = child.id(); - let short_name = if args.name.len() > 12 { &args.name[..12] } else { &args.name }; + let short_name = if args.name.len() > 12 { + &args.name[..12] + } else { + &args.name + }; let v_host = format!("vh-{}", short_name); let v_child = format!("vc-{}", short_name); // 
4. Networking: Connect Host to Container - let _ = Command::new("ip").args(&["link", "delete", &v_host]).stderr(Stdio::null()).status(); - run_command("ip", &["link", "add", &v_host, "type", "veth", "peer", "name", &v_child])?; + let _ = Command::new("ip") + .args(["link", "delete", &v_host]) + .stderr(Stdio::null()) + .status(); + run_command( + "ip", + &[ + "link", "add", &v_host, "type", "veth", "peer", "name", &v_child, + ], + )?; run_command("ip", &["link", "set", &v_child, "netns", &pid.to_string()])?; run_command("ip", &["link", "set", &v_host, "master", "br0"])?; run_command("ip", &["link", "set", &v_host, "up"])?; @@ -56,45 +88,133 @@ pub fn run_parent_orchestrator(args: OxideArgs) -> Result<()> { // Configure Child Networking from the Parent (Safe & Robust) let pid_str = pid.to_string(); let ns_base = ["-t", &pid_str, "-n", "ip"]; - run_command("nsenter", &[ns_base[0], ns_base[1], ns_base[2], ns_base[3], "link", "set", &v_child, "name", "eth0"])?; - run_command("nsenter", &[ns_base[0], ns_base[1], ns_base[2], ns_base[3], "addr", "add", &format!("{}/24", args.ip), "dev", "eth0"])?; - run_command("nsenter", &[ns_base[0], ns_base[1], ns_base[2], ns_base[3], "link", "set", "eth0", "up"])?; - run_command("nsenter", &[ns_base[0], ns_base[1], ns_base[2], ns_base[3], "link", "set", "lo", "up"])?; - run_command("nsenter", &[ns_base[0], ns_base[1], ns_base[2], ns_base[3], "route", "add", "default", "via", "10.0.0.1"])?; + run_command( + "nsenter", + &[ + ns_base[0], ns_base[1], ns_base[2], ns_base[3], "link", "set", &v_child, "name", "eth0", + ], + )?; + run_command( + "nsenter", + &[ + ns_base[0], + ns_base[1], + ns_base[2], + ns_base[3], + "addr", + "add", + &format!("{}/24", args.ip), + "dev", + "eth0", + ], + )?; + run_command( + "nsenter", + &[ + ns_base[0], ns_base[1], ns_base[2], ns_base[3], "link", "set", "eth0", "up", + ], + )?; + run_command( + "nsenter", + &[ + ns_base[0], ns_base[1], ns_base[2], ns_base[3], "link", "set", "lo", "up", + ], + )?; + 
run_command( + "nsenter", + &[ + ns_base[0], ns_base[1], ns_base[2], ns_base[3], "route", "add", "default", "via", + "10.0.0.1", + ], + )?; // 5. Resource Limits (Cgroups v2) // Enable controllers in the root hierarchy - let _ = fs::write("/sys/fs/cgroup/cgroup.subtree_control", "+memory +cpu +pids"); + let _ = fs::write( + "/sys/fs/cgroup/cgroup.subtree_control", + "+memory +cpu +pids", + ); let cgroup_path = format!("/sys/fs/cgroup/{}", args.name); fs::create_dir_all(&cgroup_path).context("Failed to create cgroup dir")?; - + // Set memory limit in raw bytes (or "max") let mem_bytes = parse_memory(&args.memory)?; let _ = fs::write(format!("{}/memory.max", cgroup_path), &mem_bytes); - + // Set CPU limit - let _ = fs::write(format!("{}/cpu.max", cgroup_path), "max 100000"); + let _ = fs::write(format!("{}/cpu.max", cgroup_path), "max 100000"); // Set PID limit (prevent "can't fork" errors) let _ = fs::write(format!("{}/pids.max", cgroup_path), "max"); // Join the cgroup - fs::write(format!("{}/cgroup.procs", cgroup_path), pid.to_string()).context("Failed to join cgroup")?; + fs::write(format!("{}/cgroup.procs", cgroup_path), pid.to_string()) + .context("Failed to join cgroup")?; // 6. 
Port Mapping & Forwarding let _ = fs::write("/proc/sys/net/ipv4/ip_forward", "1"); - let _ = Command::new("iptables").args(&["-t", "nat", "-A", "POSTROUTING", "-s", "10.0.0.0/24", "!", "-o", "br0", "-j", "MASQUERADE"]).status(); - let _ = Command::new("iptables").args(&["-A", "FORWARD", "-i", "br0", "-j", "ACCEPT"]).status(); - let _ = Command::new("iptables").args(&["-A", "FORWARD", "-o", "br0", "-j", "ACCEPT"]).status(); + let _ = Command::new("iptables") + .args([ + "-t", + "nat", + "-A", + "POSTROUTING", + "-s", + "10.0.0.0/24", + "!", + "-o", + "br0", + "-j", + "MASQUERADE", + ]) + .status(); + let _ = Command::new("iptables") + .args(["-A", "FORWARD", "-i", "br0", "-j", "ACCEPT"]) + .status(); + let _ = Command::new("iptables") + .args(["-A", "FORWARD", "-o", "br0", "-j", "ACCEPT"]) + .status(); for port_mapping in &args.ports { let parts: Vec<&str> = port_mapping.split(':').collect(); if parts.len() == 2 { let host_port = parts[0]; let container_port = parts[1]; - let _ = Command::new("iptables").args(&["-A", "FORWARD", "-p", "tcp", "-d", &args.ip, "--dport", container_port, "-m", "state", "--state", "NEW,ESTABLISHED,RELATED", "-j", "ACCEPT"]).status(); - let _ = Command::new("iptables").args(&["-t", "nat", "-A", "PREROUTING", "-p", "tcp", "--dport", host_port, "-j", "DNAT", "--to-destination", &format!("{}:{}", args.ip, container_port)]).status(); + let _ = Command::new("iptables") + .args([ + "-A", + "FORWARD", + "-p", + "tcp", + "-d", + &args.ip, + "--dport", + container_port, + "-m", + "state", + "--state", + "NEW,ESTABLISHED,RELATED", + "-j", + "ACCEPT", + ]) + .status(); + let _ = Command::new("iptables") + .args([ + "-t", + "nat", + "-A", + "PREROUTING", + "-p", + "tcp", + "--dport", + host_port, + "-j", + "DNAT", + "--to-destination", + &format!("{}:{}", args.ip, container_port), + ]) + .status(); } } @@ -106,19 +226,57 @@ pub fn run_parent_orchestrator(args: OxideArgs) -> Result<()> { // 8. 
Cleanup println!("[Nucleus] Cleaning up resources..."); - let _ = fs::remove_dir_all(&cgroup_path); - let _ = Command::new("ip").args(&["link", "delete", &v_host]).status(); + let _ = fs::remove_dir_all(&cgroup_path); + let _ = fs::remove_dir_all(format!("./temp/{}", args.name)); + let _ = Command::new("ip") + .args(["link", "delete", &v_host]) + .status(); for port_mapping in &args.ports { let parts: Vec<&str> = port_mapping.split(':').collect(); if parts.len() == 2 { let host_port = parts[0]; let container_port = parts[1]; - let _ = Command::new("iptables").args(&["-D", "FORWARD", "-p", "tcp", "-d", &args.ip, "--dport", container_port, "-m", "state", "--state", "NEW,ESTABLISHED,RELATED", "-j", "ACCEPT"]).status(); - let _ = Command::new("iptables").args(&["-t", "nat", "-D", "PREROUTING", "-p", "tcp", "--dport", host_port, "-j", "DNAT", "--to-destination", &format!("{}:{}", args.ip, container_port)]).status(); + let _ = Command::new("iptables") + .args([ + "-D", + "FORWARD", + "-p", + "tcp", + "-d", + &args.ip, + "--dport", + container_port, + "-m", + "state", + "--state", + "NEW,ESTABLISHED,RELATED", + "-j", + "ACCEPT", + ]) + .status(); + let _ = Command::new("iptables") + .args([ + "-t", + "nat", + "-D", + "PREROUTING", + "-p", + "tcp", + "--dport", + host_port, + "-j", + "DNAT", + "--to-destination", + &format!("{}:{}", args.ip, container_port), + ]) + .status(); } } - - println!("[Nucleus] Container '{}' terminated (Status: {})", args.name, status); + + println!( + "[Nucleus] Container '{}' terminated (Status: {})", + args.name, status + ); Ok(()) } diff --git a/src/utils.rs b/src/utils.rs index 929b0c4..dab9bdc 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -7,9 +7,14 @@ pub fn run_command(cmd: &str, args: &[&str]) -> Result<()> { .args(args) .status() .context(format!("Failed to execute command: {} {:?}", cmd, args))?; - + if !status.success() { - return Err(anyhow::anyhow!("Command {} {:?} failed with status: {}", cmd, args, status)); + return 
Err(anyhow::anyhow!( + "Command {} {:?} failed with status: {}", + cmd, + args, + status + )); } Ok(()) } @@ -22,11 +27,11 @@ pub fn parse_memory(mem: &str) -> Result<String> { let mem = mem.to_uppercase(); let (val_str, unit) = if mem.ends_with('G') { - (&mem[..mem.len()-1], 1024 * 1024 * 1024) + (&mem[..mem.len() - 1], 1024 * 1024 * 1024) } else if mem.ends_with('M') { - (&mem[..mem.len()-1], 1024 * 1024) + (&mem[..mem.len() - 1], 1024 * 1024) } else if mem.ends_with('K') { - (&mem[..mem.len()-1], 1024) + (&mem[..mem.len() - 1], 1024) } else { (mem.as_str(), 1) }; @@ -45,8 +50,14 @@ mod tests { #[test] fn test_parse_memory() { assert_eq!(parse_memory("max").unwrap(), "max"); - assert_eq!(parse_memory("512M").unwrap(), (512 * 1024 * 1024).to_string()); - assert_eq!(parse_memory("1G").unwrap(), (1024 * 1024 * 1024).to_string()); + assert_eq!( + parse_memory("512M").unwrap(), + (512 * 1024 * 1024).to_string() + ); + assert_eq!( + parse_memory("1G").unwrap(), + (1024 * 1024 * 1024).to_string() + ); assert_eq!(parse_memory("10k").unwrap(), (10 * 1024).to_string()); assert_eq!(parse_memory("100").unwrap(), "100"); }