#!/usr/bin/env bash

# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Firecracker devtool
#
# Use this script to build and test Firecracker.
#
# TL;DR
# Make sure you have Docker installed and properly configured
# (http://docker.com). Then,
#   building: `./devtool build`
#     Then find the binaries under build/debug/
#   testing: `./devtool test`
#     Will run the entire test battery; will take several minutes to complete.
#   deep-dive: `./devtool shell`
#     Open a shell prompt inside the container. Then build or test (or do
#     anything, really) manually.
#
# Still TL;DR: have Docker; ./devtool build; ./devtool test; ./devtool help.
#
#
# Both building and testing are done inside a Docker container. Please make sure
# you have Docker up and running on your system (see http://docker.com) and your
# user has permission to run Docker containers.
#
# The Firecracker sources dir will be bind-mounted inside the development
# container (under /firecracker) and any files generated by the build process
# will show up under the build/ dir.  This includes the final binaries, as well
# as any intermediate or cache files.
#
# By default, all devtool commands run the container transparently, removing
# it after the command completes. Any persisting files will be stored under
# build/.
# If, for any reason, you want to access the container directly, please use
# `devtool shell`. This will perform the initial setup (bind-mounting the
# sources dir, setting privileges) and will then drop into a BASH shell inside
# the container.
#
# Building:
#   Run `./devtool build`.
#   By default, the debug binaries are built and placed under build/debug/.
#   To build the release version, run `./devtool build --release` instead.
#   You can then find the binaries under build/release/.
#
# Testing:
#   Run `./devtool test`.
#   This will run the entire integration test battery. The testing system is
#   based on pytest (http://pytest.org).
#
# Opening a shell prompt inside the development container:
#   Run `./devtool shell`.
#
# Additional information:
#   Run `./devtool help`.
#
#
# TODO:
#   - List tests by parsing the `pytest --collect-only` output.
#   - Add a `./devtool run` command to set up and run Firecracker.
#   - Add a `./devtool diag` command to help with troubleshooting, by checking
#     the most common failure conditions.
#   - Look into caching the Cargo registry within the container and if that
#     would help with reproducible builds (in addition to pinning Cargo.lock)

# Development container image (without tag)
DEVCTR_IMAGE_NO_TAG="public.ecr.aws/firecracker/fcuvm"

# Development container tag (can be overridden from the environment)
DEVCTR_IMAGE_TAG=${DEVCTR_IMAGE_TAG:-v88}

# Development container image (name:tag)
# This should be updated whenever we upgrade the development container.
# (Yet another step on our way to reproducible builds.)
DEVCTR_IMAGE="${DEVCTR_IMAGE_NO_TAG}:${DEVCTR_IMAGE_TAG}"

# Full path to the Firecracker tools dir on the host.
FC_TOOLS_DIR=$(cd "$(dirname "$0")" && pwd)
# Pull in shared helpers (say, die, ok_or_die, ...).
source "$FC_TOOLS_DIR/functions"

# Full path to the Firecracker sources dir on the host.
FC_ROOT_DIR=$(cd "${FC_TOOLS_DIR}/.." && pwd)

# Full path to the build dir on the host.
FC_BUILD_DIR="${FC_ROOT_DIR}/build"

# Full path to devctr dir on the host.
FC_DEVCTR_DIR="${FC_ROOT_DIR}/tools/devctr"

# Path to the linux kernel directory on the host.
KERNEL_DIR="${FC_ROOT_DIR}/.kernel"

# Full path to the cargo registry dir on the host. This appears on the host
# because we want to persist the cargo registry across container invocations.
# Otherwise, any rust crates from crates.io would be downloaded again each time
# we build or test.
CARGO_REGISTRY_DIR="${FC_BUILD_DIR}/cargo_registry"

# Full path to the cargo git registry on the host. This serves the same purpose
# as CARGO_REGISTRY_DIR, for crates downloaded from GitHub repos instead of
# crates.io.
CARGO_GIT_REGISTRY_DIR="${FC_BUILD_DIR}/cargo_git_registry"

# Full path to the cargo target dir on the host.
CARGO_TARGET_DIR="${FC_BUILD_DIR}/cargo_target"

# Full path to the Firecracker sources dir, as bind-mounted in the container.
CTR_FC_ROOT_DIR="/firecracker"

# Full path to the build dir, as bind-mounted in the container.
CTR_FC_BUILD_DIR="${CTR_FC_ROOT_DIR}/build"
# Full path to the test results dir, as bind-mounted in the container.
CTR_TEST_RESULTS_DIR="${CTR_FC_ROOT_DIR}/test_results"

# Full path to the cargo target dir, as bind-mounted in the container.
CTR_CARGO_TARGET_DIR="$CTR_FC_BUILD_DIR/cargo_target"

# Path to the microVM images cache dir (relative to the repo root).
LOCAL_ARTIFACTS_DIR="build/artifacts"

# File with a single line specifying the name of the
# currently used artifacts
LOCAL_ARTIFACTS_CURRENT_DIR_FILE="build/current_artifacts"

# Full path to the public key mapping on the guest
PUB_KEY_PATH=/root/.ssh/id_rsa.pub

# Full path to the private key mapping on the guest
PRIV_KEY_PATH=/root/.ssh/id_rsa

# Path to the linux kernel directory, as bind-mounted in the container.
CTR_KERNEL_DIR="${CTR_FC_ROOT_DIR}/.kernel"

# Get the target prefix to avoid repeated calls to uname -m
TARGET_PREFIX="$(uname -m)-unknown-linux-"

# Container path to directory where we store built CI artifacts.
CTR_CI_ARTIFACTS_PATH="${CTR_FC_ROOT_DIR}/resources/$(uname -m)"

# Default S3 location that hosts the CI artifacts.
DEFAULT_ARTIFACTS_S3_BUCKET=s3://spec.ccfc.min/firecracker-ci

# Lockfile used while modifying KVM modules
KVM_MODULE_LOCKFILE="/tmp/.kvm_module_lock"

# Query the default S3 bucket with artifacts and print the most recent path
# (as an s3:// URI). Dies if nothing matching is found.
get_newest_s3_artifacts() {
  local bucket="spec.ccfc.min"
  local base_prefix="firecracker-ci/"
  local newest_dir

  # Query all files in the `firecracker-ci` directory, check files containing
  # "vmlinux", sort them by the `LastModified` date, and return the last one
  # (newest). We need to do it this way as S3 doesn't store a `LastModified`
  # date for directories, so we need to list all files.
  # NB: declared above and assigned here so a failure of the pipeline is not
  # masked by the exit status of `local`.
  newest_dir=$(aws s3api list-objects-v2 \
        --bucket "$bucket" --prefix "$base_prefix" --no-sign-request \
        --query 'sort_by(Contents[?contains(Key, `vmlinux`)], &LastModified)[-1].Key' |
        tr -d '"' |
        sed "s|^$base_prefix||" |
        cut -d'/' -f1
  )
  [ -z "$newest_dir" ] && die "Could not find newest artifacts in S3."

  echo "$DEFAULT_ARTIFACTS_S3_BUCKET/$newest_dir"
}

# Print the local cache path for a given artifacts identifier. Accepts the S3
# URI produced by get_newest_s3_artifacts (or any name); every '/' in the
# argument is flattened to '-' so the whole URI becomes a single dir name.
get_local_artifacts_path() {
    local path=$1
    # Quote the expansion so an (unlikely) space in the path does not split.
    echo "$LOCAL_ARTIFACTS_DIR/${path//\//-}"
}


# Check if Docker is available and exit if it's not.
# Upon returning from this call, the caller can be certain Docker is available.
#
ensure_docker() {
    NEWLINE=$'\n'
    # NB: `$?` inside the messages below is expanded while building the
    # ok_or_die arguments, i.e. right after the command whose output was
    # captured — do not reorder these statements.
    output=$(which docker 2>&1)
    ok_or_die "Docker not found. Aborting." \
        "Please make sure you have Docker (http://docker.com) installed" \
        "and properly configured.${NEWLINE}" \
        "Error: $?, command output: ${output}"

    # A plain `docker ps` checks both that the daemon is running and that the
    # current user is allowed to talk to it.
    output=$(docker ps 2>&1)
    ok_or_die "Error accessing Docker. Please make sure the Docker daemon" \
        "is running and that you are part of the docker group.${NEWLINE}" \
        "Error: $?, command output: ${output}${NEWLINE}" \
        "For more information, see" \
        "https://docs.docker.com/install/linux/linux-postinstall/"
}

# Run a command and retry multiple times if it fails. Once it stops
# failing return to normal execution. If there are "retry count"
# failures, set the last error code.
# $1 - command
# $2 - retry count
# $3 - sleep interval between retries
retry_cmd() {
    command=$1
    retry_cnt=$2
    sleep_int=$3

    {
        $command
    } || {
        # Command failed, substract one from retry_cnt
        retry_cnt=$((retry_cnt - 1))

        # If retry_cnt is larger than 0, sleep and call again
        if [ "$retry_cnt" -gt 0 ]; then
            echo "$command failed, retrying..."
            sleep "$sleep_int"
            retry_cmd "$command" "$retry_cnt" "$sleep_int"
        fi
    }
}

# Attempt to download our Docker image. Exit if that fails.
# Upon returning from this call, the caller can be certain our Docker image is
# available on this system.
#
ensure_devctr() {
    # We depend on having Docker present.
    ensure_docker

    # Nothing to do if the image is already available locally.
    if [[ $(docker images -q "$DEVCTR_IMAGE" | wc -l) -eq 0 ]]; then
        say "About to pull docker image $DEVCTR_IMAGE"

        # Run docker pull 5 times in case it fails - sleep 3 seconds
        # between attempts
        retry_cmd "docker pull $DEVCTR_IMAGE" 5 3

        # retry_cmd must be the statement right before this check.
        ok_or_die "Error pulling docker image. Aborting."
    fi
}

# Make sure the build/ dirs are available. Exit if we can't create them.
# Upon returning from this call, the caller can be certain the build/ dirs exist.
#
ensure_build_dir() {
    local d
    for d in \
        "$FC_BUILD_DIR" \
        "$CARGO_TARGET_DIR" \
        "$CARGO_REGISTRY_DIR" \
        "$CARGO_GIT_REGISTRY_DIR"; do
        create_dir "$d"
    done
}

# Print the host-side path of a built binary:
#   <cargo target dir>/<target triple>/<profile>/<binary name>
build_bin_path() {
    local target_triple="$1"
    local build_profile="$2"
    local bin_name="$3"
    printf '%s\n' "$CARGO_TARGET_DIR/$target_triple/$build_profile/$bin_name"
}

# Fix build/ dir permissions after a privileged container run.
# Since the privileged container runs as root, any files it creates will be
# owned by root. This fixes that by recursively changing the ownership of build/
# to the current user.
# Any extra arguments are forwarded to chown as additional paths.
#
cmd_fix_perms() {
    # Yes, running Docker to get elevated privileges, just to chown some files
    # is a dirty hack.
    # "$@" (quoted) keeps extra path arguments intact even if they contain
    # spaces; the previous unquoted $@ word-split them.
    run_devctr \
        --workdir "$CTR_FC_ROOT_DIR" \
        -- \
        chown -f -R "$(id -u):$(id -g)" "$CTR_FC_BUILD_DIR" "$CTR_TEST_RESULTS_DIR" "$CTR_CI_ARTIFACTS_PATH" "$@"
}

# Builds the development container from its Dockerfile.
#
cmd_build_devctr() {
    local docker_file_name="$FC_DEVCTR_DIR/Dockerfile"
    # Use an array (not a word-split string) so each docker argument survives
    # intact.
    local build_args=(--build-arg "ARCH=$(uname -m)")

    while [ $# -gt 0 ]; do
        case "$1" in
            "-h"|"--help")      { cmd_help; exit 1; } ;;
            "--")               { shift; break;     } ;;
            *)
                die "Unknown argument: $1. Please use --help for help."
            ;;
        esac
        shift
    done

    docker build -t "$DEVCTR_IMAGE_NO_TAG" -f "$docker_file_name" "${build_args[@]}" .
}


# Validate the user supplied kernel version number.
# It must be composed of 2 groups of integers separated by dot, with an optional third group.
validate_kernel_version() {
    # The dots are escaped so they match a literal '.' only; the previous
    # pattern used a bare '.', which matches any character and accepted
    # strings such as "6x1".
    local version_regex="^[0-9]+\.[0-9]+(\.[0-9]+)?$"
    version="$1"

    if [ -z "$version" ]; then
        die "Kernel version cannot be empty."
    elif [[ ! "$version" =~ $version_regex ]]; then
        die "Invalid version number: $version (expected: \$Major.\$Minor.\$Patch(optional))."
    fi

}


# Helper function to run the dev container.
# Usage: run_devctr <docker args> -- <container args>
# Example: run_devctr --privileged -- bash -c "echo 'hello world'"
run_devctr() {
    docker_args=()
    ctr_args=()
    docker_args_done=false
    # Everything before the first `--` is passed to `docker run`; everything
    # after it is the command executed inside the container.
    while [[ $# -gt 0 ]]; do
        [[ "$1" = "--" ]] && {
            docker_args_done=true
            shift
            continue
        }
        [[ $docker_args_done = true ]] && ctr_args+=("$1") || docker_args+=("$1")
        shift
    done

    # If we're running in a terminal, pass the terminal to Docker and run
    # the container interactively
    [[ -t 0 ]] && docker_args+=("-i")
    [[ -t 1 ]] && docker_args+=("-t")

    # Try to pass these environments from host into container for network proxies
    proxies=(http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY)
    for i in "${proxies[@]}"; do
        # ${!i} is indirect expansion: the value of the variable named by $i.
        if [[ ! -z ${!i} ]]; then
            docker_args+=("--env") && docker_args+=("$i=${!i}")
        fi
    done

    # Finally, run the dev container
    # Use 'z' on the --volume parameter for docker to automatically relabel the
    # content and allow sharing between containers.
    docker run "${docker_args[@]}" \
        --rm \
        --volume /dev:/dev \
        --volume "$FC_ROOT_DIR:$CTR_FC_ROOT_DIR:z" \
        --volume "$FC_ROOT_DIR/build/cargo_registry:/usr/local/rust/registry:z" \
        --volume "$FC_ROOT_DIR/build/cargo_git_registry:/usr/local/rust/git:z" \
        --tmpfs /srv:exec,dev,size=32G \
        -v /boot:/boot \
        --env PYTHONDONTWRITEBYTECODE=1 \
        "$DEVCTR_IMAGE" "${ctr_args[@]}"
}

# Helper function to test that the argument provided is a valid path to a SSH key.
# Dies if the key is invalid; returns 0 for a valid key (the previous
# `[ $ret -ne 0 ] && die` form made the function itself return 1 on success).
#
test_key() {
    # `ssh-keygen -l` prints a key's fingerprint and fails for non-key files.
    ssh-keygen -lf "$1" &>/dev/null || die "$1 is not a valid key file."
}

# Create a directory at the provided path (with parents) and make sure it is
# usable (+x+w). Dies if it cannot be created or its permissions fixed.
create_dir() {
    local dir="$1"
    mkdir -p "$dir" || die "Error: cannot create dir $dir"
    if [ ! -x "$dir" ] || [ ! -w "$dir" ]; then
        say "Wrong permissions for $dir. Attempting to fix them ..."
        chmod +x+w "$dir" || die "Error: wrong permissions for $dir. Should be +x+w"
    fi
}

# `$0 help`
# Show the detailed devtool usage information.
#
cmd_help() {
    echo ""
    echo "Firecracker $(basename $0)"
    echo "Usage: $(basename $0) [<args>] <command> [<command args>]"
    echo ""
    echo "Global arguments"
    echo "    -y, --unattended         Run unattended. Assume the user would always"
    echo "                             answer \"yes\" to any confirmation prompt."
    echo ""
    echo "Available commands:"
    echo ""
    echo "    build [--debug|--release] [-l|--libc musl|gnu]"
    echo "        Build the Firecracker binaries."
    echo "        Firecracker is built using the Rust build system (cargo). All arguments after --"
    echo "        will be passed through to cargo."
    echo "        --debug               Build the debug binaries. This is the default."
    echo "        --release             Build the release binaries."
    echo "        -l, --libc musl|gnu   Choose the libc flavor against which Firecracker will"
    echo "                              be linked. Default is musl."
    echo "        --ssh-keys            Provide the paths to the public and private SSH keys on the host"
    echo "                              (in this particular order) required for the git authentication."
    echo "                              It is mandatory that both keys are specified."
    echo ""
    echo "    build_devctr"
    echo "        Builds the development container from its Dockerfile."
    echo ""
    echo "    checkenv"
    echo "        Performs prerequisites checks needed to execute firecracker."
    echo ""
    echo "    distclean"
    echo "        Clean up the build tree and remove the docker container."
    echo ""
    echo "    fix_perms"
    echo "        Fixes permissions when devtool dies in the middle of a privileged session."
    echo ""
    echo "    fmt"
    echo "        Auto-format all Rust source files, to match the Firecracker requirements."
    echo "        This should be used as the last step in every commit, to ensure that the"
    echo "        Rust style tests pass."
    echo ""
    echo "    install [-p|--path] [--debug|--release]"
    echo "      Install firecracker, jailer and seccomp binaries to /usr/local/bin or a given path."
    echo "      Only the musl linked binaries are supported."
    echo "        --path                Install binaries to a specified path."
    echo "        --debug               Install the debug binaries."
    echo "        --release             Install the release binaries. This is the default."
    echo ""
    echo "    help"
    echo "        Display this help message."
    echo ""
    echo "    shell [--privileged]"
    echo "        Launch the development container and open an interactive BASH shell."
    echo "        -p, --privileged    Run the container as root, in privileged mode."
    echo "                            Running Firecracker via the jailer requires elevated"
    echo "                            privileges, though the build phase does not."
    echo ""
    echo "    sh CMD..."
    echo "        Launch the development container and run a command."
    echo ""
    echo "    test [args] [-- [<pytest args>]]"
    echo "        Run the Firecracker integration or A/B tests."
    echo "        The Firecracker testing system is based on pytest. All arguments after --"
    echo "        will be passed through to pytest."
    echo ""
    echo "        Args for the 'test' itself:"
    echo "        -h, --help                   Print help"
    echo "        -c, --cpuset-cpus cpulist    Set a dedicated cpulist to be used by the tests."
    echo "        -m, --cpuset-mems memlist    Set a dedicated memlist to be used by the tests."
    echo "            --performance            Tweak various setting of the host running the tests (such as C- and P-states)"
    echo "                                     to achieve consistent performance. Used for running performance tests in CI."
    echo "        --ab                         Run A/B test."
    echo "        --no-build                   Skip building step."
    echo "        --no-archive                 Skip archiving of 'test_result' after the test is done."
    echo "        --no-kvm-check               Skip checking for '/dev/kvm' presence."
    echo "        --no-artifacts-check         Skip checking existing artifacts."
    echo ""
    echo "    build_ci_artifacts [all|rootfs|kernels]"
    echo "        Builds the rootfs and guest kernel artifacts we use for our CI."
    echo "        Run './tools/devtool build_ci_artifacts help' for more details about the available commands."
    echo ""
    echo "    download_ci_artifacts [--force] [s3_uri_1, s3_uri_2 ...]"
    echo "        Downloads artifacts from provided S3 URI (like s3://spec.ccfc.min/firecracker-ci/my_artifacts)"
    echo "        and runs ./tools/setup-ci-artifacts.sh. for each of them."
    echo "        If no arguments are provided, pulls newest artifacts from $DEFAULT_ARTIFACTS_S3_BUCKET"
    echo "        If '--force' is specified, removes previous artifacts with same name"
    echo ""
    echo "    set_current_artifacts [s3_uri/directory name]"
    echo "        Sets the $LOCAL_ARTIFACTS_CURRENT_DIR_FILE to contain a local path where the artifacts should be."
    echo "        Accepts some name used to generate the final directory name. Mainly used with S3 URI"
    echo "        like 'download_ci_artifacts'. Alternatively it is possible to manually write local "
    echo "        path to artifacts directory into $LOCAL_ARTIFACTS_CURRENT_DIR_FILE file"
    echo ""
    echo "    ensure_current_artifacts [s3_uri/directory name]"
    echo "        Makes sure the $LOCAL_ARTIFACTS_CURRENT_DIR_FILE file contains a path to current artifacts."
    echo "        If an optional path/URI is provided, try to set it as current artifacts."
    echo ""

    # Newer commands are documented via a here-doc rather than echo lines.
    cat <<EOF
    test_debug [-- [<pytest args>]]
        Run tests in a debugging environment

    sandbox
        Run Firecracker in an IPython REPL (in devctr)

    sandbox_native
        Run Firecracker in an IPython REPL (AL2023/Ubuntu)

    mkdocs
        Use 'cargo doc' to generate rustdoc documentation

    checkstyle
        Run style checks

    checkbuild [--all|-m x86_64|aarch64]
        Run cargo check on the target architecture (supports cross compilation).
EOF
}


# `$0 build` - build Firecracker
# Supports building an arbitrary git revision into build/<revision>/ via
# --rev, and optional SSH key bind-mounting for git authentication.
# Please see `$0 help` for more information.
#
cmd_build() {
    # By default, we'll build the debug binaries.
    profile="debug"
    libc="musl"


    # Parse any command line args.
    while [ $# -gt 0 ]; do
        case "$1" in
            "-h"|"--help")  { cmd_help; exit 1;     } ;;
            "--debug")      { profile="debug";      } ;;
            "--release")    { profile="release";    } ;;
            "--rev")       { shift; revision=$1; } ;;
            "--ssh-keys")
                # Expects exactly two following arguments: public key path,
                # then private key path. Both are validated with ssh-keygen.
                shift
                [[ -z "$1" ]] && \
                    die "Please provide the path to the public SSH key."
                [[ ! -f "$1" ]]  && die "The public key file does not exist: $1."
                test_key "$1"
                host_pub_key_path="$1"
                shift
                [[ -z "$1" ]] && \
                    die "Please provide the path to the private SSH key."
                [[ ! -f "$1" ]]  && die "The private key file does not exist: $1."
                test_key "$1"
                host_priv_key_path="$1"
                ;;
            "-l"|"--libc")
                shift
                [[ "$1" =~ ^(musl|gnu)$ ]] || \
                    die "Invalid libc: $1. Valid options are \"musl\" and \"gnu\"."
                libc="$1"
                ;;
            "--")           { shift; break;         } ;;
            *)
                die "Unknown argument: $1. Please use --help for help."
            ;;
        esac
        shift
    done

    # Check prerequisites
    ensure_devctr
    ensure_build_dir

    # Map the public and private keys to the guest if they are specified.
    [ ! -z "$host_pub_key_path" ] && [ ! -z "$host_priv_key_path" ] &&
        extra_args="--volume $host_pub_key_path:$PUB_KEY_PATH:z \
                    --volume $host_priv_key_path:$PRIV_KEY_PATH:z"

    workdir="$CTR_FC_ROOT_DIR"
    if [ ! -z "$revision" ]; then
      # Build an arbitrary revision: clone it into a temp dir via a throwaway
      # local branch, and build from there instead of the working tree.
      commitish="$revision"
      # Fall back to origin/<revision> if the name isn't resolvable locally.
      if ! git cat-file -t "$commitish"; then commitish=origin/"$revision"; fi
      branch_name=tmp-$commitish

      tmp_dir=$(mktemp -d)

      git branch $branch_name $commitish
      git clone -b $branch_name . $tmp_dir
      pushd $tmp_dir
      workdir=$tmp_dir
      extra_args="$extra_args --volume $tmp_dir:$tmp_dir:z"
    fi

    # Run the cargo build process inside the container.
    # We don't need any special privileges for the build phase, so we run the
    # container as the current user/group.
    # NB: extra_args is intentionally unquoted so its contents word-split
    # into separate docker arguments.
    run_devctr \
        --privileged \
        --workdir "$workdir" \
        ${extra_args} \
        -- \
        ./tools/release.sh --libc $libc --profile $profile
    ret=$?

    # Running as root would have created some root-owned files under the build
    # dir. Let's fix that.
    cmd_fix_perms

    if [ ! -z "$revision" ]; then
      # Tear down the temp clone and copy the produced binaries (and example
      # binaries) into build/<revision>/.
      popd
      git branch -D $branch_name
      mkdir -p build/"$revision"/examples
      cp $tmp_dir/build/cargo_target/$(uname -m)-unknown-linux-$libc/$profile/* build/"$revision"
      cp $tmp_dir/build/cargo_target/$(uname -m)-unknown-linux-$libc/$profile/examples/* build/"$revision"/examples
      cmd_sh "rm -rf $tmp_dir"
    fi

    # Propagate the container build's exit status.
    return $ret
}

# Build a full release (musl libc, release profile) inside the dev container
# and reclaim ownership of the produced release files from root.
cmd_make_release() {
    ensure_build_dir
    run_devctr \
        --privileged \
        --workdir "$CTR_FC_ROOT_DIR" \
        -- \
        ./tools/release.sh --libc musl --profile release --make-release
    # The container ran as root; hand the release artifacts back to the user.
    sudo chown -Rc $USER: release*
}

# Remove everything devtool generated: the build tree, test results, and the
# locally cached dev container image.
cmd_distclean() {
    local dir
    for dir in "build" "test_results"; do
        if [ -d "$dir" ]; then
            say "Removing $dir"
            rm -rf "$dir"
        fi
    done

    # Remove devctr if it exists
    if [ "$(docker images -q "$DEVCTR_IMAGE" | wc -l)" -eq 1 ]; then
        say "Removing $DEVCTR_IMAGE"
        docker rmi -f "$DEVCTR_IMAGE"
    fi
}

# Download one or more CI artifact sets. An optional leading --force wipes any
# previously downloaded copy first. With no URIs, the newest default set is
# fetched.
cmd_download_ci_artifacts() {
    if [ "${1:-}" = "--force" ]; then
      # Deliberately global: read by download_ci_artifacts.
      FORCE_ARTIFACT_DOWNLOAD=1
      shift 1
    fi
    # Keep the URIs in a real array (the previous string + unquoted expansion
    # word-split arguments containing spaces).
    local artifacts_list=("$@")

    if [ ${#artifacts_list[@]} -eq 0 ]; then
      download_ci_artifacts
    else
      local artifacts
      for artifacts in "${artifacts_list[@]}"; do
        download_ci_artifacts "$artifacts"
      done
    fi
}

# Record the local path of the given artifacts (plus the host architecture)
# in $LOCAL_ARTIFACTS_CURRENT_DIR_FILE. Does nothing if no name is given.
cmd_set_current_artifacts() {
    local artifacts=$1
    if [ -z "$artifacts" ]; then
      say "No artifacts were specified"
    else
      # Declare and assign separately so `local` doesn't mask a failure of
      # the command substitution; quote everything against word-splitting.
      local local_artifacts_path
      local_artifacts_path="$(get_local_artifacts_path "$artifacts")/$(uname -m)"
      echo "$local_artifacts_path" > "$LOCAL_ARTIFACTS_CURRENT_DIR_FILE"
      say "Current artifacts path: " "$local_artifacts_path"
    fi
}

# Make sure $LOCAL_ARTIFACTS_CURRENT_DIR_FILE points at an existing artifacts
# dir. Reuses the recorded dir when valid and no arguments were passed;
# otherwise downloads (optionally from the given URI) and records the result.
cmd_ensure_current_artifacts() {
  if [ -f "$LOCAL_ARTIFACTS_CURRENT_DIR_FILE" ] && [ "$#" -eq 0 ]; then
      local current_local_artifacts_path
      current_local_artifacts_path=$(cat "$LOCAL_ARTIFACTS_CURRENT_DIR_FILE")
      if [ -d "$current_local_artifacts_path" ]; then
        say "Current artifacts path: " "$current_local_artifacts_path"
        return 0
      fi
      say "Invalid artifact dir! Artifacts will be downloaded again: $current_local_artifacts_path"
  fi

  # download_ci_artifacts sets the LOCAL_ARTIFACTS_PATH global on success.
  download_ci_artifacts "$@"
  echo "$LOCAL_ARTIFACTS_PATH" > "$LOCAL_ARTIFACTS_CURRENT_DIR_FILE"
  say "Current artifacts path: " "$LOCAL_ARTIFACTS_PATH"
}

# Fetch one artifacts set from S3 into the local cache (unless already
# present), run the setup script on it, and export its path via the
# LOCAL_ARTIFACTS_PATH global. With no argument, uses the newest default set.
download_ci_artifacts() {
    local artifacts=$1

    if [ -z "$artifacts" ]; then
        local default_artifacts
        default_artifacts=$(get_newest_s3_artifacts)
        say "No specific artifacts are defined. Using default artifacts: " "$default_artifacts"
        artifacts=$default_artifacts
    fi

    # Fetch all the artifacts so they are local
    local artifacts_arch="$artifacts/$(uname -m)"
    local local_artifacts_path
    local_artifacts_path="$(get_local_artifacts_path "$artifacts")/$(uname -m)"

    # FORCE_ARTIFACT_DOWNLOAD is set by cmd_download_ci_artifacts --force.
    if [ -n "$FORCE_ARTIFACT_DOWNLOAD" ]; then
      say "Removing " "$local_artifacts_path"
      rm -rf "$local_artifacts_path"
    fi

    if [ ! -d "$local_artifacts_path" ]; then
        say "Fetching artifacts from S3: " "$artifacts_arch" " into: " "$local_artifacts_path"
        mkdir -pv "$local_artifacts_path"
        aws s3 sync --no-sign-request "$artifacts_arch" "$local_artifacts_path"
        ok_or_die "Failed to download artifacts using awscli!"
        # Clean up a half-initialized cache dir if setup fails.
        if ! cmd_sh "./tools/setup-ci-artifacts.sh" "$local_artifacts_path"; then
          rm -rf "$local_artifacts_path"
          die "Failed to setup artifacts!"
        fi
    else
        say "Found existing artifacts: " "$artifacts_arch" " at: " "$local_artifacts_path"
    fi

    # Deliberately global: read by cmd_ensure_current_artifacts.
    LOCAL_ARTIFACTS_PATH=$local_artifacts_path
}

# Acquire the KVM module lock and run the given command.
# Uses flock with a timeout for safe, automatic lock management; the lock is
# held on fd 9 for the lifetime of the subshell and released automatically.
# Usage: with_kvm_module_lock <command> [args...]
with_kvm_module_lock() {
    local timeout_s=120
    (
        flock -w "$timeout_s" 9 || {
            say_warn "Timed out waiting for KVM module lock after: ${timeout_s}s"
            exit 1
        }
        echo "Successfully acquired lock"
        "$@"
    ) 9>"$KVM_MODULE_LOCKFILE"
}

# Reload KVM modules with the given vendor module and kvm params.
# Always enables avic=1 on AMD. Unloads first if already loaded.
# Returns non-zero (after a warning) if any modprobe step fails.
# Usage: reload_kvm_modules <vendor_mod> [kvm_param...]
#   e.g. reload_kvm_modules kvm_intel nx_huge_pages=never
reload_kvm_modules() {
    local vendor_mod=$1; shift

    # Unload if already loaded. The vendor module must be removed before (or
    # together with) the core kvm module, hence both in one modprobe -r.
    if lsmod | grep -qP "^kvm_(amd|intel)"; then
        if ! sudo modprobe -r $vendor_mod kvm; then
            say_warn "Failed to unload KVM modules (${vendor_mod}, kvm) (may be in use)"
            return 1
        fi
    fi

    # Load the core module first, passing through any extra kvm params
    # (e.g. nx_huge_pages=never).
    if ! sudo modprobe kvm "$@"; then
        say_warn "Failed to load kvm module"
        return 1
    fi
    if [[ $vendor_mod == "kvm_amd" ]]; then
        # AMD: always (re)load with AVIC enabled.
        if ! sudo modprobe kvm_amd avic=1; then
            say_warn "Failed to load kvm_amd module"
            return 1
        fi
    else
        if ! sudo modprobe $vendor_mod; then
            say_warn "Failed to load $vendor_mod module"
            return 1
        fi
    fi
}

# Determine the KVM vendor module for the current CPU by probing the
# virtualization extension flags in /proc/cpuinfo.
kvm_vendor_mod() {
    # Default: generic module (e.g. aarch64, which has no vmx/svm flag).
    local mod=kvm
    if grep -q "vmx" /proc/cpuinfo; then
        mod=kvm_intel
    elif grep -q "svm" /proc/cpuinfo; then
        mod=kvm_amd
    fi
    printf '%s\n' "$mod"
}

# Ensure /dev/kvm is available and apply platform-specific KVM tweaks.
# - Loads KVM modules if not present
# - On Linux 6.1 x86_64: applies nx_huge_pages=never for non-vulnerable CPUs,
#   checks favordynmods for vulnerable ones
# - On AMD: ensures AVIC is enabled
# Dies if /dev/kvm is still missing at the end.
setup_kvm() {
    local kernel_version=$(uname -r)
    local arch=$(uname -m)
    local vendor_mod=$(kvm_vendor_mod)

    # Collect all reasons to reload, then do a single reload at the end.
    local need_kvm_reload=0
    local kvm_extra_params=()

    # Load KVM if not already available
    if [[ ! -c /dev/kvm ]]; then
        need_kvm_reload=1
    fi

    local itlb_multihit=/sys/devices/system/cpu/vulnerabilities/itlb_multihit
    local nx_huge_pages=/sys/module/kvm/parameters/nx_huge_pages
    # Linux 6.1 x86_64: mitigate boot-time regression
    if [[ $kernel_version == 6.1.* ]] && [[ $arch == x86_64 ]]; then

        say "Applying Linux 6.1 boot-time regression mitigations"
        if grep -q "Not affected" $itlb_multihit; then
            echo "CPU not vulnerable to iTLB multihit, using kvm.nx_huge_pages=never mitigation"
            # Only reload if the parameter isn't already set to "never".
            if ! grep -q "never" $nx_huge_pages 2>/dev/null; then
                kvm_extra_params+=(nx_huge_pages=never)
                need_kvm_reload=1
            fi
        else
            echo "CPU vulnerable to iTLB_multihit, checking if favordynmods is enabled"
            # Warn-only check; we don't change cgroup mount options here.
            if mount | grep cgroup | grep -q favordynmods; then
                echo "favordynmods is enabled"
            else
                say_warn "cgroups' favordynmods option not enabled; VM creation performance may be impacted"
            fi
        fi
    fi

    # AMD: ensure AVIC is enabled
    local avic_param=/sys/module/kvm_amd/parameters/avic
    if [[ $vendor_mod == "kvm_amd" ]]; then
        if ! grep -q "Y\|1" $avic_param; then
            echo "AVIC not enabled, will reload kvm_amd with avic=1"
            need_kvm_reload=1
        fi
    fi

    if [[ $need_kvm_reload -eq 1 ]]; then
        echo "Reloading KVM modules"
        # reload_kvm_modules always sets avic=1 on AMD.
        reload_kvm_modules "$vendor_mod" "${kvm_extra_params[@]}"
        ok_or_die "Could not reload kvm modules"
    fi

    # Print the resulting parameter values for the logs.
    tail -v $itlb_multihit $nx_huge_pages
    if [[ $vendor_mod == "kvm_amd" ]]; then
        tail -v $avic_param
    fi

    [[ -c /dev/kvm ]] || die "/dev/kvm not found. Aborting."
}

# Modifies the processors CPU governor and P-state configuration (x86_64 only) for consistent performance. This means
# - Disable turbo boost (Intel only) by writing 1 to /sys/devices/system/cpu/intel_pstate/no_turbo
# - Lock the CPUs' P-state to the highest non-turbo one (Intel only) by writing 100 to /sys/devices/system/cpu/intel_pstate/{min,max}_perf_pct
# - Set the cpu frequency governor to performance by writing "performance" to /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
# Counterpart: unapply_performance_tweaks restores the saved values.
apply_performance_tweaks() {
  # m6a instances do not support the amd_pstate driver (yet), so nothing we can do there
  if [[ -d /sys/devices/system/cpu/intel_pstate ]]; then
    # Disable turbo boost. Some of our tests are performance tests, and we want minimum variability wrt processor frequency
    # See also https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/processor_state_control.html
    echo 1 |sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo &> /dev/null

    # Save old values to restore later.
    # NB: intentionally global — read back by unapply_performance_tweaks.
    MIN_PERF_PCT=$(cat /sys/devices/system/cpu/intel_pstate/min_perf_pct)
    MAX_PERF_PCT=$(cat /sys/devices/system/cpu/intel_pstate/max_perf_pct)

    # Force the CPU to continuously stay in the highest, non-turbo P-state. The P-state will determine the
    # CPU's clock frequency.
    # https://www.kernel.org/doc/html/v4.12/admin-guide/pm/intel_pstate.html
    echo 100 |sudo tee /sys/devices/system/cpu/intel_pstate/min_perf_pct &> /dev/null
    echo 100 |sudo tee /sys/devices/system/cpu/intel_pstate/max_perf_pct &> /dev/null
  fi

  # If CPU is Intel Granite Rapids (Xeon 6, FMS 06-AD-XX), disable C6 and C6P states.
  # We've observed significant volatility in our performance tests on Intel Granite Rapids CPUs
  # (Xeon 6, FMS 06-AD-XX), specifically in many of our latency metrics. After spending time investigating
  # this, it seems like cross-CPU communication becomes prohibitively slow with the deepest C-states
  # enabled. Since GNR chips have higher core density (96 per socket vs. SPR's 48 per socket), we believe
  # that the tail latency of transitioning out of the deepest C-states explains the volatility.

  # Disabling these deep states appear to stabilise the performance, so for consistency in our CI, we will disable them.

  # NB: The performance volatility only appears to affect Granite Rapids instances with low load
  # (e.g., our performance integration tests). The assumption is that when the load is high, cores
  # are unlikely to enter deeper C-states, so inter-CPU communication does not encounter the overhead
  # of transitioning out of deeper C-states.
  # family 6, model 173 (0xAD) == Granite Rapids.
  model=$(awk '/^model\s+:/ {print $3; exit}' /proc/cpuinfo)
  family=$(awk '/^cpu family\s+:/ {print $4; exit}' /proc/cpuinfo)
  if [[ "$family" -eq 6 && "$model" -eq 173 ]]; then
    say "Intel Granite Rapids CPU detected. Disabling C6 and C6P C-states"
    for state in /sys/devices/system/cpu/cpu[0-9]*/cpuidle/state*/; do
      if [[ -f "$state/name" && $(cat "$state/name") == C6* ]]; then
        echo 1 | sudo tee "$state/disable" &> /dev/null
      fi
    done
  fi

  # The governor is a linux component that can adjust CPU frequency. "performance" tells it to always run CPUs at
  # their maximum safe frequency. It seems to be the default for Amazon Linux, but it doesn't hurt to make this explicit.
  # See also https://wiki.archlinux.org/title/CPU_frequency_scaling
  echo performance | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor &> /dev/null
}

# Undo the tweaks made by apply_performance_tweaks (turbo boost, P-state
# limits, Granite Rapids C-states). The governor is deliberately not reset.
unapply_performance_tweaks() {
  if [[ -d /sys/devices/system/cpu/intel_pstate ]]; then
    # reenable turbo boost
    echo 0 |sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo &> /dev/null

    # Restore the P-state limits saved by apply_performance_tweaks. The
    # expansions are quoted and guarded so we never write an empty string into
    # sysfs (e.g. if this runs without a prior apply on this code path).
    if [[ -n "$MIN_PERF_PCT" && -n "$MAX_PERF_PCT" ]]; then
      echo "$MIN_PERF_PCT" |sudo tee /sys/devices/system/cpu/intel_pstate/min_perf_pct &> /dev/null
      echo "$MAX_PERF_PCT" |sudo tee /sys/devices/system/cpu/intel_pstate/max_perf_pct &> /dev/null
    fi
  fi

  # reenable Granite Rapids C-states (see apply_performance_tweaks for why
  # they are disabled in the first place)
  model=$(awk '/^model\s+:/ {print $3; exit}' /proc/cpuinfo)
  family=$(awk '/^cpu family\s+:/ {print $4; exit}' /proc/cpuinfo)
  if [[ "$family" -eq 6 && "$model" -eq 173 ]]; then
    for state in /sys/devices/system/cpu/cpu[0-9]*/cpuidle/state*/; do
      if [[ -f "$state/name" && $(cat "$state/name") == C6* ]]; then
        echo 0 | sudo tee "$state/disable" &> /dev/null
      fi
    done
  fi

  # We do not reset the governor, as keeping track of each CPU's configured governor is not trivial here. On our CI
  # instances, the performance governor is currently the default anyway (2023/11/14)
}


# `$0 test` - run integration tests
# Please see `$0 help` for more information.
#
cmd_test() {
    do_ab_test=0
    do_build=1
    do_archive=1
    do_kvm_check=1
    do_build_dir_check=1
    do_artifacts_check=1
    # Initialized explicitly so the [[ $performance_tweaks -eq 1 ]] checks
    # below never compare an unset variable.
    performance_tweaks=0
    # Parse any command line args.
    while [ $# -gt 0 ]; do
        case "$1" in
            "-h"|"--help")      { cmd_help; exit 1; } ;;
            "-c"|"--cpuset-cpus")
                shift
                local cpuset_cpus="$1"
                ;;
            "-m"|"--cpuset-mems")
                shift
                local cpuset_mems="$1"
                ;;
            "--artifacts")
                shift
                local artifacts="$1"
                ;;
            "--performance")
                performance_tweaks=1
                ;;
            "--ab")
                do_ab_test=1
                ;;
            "--no-build")
                do_build=0
                ;;
            "--no-archive")
                do_archive=0
                ;;
            "--no-kvm-check")
                do_kvm_check=0
                ;;
            "--no-build-dir-check")
                do_build_dir_check=0
                ;;
            "--no-artifacts-check")
                do_artifacts_check=0
                ;;
            "--")               { shift; break;     } ;;
            *)
                die "Unknown argument: $1. Please use --help for help."
            ;;
        esac
        shift
    done

    # Check prerequisites.
    [ $do_kvm_check != 0 ] && with_kvm_module_lock setup_kvm
    ensure_devctr
    [ $do_build_dir_check != 0 ] && ensure_build_dir
    if [ $do_artifacts_check != 0 ]; then
      # Quoted so an unset/empty value (or one containing spaces) is handled
      # correctly by the test.
      if [ -z "$artifacts" ]; then
        cmd_ensure_current_artifacts
      else
        cmd_ensure_current_artifacts "$artifacts"
      fi
    fi

    if [ $do_build != 0 ]; then
      cmd_build --release
      # On pull requests also build the base branch, so A/B comparisons have
      # both revisions available.
      if [ -n "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" ]; then
        cmd_build --release --rev "$BUILDKITE_PULL_REQUEST_BASE_BRANCH"
        ok_or_die "Failed to build Firecracker!"
      fi
    fi

    # If we got to here, we've got all we need to continue.
    say "Kernel version: $(uname -r)"
    say "$(sed '/^processor.*: 0$/,/^processor.*: 1$/!d; /^processor.*: 1$/d' /proc/cpuinfo)"
    say "RPM firmware versions: $(rpm -q microcode_ctl amd-ucode-firmware linux-firmware)"

    # Forward CI/metrics-related environment variables into the container.
    env |grep -P "^(AWS_EMF_|BUILDKITE|CODECOV_)" > env.list
    if [[ $performance_tweaks -eq 1 ]]; then
      if [[ "$(uname --machine)" == "x86_64" ]]; then
        say "Detected CI and performance tests, tuning CPU frequency scaling and idle states for reduced variability"

        apply_performance_tweaks
      fi

      # It seems that even if the tests using huge pages run sequentially on ag=1 agents, right-sizing the huge pages
      # pool to the total number of huge pages used across all tests results in spurious failures with pool depletion
      # anyway (something else on the host seems to be stealing our huge pages, and we cannot "ear mark" them for
      # Firecracker processes). Thus, just allocate 48GB of them and call it a day.
      say "Setting up huge pages pool"
      num_hugetlbfs_pages=24552

      huge_pages_old=$(cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)
      huge_pages_new=$(echo $num_hugetlbfs_pages |sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)

      # Sanity-check the resize. This belongs inside the performance-tweaks
      # branch: the variables involved are only set here, and comparing them
      # unconditionally would compare two unset (arithmetic-zero) values.
      if [[ "$huge_pages_new" -ne "$num_hugetlbfs_pages" ]]; then
        die "Failed to allocate $num_hugetlbfs_pages hugetlbfs pages, only got $huge_pages_new"
      fi
    fi

    say "Starting test run ..."

    test_script="./tools/test.sh"

    if [ $do_ab_test -eq 1 ]; then
      test_script="./tools/ab_test.py"
    fi

    # Testing (running Firecracker via the jailer) needs root access,
    # in order to set-up the Firecracker jail (manipulating cgroups, net
    # namespaces, etc).
    # We need to run a privileged container to get that kind of access.
    run_devctr \
        --privileged \
        --security-opt seccomp=unconfined \
        --ulimit core=0 \
        --ulimit nofile=4096:4096 \
        --ulimit memlock=-1:-1 \
        --workdir "$CTR_FC_ROOT_DIR" \
        --cpuset-cpus="$cpuset_cpus" \
        --cpuset-mems="$cpuset_mems" \
        --env-file env.list \
        -- \
        "$test_script" "$@"

    ret=$?

    say "Finished test run ..."

    # Running as root would have created some root-owned files under the build
    # dir. Let's fix that.
    cmd_fix_perms

    # undo performance tweaks (in case the instance gets recycled for a non-perf test)
    if [[ $performance_tweaks -eq 1 ]]; then
      if [[ "$(uname --machine)" == "x86_64" ]]; then
        unapply_performance_tweaks
      fi

      echo $huge_pages_old |sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages >/dev/null
    fi

    # do not leave behind env.list file
    rm env.list

    # archive everything in the `test_result` to speed up upload/download
    # to s3 if we are in CI
    if [ $do_archive != 0 ] && [ -n "$BUILDKITE" ] && [ "$BUILDKITE" = "true" ]; then
      tar -czf data.tar.gz -C test_results .
      rm -r test_results/*
      mv data.tar.gz test_results
    fi

    return $ret
}


# `$0 shell` - drop to a shell prompt inside the dev container
# Please see `$0 help` for more information.
#
cmd_shell() {

    # By default, we run the container as the current user. Declared local so
    # the flag does not leak into the global scope (matching cmd_test's style).
    local privileged=false

    # Parse any command line args.
    while [ $# -gt 0 ]; do
        case "$1" in
            "-h"|"--help")          { cmd_help; exit 1; } ;;
            "-p"|"--privileged")    { privileged=true;  } ;;
            "--")               { shift; break;     } ;;
            *)
                die "Unknown argument: $1. Please use --help for help."
            ;;
        esac
        shift
    done

    # Make sure we have what we need to continue.
    ensure_devctr
    ensure_build_dir

    if [[ $privileged = true ]]; then
        # If requested, spin up a privileged container.
        #
        say "Dropping to a privileged shell prompt ..."
        say "Note: $FC_ROOT_DIR is bind-mounted under $CTR_FC_ROOT_DIR"
        say_warn "You are running as root; any files that get created under" \
            "$CTR_FC_ROOT_DIR will be owned by root."
        run_devctr \
            --privileged \
            --ulimit nofile=4096:4096 \
            --ulimit memlock=-1:-1 \
            --security-opt seccomp=unconfined \
            --workdir "$CTR_FC_ROOT_DIR" \
            -- \
            bash
        ret=$?

        # Running as root may have created some root-owned files under the build
        # dir. Let's fix that.
        #
        cmd_fix_perms
    else
        say "Dropping to shell prompt as user $(whoami) ..."
        say "Note: $FC_ROOT_DIR is bind-mounted under $CTR_FC_ROOT_DIR"
        say_warn "You won't be able to run Firecracker via the jailer," \
            "but you can still build it."
        say "You can use \`$0 shell --privileged\` to get a root shell."

        [ -w /dev/kvm ] || \
            say_warn "WARNING: user $(whoami) doesn't have permission to" \
                "access /dev/kvm. You won't be able to run Firecracker."

        # Run as the invoking user so any files created are owned correctly.
        run_devctr \
            --user "$(id -u):$(id -g)" \
            --ulimit nofile=4096:4096 \
            --ulimit memlock=-1:-1 \
            --device=/dev/kvm:/dev/kvm \
            --workdir "$CTR_FC_ROOT_DIR" \
            --env PS1="$(whoami)@\h:\w\$ " \
            -- \
            bash --norc
        ret=$?
    fi

    return $ret
}

# Run an arbitrary shell command (all arguments, joined) inside a privileged
# dev container.
cmd_sh() {
    ensure_build_dir
    # Same privileged setup and ulimits used by the test runner.
    local ctr_args=(
        --privileged
        --ulimit nofile=4096:4096
        --ulimit memlock=-1:-1
        --workdir "$CTR_FC_ROOT_DIR"
    )
    run_devctr "${ctr_args[@]}" -- bash --norc -c "$*"
}

# `$0 sandbox` - build Firecracker and drop into an IPython session inside the
# dev container with ./tools/sandbox.py preloaded.
cmd_sandbox() {
    cmd_build --release
    cmd_ensure_current_artifacts
    # Use $* (not $@): the extra arguments are spliced into a single command
    # string, and "$@" inside double quotes splits the string into multiple
    # words at each parameter boundary.
    cmd_sh "tmux new env PYTEST_ADDOPTS=--pdbcls=IPython.terminal.debugger:TerminalPdb PYTHONPATH=tests IPYTHONDIR=\$PWD/.ipython ipython -i ./tools/sandbox.py $*"
    cmd_fix_perms ".ipython"
}

# `$0 sandbox_native` - like `sandbox`, but runs on the host instead of inside
# the dev container, installing the needed host packages and a Python venv.
cmd_sandbox_native() {
    cmd_build --release

    # Install host-side dependencies for the supported distros.
    source /etc/os-release
    case $ID$VERSION_ID in
        ubuntu22.04)
            # -y for unattended operation, consistent with the yum branch below.
            sudo apt install -y python3-pip python3.11-dev gcc tmux
            ;;
        al2023)
            sudo yum -y install python3.11-pip python3.11-devel gcc tmux
            ;;
    esac
    python3.11 -m venv sandbox
    source sandbox/bin/activate
    pip3.11 install ipython requests requests_unixsocket2 psutil tenacity filelock
    pip3.11 install jsonschema aws_embedded_metrics
    pip3.11 install packaging pytest
    cmd_ensure_current_artifacts
    # "$@" (quoted) so extra arguments containing spaces survive as single
    # words when handed to tmux/ipython.
    tmux neww sudo --preserve-env=HOME,PATH,TMUX env PYTHONPATH=tests IPYTHONDIR=\$PWD/.ipython ipython -i ./tools/sandbox.py "$@"
}

# `$0 test_debug` - run the test suite with pytest's --pdb flag inside a tmux
# session in the dev container, dropping into the debugger on failure.
cmd_test_debug() {
    cmd_ensure_current_artifacts
    # $* (not $@): the whole invocation is a single command string; cmd_sh
    # joins its arguments anyway, and "$@" inside quotes would split it.
    cmd_sh "tmux new ./tools/test.sh --pdb $*"
}

# Auto-format all source code to match the Firecracker requirements: Rust via
# cargo fmt/cargo sort, Python via black/isort, Markdown via mdformat.
# Example: `devtool fmt`
#
cmd_fmt() {
    # Each entry is one shell command to run inside the dev container.
    local formatters=(
        "cargo fmt --all"
        "cargo sort"
        "cd tests; black --config pyproject.toml . ../tools ../.buildkite"
        "cd tests; isort . ../tools ../.buildkite"
        "mdformat $(git ls-files '*.md' | tr '\n' ' ')"
    )
    local fmt_cmd
    for fmt_cmd in "${formatters[@]}"; do
        cmd_sh "$fmt_cmd"
    done
}

# Generate the rustdoc documentation for the whole workspace, including
# private items, inside the dev container.
cmd_mkdocs() {
    local doc_cmd="cargo doc --workspace --no-deps --document-private-items"
    cmd_sh "$doc_cmd"
}

# Run the style checks: a git-secrets scan (skipped on Buildkite), the style
# integration tests, and the framework doctests.
cmd_checkstyle() {
    if [[ -z "$BUILDKITE" ]]; then
      cmd_sh "git-secrets --register-aws && git-secrets --scan"
    fi

    # Both test runs share the same "skip everything but the tests" flags.
    local common_flags=(--no-build --no-kvm-check --no-build-dir-check --no-artifacts-check)
    cmd_test "${common_flags[@]}" -- -n 4 --dist worksteal integration_tests/style || exit 1
    cmd_test "${common_flags[@]}" -- -n 4 --doctest-modules framework || exit 1
}

# `$0 checkbuild` - run cargo clippy for one architecture (default: the host's)
# or, with --all, for every supported architecture.
cmd_checkbuild() {
    TARGET_ARCH=$(uname -m)
    SUPPORTED_ARCHS=(x86_64 aarch64)
    while [ $# -gt 0 ]; do
        case "$1" in
            "-h"|"--help") { cmd_help; exit 1; } ;;
            "-m"|"--arch") { TARGET_ARCH=$2; shift; } ;;
            "--all")
                # Recurse once per supported architecture.
                local arch
                for arch in "${SUPPORTED_ARCHS[@]}"; do
                    say "Running checkbuild -m $arch"
                    cmd_checkbuild -m "$arch" || return $?
                done
                say "Build check passed for ${SUPPORTED_ARCHS[*]}"
                return 0
                ;;
            *)
                die "Unknown argument: $1. Please use --help for help."
        ;;
        esac
        shift
    done

    # Exact-match membership test; a plain unquoted substring grep would
    # accept bogus values like "64".
    local supported=0 arch
    for arch in "${SUPPORTED_ARCHS[@]}"; do
        if [[ "$TARGET_ARCH" == "$arch" ]]; then
            supported=1
        fi
    done
    if [[ $supported -ne 1 ]]; then
        die "Unknown architecture: $TARGET_ARCH. Supported architectures: ${SUPPORTED_ARCHS[*]}"
    fi

    # Use GNU target to check build as musl has issues with cross-compilation
    cmd_sh "cargo clippy --target ${TARGET_ARCH}-unknown-linux-gnu --all --all-targets --all-features -- -D warnings" \
        || die "Error running build checks for $TARGET_ARCH"
    say "Build check passed for $TARGET_ARCH"
}

# Check if able to run firecracker: /dev/kvm must exist as a character device
# and be readable and writable by the current user.
# ../docs/getting-started.md#prerequisites
ensure_kvm_rw () {
    if [[ ! -c /dev/kvm || ! -r /dev/kvm || ! -w /dev/kvm ]]; then
        say_err "FAILED: user $(whoami) doesn't have permission to" \
                "access /dev/kvm."
    fi
}

check_kernver () {
    # Minimum supported host kernel version.
    KERN_MAJOR=5
    KERN_MINOR=10
    # Fold major.minor into a single comparable number (major + minor/100),
    # e.g. 5.10 -> 5.10, 4.14 -> 4.14, and fail the subshell (awk exit 1)
    # when the running kernel is older than the minimum.
    # NOTE(review): this scheme assumes minor versions stay below 100.
    (uname -r | awk -v MAJOR=$KERN_MAJOR -v MINOR=$KERN_MINOR '{ split($0,kver,".");
    if( (kver[1] + (kver[2] / 100) ) <  MAJOR + (MINOR/100) )
    {
      exit 1;
    } }') ||
    say_err "FAILED: Kernel version must be >= $KERN_MAJOR.$KERN_MINOR"
}

# Check Production Host Setup
# ../docs/prod-host-setup.md

# Warn unless the kernel reports KPTI as the active meltdown mitigation.
check_KPTI () {
    if ! grep -q "^Mitigation: PTI$" \
        /sys/devices/system/cpu/vulnerabilities/meltdown; then
        say_warn "WARNING: KPTI NOT SUPPORTED."
    fi
}

# Warn unless kernel samepage merging is switched off (run flag reads 0).
check_KSM () {
    if ! grep -q "^0$" /sys/kernel/mm/ksm/run; then
        say_warn "WARNING: KSM ENABLED."
    fi
}

# Warn for every CPU vulnerability the kernel reports as unmitigated.
check_vulns () {
    local f
    for f in /sys/devices/system/cpu/vulnerabilities/* ; do
        # grep's exit status drives the conditional directly; wrapping it in
        # $(...) (as `if $(grep ...)`) is an anti-pattern that only worked by
        # accident via the empty expansion inheriting grep's status.
        if grep -q "Vulnerable" "$f" ; then
            say_warn "WARNING: $(basename "$f"): VULNERABLE."
        fi
    done
}

check_swap () {
    # Warn when a "swap.img" entry appears in /proc/swaps (i.e. a swap file
    # is active on the host).
    (grep -q "swap.img" /proc/swaps ) && \
    say_warn "WARNING: SWAP ENABLED."
}

# Warn when Intel EPT is explicitly disabled in the kvm_intel module.
check_EPT() {
    if [ "$(uname --machine)" = "x86_64" ]; then
        grep -q "Y" /sys/module/kvm_intel/parameters/ept
        # Only exit code 1 (file exists but does not contain "Y") means EPT is
        # off; a missing file (exit 2, e.g. kvm_intel not loaded) is tolerated.
        if [ $? -eq 1 ]; then
            say_warn "WARNING: EPT DISABLED. Performance will be affected."
        fi
    fi
}

# Warn when the kernel boot log indicates we are running inside a hypervisor.
check_vm() {
    local hypervisor_hits
    hypervisor_hits=$(dmesg | grep -c -i "hypervisor detected")
    if [ "$hypervisor_hits" -gt 0 ]; then
        say_warn "WARNING: you are running in a virtual machine." \
            "Firecracker is not well tested under nested virtualization."
    fi
}

# `$0 checkenv` - check the host for Firecracker prerequisites and the
# production host setup recommendations.
cmd_checkenv() {
    # This command accepts no arguments beyond -h/--help.
    while [ $# -gt 0 ]; do
        case "$1" in
            "-h"|"--help")      { cmd_help; exit 1; } ;;
            *)
                die "Unknown argument: $1. Please use --help for help."
        ;;
        esac
        shift
    done

    local PROD_DOC="../docs/prod-host-setup.md"
    local QUICKSTART="../docs/getting-started.md#prerequisites"

    # Basic prerequisites for running Firecracker at all.
    say "Checking prerequisites for running Firecracker."
    say "Please check $QUICKSTART in case of any error."
    ensure_kvm_rw
    check_kernver
    check_vm

    # Hardening recommendations for production hosts.
    say "Checking Host Security Configuration."
    say "Please check $PROD_DOC in case of any error."
    check_KSM
    check_swap
    check_EPT
    check_vulns
}

# `$0 install` - copy the built binaries into an install directory
# (/usr/local/bin by default).
cmd_install() {
    # By default we install release/musl binaries.
    profile="release"
    target="$TARGET_PREFIX""musl"
    install_path="/usr/local/bin"
    binaries=("firecracker" "jailer" "seccompiler-bin" "rebase-snap" "cpu-template-helper")

    # Parse any command line args.
    while [ $# -gt 0 ]; do
        case "$1" in
            "-h"|"--help") { cmd_help; exit 1; } ;;
            "-p"|"--path")
                shift;
                install_path=$1;
                ;;
            "--debug")      { profile="debug";      } ;;
            "--release")    { profile="release";    } ;;
            *)
                die "Unknown argument: $1. Please use --help for help."
            ;;
        esac
        shift
    done

    # First pass: verify every binary exists before touching the install dir,
    # so we either install all of them or none.
    local binary bin_path
    for binary in "${binaries[@]}"; do
        bin_path=$( build_bin_path "$target" "$profile" "$binary" )
        if [ ! -f "$bin_path" ]; then
            die "Missing release binary. Needed file: $bin_path\n"\
            "To build the binaries, run:\n\t$0 build --$profile"
        fi
    done

    # Second pass: install them.
    for binary in "${binaries[@]}"; do
        say "Installing $binary in $install_path"
        install -m 755 -D -t "$install_path" "$( build_bin_path "$target" "$profile" "$binary" )"
    done
}

# `$0 build_ci_artifacts` - run ./resources/rebuild.sh inside the dev
# container, forwarding all arguments to it.
cmd_build_ci_artifacts() {
    # Check prerequisites
    ensure_devctr

    # We need to run nested Docker here, so run this container as privileged.
    run_devctr \
        --privileged \
        --workdir "$CTR_FC_ROOT_DIR" \
        -- \
        ./resources/rebuild.sh "$@"

    # Running privileged may have left root-owned files under the build dir;
    # restore ownership to the invoking user.
    cmd_fix_perms
}


# Entry point: parse global options, then dispatch to the cmd_<name> function
# matching the first non-option argument.
main() {

    if [ $# = 0 ]; then
        die "No command provided. Please use \`$0 help\` for help."
    fi

    # Consume global options; the first non-option token is the command name.
    while [ $# -gt 0 ]; do
        case "$1" in
            -h|--help)
                cmd_help
                exit 1
                ;;
            -y|--unattended)
                # Accepted for compatibility; purposefully ignored.
                ;;
            -*)
                die "Unknown arg: $1. Please use \`$0 help\` for help."
                ;;
            *)
                # Found the command name; stop option parsing.
                break
                ;;
        esac
        shift
    done

    # $1 is now a command name. Check if it is a valid command and, if so,
    # run it.
    declare -f "cmd_$1" > /dev/null
    ok_or_die "Unknown command: $1. Please use \`$0 help\` for help."

    cmd=cmd_$1
    shift

    # $@ is now a list of command-specific args; forward them verbatim.
    $cmd "$@"
}

# Script entry point: hand all command-line arguments to main.
main "$@"
