[pytorch hash update] update the pinned pytorch hash #23680
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
name: trunk

on:
  push:
    branches:
      - main
      - release/*
    tags:
      - ciflow/trunk/*
  pull_request:
    paths:
      - .ci/docker/ci_commit_pins/pytorch.txt
      - .ci/scripts/**
  workflow_dispatch:

concurrency:
  # One in-flight run per workflow + PR number (or push SHA). The trailing
  # event-name terms give workflow_dispatch (and any future schedule) runs
  # their own group so manual runs never cancel CI runs and vice versa.
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
  cancel-in-progress: true
jobs:
  # Basic model export/run coverage on macOS CPU.
  test-models-macos-cpu:
    name: test-models-macos-cpu
    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
    strategy:
      matrix:
        # Mac runners are expensive and limited, and non reliable.
        # Do some basic testing for macos jobs, and rely mostly on
        # test-models-linux-aarch64 job instead.
        model: [emformer_join, ic4, llama2, mobilebert, mv3, resnet50, vit, w2l]
        backend: [xnnpack-quantization-delegation]
        include:
          - model: efficient_sam
            backend: portable
          - model: llama
            backend: portable
          - model: llama3_2_vision_encoder
            backend: portable
          - model: mv3
            backend: portable
      fail-fast: false
    with:
      runner: macos-m1-stable
      python-version: '3.11'
      submodules: 'recursive'
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      timeout: 90
      script: |
        MODEL_NAME=${{ matrix.model }}
        BUILD_TOOL=cmake
        BACKEND=${{ matrix.backend }}
        bash .ci/scripts/setup-conda.sh
        # Setup MacOS dependencies as there is no Docker support on MacOS atm
        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"
        # Build and test executorch
        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}"
| test-arm-backend-zephyr: | |
| name: test-arm-backend-zephyr | |
| uses: pytorch/test-infra/.github/workflows/linux_job.yml@main | |
| strategy: | |
| matrix: | |
| target: [ethos-u55, cortex-m55, ethos-u85] | |
| fail-fast: false | |
| with: | |
| runner: linux.2xlarge | |
| docker-image: ci-image:executorch-ubuntu-22.04-zephyr-sdk | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 120 | |
| script: | | |
| #!/bin/bash | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| # Test zephyr backend | |
| set -e | |
| # Support comma-separated TARGET_LIST or ${{ matrix.target }} list, e.g., TARGET_LIST="ethos-u55,cortex-m55,ethos-u85" | |
| if [ -z "${TARGET_LIST:-}" ]; then | |
| IFS=',' read -r -a TARGETS <<< "${{ matrix.target }}" | |
| else | |
| IFS=',' read -r -a TARGETS <<< "${TARGET_LIST}" | |
| fi | |
| export EXECUTORCH_PROJ_ROOT=$(realpath $(pwd)) | |
| ZEPHYR_README_PATH="zephyr/README.md" | |
| ZEPHYR_SAMPLES_README_PATH="zephyr/samples/hello-executorch/README.md" | |
| # Source utility scripts | |
| . .ci/scripts/utils.sh | |
| . .ci/scripts/zephyr-utils.sh | |
| # check that zephyr/README.md and zephyr/executorch.yaml are in sync | |
| verify_zephyr_readme | |
| # Based on instructions in zephyr/README.md and zephyr/samples/hello-executorch/README.md | |
| run_command_block_from_readme "${ZEPHYR_README_PATH}" "<!-- RUN install_reqs -->" | |
| # Make sure to backup the zephyr_scratch folder if it exists to allow for local | |
| # testing that does not lose code/data | |
| if [ -d "zephyr_scratch" ]; then | |
| mv "zephyr_scratch" "zephyr_scratch.backup.$(date +%Y%m%d%H%M%S)" | |
| fi | |
| mkdir -p zephyr_scratch/ | |
| cd zephyr_scratch | |
| export ZEPHYR_PROJ_ROOT=$(realpath $(pwd)) | |
| echo "---- Zephyr SDK ----" | |
| # Use ZephyrSDK if on the disk (e.g. setup in the docker) | |
| # Check for a zephyr-sdk-0.17.4 directory and make a symlink if found in parent directories | |
| if sdk_dir=$(find ../../.. -maxdepth 4 -type d -name 'zephyr-sdk-0.17.4' -print -quit) && [ -n "${sdk_dir}" ]; then | |
| echo "---- Found pre downloaded Zephyr SDK in ${sdk_dir} ----" | |
| ln -s "${sdk_dir}" . | |
| fi | |
| # Download and setup Zephyr SDK 0.17.4 if not already present | |
| if [ ! -d "zephyr-sdk-0.17.4" ]; then | |
| echo "---- Downloading Zephyr SDK ----" | |
| wget https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v0.17.4/zephyr-sdk-0.17.4_linux-x86_64.tar.xz | |
| tar -xf zephyr-sdk-0.17.4_linux-x86_64.tar.xz | |
| rm -f zephyr-sdk-0.17.4_linux-x86_64.tar.xz* | |
| fi | |
| ./zephyr-sdk-0.17.4/setup.sh -c -t arm-zephyr-eabi | |
| export ZEPHYR_SDK_INSTALL_DIR=$(realpath ./zephyr-sdk-0.17.4) | |
| cd ${ZEPHYR_PROJ_ROOT} | |
| run_command_block_from_readme "${ZEPHYR_README_PATH}" "<!-- RUN west_init -->" | |
| cp ${EXECUTORCH_PROJ_ROOT}/zephyr/executorch.yaml zephyr/submanifests/ | |
| run_command_block_from_readme "${ZEPHYR_README_PATH}" "<!-- RUN west_config -->" | |
| # Switch to executorch in this PR e.g. replace modules/lib/executorch with the root folder of this repo | |
| # instead of doing a re-checkout and figure out the correct commit hash etc | |
| rm -Rf modules/lib/executorch | |
| ln -s ${EXECUTORCH_PROJ_ROOT} modules/lib/executorch | |
| # Setup git local user for Executorch git to allows modules/lib/executorch/examples/arm/setup.sh be run inside CI later | |
| # Configure git user only if not already set | |
| if ! git config --get user.name >/dev/null 2>&1; then | |
| git config --global user.name "Github Executorch" | |
| fi | |
| if ! git config --get user.email >/dev/null 2>&1; then | |
| git config --global user.email "github_executorch@arm.com" | |
| fi | |
| run_command_block_from_readme "${ZEPHYR_README_PATH}" "<!-- RUN install_executorch -->" | |
| run_command_block_from_readme "${ZEPHYR_README_PATH}" "<!-- RUN install_arm_tools -->" | |
| for TARGET in "${TARGETS[@]}"; do | |
| TARGET="$(echo "$TARGET" | xargs)" # trim whitespace | |
| echo "---- ${TARGET} ----" | |
| rm -Rf build | |
| if [[ ${TARGET} == "ethos-u55" || ${TARGET} == "cortex-m55" ]]; then | |
| BOARD="corstone300" | |
| elif [[ ${TARGET} == "ethos-u85" ]]; then | |
| BOARD="corstone320" | |
| else | |
| echo "Fail unsupport target selection ${TARGET}" | |
| exit 1 | |
| fi | |
| echo "---- ${TARGET} Board ${BOARD} FVP setup ----" | |
| run_command_block_from_readme "${ZEPHYR_SAMPLES_README_PATH}" "<!-- RUN setup_${BOARD}_fvp -->" | |
| echo "---- ${TARGET} Create PTE ----" | |
| run_command_block_from_readme "${ZEPHYR_SAMPLES_README_PATH}" "<!-- RUN test_${TARGET}_generate_pte -->" | |
| echo "---- ${TARGET} Build and run ----" | |
| run_command_block_from_readme "${ZEPHYR_SAMPLES_README_PATH}" "<!-- RUN test_${TARGET}_build_and_run -->" | |
| done | |
| test-models-linux-aarch64: | |
| name: test-models-linux-aarch64 | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| strategy: | |
| matrix: | |
| model: [linear, add, add_mul, ic3, ic4, mv2, mv3, resnet18, resnet50, vit, w2l, mobilebert, emformer_join, emformer_transcribe] | |
| backend: [portable, xnnpack-quantization-delegation] | |
| runner: [linux.arm64.2xlarge] | |
| include: | |
| - model: lstm | |
| backend: portable | |
| runner: linux.arm64.2xlarge | |
| - model: mul | |
| backend: portable | |
| runner: linux.arm64.2xlarge | |
| - model: softmax | |
| backend: portable | |
| runner: linux.arm64.2xlarge | |
| - model: phi_4_mini | |
| backend: portable | |
| runner: linux.arm64.m7g.4xlarge | |
| - model: qwen2_5_1_5b | |
| backend: portable | |
| runner: linux.arm64.2xlarge | |
| - model: llama3_2_vision_encoder | |
| backend: portable | |
| runner: linux.arm64.2xlarge | |
| fail-fast: false | |
| with: | |
| runner: ${{ matrix.runner }} | |
| docker-image: ci-image:executorch-ubuntu-22.04-gcc11-aarch64 | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 90 | |
| script: | | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| MODEL_NAME=${{ matrix.model }} | |
| BUILD_TOOL="cmake" | |
| BACKEND=${{ matrix.backend }} | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}" | |
| # Build and test ExecuTorch | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}" | |
| test-custom-ops-macos: | |
| name: test-custom-ops-macos | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| strategy: | |
| matrix: | |
| include: | |
| - build-tool: cmake | |
| fail-fast: false | |
| with: | |
| runner: macos-m1-stable | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| script: | | |
| BUILD_TOOL=${{ matrix.build-tool }} | |
| bash .ci/scripts/setup-conda.sh | |
| # Setup MacOS dependencies as there is no Docker support on MacOS atm | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}" | |
| # Build and test custom ops | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/portable/custom_ops/test_custom_ops.sh "${BUILD_TOOL}" | |
| test-selective-build-macos: | |
| name: test-selective-build-macos | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| strategy: | |
| matrix: | |
| include: | |
| - build-tool: cmake | |
| fail-fast: false | |
| with: | |
| runner: macos-m1-stable | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| script: | | |
| BUILD_TOOL=${{ matrix.build-tool }} | |
| bash .ci/scripts/setup-conda.sh | |
| # Setup MacOS dependencies as there is no Docker support on MacOS atm | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}" | |
| # Build and test selective build | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/selective_build/test_selective_build.sh "${BUILD_TOOL}" | |
| test-demo-backend-delegation: | |
| name: test-demo-backend-delegation | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| strategy: | |
| matrix: | |
| include: | |
| - build-tool: buck2 | |
| - build-tool: cmake | |
| fail-fast: false | |
| with: | |
| runner: linux.2xlarge | |
| docker-image: ci-image:executorch-ubuntu-22.04-clang12 | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| script: | | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| BUILD_TOOL=${{ matrix.build-tool }} | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}" | |
| # Test selective build | |
| PYTHON_EXECUTABLE=python bash examples/portable/scripts/test_demo_backend_delegation.sh "${BUILD_TOOL}" | |
| test-arm-backend-ethos-u: | |
| name: test-arm-backend-ethos-u | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| strategy: | |
| matrix: | |
| include: | |
| - test_arm_baremetal: test_pytest_ops_ethos_u55 | |
| - test_arm_baremetal: test_pytest_models_ethos_u55 | |
| - test_arm_baremetal: test_run_ethos_u55 | |
| - test_arm_baremetal: test_pytest_ops_ethos_u85 | |
| - test_arm_baremetal: test_pytest_models_ethos_u85 | |
| - test_arm_baremetal: test_run_ethos_u85 | |
| - test_arm_baremetal: test_smaller_stories_llama | |
| - test_arm_baremetal: test_memory_allocation | |
| fail-fast: false | |
| with: | |
| runner: linux.2xlarge.memory | |
| docker-image: ci-image:executorch-ubuntu-22.04-arm-sdk | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 120 | |
| script: | | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| source .ci/scripts/utils.sh | |
| install_executorch "--use-pt-pinned-commit" | |
| .ci/scripts/setup-arm-baremetal-tools.sh | |
| # Increase number of files user can monitor to bypass buck failures. | |
| # Hopefully this is high enough for this setup. | |
| sudo sysctl fs.inotify.max_user_watches=1048576 # 1024 * 1024 | |
| ARM_TEST=${{ matrix.test_arm_baremetal }} | |
| # Test test_arm_baremetal.sh with test | |
| backends/arm/test/test_arm_baremetal.sh "${ARM_TEST}" | |
| test-arm-backend-vkml: | |
| name: test-arm-backend-vkml | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| strategy: | |
| matrix: | |
| include: | |
| - test_arm_baremetal: test_pytest_ops_vkml | |
| fail-fast: false | |
| with: | |
| runner: linux.2xlarge.memory | |
| docker-image: ci-image:executorch-ubuntu-22.04-arm-sdk | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 120 | |
| script: | | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| source .ci/scripts/utils.sh | |
| install_executorch "--use-pt-pinned-commit" | |
| .ci/scripts/setup-arm-baremetal-tools.sh --disable-ethos-u-deps --enable-mlsdk-deps --install-mlsdk-deps-with-pip | |
| # Increase number of files user can monitor to bypass buck failures. | |
| # Hopefully this is high enough for this setup. | |
| sudo sysctl fs.inotify.max_user_watches=1048576 # 1024 * 1024 | |
| ARM_TEST=${{ matrix.test_arm_baremetal }} | |
| backends/arm/test/test_arm_baremetal.sh "${ARM_TEST}" | |
| test-arm-ootb-linux: | |
| name: test-arm-ootb-linux | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| strategy: | |
| matrix: | |
| include: | |
| - test_arm_ootb: run_ootb_tests_ethos_u | |
| - test_arm_ootb: run_ootb_tests_tosa | |
| - test_arm_ootb: run_deit_e2e_ethos_u | |
| fail-fast: false | |
| with: | |
| runner: linux.2xlarge | |
| docker-image: ci-image:executorch-ubuntu-22.04-arm-sdk | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 90 | |
| script: | | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| # Follow the steps required before running the notebooks | |
| # Try to mirror these as closely as possible | |
| source .ci/scripts/utils.sh | |
| install_executorch "--use-pt-pinned-commit" | |
| .ci/scripts/setup-arm-baremetal-tools.sh | |
| source examples/arm/arm-scratch/setup_path.sh | |
| # Install requirements for converting notebooks | |
| pip install notebook | |
| # Run OOTB tests | |
| OOTB_TEST=${{ matrix.test_arm_ootb }} | |
| backends/arm/test/test_arm_ootb.sh $OOTB_TEST | |
| test-coreml-delegate: | |
| name: test-coreml-delegate | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| with: | |
| runner: macos-14-xlarge | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 90 | |
| script: | | |
| BUILD_TOOL=cmake | |
| bash .ci/scripts/setup-conda.sh | |
| # Setup MacOS dependencies as there is no Docker support on MacOS atm | |
| GITHUB_RUNNER=1 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}" | |
| # Build and test coreml delegate | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/build_all.sh | |
| test-static-llama-ane: | |
| name: test-static-llama-ane | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| with: | |
| runner: macos-m1-stable | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| script: | | |
| set -eux | |
| bash .ci/scripts/setup-conda.sh | |
| eval "$(conda shell.bash hook)" | |
| # Install requirements | |
| ${CONDA_RUN} sh install_requirements.sh | |
| ${CONDA_RUN} sh backends/apple/coreml/scripts/install_requirements.sh | |
| ${CONDA_RUN} python install_executorch.py | |
| ${CONDA_RUN} sh examples/models/llama/install_requirements.sh | |
| # Test ANE llama | |
| ${CONDA_RUN} sh .ci/scripts/test_ane_static_llama.sh | |
| test-llama-torchao-lowbit: | |
| name: test-llama-torchao-lowbit | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| with: | |
| runner: macos-m1-stable | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| script: | | |
| set -eux | |
| bash .ci/scripts/setup-conda.sh | |
| eval "$(conda shell.bash hook)" | |
| # Install requirements | |
| ${CONDA_RUN} EXECUTORCH_BUILD_KERNELS_TORCHAO=1 python install_executorch.py | |
| ${CONDA_RUN} sh examples/models/llama/install_requirements.sh | |
| # Run test | |
| ${CONDA_RUN} sh .ci/scripts/test_llama_torchao_lowbit.sh | |
| test-llama-runner-linux: | |
| # Test Both linux x86 and linux aarch64 | |
| name: test-llama-runner-linux | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| strategy: | |
| matrix: | |
| dtype: [fp32] | |
| mode: [portable, xnnpack+custom] | |
| runner: [linux.2xlarge, linux.arm64.2xlarge] | |
| docker-image: [executorch-ubuntu-22.04-clang12, executorch-ubuntu-22.04-gcc11-aarch64] | |
| include: | |
| - dtype: bf16 | |
| mode: portable | |
| runner: linux.2xlarge | |
| docker-image: executorch-ubuntu-22.04-clang12 | |
| - dtype: bf16 | |
| mode: portable | |
| runner: linux.arm64.2xlarge | |
| docker-image: executorch-ubuntu-22.04-gcc11-aarch64 | |
| - dtype: bf16 | |
| mode: custom | |
| runner: linux.arm64.2xlarge | |
| docker-image: executorch-ubuntu-22.04-gcc11-aarch64 | |
| # Excluding specific runner + docker image combinations that don't make sense: | |
| # - Excluding the ARM64 gcc image on the x86 runner (linux.2xlarge) | |
| # - Excluding the x86 clang image on the ARM64 runner (linux.arm64.2xlarge) | |
| exclude: | |
| - runner: linux.2xlarge | |
| docker-image: executorch-ubuntu-22.04-gcc11-aarch64 | |
| - runner: linux.arm64.2xlarge | |
| docker-image: executorch-ubuntu-22.04-clang12 | |
| fail-fast: false | |
| with: | |
| runner: ${{ matrix.runner }} | |
| docker-image: ci-image:${{ matrix.docker-image }} | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 900 | |
| script: | | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| DTYPE=${{ matrix.dtype }} | |
| BUILD_TOOL="cmake" | |
| MODE=${{ matrix.mode }} | |
| ARTIFACTS_DIR_NAME="artifacts-to-be-uploaded/${DTYPE}-${MODE}" | |
| ARTIFACTS_DIR_NAME="${ARTIFACTS_DIR_NAME/+/-}" | |
| # Setup executorch | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}" | |
| # Install requirements for export_llama | |
| PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh | |
| # Test llama2 | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh -model stories110M -build_tool "${BUILD_TOOL}" -dtype "${DTYPE}" -mode "${MODE}" -upload "${ARTIFACTS_DIR_NAME}" | |
| test-llama-runner-macos: | |
| name: test-llama-runner-mac | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| strategy: | |
| matrix: | |
| dtype: [fp32] | |
| mode: [mps, coreml, xnnpack+custom+quantize_kv] | |
| fail-fast: false | |
| with: | |
| runner: macos-m1-stable | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 900 | |
| script: | | |
| DTYPE=${{ matrix.dtype }} | |
| MODE=${{ matrix.mode }} | |
| bash .ci/scripts/setup-conda.sh | |
| # Setup executorch | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool cmake | |
| if [[ "${MODE}" == "coreml" ]]; then | |
| # Install coreml delegate | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/install_requirements.sh | |
| echo "Finishing installing coreml." | |
| fi | |
| # Install requirements for export_llama | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/models/llama/install_requirements.sh | |
| # Test llama2 | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh -model stories110M -build_tool cmake -dtype "${DTYPE}" -mode "${MODE}" | |
| test-torchao-huggingface-checkpoints: | |
| name: test-torchao-huggingface-checkpoints | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| strategy: | |
| matrix: | |
| model: [qwen3_4b, phi_4_mini, lfm2_5_1_2b] | |
| runner: [linux.2xlarge] | |
| docker-image: [executorch-ubuntu-22.04-clang12] | |
| backend: [xnnpack] | |
| include: | |
| - model: qwen3_4b | |
| runner: linux.arm64.2xlarge | |
| docker-image: executorch-ubuntu-22.04-gcc11-aarch64 | |
| backend: torchao | |
| - model: phi_4_mini | |
| runner: linux.arm64.2xlarge | |
| docker-image: executorch-ubuntu-22.04-gcc11-aarch64 | |
| backend: torchao | |
| - model: lfm2_5_1_2b | |
| runner: linux.arm64.2xlarge | |
| docker-image: executorch-ubuntu-22.04-gcc11-aarch64 | |
| backend: torchao | |
| fail-fast: false | |
| with: | |
| runner: ${{ matrix.runner }} | |
| docker-image: ci-image:${{ matrix.docker-image }} | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 900 | |
| script: | | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool cmake | |
| if [[ "${{ matrix.backend }}" == "torchao" ]]; then | |
| BUILD_TORCHAO_EXPERIMENTAL=1 TORCHAO_BUILD_CPU_AARCH64=1 TORCHAO_BUILD_KLEIDIAI=1 TORCHAO_ENABLE_ARM_NEON_DOT=1 TORCHAO_PARALLEL_BACKEND=OPENMP pip install --no-build-isolation third-party/ao | |
| fi | |
| pip install -U "huggingface_hub[cli]<1.0" | |
| bash .ci/scripts/test_torchao_huggingface_checkpoints.sh ${{ matrix.model }} --test_with_runner ${{ matrix.backend == 'torchao' && '--use_torchao_kernels' || '' }} | |
| test-multimodal-macos: | |
| if: ${{ !github.event.pull_request.head.repo.fork }} | |
| name: test-multimodal-macos | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| secrets: inherit | |
| strategy: | |
| fail-fast: false | |
| matrix: | |
| model: ["gemma3-4b"] # llava gives segfault so not covering. | |
| with: | |
| secrets-env: EXECUTORCH_HF_TOKEN | |
| runner: macos-15-xlarge | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 90 | |
| script: | | |
| echo "::group::Set up ExecuTorch" | |
| bash .ci/scripts/setup-conda.sh | |
| eval "$(conda shell.bash hook)" | |
| # Install requirements | |
| ${CONDA_RUN} python install_executorch.py | |
| echo "::endgroup::" | |
| echo "::group::Set up Huggingface" | |
| ${CONDA_RUN} pip install -U "huggingface_hub[cli]<1.0" accelerate | |
| ${CONDA_RUN} huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN | |
| OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt) | |
| ${CONDA_RUN} pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION} | |
| ${CONDA_RUN} pip list | |
| echo "::endgroup::" | |
| echo "::group::Test ${{ matrix.model }}" | |
| ${CONDA_RUN} python .ci/scripts/test_huggingface_optimum_model.py --model ${{ matrix.model }} --quantize --recipe xnnpack | |
| echo "::endgroup::" | |
| test-qnn-model: | |
| name: test-qnn-model | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| strategy: | |
| matrix: | |
| dtype: [fp32] | |
| model: [dl3, mv3, mv2, ic4, ic3, vit, mb, w2l, conv_former] | |
| fail-fast: false | |
| with: | |
| runner: linux.2xlarge | |
| docker-image: ci-image:executorch-ubuntu-22.04-qnn-sdk | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 900 | |
| script: | | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool cmake | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh ${{ matrix.model }} "cmake" "qnn" | |
| test-qnn-optimum-model: | |
| name: test-qnn-optimum-model | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| strategy: | |
| matrix: | |
| dtype: [fp32] | |
| model: [cvt, dit, efficientnet, focalnet, mobilevit_v1, mobilevit_v2, pvt, swin, albert, bert, distilbert, roberta] # eurobert requires transfomer >= 4.48.0, skip for now | |
| fail-fast: false | |
| with: | |
| runner: linux.2xlarge | |
| docker-image: ci-image:executorch-ubuntu-22.04-qnn-sdk | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 900 | |
| script: | | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool cmake | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh ${{ matrix.model }} "cmake" "qnn" | |
| test-models-macos-coreml: | |
| name: test-models-macos-coreml | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| strategy: | |
| matrix: | |
| model: [dl3, edsr, efficient_sam, emformer_join, emformer_transcribe, ic3, ic4, mobilebert, mv2, mv3, resnet50, vit, w2l] | |
| fail-fast: false | |
| with: | |
| runner: macos-m1-stable | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 90 | |
| script: | | |
| MODEL_NAME=${{ matrix.model }} | |
| BUILD_TOOL=cmake | |
| BACKEND="coreml-pybind" | |
| # Set model specific overrides | |
| if [[ "${MODEL_NAME}" == "mobilebert" ]]; then | |
| # See https://github.com/pytorch/executorch/issues/12907 | |
| # mobilebert has nan output on FP16, and high MSE on fp32, so we disable runtime test now | |
| BACKEND="coreml" | |
| fi | |
| if [[ "${MODEL_NAME}" == "efficient_sam" ]]; then | |
| # See https://github.com/pytorch/executorch/issues/12906 | |
| # efficient_sam fails to run on CoreML | |
| BACKEND="coreml" | |
| fi | |
| bash .ci/scripts/setup-conda.sh | |
| # Setup MacOS dependencies as there is no Docker support on MacOS atm | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}" | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/install_requirements.sh | |
| echo "Finishing installing coreml." | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}" | |
| test-models-macos-mps: | |
| name: test-models-macos-mps | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| strategy: | |
| fail-fast: false | |
| with: | |
| runner: macos-m1-stable | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 90 | |
| script: | | |
| BUILD_TOOL=cmake | |
| bash .ci/scripts/setup-conda.sh | |
| # Setup MacOS dependencies as there is no Docker support on MacOS atm | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}" | |
| # Build and test mps model | |
| for MODEL_NAME in mv3 ic4 resnet50 edsr mobilebert w2l; do | |
| echo "::group::Exporting mps model: $MODEL_NAME" | |
| PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "mps" | |
| echo "::endgroup::" | |
| done | |
| test-huggingface-transformers-xnnpack: | |
| # NB: Don't run this on fork PRs because they won't have access to the secret and would fail anyway | |
| if: ${{ !github.event.pull_request.head.repo.fork }} | |
| name: test-huggingface-transformers-xnnpack | |
| uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| secrets: inherit | |
| strategy: | |
| matrix: | |
| config: [ | |
| # XNNPack. | |
| llama3.2-1b|xnnpack|--quantize, | |
| qwen3-0.6b|xnnpack|--quantize, | |
| qwen3-1.7b|xnnpack|--quantize, | |
| gemma3-1b|xnnpack|--quantize, | |
| # phi4-mini|xnnpack|--quantize, transformers v5.0.0rc0 introduces a data-dependent branching in transformers/modeling_rope_utils.py:61 | |
| smollm2-135m|xnnpack|--quantize, | |
| smollm3-3b|xnnpack|--quantize | |
| ] | |
| fail-fast: false | |
| with: | |
| secrets-env: EXECUTORCH_HF_TOKEN | |
| runner: linux.2xlarge.memory | |
| docker-image: ci-image:executorch-ubuntu-22.04-clang12 | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 90 | |
| upload-artifact: profiling-artifacts-${{ strategy.job-index }} | |
| script: | | |
| set -eux | |
| IFS='|' read -r MODEL RECIPE QUANTIZE <<< "${{ matrix.config }}" | |
| echo "Model: $MODEL" | |
| echo "Recipe: $RECIPE" | |
| echo "Quantize: $QUANTIZE" | |
| # The generic Linux job chooses to use base env, not the one setup by the image | |
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | |
| conda activate "${CONDA_ENV}" | |
| echo "::group::Setup ExecuTorch" | |
| PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake" | |
| echo "::endgroup::" | |
| echo "::group::Setup Huggingface" | |
| pip install -U "huggingface_hub[cli]<1.0" accelerate | |
| huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN | |
| OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt) | |
| pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION} | |
| echo "::endgroup::" | |
| echo "::group::Test MODEL: $MODEL RECIPE: $RECIPE QUANTIZE: $QUANTIZE" | |
| export OUTPUT_DIR="$(pwd)/${MODEL}_${RECIPE}_${QUANTIZE}" | |
| python .ci/scripts/test_huggingface_optimum_model.py --model "$MODEL" --recipe "$RECIPE" $QUANTIZE --model_dir "$OUTPUT_DIR" | |
| echo "::endgroup::" | |
| # Build executor_runner with ETdump enabled | |
| PYTHON_EXECUTABLE=python cmake -DPYTHON_EXECUTABLE=python \ | |
| -DCMAKE_INSTALL_PREFIX=cmake-out \ | |
| -DEXECUTORCH_ENABLE_LOGGING=1 \ | |
| -DCMAKE_BUILD_TYPE=Release \ | |
| -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \ | |
| -DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON \ | |
| -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ | |
| -DEXECUTORCH_BUILD_EXTENSION_NAMED_DATA_MAP=ON \ | |
| -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ | |
| -DEXECUTORCH_BUILD_XNNPACK=ON \ | |
| -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \ | |
| -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \ | |
| -DEXECUTORCH_BUILD_KERNELS_LLM=ON \ | |
| -DEXECUTORCH_BUILD_DEVTOOLS=ON \ | |
| -DEXECUTORCH_ENABLE_EVENT_TRACER=ON \ | |
| -Bcmake-out . | |
| cmake --build cmake-out -j16 --target install --config Release | |
| echo "::group::Generate artifacts for performance profiling" | |
| ./cmake-out/executor_runner \ | |
| --model_path ${OUTPUT_DIR}/model.pte \ | |
| --etdump_path ${OUTPUT_DIR}/etdump.etdp | |
| export TSV_PATH=artifacts-to-be-uploaded/${MODEL}_op_prof.tsv | |
| mkdir -p $(dirname "$TSV_PATH") | |
| python3 -m devtools.inspector.inspector_cli \ | |
| --etdump_path ${OUTPUT_DIR}/etdump.etdp \ | |
| --tsv_path ${TSV_PATH} | |
| echo "::endgroup::" | |
| test-huggingface-transformers-macos: | |
| # NB: Don't run this on fork PRs because they won't have access to the secret and would fail anyway | |
| if: ${{ !github.event.pull_request.head.repo.fork }} | |
| name: test-huggingface-transformers-macos | |
| uses: pytorch/test-infra/.github/workflows/macos_job.yml@main | |
| permissions: | |
| id-token: write | |
| contents: read | |
| secrets: inherit | |
| # Models below selected based on https://huggingface.co/models?pipeline_tag=text-generation&num_parameters=min:0,max:3B&sort=trending. | |
| strategy: | |
| matrix: | |
| config: [ | |
| # # XNNPack. (Skipping for now due to intermittent segmentation faults, see https://github.com/huggingface/optimum-executorch/issues/122.) | |
| # llama3.2-1b|xnnpack|--quantize, | |
| # qwen3-0.6b|xnnpack|--quantize, | |
| # qwen3-1.7b|xnnpack|--quantize, | |
| # gemma3-1b|xnnpack|--quantize, | |
| # phi4-mini|xnnpack|--quantize, | |
| # smollm2-135m|xnnpack|--quantize, | |
| # smollm3-3b|xnnpack|--quantize, | |
| # qwen3-1.7b|xnnpack|--quantize, | |
| # CoreML. | |
| llama3.2-1b|coreml_fp32_gpu|--quantize, | |
| qwen3-0.6b|coreml_fp32_gpu|--quantize, | |
| smollm2-135m|coreml_fp32_gpu|--quantize, | |
| olmo-1b|coreml_fp32_gpu|--quantize, | |
| bert|coreml_fp32_gpu|--quantize, | |
| distilbert|coreml_fp32_gpu|--quantize | |
| ] | |
| fail-fast: false | |
| with: | |
| secrets-env: EXECUTORCH_HF_TOKEN | |
| runner: macos-15-xlarge | |
| python-version: '3.11' | |
| submodules: 'recursive' | |
| ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | |
| timeout: 90 | |
| script: | | |
| set -eux | |
| IFS='|' read -r MODEL RECIPE QUANTIZE <<< "${{ matrix.config }}" | |
| echo "Model: $MODEL" | |
| echo "Recipe: $RECIPE" | |
| echo "Quantize: $QUANTIZE" | |
| echo "::group::Set up ExecuTorch" | |
| bash .ci/scripts/setup-conda.sh | |
| eval "$(conda shell.bash hook)" | |
| # Install requirements | |
| ${CONDA_RUN} python install_executorch.py | |
| echo "::endgroup::" | |
| echo "::group::Set up Huggingface" | |
| ${CONDA_RUN} pip install -U "huggingface_hub[cli]<1.0" accelerate | |
| ${CONDA_RUN} huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN | |
| OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt) | |
| ${CONDA_RUN} pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION} | |
| ${CONDA_RUN} pip list | |
| echo "::endgroup::" | |
| # Run test | |
| ${CONDA_RUN} python .ci/scripts/test_huggingface_optimum_model.py --model ${MODEL} --recipe ${RECIPE} ${QUANTIZE} | |
  # Exports the stories110M Llama model through the Qualcomm QNN delegate and
  # runs the llama test script, sweeping PT2E quantization schemes.
  test-llama-runner-qnn-linux:
    name: test-llama-runner-qnn-linux
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    permissions:
      id-token: write
      contents: read
    strategy:
      matrix:
        dtype: [fp32]
        # 16-bit and 8-bit activation/weight quantization configs.
        pt2e_quantize: [qnn_16a16w, qnn_8a8w]
        mode: [qnn]
      fail-fast: false
    with:
      runner: linux.2xlarge
      docker-image: ci-image:executorch-ubuntu-22.04-qnn-sdk
      submodules: 'recursive'
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      # NOTE(review): 900 is far larger than the 90-180 used by sibling jobs —
      # confirm this is intentional and not a typo for 90.
      timeout: 900
      script: |
        # The generic Linux job chooses to use base env, not the one setup by the image
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        conda activate "${CONDA_ENV}"
        BUILD_TOOL="cmake"
        DTYPE=${{ matrix.dtype }}
        MODE=${{ matrix.mode }}
        PT2E_QUANTIZE=${{ matrix.pt2e_quantize }}
        ./install_requirements.sh --use-pt-pinned-commit
        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
        PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
        # Setup executorch
        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"
        # Install requirements for export_llama
        PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh
        # Test llama2
        PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh -model stories110M -build_tool "${BUILD_TOOL}" -mode "${MODE}" -dtype "${DTYPE}" -pt2e_quantize "${PT2E_QUANTIZE}"
  # Path filter: the QNN jobs below are only triggered when QNN-related files change.
  # Exposes output `qnn` == 'true' when any QNN-related path changed in this
  # event, for use in downstream jobs' `if:` conditions.
  changes:
    runs-on: ubuntu-latest
    outputs:
      qnn: ${{ steps.filter.outputs.qnn }}
    steps:
      - uses: actions/checkout@v4
      - uses: dorny/paths-filter@v3
        id: filter
        with:
          # `filters` is YAML-in-a-string consumed by paths-filter: one named
          # filter ('qnn') mapping to a list of glob patterns.
          filters: |
            qnn:
              - 'backends/qualcomm/**'
              - 'examples/qualcomm/**'
              - 'examples/models/llama/**'
  # Runs the static-llama QNN eval script, gated on the 'changes' path filter so
  # it only executes when QNN-related files were touched.
  test-static-llama-qnn-eval-linux:
    needs: changes # has dependency on changes jobs defined above
    if: needs.changes.outputs.qnn == 'true'
    name: test-static-llama-qnn-eval-linux
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    permissions:
      id-token: write
      contents: read
    strategy:
      fail-fast: false
      matrix:
        # Each config bundles extra script flags with an eval-score threshold.
        config:
          - name: "baseline"
            flags: ""
            # Passed as --threshold to the eval script; presumably an eval-score
            # bound — see .ci/scripts/test_qnn_static_llama_eval.sh for semantics.
            threshold: 62.0
    with:
      runner: linux.2xlarge
      docker-image: ci-image:executorch-ubuntu-22.04-qnn-sdk
      submodules: 'recursive'
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      timeout: 180
      script: |
        # The generic Linux job chooses to use base env, not the one setup by the image
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        conda activate "${CONDA_ENV}"
        BUILD_TOOL="cmake"
        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
        PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
        # Setup executorch
        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"
        # Setup install_requirements for llama
        PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh
        echo ">>> Running config: ${{ matrix.config.name }}"
        PYTHON_EXECUTABLE=python bash .ci/scripts/test_qnn_static_llama_eval.sh \
          --flags "${{ matrix.config.flags }}" \
          --threshold "${{ matrix.config.threshold }}"
  # Release-mode unit tests, delegated to the repo-local reusable _unittest
  # workflow.
  unittest-release:
    uses: ./.github/workflows/_unittest.yml
    permissions:
      id-token: write
      contents: read
    with:
      build-mode: Release
      build-tool: cmake
      docker-image: ci-image:executorch-ubuntu-22.04-clang12
  # Exports and tests a matrix of models on Windows via the PowerShell CI
  # scripts.
  test-models-windows:
    uses: pytorch/test-infra/.github/workflows/windows_job.yml@main
    strategy:
      fail-fast: false
      matrix:
        model: [mv3, resnet50, vit, mobilebert, emformer_transcribe]
        backend: [portable, xnnpack-q8]
    with:
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      timeout: 60
      script: |
        git config --global http.sslBackend openssl
        git submodule update --init --recursive
        conda init powershell
        # The \$ escapes stop bash from expanding the PowerShell variables
        # inside this double-quoted -Command string; PowerShell receives the
        # bare $ names.
        powershell -Command "& {
          Set-PSDebug -Trace 1
          \$ErrorActionPreference = 'Stop'
          \$PSNativeCommandUseErrorActionPreference = \$true
          .ci/scripts/setup-windows.ps1
          .ci/scripts/test_model.ps1 -modelName ${{ matrix.model }} -backend ${{ matrix.backend }}
        }"
  # End-to-end Cortex-M test: sets up the Arm baremetal toolchain, builds the
  # cortex-m test runner, then exports the model and runs it on the FVP
  # simulator.
  test-cortex-m-e2e:
    name: test-cortex-m-e2e
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    strategy:
      matrix:
        model: [mv2, mv3]
      fail-fast: false
    permissions:
      id-token: write
      contents: read
    with:
      runner: linux.2xlarge.memory
      docker-image: ci-image:executorch-ubuntu-22.04-arm-sdk
      submodules: 'recursive'
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      timeout: 120
      script: |
        # The generic Linux job chooses to use base env, not the one setup by the image
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        conda activate "${CONDA_ENV}"
        source .ci/scripts/utils.sh
        install_executorch "--use-pt-pinned-commit"
        # Install arm dependencies
        .ci/scripts/setup-arm-baremetal-tools.sh
        # Put the just-installed Arm toolchain on PATH for this shell.
        source examples/arm/arm-scratch/setup_path.sh
        # Build cortex-m test runner with bundled IO support
        backends/cortex_m/test/build_test_runner.sh
        # Export model and run on FVP
        bash .ci/scripts/test_cortex_m_e2e.sh ${{ matrix.model }}