Skip to content

Create unified script and workflow for llama-fast models validation #4

Create unified script and workflow for llama-fast models validation

Create unified script and workflow for llama-fast models validation #4

Workflow file for this run

# CI workflow: gathers the model test matrix, then runs eager and
# ExecuTorch (.pte) inference for each model on Linux and macOS runners.
name: executorch

on:
  push:
    branches:
      - main
  pull_request:
  workflow_dispatch:

jobs:
  # Emits the JSON job matrix consumed by the test jobs below.
  gather-models:
    runs-on: ubuntu-22.04
    outputs:
      models: ${{ steps.gather-models.outputs.models }}
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: 'false'
      - uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Extract the list of models to test
        id: gather-models
        run: |
          set -eux
          PYTHONPATH="${PWD}" python ./scripts/gather_test_models.py

  test-models-mobile-linux:
    name: test-models-mobile-linux
    needs: gather-models
    strategy:
      matrix: ${{ fromJSON(needs.gather-models.outputs.models) }}
    runs-on: "32-core-ubuntu"
    env:
      LLAMA_FAST_ROOT: ${{ github.workspace }}
      # Fix: CHECKPOINT_NAME was read by the "Run inference" step but never
      # defined anywhere; export it from the matrix so the checkpoint paths
      # resolve (same matrix field the download step already uses).
      CHECKPOINT_NAME: ${{ matrix.checkpoint_name }}
    steps:
      - name: Checkout repo
        uses: actions/checkout@v3
      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Print machine info
        run: |
          echo "Platform: $(uname -s)"
          echo "Processor type: $(uname -p)"
          echo "Number of processors: $(nproc)"
          echo "RAM: $(free -h | awk '/Mem/ {print $2}')"
          echo "Disk space: $(df -h --total | awk '/total/ {print $2}')"
      - name: Install ExecuTorch
        run: |
          bash ${LLAMA_FAST_ROOT}/scripts/install_et.sh
      # NOTE(review): checkpoint download is dispatched through install_et.sh
      # with a subcommand — confirm this is the intended entry point and not a
      # separate download script.
      - name: Download checkpoints
        run: |
          bash ${LLAMA_FAST_ROOT}/scripts/install_et.sh download_checkpoints ${{ matrix.checkpoint_name }} ${{ matrix.resources }}
      # Fixed step-name typo ("Run inferencec") for consistency with the
      # macOS job. Runs eager generation, exports a .pte, then re-runs
      # generation through ExecuTorch; outputs are echoed for the logs.
      - name: Run inference
        run: |
          pushd ${LLAMA_FAST_ROOT}
          export CHECKPOINT_PATH=${LLAMA_FAST_ROOT}/checkpoints/${CHECKPOINT_NAME}/${CHECKPOINT_NAME}.pt
          export MODEL_NAME=${CHECKPOINT_NAME}
          python generate.py --checkpoint-path ${CHECKPOINT_PATH} --temperature 0 > ${PWD}/output_eager
          cat ${PWD}/output_eager
          python export.py --checkpoint-path ${CHECKPOINT_PATH} --output-pte-path ${PWD}/${MODEL_NAME}.pte
          python generate.py --checkpoint-path ${CHECKPOINT_PATH} --temperature 0 --pte-path ${PWD}/${MODEL_NAME}.pte > ${PWD}/output_et
          cat ${PWD}/output_et
          echo "Tests complete."

  # NOTE(review): GitHub-hosted "macos-12" runners are x86_64, not Apple M1 —
  # confirm the runner label matches the job's intent (macos-14 is arm64).
  test-models-mobile-macos-m1:
    name: test-models-mobile-macos-m1
    needs: gather-models
    strategy:
      matrix: ${{ fromJSON(needs.gather-models.outputs.models) }}
    runs-on: "macos-12"
    env:
      LLAMA_FAST_ROOT: ${{ github.workspace }}
      # Fix: CHECKPOINT_NAME was read by the "Run inference" step but never
      # defined anywhere; export it from the matrix so the checkpoint paths
      # resolve (same matrix field the download step already uses).
      CHECKPOINT_NAME: ${{ matrix.checkpoint_name }}
    steps:
      - name: Checkout repo
        uses: actions/checkout@v3
      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Print machine info
        run: |
          echo "Platform: $(uname -s)"
          echo "Processor type: $(uname -p)"
          echo "Number of processors: $(sysctl -n hw.ncpu)"
          echo "RAM: $(sysctl -n hw.memsize | awk '{print $0/1024/1024/1024"g"}')"
          echo "Disk space: $(df -h / | awk 'NR==2 {print $2}')"
      - name: Install ExecuTorch
        run: |
          bash ${LLAMA_FAST_ROOT}/scripts/install_et.sh
      # NOTE(review): checkpoint download is dispatched through install_et.sh
      # with a subcommand — confirm this is the intended entry point and not a
      # separate download script.
      - name: Download checkpoints
        run: |
          bash ${LLAMA_FAST_ROOT}/scripts/install_et.sh download_checkpoints ${{ matrix.checkpoint_name }} ${{ matrix.resources }}
      # Runs eager generation, exports a .pte, then re-runs generation
      # through ExecuTorch; outputs are echoed for the logs.
      - name: Run inference
        run: |
          pushd ${LLAMA_FAST_ROOT}
          export CHECKPOINT_PATH=${LLAMA_FAST_ROOT}/checkpoints/${CHECKPOINT_NAME}/${CHECKPOINT_NAME}.pt
          export MODEL_NAME=${CHECKPOINT_NAME}
          python generate.py --checkpoint-path ${CHECKPOINT_PATH} --temperature 0 > ${PWD}/output_eager
          cat ${PWD}/output_eager
          python export.py --checkpoint-path ${CHECKPOINT_PATH} --output-pte-path ${PWD}/${MODEL_NAME}.pte
          python generate.py --checkpoint-path ${CHECKPOINT_PATH} --temperature 0 --pte-path ${PWD}/${MODEL_NAME}.pte > ${PWD}/output_et
          cat ${PWD}/output_et
          echo "Tests complete."