-
Notifications
You must be signed in to change notification settings - Fork 224
110 lines (102 loc) · 3.92 KB
/
executorch.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
---
# CI workflow: exports models with ExecuTorch and checks eager vs. exported
# (.pte) inference output on Linux and macOS runners.
name: executorch

on:
  # Run on every push to main, every pull request, and on manual dispatch.
  push:
    branches:
      - main
  pull_request:
  workflow_dispatch:

jobs:
gather-models:
runs-on: ubuntu-22.04
outputs:
models: ${{ steps.gather-models.outputs.models }}
steps:
- uses: actions/checkout@v3
with:
submodules: 'false'
- uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Extract the list of models to test
id: gather-models
run: |
set -eux
PYTHONPATH="${PWD}" python ./scripts/gather_test_models.py
test-models-mobile-linux:
name: test-models-mobile-linux
needs: gather-models
strategy:
matrix: ${{ fromJSON(needs.gather-models.outputs.models) }}
runs-on: "32-core-ubuntu"
env:
LLAMA_FAST_ROOT: ${{ github.workspace }}
steps:
- name: Checkout repo
uses: actions/checkout@v3
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Print machine info
run: |
echo "Platform: $(uname -s)"
echo "Processor type: $(uname -p)"
echo "Number of processors: $(nproc)"
echo "RAM: $(free -h | awk '/Mem/ {print $2}')"
echo "Disk space: $(df -h --total | awk '/total/ {print $2}')"
- name: Install ExecuTorch
run: |
bash ${LLAMA_FAST_ROOT}/scripts/install_et.sh
- name: Download checkpoints
run: |
bash ${LLAMA_FAST_ROOT}/.ci/scripts/download_checkpoints.sh ${{ matrix.checkpoint_name }} "${{ matrix.resources }}"
- name: Run inferencec
run: |
pushd ${LLAMA_FAST_ROOT}
export CHECKPOINT_PATH=${LLAMA_FAST_ROOT}/checkpoints/${CHECKPOINT_NAME}/${CHECKPOINT_NAME}.pt
export MODEL_NAME=${CHECKPOINT_NAME}
python generate.py --checkpoint-path ${CHECKPOINT_PATH} --temperature 0 > ${PWD}/output_eager
cat ${PWD}/output_eager
python export.py --checkpoint-path ${CHECKPOINT_PATH} --output-pte-path ${PWD}/${MODEL_NAME}.pte
python generate.py --checkpoint-path ${CHECKPOINT_PATH} --temperature 0 --pte-path ${PWD}/${MODEL_NAME}.pte > ${PWD}/output_et
cat ${PWD}/output_et
echo "Tests complete."
test-models-mobile-macos:
name: test-models-mobile-macos
needs: gather-models
strategy:
matrix: ${{ fromJSON(needs.gather-models.outputs.models) }}
runs-on: "macos-12"
env:
LLAMA_FAST_ROOT: ${{ github.workspace }}
steps:
- name: Checkout repo
uses: actions/checkout@v3
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Print machine info
run: |
echo "Platform: $(uname -s)"
echo "Processor type: $(uname -p)"
echo "Number of processors: $(sysctl -n hw.ncpu)"
echo "RAM: $(sysctl -n hw.memsize | awk '{print $0/1024/1024/1024"g"}')"
echo "Disk space: $(df -h / | awk 'NR==2 {print $2}')"
- name: Install ExecuTorch
run: |
bash ${LLAMA_FAST_ROOT}/scripts/install_et.sh
- name: Download checkpoints
run: |
bash ${LLAMA_FAST_ROOT}/.ci/scripts/download_checkpoints.sh ${{ matrix.checkpoint_name }} "${{ matrix.resources }}"
- name: Run inference
run: |
pushd ${LLAMA_FAST_ROOT}
export CHECKPOINT_PATH=${LLAMA_FAST_ROOT}/checkpoints/${CHECKPOINT_NAME}/${CHECKPOINT_NAME}.pt
export MODEL_NAME=${CHECKPOINT_NAME}
python generate.py --checkpoint-path ${CHECKPOINT_PATH} --temperature 0 > ${PWD}/output_eager
cat ${PWD}/output_eager
python export.py --checkpoint-path ${CHECKPOINT_PATH} --output-pte-path ${PWD}/${MODEL_NAME}.pte
python generate.py --checkpoint-path ${CHECKPOINT_PATH} --temperature 0 --pte-path ${PWD}/${MODEL_NAME}.pte > ${PWD}/output_et
cat ${PWD}/output_et
echo "Tests complete."