-
Notifications
You must be signed in to change notification settings - Fork 31
/
CMakeLists.txt
153 lines (126 loc) · 5.2 KB
/
CMakeLists.txt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
# cmake_minimum_required() must be the first command executed: it establishes
# the policy defaults that govern everything after it (including message()).
cmake_minimum_required(VERSION 3.13.4)

message(STATUS "TPP Project CMakeLists.txt BEGIN")

# CMP0116: Ninja generators transform `DEPFILE`s from `add_custom_command()`
# New in CMake 3.20. https://cmake.org/cmake/help/latest/policy/CMP0116.html
# NOTE(review): OLD is kept deliberately for compatibility with LLVM/MLIR
# TableGen depfile handling; revisit once the minimum LLVM/CMake versions
# allow the NEW behavior.
if(POLICY CMP0116)
cmake_policy(SET CMP0116 OLD)
endif()

project(tpp-dialect LANGUAGES CXX C)

# Project-wide C++ standard; cached so users can override at configure time.
set(CMAKE_CXX_STANDARD 17 CACHE STRING "C++ standard to conform to")

# GPU runtime selection: empty string disables GPU support; "cuda" is the
# only supported backend (see the TPP_GPU checks further below).
set(TPP_GPU "" CACHE STRING "Enables GPU runtime (default: '')")
set_property(CACHE TPP_GPU PROPERTY STRINGS "" "cuda")
# Build-mode detection: when this directory is the top-level CMake source we
# build out-of-tree against an installed LLVM/MLIR; otherwise we are embedded
# in the LLVM tree and reuse the variables the parent build already set up.
if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
message(STATUS "TPP-MLIR out-of-tree build.")
# MLIRConfig.cmake transitively loads LLVMConfig.cmake, so both MLIR_DIR and
# LLVM_DIR are populated after this single find_package() call.
find_package(MLIR REQUIRED CONFIG)
message(STATUS "Using MLIRConfig.cmake in: ${MLIR_DIR}")
message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")
# Place binaries/libraries in the conventional LLVM layout under our own
# build tree instead of the installed LLVM's directories.
set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/bin)
set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/lib)
list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}")
list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
# LLVM/MLIR helper modules (add_mlir_dialect, tablegen rules, option
# handling); these come from the CMAKE_MODULE_PATH entries appended above.
include(TableGen)
include(AddLLVM)
include(AddMLIR)
include(HandleLLVMOptions)
else()
message(STATUS "TPP-MLIR in-tree build.")
# In-tree: derive MLIR source/include locations from the monorepo layout
# relative to LLVM_MAIN_SRC_DIR / LLVM_BINARY_DIR supplied by the parent.
set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir)
set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include)
set(MLIR_GENERATED_INCLUDE_DIR ${LLVM_BINARY_DIR}/tools/mlir/include)
# MLIR_INCLUDE_DIRS is a ;-list combining checked-in and generated headers.
set(MLIR_INCLUDE_DIRS "${MLIR_INCLUDE_DIR};${MLIR_GENERATED_INCLUDE_DIR}")
endif()
set(MLIR_BINARY_DIR ${CMAKE_BINARY_DIR})
# Make this project's own helper modules (openmp, xsmm, sanitizers, ...)
# discoverable by include().
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules")
# Checked-in headers vs. tablegen-generated headers in the build tree.
set(TPP_MAIN_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/include")
set(TPP_GEN_INCLUDE_DIR "${PROJECT_BINARY_DIR}/include")
# NOTE(review): directory-scoped include/link/definition commands follow the
# LLVM/MLIR build convention; every target defined in the subdirectories
# below inherits these. Migrating to target_* would require touching those
# subdirectories as well.
include_directories(${LLVM_INCLUDE_DIRS})
include_directories(${MLIR_INCLUDE_DIRS})
include_directories(${TPP_MAIN_INCLUDE_DIR})
include_directories(${TPP_GEN_INCLUDE_DIR})
link_directories(${LLVM_BUILD_LIBRARY_DIR})
add_definitions(${LLVM_DEFINITIONS})
# Default lit options: succinct output, show failing tests verbosely.
set(LLVM_LIT_ARGS "-sv" CACHE STRING "lit default options")
# Third-party/runtime dependency setup, pulled from cmake/modules.
# OpenMP support (compiler flags / runtime discovery).
include(openmp)
# libxsmm
include(xsmm)
message (STATUS "LIBXSMM Include dir: ${XSMM_INCLUDE_DIRS}")
# LIBXSMM-DNN companion library (provides the xsmm_dnn_* benchmark drivers).
include(xsmm-dnn)
message (STATUS "LIBXSMM-DNN Include dir: ${XSMM_DNN_INCLUDE_DIRS}")
# onednn
include(one-dnn)
# Sanitizers (include as late as possible aka append)
include(sanitizers)
# Sub directories
# Order matters: tablegen'd headers (include) must be set up before the
# libraries (lib) and tools that consume them; tests come last.
add_subdirectory(include)
add_subdirectory(lib)
add_subdirectory(runtime)
add_subdirectory(tools)
add_subdirectory(test)
# Benchmarks
set(BENCHMARK_DIR "${PROJECT_SOURCE_DIR}/benchmarks")
set(CONFIG_DIR "${BENCHMARK_DIR}/config")
# Run baseline benchmarks with default iterations to track simple performance
set(BENCH_CFGS
    ${CONFIG_DIR}/base/base.json
    ${CONFIG_DIR}/base/pack.json
    ${CONFIG_DIR}/base/mha.json
    ${CONFIG_DIR}/base/named-ops.json
)
# Single quotes are not quoting characters in CMake, so the previous glue ','
# embedded literal apostrophes in the joined string (the shell happened to
# strip them). Use a real comma.
string(JOIN "," BENCH_CFGS_STR ${BENCH_CFGS})
# Run a small set of benchmarks with small iterations to test the benchmarks and run locally on small machines
add_custom_target(benchmarks
    COMMAND ${BENCHMARK_DIR}/driver.py -v --build ${PROJECT_BINARY_DIR} -n 10
            -c ${BENCH_CFGS_STR}
    DEPENDS tpp-opt tpp-run xsmm_dnn_mlp
    WORKING_DIRECTORY ${BENCHMARK_DIR}
    # COMMENT takes a single argument; unquoted, only the last word survived.
    COMMENT "Run Base Benchmarks"
    VERBATIM
)
# Run OpenMP benchmarks with default iterations to track simple performance
set(BENCH_OMP_CFGS
    ${CONFIG_DIR}/omp/dnn-fp32.json
    ${CONFIG_DIR}/omp/dnn-bf16.json
    ${CONFIG_DIR}/omp/mlir-fp32.json
    ${CONFIG_DIR}/omp/mlir-bf16.json
    ${CONFIG_DIR}/omp/torch-dynamo.json
)
# Plain comma glue: single quotes ('',') are literal characters in CMake and
# would leak apostrophes into the -c argument.
string(JOIN "," BENCH_OMP_CFGS_STR ${BENCH_OMP_CFGS})
add_custom_target(benchmarks-omp
    COMMAND ${BENCHMARK_DIR}/driver.py -v --build ${PROJECT_BINARY_DIR} -n 10
            -c ${BENCH_OMP_CFGS_STR}
    DEPENDS tpp-opt tpp-run xsmm_dnn_mlp
    WORKING_DIRECTORY ${BENCHMARK_DIR}
    # Quoted: an unquoted multi-word COMMENT keeps only the last word.
    COMMENT "Run Benchmarks"
    VERBATIM
)
# GPU Benchmarks
if (TPP_GPU)
  if (TPP_GPU MATCHES "cuda")
    set(BENCH_CUDA_CFGS
        ${CONFIG_DIR}/GPU/cuda.json
    )
    set(BENCH_CUDA_DEPS
        bench_cuda_matmul
    )
  endif()
  # NOTE(review): for a truthy TPP_GPU other than "cuda" the config list is
  # empty and the driver is invoked with a bare -c; the STRINGS property on
  # TPP_GPU only advertises "cuda", but it is not enforced — confirm intent.
  # Plain comma glue: single quotes are literal characters in CMake.
  string(JOIN "," BENCH_GPU_CFGS_STR ${BENCH_CUDA_CFGS})
  # Run a small set of benchmarks with small iterations to test the benchmarks and run locally on small machines
  add_custom_target(benchmarks-gpu
      COMMAND ${BENCHMARK_DIR}/driver.py -v --build ${PROJECT_BINARY_DIR} -n 10
              -c ${BENCH_GPU_CFGS_STR}
      DEPENDS tpp-opt tpp-run ${BENCH_CUDA_DEPS}
      WORKING_DIRECTORY ${BENCHMARK_DIR}
      # Quoted: an unquoted multi-word COMMENT keeps only the last word.
      COMMENT "Run Quick GPU Benchmarks"
      VERBATIM
  )
endif()
# Run PyTorch Dynamo generated models as benchmarks
set(BENCH_PT_CFGS
    ${CONFIG_DIR}/pytorch/torch_dynamo.json
)
# Plain comma glue: single quotes are literal characters in CMake and would
# leak apostrophes into the -c argument.
string(JOIN "," BENCH_PT_CFGS_STR ${BENCH_PT_CFGS})
add_custom_target(benchmarks-pt
    COMMAND ${BENCHMARK_DIR}/driver.py -v --build ${PROJECT_BINARY_DIR} -n 10
            -c ${BENCH_PT_CFGS_STR}
    DEPENDS tpp-opt tpp-run
    WORKING_DIRECTORY ${BENCHMARK_DIR}
    # Quoted: an unquoted multi-word COMMENT keeps only the last word.
    COMMENT "Run PyTorch Models"
    VERBATIM
)
# Lint helper targets. Each tool gets an in-place reformat target
# (script run with -i) and a check-only target (script run bare).
set(CI_DIR "${PROJECT_SOURCE_DIR}/scripts/ci")
foreach(lint_tool python clang)
  add_custom_target(reformat-${lint_tool} ${CI_DIR}/lint-${lint_tool}.sh -i)
  add_custom_target(check-${lint_tool} ${CI_DIR}/lint-${lint_tool}.sh)
endforeach()
# Aggregate targets: reformat everything / check everything.
add_custom_target(reformat DEPENDS reformat-clang reformat-python)
add_custom_target(check-format DEPENDS check-clang check-python)