-
Notifications
You must be signed in to change notification settings - Fork 166
/
launch.sh
executable file
·314 lines (275 loc) · 10.5 KB
/
launch.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
#!/usr/bin/env bash
# Launch a cccl development container, either directly in Docker or through
# a VSCode dev-container URL. See print_help below for usage.
set -euo pipefail
# Ensure the script is being executed in the cccl/ root
# (one directory above wherever this script file lives).
cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/..";
# Print usage information for this script to stdout.
print_help() {
  cat <<EOF
Usage: $0 [-c|--cuda <CUDA version>] [-H|--host <Host compiler>] [-d|--docker]
Launch a development container. If no CUDA version or Host compiler are specified,
the top-level devcontainer in .devcontainer/devcontainer.json will be used.

Options:
 -c, --cuda Specify the CUDA version. E.g., 12.2
 --cuda-ext Use a docker image with extended CTK libraries.
 -H, --host Specify the host compiler. E.g., gcc12
 -d, --docker Launch the development environment in Docker directly without using VSCode.
 --gpus gpu-request GPU devices to add to the container ('all' to pass all GPUs).
 -e, --env list Set additional container environment variables.
 -v, --volume list Bind mount a volume.
 -h, --help Display this help message and exit.
EOF
}
# Assign variable one scope above the caller
# Usage: local "$1" && _upvar $1 "value(s)"
# Param: $1 Variable name to assign value to
# Param: $* Value(s) to assign. If multiple values, an array is
# assigned, otherwise a single value is assigned.
# See: http://fvue.nl/wiki/Bash:_Passing_variables_by_reference
_upvar() {
  # `unset -v` removes this function's view of the variable so the eval
  # below writes into the caller's scope (bash dynamic scoping); it only
  # fails — skipping the assignment — if the variable is read-only.
  if unset -v "$1"; then
    if (( $# == 2 )); then
      # Exactly one value: scalar assignment.
      eval $1=\"\$2\";
    else
      # Zero or multiple values: array assignment.
      eval $1=\(\"\${@:2}\"\);
    fi;
  fi
}
# Parse command-line options into caller-scope variables:
#   cuda_version, cuda_ext, host_compiler, gpu_request, docker_mode,
#   env_vars[] and volumes[] (the latter two keep the flag itself so the
#   pairs can be spliced straight into `docker run` arguments).
# The LAST argument is the NAME of a caller variable that receives (via
# _upvar) every argument remaining after `--`.
parse_options() {
  local -;
  set -euo pipefail;
  # Read the name of the variable in which to return unparsed arguments
  local UNPARSED="${!#}";
  # Splice the unparsed arguments variable name from the arguments list
  set -- "${@:1:$#-1}";
  local OPTIONS=c:e:H:dhv:
  local LONG_OPTIONS=cuda:,cuda-ext,env:,host:,gpus:,volume:,docker,help
  # Declare separately from the assignment so getopt's exit status is not
  # masked by `local` (SC2155): with `local x="$(getopt …)"`, $? reflects
  # `local` (always 0) and bad options would never be caught.
  local PARSED_OPTIONS
  if ! PARSED_OPTIONS="$(getopt -n "$0" -o "${OPTIONS}" --long "${LONG_OPTIONS}" -- "$@")"; then
    # getopt has already printed a diagnostic to stderr.
    exit 1
  fi
  # Re-split getopt's normalized, quoted output into positional parameters.
  eval set -- "${PARSED_OPTIONS}"
  while true; do
    case "$1" in
      -c|--cuda)
        cuda_version="$2"
        shift 2
        ;;
      --cuda-ext)
        cuda_ext=true
        shift
        ;;
      -e|--env)
        env_vars+=("$1" "$2")
        shift 2
        ;;
      -H|--host)
        host_compiler="$2"
        shift 2
        ;;
      --gpus)
        gpu_request="$2"
        shift 2
        ;;
      -d|--docker)
        docker_mode=true
        shift
        ;;
      -h|--help)
        print_help
        exit 0
        ;;
      -v|--volume)
        volumes+=("$1" "$2")
        shift 2
        ;;
      --)
        shift
        # Hand everything after `--` back to the caller's named variable.
        _upvar "${UNPARSED}" "${@}"
        break
        ;;
      *)
        echo "Invalid option: $1"
        print_help
        exit 1
        ;;
    esac
  done
}
# shellcheck disable=SC2155
# Run the devcontainer described by "${path}/devcontainer.json" directly with
# `docker run`, emulating the parts of the devcontainer spec this script
# needs (image, workspaceFolder, remoteUser, runArgs, initializeCommand,
# containerEnv, mounts, hostRequirements.gpu).
# Reads caller-scope vars: path, gpu_request, volumes[], env_vars[].
# Replaces the current process via `exec`; does not return.
launch_docker() {
  local -;
  set -euo pipefail
  # Filter: expand devcontainer.json variable references found on stdin.
  # ${localEnv:VAR:default} is rewritten into shell ${VAR:-default} syntax
  # so it gets expanded later when the containing string is evaluated.
  inline_vars() {
    cat - \
      `# inline local workspace folder` \
      | sed "s@\${localWorkspaceFolder}@$(pwd)@g" \
      `# inline local workspace folder basename` \
      | sed "s@\${localWorkspaceFolderBasename}@$(basename "$(pwd)")@g" \
      `# inline container workspace folder` \
      | sed "s@\${containerWorkspaceFolder}@${WORKSPACE_FOLDER:-}@g" \
      `# inline container workspace folder basename` \
      | sed "s@\${containerWorkspaceFolderBasename}@$(basename "${WORKSPACE_FOLDER:-}")@g" \
      `# translate local envvars to shell syntax` \
      | sed -r 's/\$\{localEnv:([^\:]*):?(.*)\}/${\1:-\2}/g'
  }
  # Join pre-quoted key arguments into a python subscript chain,
  # e.g. '"hostRequirements"' '"gpu"' -> ["hostRequirements"]["gpu"]
  args_to_path() {
    local -a keys=("${@}")
    keys=("${keys[@]/#/[}")
    keys=("${keys[@]/%/]}")
    echo "$(IFS=; echo "${keys[*]}")"
  }
  # Print the value at the given JSON path of stdin (empty on any error).
  json_string() {
    python3 -c "import json,sys; print(json.load(sys.stdin)$(args_to_path "${@}"))" 2>/dev/null | inline_vars
  }
  # Print each element of the JSON array at the given path, double-quoted,
  # one per line (empty on any error).
  json_array() {
    python3 -c "import json,sys; [print(f'\"{x}\"') for x in json.load(sys.stdin)$(args_to_path "${@}")]" 2>/dev/null | inline_vars
  }
  # Print key="value" lines for the JSON object at the given path.
  json_map() {
    python3 -c "import json,sys; [print(f'{k}=\"{v}\"') for k,v in json.load(sys.stdin)$(args_to_path "${@}").items()]" 2>/dev/null | inline_vars
  }
  # Read the "devcontainer.metadata" label baked into the docker image.
  devcontainer_metadata_json() {
    docker inspect --type image --format '{{json .Config.Labels}}' "$DOCKER_IMAGE" \
      | json_string '"devcontainer.metadata"'
  }

  ###
  # Read relevant values from devcontainer.json
  ###
  local devcontainer_json="${path}/devcontainer.json";
  # Read image
  local DOCKER_IMAGE="$(json_string '"image"' < "${devcontainer_json}")"
  # Always pull the latest copy of the image
  docker pull "$DOCKER_IMAGE"
  # Read workspaceFolder
  local WORKSPACE_FOLDER="$(json_string '"workspaceFolder"' < "${devcontainer_json}")"
  # Read remoteUser
  local REMOTE_USER="$(json_string '"remoteUser"' < "${devcontainer_json}")"
  # If remoteUser isn't in our devcontainer.json, read it from the image's "devcontainer.metadata" label
  # (the label is a JSON array; "-1" indexes its last element)
  if test -z "${REMOTE_USER:-}"; then
    REMOTE_USER="$(devcontainer_metadata_json | json_string "-1" '"remoteUser"')"
  fi
  # Read runArgs
  local -a RUN_ARGS="($(json_array '"runArgs"' < "${devcontainer_json}"))"
  # Read initializeCommand
  local -a INITIALIZE_COMMAND="($(json_array '"initializeCommand"' < "${devcontainer_json}"))"
  # Read containerEnv, turning each key="value" line into a `--env` pair
  local -a ENV_VARS="($(json_map '"containerEnv"' < "${devcontainer_json}" | sed -r 's/(.*)=(.*)/--env \1=\2/'))"
  # Read mounts + workspaceMount, turning each into a `--mount <spec>` pair.
  # tee fans the file into both extractors; its own stdout is discarded.
  local -a MOUNTS="($(
    tee < "${devcontainer_json}" \
      1>/dev/null \
      >(json_array '"mounts"') \
      >(json_string '"workspaceMount"') \
    | xargs -r -I% echo --mount '%'
  ))"

  ###
  # Update run arguments and container environment variables
  ###
  # Only pass `-it` if the shell is a tty (and never in CI)
  if ! ${CI:-'false'} && tty >/dev/null 2>&1 && (exec </dev/tty); then
    RUN_ARGS+=("-it")
  fi
  # Add --rm and --init unless runArgs already supplied them
  for flag in rm init; do
    if [[ " ${RUN_ARGS[*]} " != *" --${flag} "* ]]; then
      RUN_ARGS+=("--${flag}")
    fi
  done
  # Prefer the user-provided --gpus argument
  if test -n "${gpu_request:-}"; then
    RUN_ARGS+=(--gpus "${gpu_request}")
  else
    # Otherwise read and infer from hostRequirements.gpu:
    # true -> always pass GPUs; optional -> only if the NVIDIA container
    # runtime is installed on the host.
    local GPU_REQUEST="$(json_string '"hostRequirements"' '"gpu"' < "${devcontainer_json}")"
    if test "${GPU_REQUEST:-false}" = true; then
      RUN_ARGS+=(--gpus all)
    elif test "${GPU_REQUEST:-false}" = optional && \
         command -v nvidia-container-runtime >/dev/null 2>&1; then
      RUN_ARGS+=(--gpus all)
    fi
  fi
  RUN_ARGS+=(--workdir "${WORKSPACE_FOLDER:-/home/coder/cccl}")
  # When a remote user is known, start as root and hand off to the repo's
  # entrypoint, passing the host uid/gid so it can remap the container user.
  if test -n "${REMOTE_USER:-}"; then
    ENV_VARS+=(--env NEW_UID="$(id -u)")
    ENV_VARS+=(--env NEW_GID="$(id -g)")
    ENV_VARS+=(--env REMOTE_USER="$REMOTE_USER")
    RUN_ARGS+=(-u root:root)
    RUN_ARGS+=(--entrypoint "${WORKSPACE_FOLDER:-/home/coder/cccl}/.devcontainer/docker-entrypoint.sh")
  fi
  # Forward the host's ssh-agent socket into the container when present
  if test -n "${SSH_AUTH_SOCK:-}" && test -e "${SSH_AUTH_SOCK:-}"; then
    ENV_VARS+=(--env "SSH_AUTH_SOCK=/tmp/ssh-auth-sock")
    MOUNTS+=(--mount "source=${SSH_AUTH_SOCK},target=/tmp/ssh-auth-sock,type=bind")
  fi
  # Append user-provided volumes
  if test -v volumes && test ${#volumes[@]} -gt 0; then
    MOUNTS+=("${volumes[@]}")
  fi
  # Append user-provided envvars
  if test -v env_vars && test ${#env_vars[@]} -gt 0; then
    ENV_VARS+=("${env_vars[@]}")
  fi
  # Run the initialize command before starting the container
  # (${arr[*]@Q} re-quotes each element so eval preserves word boundaries)
  if test "${#INITIALIZE_COMMAND[@]}" -gt 0; then
    eval "${INITIALIZE_COMMAND[*]@Q}"
  fi
  exec docker run \
    "${RUN_ARGS[@]}" \
    "${ENV_VARS[@]}" \
    "${MOUNTS[@]}" \
    "${DOCKER_IMAGE}" \
    "$@"
}
# Open the devcontainer at "${path}" (caller-scope var) in VSCode by
# generating a vscode://vscode-remote dev-container URL.
launch_vscode() {
  local -;
  set -euo pipefail;
  # Since Visual Studio Code allows only one instance per `devcontainer.json`,
  # this code prepares a unique temporary directory structure for each launch of a devcontainer.
  # By doing so, it ensures that multiple instances of the same environment can be run
  # simultaneously. The script replicates the `devcontainer.json` from the desired CUDA
  # and compiler environment into this temporary directory, adjusting paths to ensure the
  # correct workspace is loaded. A special URL is then generated to instruct VSCode to
  # launch the development container using this temporary configuration.
  local workspace="$(basename "$(pwd)")"
  local tmpdir="$(mktemp -d)/${workspace}"
  mkdir -p "${tmpdir}"
  mkdir -p "${tmpdir}/.devcontainer"
  cp -arL "${path}/devcontainer.json" "${tmpdir}/.devcontainer"
  # Point the copied config at the real checkout on disk
  sed -i "s@\${localWorkspaceFolder}@$(pwd)@g" "${tmpdir}/.devcontainer/devcontainer.json"
  # From here on, "path" refers to the temporary copy (shadows the caller's)
  local path="${tmpdir}"
  # The URL encodes the config folder path as a hex string
  local hash="$(echo -n "${path}" | xxd -pu - | tr -d '[:space:]')"
  local url="vscode://vscode-remote/dev-container+${hash}/home/coder/cccl"
  # Pick a URL opener: `open` (macOS) or `xdg-open` (Linux).
  # If neither exists, the function silently does nothing.
  local launch=""
  if type open >/dev/null 2>&1; then
    launch="open"
  elif type xdg-open >/dev/null 2>&1; then
    launch="xdg-open"
  fi
  if [ -n "${launch}" ]; then
    echo "Launching VSCode Dev Container URL: ${url}"
    code --new-window "${tmpdir}"
    exec "${launch}" "${url}" >/dev/null 2>&1
  fi
}
# Entry point: parse options, resolve which devcontainer.json to use (sets
# the global `path` consumed by launch_docker/launch_vscode), then hand off
# to Docker or VSCode.
main() {
  local -a unparsed;
  parse_options "$@" unparsed;
  set -- "${unparsed[@]}";
  if [[ -n "${cuda_version:-}" || -n "${host_compiler:-}" ]]; then
    # A specific CTK/host compiler combination was requested; map it to its
    # devcontainer directory and fail loudly if none exists.
    if ${cuda_ext:-false}; then
      cuda_suffix="ext"
    fi
    path=".devcontainer/cuda${cuda_version}${cuda_suffix:-}-${host_compiler}"
    if [[ ! -f "${path}/devcontainer.json" ]]; then
      echo "Unknown CUDA [${cuda_version}] compiler [${host_compiler}] combination"
      echo "Requested devcontainer ${path}/devcontainer.json does not exist"
      exit 1
    fi
  else
    # No CTK/host compiler given: use the default top-level devcontainer.
    path=".devcontainer"
  fi
  if ${docker_mode:-'false'}; then
    launch_docker "$@"
  else
    launch_vscode
  fi
}
main "$@"