diff --git a/conda/activate.sh b/conda/activate.sh deleted file mode 100755 index 7c0923798..000000000 --- a/conda/activate.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/usr/bin/env bash -source activate /c/ProgramData/Miniconda3/Scripts/activate root \ No newline at end of file diff --git a/conda/build.sh b/conda/build.sh deleted file mode 100755 index ab6176884..000000000 --- a/conda/build.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - - -# NOTE: This is a shim for next versions of the pytorch binary build workflows -# TODO: Remove this once we fully move binary builds on master to GHA - -SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -DESIRED_CUDA=${DESIRED_CUDA:-cpu} bash ${SCRIPTPATH}/build_pytorch.sh diff --git a/conda/build_pytorch.sh b/conda/build_pytorch.sh deleted file mode 100755 index 8edc86bf0..000000000 --- a/conda/build_pytorch.sh +++ /dev/null @@ -1,457 +0,0 @@ -#!/usr/bin/env bash -if [[ -x "/remote/anaconda_token" ]]; then - . /remote/anaconda_token || true -fi - -set -ex - -# TODO there is a LOT of duplicate code everywhere. There's duplicate code for -# mac siloing of pytorch and conda installations with wheel/build_wheel.sh. -# There's also duplicate versioning logic amongst *all* the building scripts - -# Env variables that should be set -# PYTORCH_FINAL_PACKAGE_DIR -# Absolute path (in docker space) to folder where final packages will be -# stored. -# -# MACOS env variables that should be set -# MAC_PACKAGE_WORK_DIR -# Absolute path to a workdir in which to clone an isolated conda -# installation and pytorch checkout. If the pytorch checkout already exists -# then it will not be overwritten. -# -# WINDOWS env variables that should be set -# WIN_PACKAGE_WORK_DIR -# Absolute path to a workdir in which to clone an isolated conda -# installation and pytorch checkout. If the pytorch checkout already exists -# then it will not be overwritten. - -# Function to retry functions that sometimes timeout or have flaky failures -retry () { - $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) -} - -# Parse arguments and determine version -########################################################### -if [[ -n "$DESIRED_CUDA" && -n "$PYTORCH_BUILD_VERSION" && -n "$PYTORCH_BUILD_NUMBER" ]]; then - desired_cuda="$DESIRED_CUDA" - build_version="$PYTORCH_BUILD_VERSION" - build_number="$PYTORCH_BUILD_NUMBER" -else - if [ "$#" -ne 3 ]; then - echo "Illegal number of parameters. Pass cuda version, pytorch version, build number" - echo "CUDA version should be Mm with no dot, e.g. '80'" - echo "DESIRED_PYTHON should be M.m, e.g. '2.7'" - exit 1 - fi - - desired_cuda="$1" - build_version="$2" - build_number="$3" -fi -if [[ "$desired_cuda" != cpu ]]; then - desired_cuda="$(echo $desired_cuda | tr -d cuda. 
)" -fi -echo "Building cuda version $desired_cuda and pytorch version: $build_version build_number: $build_number" - -if [[ "$OSTYPE" == "msys" ]]; then - export PATH="/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:.:$PATH" -fi - -# Version: setup.py uses $PYTORCH_BUILD_VERSION.post$PYTORCH_BUILD_NUMBER if -# PYTORCH_BUILD_NUMBER > 1 -if [[ -n "$OVERRIDE_PACKAGE_VERSION" ]]; then - # This will be the *exact* version, since build_number<1 - build_version="$OVERRIDE_PACKAGE_VERSION" - build_number=0 -fi - -# differentiate package name for cross compilation to avoid collision -if [[ -n "$CROSS_COMPILE_ARM64" || "$(uname -m)" == "arm64" ]]; then - export PYTORCH_LLVM_PACKAGE="" -fi - -export PYTORCH_BUILD_VERSION=$build_version -export PYTORCH_BUILD_NUMBER=$build_number - -if [[ -z "$PYTORCH_BRANCH" ]]; then - PYTORCH_BRANCH="v$build_version" -fi - -# Fill in missing env variables -if [ -z "$ANACONDA_TOKEN" ]; then - # Token needed to upload to the conda channel above - echo "ANACONDA_TOKEN is unset. Please set it in your environment before running this script"; -fi -if [[ -z "$ANACONDA_USER" ]]; then - # This is the channel that finished packages will be uploaded to - ANACONDA_USER='pytorch' -fi -if [[ -z "$GITHUB_ORG" ]]; then - GITHUB_ORG='pytorch' -fi -if [[ -z "$CMAKE_ARGS" ]]; then - # These are passed to tools/build_pytorch_libs.sh::build() - CMAKE_ARGS=() -fi -if [[ -z "$EXTRA_CAFFE2_CMAKE_FLAGS" ]]; then - # These are passed to tools/build_pytorch_libs.sh::build_caffe2() - EXTRA_CAFFE2_CMAKE_FLAGS=() -fi - -if [[ -z "$DESIRED_PYTHON" ]]; then - DESIRED_PYTHON=('3.8') -fi - -if [[ "$OSTYPE" == "darwin"* ]]; then - if [[ "$(uname -m)" == "arm64" ]]; then - DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer - else - DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer - fi -fi -if [[ "$desired_cuda" == 'cpu' ]]; then - cpu_only=1 -else - # Switch desired_cuda to be M.m to be consistent with other scripts in - # pytorch/builder - cuda_nodot="$desired_cuda" - - if [[ ${#cuda_nodot} -eq 2 ]]; then - desired_cuda="${desired_cuda:0:1}.${desired_cuda:1:1}" - elif [[ ${#cuda_nodot} -eq 3 ]]; then - desired_cuda="${desired_cuda:0:2}.${desired_cuda:2:1}" - else - echo "unknown cuda version $cuda_nodot" - exit 1 - fi -fi -if [[ "$OSTYPE" == "darwin"* ]]; then - # Produce macOS builds with torch.distributed support. - # This is enabled by default on Linux, but disabled by default on macOS, - # because it requires an non-bundled compile-time dependency (libuv - # through gloo). This dependency is made available through meta.yaml, so - # we can override the default and set USE_DISTRIBUTED=1. 
- export USE_DISTRIBUTED=1 - - # testing cross compilation - if [[ -n "$CROSS_COMPILE_ARM64" ]]; then - export CMAKE_OSX_ARCHITECTURES=arm64 - export USE_MKLDNN=OFF - export USE_QNNPACK=OFF - export BUILD_TEST=OFF - fi -fi - -echo "Will build for all Pythons: ${DESIRED_PYTHON[@]}" -echo "Will build for CUDA version: ${desired_cuda}" - -SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" - -if [[ -z "$MAC_PACKAGE_WORK_DIR" ]]; then - MAC_PACKAGE_WORK_DIR="$(pwd)/tmp_conda_${DESIRED_PYTHON}_$(date +%H%M%S)" -fi -if [[ "$OSTYPE" == "msys" && -z "$WIN_PACKAGE_WORK_DIR" ]]; then - WIN_PACKAGE_WORK_DIR="$(echo $(pwd -W) | tr '/' '\\')\\tmp_conda_${DESIRED_PYTHON}_$(date +%H%M%S)" -fi - -# Clone the Pytorch repo -########################################################### -if [[ "$(uname)" == 'Darwin' ]]; then - mkdir -p "$MAC_PACKAGE_WORK_DIR" || true - if [[ -n ${GITHUB_ACTIONS} ]]; then - pytorch_rootdir="${PYTORCH_ROOT:-${MAC_PACKAGE_WORK_DIR}/pytorch}" - else - pytorch_rootdir="${MAC_PACKAGE_WORK_DIR}/pytorch" - fi -elif [[ "$OSTYPE" == "msys" ]]; then - mkdir -p "$WIN_PACKAGE_WORK_DIR" || true - if [[ -n ${GITHUB_ACTIONS} ]]; then - pytorch_rootdir="${PYTORCH_ROOT:-(realpath ${WIN_PACKAGE_WORK_DIR})\\pytorch}" - pytorch_rootdir=$(cygpath -m "${pytorch_rootdir}") - else - pytorch_rootdir="$(realpath ${WIN_PACKAGE_WORK_DIR})/pytorch" - fi - git config --system core.longpaths true - # The jobs are seperated on Windows, so we don't need to clone again. - if [[ -d "$NIGHTLIES_PYTORCH_ROOT" ]]; then - cp -R "$NIGHTLIES_PYTORCH_ROOT" "$pytorch_rootdir" - fi -elif [[ -d '/pytorch' ]]; then - # All docker binary builds - pytorch_rootdir='/pytorch' -else - # Shouldn't actually happen anywhere. Exists for builds outisde of nightly - # infrastructure - pytorch_rootdir="$(pwd)/root_${GITHUB_ORG}pytorch${PYTORCH_BRANCH}" -fi -if [[ ! 
-d "$pytorch_rootdir" ]]; then - git clone "https://github.com/${PYTORCH_REPO}/pytorch" "$pytorch_rootdir" - pushd "$pytorch_rootdir" - git checkout "$PYTORCH_BRANCH" - popd -fi -pushd "$pytorch_rootdir" -git submodule update --init --recursive -echo "Using Pytorch from " -git --no-pager log --max-count 1 -popd - -# Windows builds need to install conda -if [[ "$(uname)" == 'Darwin' ]]; then - tmp_conda="${MAC_PACKAGE_WORK_DIR}/conda" - miniconda_sh="${MAC_PACKAGE_WORK_DIR}/miniconda.sh" - rm -rf "$tmp_conda" - rm -f "$miniconda_sh" - retry curl -sS https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh -o "$miniconda_sh" - chmod +x "$miniconda_sh" && \ - "$miniconda_sh" -b -p "$tmp_conda" && \ - rm "$miniconda_sh" - export PATH="$tmp_conda/bin:$PATH" - # TODO(huydhn): We can revert the pin after https://github.com/conda/conda-build/issues/5167 is resolved - retry conda install -yq conda-build=3.28.4 -elif [[ "$OSTYPE" == "msys" ]]; then - export tmp_conda="${WIN_PACKAGE_WORK_DIR}\\conda" - export miniconda_exe="${WIN_PACKAGE_WORK_DIR}\\miniconda.exe" - rm -rf "$tmp_conda" - rm -f "$miniconda_exe" - curl -sSk https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-Windows-x86_64.exe -o "$miniconda_exe" - "$SOURCE_DIR/install_conda.bat" && rm "$miniconda_exe" - pushd $tmp_conda - export PATH="$(pwd):$(pwd)/Library/usr/bin:$(pwd)/Library/bin:$(pwd)/Scripts:$(pwd)/bin:$PATH" - popd - retry conda install -yq conda-build=3.26.1 -fi - -cd "$SOURCE_DIR" - -# Determine which build folder to use -########################################################### -if [[ -n "$TORCH_CONDA_BUILD_FOLDER" ]]; then - build_folder="$TORCH_CONDA_BUILD_FOLDER" -else - if [[ "$OSTYPE" == 'darwin'* ]]; then - build_folder='pytorch' - elif [[ -n "$cpu_only" ]]; then - build_folder='pytorch-cpu' - else - build_folder="pytorch-$cuda_nodot" - fi - build_folder="$build_folder-$build_version" -fi -if [[ ! -d "$build_folder" ]]; then - echo "ERROR: Cannot find the build_folder: $build_folder" - exit 1 -fi -meta_yaml="$build_folder/meta.yaml" -echo "Using conda-build folder $build_folder" - -# Switch between CPU or CUDA configurations -########################################################### -build_string_suffix="$PYTORCH_BUILD_NUMBER" -if [[ -n "$cpu_only" ]]; then - export USE_CUDA=0 - export CONDA_CUDATOOLKIT_CONSTRAINT="" - export CONDA_TRITON_CONSTRAINT="" - export MAGMA_PACKAGE="" - export CUDA_VERSION="0.0" - export CUDNN_VERSION="0.0" - if [[ "$OSTYPE" != "darwin"* ]]; then - build_string_suffix="cpu_${build_string_suffix}" - fi - export PYTORCH_BUILD_VARIANT="cpu" -else - # Switch the CUDA version that /usr/local/cuda points to. This script also - # sets CUDA_VERSION and CUDNN_VERSION - echo "Switching to CUDA version $desired_cuda" - export PYTORCH_BUILD_VARIANT="cuda" - . ./switch_cuda_version.sh "$desired_cuda" - # TODO, simplify after anaconda fixes their cudatoolkit versioning inconsistency. 
- # see: https://github.com/conda-forge/conda-forge.github.io/issues/687#issuecomment-460086164 - if [[ "$desired_cuda" == "12.6" ]]; then - export CONDA_CUDATOOLKIT_CONSTRAINT=" - pytorch-cuda >=12.6,<12.7 # [not osx]" - export MAGMA_PACKAGE=" - magma-cuda126 # [not osx and not win]" - elif [[ "$desired_cuda" == "12.4" ]]; then - export CONDA_CUDATOOLKIT_CONSTRAINT=" - pytorch-cuda >=12.4,<12.5 # [not osx]" - export MAGMA_PACKAGE=" - magma-cuda124 # [not osx and not win]" - elif [[ "$desired_cuda" == "12.1" ]]; then - export CONDA_CUDATOOLKIT_CONSTRAINT=" - pytorch-cuda >=12.1,<12.2 # [not osx]" - export MAGMA_PACKAGE=" - magma-cuda121 # [not osx and not win]" - elif [[ "$desired_cuda" == "11.8" ]]; then - export CONDA_CUDATOOLKIT_CONSTRAINT=" - pytorch-cuda >=11.8,<11.9 # [not osx]" - export MAGMA_PACKAGE=" - magma-cuda118 # [not osx and not win]" - else - echo "unhandled desired_cuda: $desired_cuda" - exit 1 - fi - - if [[ "$OSTYPE" != "msys" ]]; then - # TODO: Remove me when Triton has a proper release channel - TRITON_VERSION=$(cat $pytorch_rootdir/.ci/docker/triton_version.txt) - if [[ -n "$OVERRIDE_PACKAGE_VERSION" && "$OVERRIDE_PACKAGE_VERSION" =~ .*dev.* ]]; then - TRITON_SHORTHASH=$(cut -c1-10 $pytorch_rootdir/.github/ci_commit_pins/triton.txt) - export CONDA_TRITON_CONSTRAINT=" - torchtriton==${TRITON_VERSION}+${TRITON_SHORTHASH} # [py < 313]" - else - export CONDA_TRITON_CONSTRAINT=" - torchtriton==${TRITON_VERSION} # [py < 313]" - fi - fi - - build_string_suffix="cuda${CUDA_VERSION}_cudnn${CUDNN_VERSION}_${build_string_suffix}" -fi - -# Some tricks for sccache with conda builds on Windows -if [[ "$OSTYPE" == "msys" && "$USE_SCCACHE" == "1" ]]; then - rm -rf /c/cb - mkdir -p /c/cb/pytorch_1000000000000 - export CONDA_BLD_PATH="C:\\cb" - export CONDA_BUILD_EXTRA_ARGS="--dirty" -else - export CONDA_BUILD_EXTRA_ARGS="" -fi - -# Build PyTorch with Gloo's TCP_TLS transport -if [[ "$(uname)" == 'Linux' ]]; then - export USE_GLOO_WITH_OPENSSL=1 - - # Defining and Setting _GLIBCXX_USE_CXX11_ABI flag in order to make sure we are setting - # -fabi-version=11 flag in the pytorch CMake lists - export _GLIBCXX_USE_CXX11_ABI=0 -fi - -# Loop through all Python versions to build a package for each -for py_ver in "${DESIRED_PYTHON[@]}"; do - # TODO: Enable TLS support for 3.12 builds (or disable it for the rest - if [[ "$(uname)" == 'Linux' && "${py_ver}" == '3.12' ]]; then - export USE_GLOO_WITH_OPENSSL=0 - fi - - build_string="py${py_ver}_${build_string_suffix}" - folder_tag="${build_string}_$(date +'%Y%m%d')" - - # Create the conda package into this temporary folder. 
This is so we can find - # the package afterwards, as there's no easy way to extract the final filename - # from conda-build - output_folder="out_$folder_tag" - rm -rf "$output_folder" - mkdir "$output_folder" - - # We need to build the compiler activation scripts first on Windows - if [[ "$OSTYPE" == "msys" ]]; then - vs_package="vs$VC_YEAR" - - time VSDEVCMD_ARGS=${VSDEVCMD_ARGS[@]} \ - conda build -c "$ANACONDA_USER" \ - --no-anaconda-upload \ - --output-folder "$output_folder" \ - $vs_package - - cp "$vs_package/conda_build_config.yaml" "pytorch-nightly/conda_build_config.yaml" - fi - - # Output the meta.yaml for easy debugging - echo 'Finalized meta.yaml is' - cat "$meta_yaml" - - # Build the package - echo "Build $build_folder for Python version $py_ver" - conda config --set anaconda_upload no - - if [[ "$OSTYPE" == "msys" ]]; then - # Don't run tests on windows (they were ignored mostly anyways) - NO_TEST="--no-test" - # Fow windows need to keep older conda version - conda install -y conda-package-handling conda==22.9.0 - else - conda install -y conda-package-handling conda==23.5.2 - fi - - echo "Calling conda-build at $(date)" - # TODO: Remove atalman channel once we can wend numpy from - # anaconda or pytorch or pytorch nightly channel - time CMAKE_ARGS=${CMAKE_ARGS[@]} \ - EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \ - PYTORCH_GITHUB_ROOT_DIR="$pytorch_rootdir" \ - PYTORCH_BUILD_STRING="$build_string" \ - PYTORCH_MAGMA_CUDA_VERSION="$cuda_nodot" \ - conda build -c "$ANACONDA_USER" -c atalman \ - ${NO_TEST:-} \ - --no-anaconda-upload \ - --python "$py_ver" \ - --output-folder "$output_folder" \ - --no-test $CONDA_BUILD_EXTRA_ARGS \ - "$build_folder" - echo "Finished conda-build at $(date)" - - # Create a new environment to test in - # TODO these reqs are hardcoded for pytorch-nightly - test_env="env_$folder_tag" - retry conda create -yn "$test_env" python="$py_ver" - source activate "$test_env" - - # Extract the package for testing - ls -lah "$output_folder" - built_package="$(find $output_folder/ -name '*pytorch*.tar.bz2')" - # Set correct platform for cross compiled package - if [[ -n "$CROSS_COMPILE_ARM64" ]]; then - conda convert "$built_package" -p osx-arm64 -f --output-dir "$output_folder" - built_package="$(find $output_folder/osx-arm64 -name '*pytorch*.tar.bz2')" - fi - - # Copy the built package to the host machine for persistence before testing - if [[ -n "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then - mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true - cp "$built_package" "$PYTORCH_FINAL_PACKAGE_DIR/" - fi - - # Install the built package and run tests, unless it's for mac cross compiled arm64 - if [[ -z "$CROSS_COMPILE_ARM64" ]]; then - # Install the package as if from local repo instead of tar.bz2 directly in order - # to trigger runtime dependency installation. 
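The install step described in this comment block (and carried out just below) points conda at the conda-build output directory as a local file:// channel, so the freshly built pytorch package is resolved together with its run dependencies instead of being installed from the .tar.bz2 directly. A minimal standalone sketch of that pattern, with placeholder paths, environment name, and channels:

# Sketch with placeholder names: install a locally built package from a file:// channel
# so conda resolves its run dependencies (installing the .tar.bz2 directly would skip them).
local_channel="$(pwd)/out_folder"            # conda-build --output-folder target
conda create -yn pkg-smoke python=3.10
conda install -yn pkg-smoke -c "file://${local_channel}" pytorch -c pytorch -c nvidia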
See https://github.com/conda/conda/issues/1884 - # Notes: - # - pytorch-nightly is included to install torchtriton - # - nvidia is included for cuda builds, there's no harm in listing the channel for cpu builds - if [[ "$OSTYPE" == "msys" ]]; then - # note the extra slash: `pwd -W` returns `c:/path/to/dir`, we need to add an extra slash for the URI - local_channel="/$(pwd -W)/$output_folder" - else - local_channel="$(pwd)/$output_folder" - fi - - CONDA_CHANNEL="pytorch-test" - if [[ -n "$OVERRIDE_PACKAGE_VERSION" && "$OVERRIDE_PACKAGE_VERSION" =~ .*dev.* ]]; then - CONDA_CHANNEL="pytorch-nightly" - fi - - conda install -y -c "file://$local_channel" pytorch==$PYTORCH_BUILD_VERSION -c pytorch -c numba/label/dev -c $CONDA_CHANNEL -c nvidia - - echo "$(date) :: Running tests" - pushd "$pytorch_rootdir" - if [[ "$cpu_only" == 1 ]]; then - "${SOURCE_DIR}/../run_tests.sh" 'conda' "$py_ver" 'cpu' - else - "${SOURCE_DIR}/../run_tests.sh" 'conda' "$py_ver" "cu$cuda_nodot" - fi - popd - echo "$(date) :: Finished tests" - fi - - # Clean up test folder - source deactivate - conda env remove -yn "$test_env" - rm -rf "$output_folder" -done - -# Cleanup the tricks for sccache with conda builds on Windows -if [[ "$OSTYPE" == "msys" ]]; then - # Please note sometimes we get Device or resource busy during - # this cleanup step. We don't want to fail the build because of this - # hence adding +e, -e around the cleanup step - set +e - rm -rf /c/cb/pytorch_1000000000000 - set -e - unset CONDA_BLD_PATH -fi -unset CONDA_BUILD_EXTRA_ARGS - -unset PYTORCH_BUILD_VERSION -unset PYTORCH_BUILD_NUMBER diff --git a/conda/cpuonly/meta.yaml b/conda/cpuonly/meta.yaml deleted file mode 100644 index c7d5a54dc..000000000 --- a/conda/cpuonly/meta.yaml +++ /dev/null @@ -1,12 +0,0 @@ -package: - name: cpuonly - version: 2.0 - -build: - track_features: - - cpuonly - noarch: generic - -requirements: - run: - - pytorch-mutex 1.0 cpu diff --git a/conda/debugging_pytorch.sh b/conda/debugging_pytorch.sh deleted file mode 100644 index 4cce4f225..000000000 --- a/conda/debugging_pytorch.sh +++ /dev/null @@ -1,22 +0,0 @@ -# builds inside docker image for debugging -# also see setup_ccache.sh - -# copied from pytorch-0.4.1/build.sh -export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;6.1;7.0" -export TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -export NCCL_ROOT_DIR=/usr/local/cuda -export TH_BINARY_BUILD=1 -export USE_STATIC_CUDNN=1 -export USE_STATIC_NCCL=1 -export ATEN_STATIC_CUDA=1 -export USE_CUDA_STATIC_LINK=1 - -. ./switch_cuda_version.sh 9.0 - - -conda install -y cmake numpy=1.17 setuptools pyyaml mkl=2018 mkl-include typing_extension ninja magma-cuda80 -c pytorch - -export CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" -git clone https://github.com/pytorch/pytorch -b nightly2 --recursive -cd pytorch -python setup.py install diff --git a/conda/nccl2/meta.yaml b/conda/nccl2/meta.yaml deleted file mode 100644 index 72a2bfe4c..000000000 --- a/conda/nccl2/meta.yaml +++ /dev/null @@ -1,7 +0,0 @@ -package: - name: nccl2 - version: 1.0 - -build: - track_features: - - nccl2 diff --git a/conda/nnpack/build.sh b/conda/nnpack/build.sh deleted file mode 100644 index 4b611438b..000000000 --- a/conda/nnpack/build.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!bin/bash - -mkdir build -cd build -cmake .. -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=true -make -j$(getconf _NPROCESSORS_CONF) -make install -cd .. - -# These are the files we actually care about. 
If we wanted to -# isolate them, we could make install into a different location -# and then copy them into $PREFIX - -# cp -p include/nnpack.h $PREFIX/include -# cp -p lib/libnnpack.a $PREFIX/lib -# cp -p lib/libpthreadpool.a $PREFIX/lib -# cp -p deps/pthreadpool/include/pthreadpool.h $PREFIX/include diff --git a/conda/nnpack/meta.yaml b/conda/nnpack/meta.yaml deleted file mode 100644 index d77081486..000000000 --- a/conda/nnpack/meta.yaml +++ /dev/null @@ -1,19 +0,0 @@ -package: - name: nnpack - version: "0.0.1" - -# 2018-01-02 -source: - git_url: https://github.com/Maratyszcza/NNPACK.git - git_rev: 73f278cb28a44b59b0b4e2b83f3a47554aeabae0 - -build: - number: 1 - script_env: - - http_proxy - - https_proxy - -about: - home: https://github.com/Maratyszcza/NNPACK - license: BSD-2-Clause - license_file: LICENSE diff --git a/conda/publish_conda.sh b/conda/publish_conda.sh deleted file mode 100755 index a3f6ae8c3..000000000 --- a/conda/publish_conda.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -#set -e - -# Publish conda packages from pytorch-nightly to pytorch - -if [[ -z "$1" ]]; then - echo "Usage ./publish_conda.sh torchaudio==0.3.0" - exit 1 -fi - -export PLATFORMS=('linux-64' 'osx-64') - -for platform in "${PLATFORMS[@]}"; do - for url in $(conda search --platform "$platform" "$1[channel=pytorch-nightly]" --json | jq -r '.[][].url'); do - echo "$url" - file="$(basename "$url")" - curl -L -o "$file" "$url" - anaconda upload -u pytorch "$file" - done -done diff --git a/conda/pytorch-cuda/conda_build_config.yaml b/conda/pytorch-cuda/conda_build_config.yaml deleted file mode 100644 index b3c3453b5..000000000 --- a/conda/pytorch-cuda/conda_build_config.yaml +++ /dev/null @@ -1,8 +0,0 @@ -version: - - 11.7 - - 11.8 - - 12.1 - - 12.4 -target_platform: - - win-64 - - linux-64 diff --git a/conda/pytorch-cuda/meta.yaml b/conda/pytorch-cuda/meta.yaml deleted file mode 100644 index 063a350e1..000000000 --- a/conda/pytorch-cuda/meta.yaml +++ /dev/null @@ -1,85 +0,0 @@ -# Package to manage cuda version in PyTorch. -# -# Windows anaconda packages are packaged differently, -# All dlls are kept within *-dev packages hence we need -# include the dev packages for Windows see: -# https://github.com/pytorch/vision/issues/7185#issuecomment-1420002413 -# -# Please note: Build number should be advanced with -# every deployment. 
After the deployment to production -# use following links to validate the correctness of -# deployment: -# https://conda.anaconda.org/pytorch/noarch/ -# https://conda.anaconda.org/pytorch/noarch/repodata.json -{% set build = 7 %} -{% set cuda_constraints=">=11.7,<11.8" %} -{% set libcufft_constraints=">=10.7.2.50,<10.9.0.58" %} -{% set libcublas_constraints=">=11.10.1.25,<11.11.3.6" %} -{% set libcusolver_constraints=">=11.3.5.50,<11.4.1.48" %} -{% set libcusparse_constraints=">=11.7.3.50,<11.7.5.86" %} -{% set libnpp_constraints=">=11.7.3.21,<11.8.0.86" %} -{% set libnvjpeg_constraints=">=11.7.2.34,<11.9.0.86" %} -{% if version == '11.8' %} -{% set cuda_constraints=">=11.8,<12.0" %} -{% set libcufft_constraints=">=10.9.0.58,<11.0.0.21" %} -{% set libcublas_constraints=">=11.11.3.6,<12.0.1.189" %} -{% set libcusolver_constraints=">=11.4.1.48,<11.4.2.57" %} -{% set libcusparse_constraints=">=11.7.5.86,<12.0.0.76" %} -{% set libnpp_constraints=">=11.8.0.86,<12.0.0.30" %} -{% set libnvjpeg_constraints=">=11.9.0.86,<12.0.0.28" %} -{% elif version == '12.1' %} -{% set cuda_constraints=">=12.1,<12.2" %} -{% set libcufft_constraints=">=11.0.2.4,<11.0.2.54" %} -{% set libcublas_constraints=">=12.1.0.26,<12.1.3.1" %} -{% set libcusolver_constraints=">=11.4.4.55,<11.4.5.107" %} -{% set libcusparse_constraints=">=12.0.2.55,<12.1.0.106" %} -{% set libnpp_constraints=">=12.0.2.50,<12.1.0.40" %} -{% set libnvjpeg_constraints=">=12.1.0.39,<12.2.0.2" %} -{% set libnvjitlink_constraints=">=12.1.105,<12.2.0" %} -{% elif version == '12.4' %} -{% set cuda_constraints=">=12.4,<12.5" %} -{% set libcufft_constraints=">=11.2.1.3,<11.2.3.18" %} -{% set libcublas_constraints=">=12.4.5.8,<12.5.2.13" %} -{% set libcusolver_constraints=">=11.6.1.9,<11.6.2.40" %} -{% set libcusparse_constraints=">=12.3.1.170,<12.4.1.18" %} -{% set libnpp_constraints=">=12.2.5.30,<12.3.0.116" %} -{% set libnvjpeg_constraints=">=12.3.1.117,<12.3.2.38" %} -{% set libnvjitlink_constraints=">=12.4.127,<12.5.40" %} -{% endif %} - -package: - name: pytorch-cuda - version: {{ version }} -build: - number: {{ build }} -requirements: - run: - - cuda-libraries {{ cuda_constraints }} - - cuda-nvtx {{ cuda_constraints }} - - libnvjpeg {{ libnvjpeg_constraints }} - - cuda-cudart {{ cuda_constraints }} - - cuda-cupti {{ cuda_constraints }} - - cuda-nvrtc {{ cuda_constraints }} - - cuda-runtime {{ cuda_constraints }} - - libcufft {{ libcufft_constraints }} - - libcublas {{ libcublas_constraints }} - - libcusolver {{ libcusolver_constraints }} - - libcusparse {{ libcusparse_constraints }} -{% if version >= '12.1' %} - - libnvjitlink {{ libnvjitlink_constraints }} -{% endif %} - - libnpp {{ libnpp_constraints }} - - cuda-libraries-dev {{ cuda_constraints }} # [win64] - - libnvjpeg-dev {{ libnvjpeg_constraints }} # [win64] - - cuda-cudart-dev {{ cuda_constraints }} # [win64] - - cuda-nvrtc-dev {{ cuda_constraints }} # [win64] - - libcufft-dev {{ libcufft_constraints }} # [win64] - - libcublas-dev {{ libcublas_constraints }} # [win64] - - libcusolver-dev {{ libcusolver_constraints }} # [win64] - - libcusparse-dev {{ libcusparse_constraints }} # [win64] - - libnpp-dev {{ libnpp_constraints }} # [win64] -test: - commands: - - echo "pytorch-cuda metapackage is created." -about: - summary: Metapackage to select the PyTorch variant. Use conda's pinning mechanism in your environment to control which variant you want. 
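The pytorch-cuda metapackage above, together with the pytorch-mutex and cpuonly recipes that follow, exists so that the CUDA or CPU variant is chosen at install time through ordinary conda pinning rather than through differently named packages. Illustrative install commands (versions and channels are examples only):

# Select the CUDA 11.8 variant: pytorch-cuda pins a mutually consistent set of CUDA runtime libs.
conda install pytorch pytorch-cuda=11.8 -c pytorch -c nvidia
# Select the CPU variant: cpuonly flips the pytorch-mutex track_features/build-number priority.
conda install pytorch cpuonly -c pytorch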
diff --git a/conda/pytorch-mutex/conda_build_config.yaml b/conda/pytorch-mutex/conda_build_config.yaml
deleted file mode 100644
index e18e53eb4..000000000
--- a/conda/pytorch-mutex/conda_build_config.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-build_variant:
-  - cpu
-  - cuda
diff --git a/conda/pytorch-mutex/meta.yaml b/conda/pytorch-mutex/meta.yaml
deleted file mode 100644
index fa7f916ec..000000000
--- a/conda/pytorch-mutex/meta.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-{% set version = "1.0" %}
-{% set build = 0 %}
-
-{% if build_variant == 'cuda' %}
-# prefer cuda builds via a build number offset
-{% set build = build + 100 %}
-{% endif %}
-
-package:
-  name: pytorch-mutex
-  version: {{ version }}
-build:
-  number: {{ build }}
-  string: {{ build_variant }}
-  noarch: generic
-  # also lower cpu priority with track_features
-  {% if build_variant == 'cpu' %}
-  track_features:
-    - pytorch-mutex
-  {% endif %}
-  run_exports:
-    - {{ pin_subpackage('pytorch-mutex', exact=True) }}
-requirements: {}
-  # None, pytorch should depend on pytorch-mutex
-test:
-  commands:
-    - echo "pytorch-mutex metapackage is created."
-about:
-  summary: Metapackage to select the PyTorch variant. Use conda's pinning mechanism in your environment to control which variant you want.
diff --git a/conda/pytorch-nightly/bld.bat b/conda/pytorch-nightly/bld.bat
deleted file mode 100644
index 78d108816..000000000
--- a/conda/pytorch-nightly/bld.bat
+++ /dev/null
@@ -1,111 +0,0 @@
-@echo off
-
-set TH_BINARY_BUILD=1
-set PYTORCH_BUILD_VERSION=%PKG_VERSION%
-set PYTORCH_BUILD_NUMBER=%PKG_BUILDNUM%
-
-set INSTALL_TEST=0
-
-if "%USE_CUDA%" == "0" (
-    set build_with_cuda=
-) else (
-    set build_with_cuda=1
-    set desired_cuda=%CUDA_VERSION%
-    :: Set up nodot version for use with magma
-    set desired_cuda_nodot=%CUDA_VERSION:.=%
-)
-
-if "%build_with_cuda%" == "" goto cuda_flags_end
-
-set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%desired_cuda%
-set CUDA_BIN_PATH=%CUDA_PATH%\bin
-set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
-set TORCH_CUDA_ARCH_LIST=5.0;6.0;6.1;7.0;7.5;8.0;8.6;9.0
-if "%desired_cuda%" == "11.8" (
-    set TORCH_CUDA_ARCH_LIST=%TORCH_CUDA_ARCH_LIST%;3.7+PTX
-    set TORCH_NVCC_FLAGS=-Xfatbin -compress-all --threads 2
-)
-if "%desired_cuda:~0,3%" == "12." (
-    set TORCH_NVCC_FLAGS=-Xfatbin -compress-all --threads 2
-)
-
-:cuda_flags_end
-
-set DISTUTILS_USE_SDK=1
-
-set libuv_ROOT=%PREFIX%\Library
-echo libuv_ROOT=%libuv_ROOT%
-
-IF "%USE_SCCACHE%" == "1" (
-    mkdir %SRC_DIR%\tmp_bin
-    curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output %SRC_DIR%\tmp_bin\sccache.exe
-    curl -k https://s3.amazonaws.com/ossci-windows/sccache-cl.exe --output %SRC_DIR%\tmp_bin\sccache-cl.exe
-    set "PATH=%SRC_DIR%\tmp_bin;%PATH%"
-    set SCCACHE_IDLE_TIMEOUT=1500
-)
-
-IF "%build_with_cuda%" == "" goto cuda_end
-
-set MAGMA_VERSION=2.5.4
-
-curl https://s3.amazonaws.com/ossci-windows/magma_%MAGMA_VERSION%_cuda%desired_cuda_nodot%_release.7z -k -O
-7z x -aoa magma_%MAGMA_VERSION%_cuda%desired_cuda_nodot%_release.7z -omagma_cuda%desired_cuda_nodot%_release
-set MAGMA_HOME=%cd%\magma_cuda%desired_cuda_nodot%_release
-
-set "PATH=%CUDA_BIN_PATH%;%PATH%"
-
-
-:: randomtemp is used to resolve the intermittent build error related to CUDA.
-:: code: https://github.com/peterjc123/randomtemp-rust
-:: issue: https://github.com/pytorch/pytorch/issues/25393
-::
-:: CMake requires a single command as CUDA_NVCC_EXECUTABLE, so we push the wrappers
-:: randomtemp.exe and sccache.exe into a batch file which CMake invokes.
-curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.4/randomtemp.exe --output %SRC_DIR%\tmp_bin\randomtemp.exe -echo @"%SRC_DIR%\tmp_bin\randomtemp.exe" "%SRC_DIR%\tmp_bin\sccache.exe" "%CUDA_PATH%\bin\nvcc.exe" %%* > "%SRC_DIR%/tmp_bin/nvcc.bat" -cat %SRC_DIR%/tmp_bin/nvcc.bat -set CUDA_NVCC_EXECUTABLE=%SRC_DIR%/tmp_bin/nvcc.bat -:: CMake doesn't accept back-slashes in the path -for /F "usebackq delims=" %%n in (`cygpath -m "%CUDA_PATH%\bin\nvcc.exe"`) do set CMAKE_CUDA_COMPILER=%%n -set CMAKE_CUDA_COMPILER_LAUNCHER=%SRC_DIR%\tmp_bin\randomtemp.exe;%SRC_DIR%\tmp_bin\sccache.exe - -:cuda_end - -set CMAKE_GENERATOR=Ninja - -IF NOT "%USE_SCCACHE%" == "1" goto sccache_end - -set SCCACHE_IDLE_TIMEOUT=0 - -sccache --stop-server -sccache --start-server -sccache --zero-stats - -set CC=sccache-cl -set CXX=sccache-cl - -:sccache_end - -python setup.py install -if errorlevel 1 exit /b 1 - -IF "%USE_SCCACHE%" == "1" ( - sccache --show-stats - taskkill /im sccache.exe /f /t || ver > nul - taskkill /im nvcc.exe /f /t || ver > nul -) - -if NOT "%build_with_cuda%" == "" ( - copy "%CUDA_BIN_PATH%\cudnn*64_*.dll*" %SP_DIR%\torch\lib - copy "%NVTOOLSEXT_PATH%\bin\x64\nvToolsExt64_*.dll*" %SP_DIR%\torch\lib - :: cupti library file name changes aggressively, bundle it to avoid - :: potential file name mismatch. - copy "%CUDA_PATH%\extras\CUPTI\lib64\cupti64_*.dll*" %SP_DIR%\torch\lib - - ::copy zlib if it exist in windows/system32 - if exist "C:\Windows\System32\zlibwapi.dll" ( - copy "C:\Windows\System32\zlibwapi.dll" %SP_DIR%\torch\lib - ) -) - -exit /b 0 diff --git a/conda/pytorch-nightly/build.sh b/conda/pytorch-nightly/build.sh deleted file mode 100755 index c5426f799..000000000 --- a/conda/pytorch-nightly/build.sh +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env bash -set -ex - -export CMAKE_LIBRARY_PATH=$PREFIX/lib:$PREFIX/include:$CMAKE_LIBRARY_PATH -export CMAKE_PREFIX_PATH=$PREFIX -export TH_BINARY_BUILD=1 # links CPU BLAS libraries thrice in a row (was needed for some MKL static linkage) -export PYTORCH_BUILD_VERSION=$PKG_VERSION -export PYTORCH_BUILD_NUMBER=$PKG_BUILDNUM -export USE_LLVM="/opt/llvm_no_cxx11_abi" -export LLVM_DIR="$USE_LLVM/lib/cmake/llvm" -export PACKAGE_TYPE="conda" - -# set OPENSSL_ROOT_DIR=/opt/openssl if it exists -if [[ -e /opt/openssl ]]; then - export OPENSSL_ROOT_DIR=/opt/openssl - export CMAKE_INCLUDE_PATH="/opt/openssl/include":$CMAKE_INCLUDE_PATH -fi - -# Why do we disable Ninja when ninja is included in the meta.yaml? Well, using -# ninja in the conda builds leads to a system python2.7 library being called -# which leads to ascii decode errors when building third_party/onnx. Is the -# ninja n this conda env being picked up? We still need ninja in the meta.yaml -# for cpp_tests I believe though. TODO figure out what's going on here and fix -# it. It would be nice to use ninja in the builds of the conda binaries as well -export USE_NINJA=OFF -export INSTALL_TEST=0 # dont install test binaries into site-packages - -# MacOS build is simple, and will not be for CUDA -if [[ "$OSTYPE" == "darwin"* ]]; then - export USE_LLVM=$CMAKE_PREFIX_PATH - export LLVM_DIR=$USE_LLVM/lib/cmake/llvm - MACOSX_DEPLOYMENT_TARGET=11.0 \ - CXX=clang++ \ - CC=clang \ - python setup.py install - exit 0 -fi - -DEPS_LIST=() -# not needed if using conda's cudatoolkit package. 
Uncomment to statically link a new CUDA version that's not available in conda yet -# if [[ -n "$build_with_cuda" ]]; then -# cuda_majmin="$(echo $CUDA_VERSION | cut -f1,2 -d'.')" -# DEPS_LIST+=("/usr/local/cuda/lib64/libcudart.so.$cuda_majmin") -# DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1") -# DEPS_LIST+=("/usr/local/cuda/lib64/libnvrtc.so.$cuda_majmin") -# DEPS_LIST+=("/usr/local/cuda/lib64/libnvrtc-builtins.so") -# fi - - -if [[ -z "$USE_CUDA" || "$USE_CUDA" == 1 ]]; then - build_with_cuda=1 -fi -if [[ -n "$build_with_cuda" ]]; then - export TORCH_NVCC_FLAGS="-Xfatbin -compress-all" - TORCH_CUDA_ARCH_LIST="5.0;6.0;6.1;7.0;7.5;8.0;8.6" - export USE_STATIC_CUDNN=0 # link with cudnn dynamically - export USE_CUSPARSELT=1 # link with cusparselt - - if [[ $CUDA_VERSION == 11.8* ]]; then - TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;3.7+PTX;9.0" - #for cuda 11.8 include all dynamic loading libraries - DEPS_LIST=(/usr/local/cuda/lib64/libcudnn*.so.9 /usr/local/cuda-11.8/extras/CUPTI/lib64/libcupti.so.11.8 /usr/local/cuda/lib64/libcusparseLt.so.0) - elif [[ $CUDA_VERSION == 12.1* || $CUDA_VERSION == 12.4* || $CUDA_VERSION == 12.6* ]]; then - # cuda 12 does not support sm_3x - TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;9.0" - # for cuda 12.1 (12.4) we use cudnn 9.1 and include all dynamic loading libraries - DEPS_LIST=(/usr/local/cuda/lib64/libcudnn*.so.9 /usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12 /usr/local/cuda/lib64/libcusparseLt.so.0) - fi - if [[ -n "$OVERRIDE_TORCH_CUDA_ARCH_LIST" ]]; then - TORCH_CUDA_ARCH_LIST="$OVERRIDE_TORCH_CUDA_ARCH_LIST" - fi - export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST" - export NCCL_ROOT_DIR=/usr/local/cuda - export USE_STATIC_NCCL=1 # links nccl statically (driven by tools/setup_helpers/nccl.py, some of the NCCL cmake files such as FindNCCL.cmake and gloo/FindNCCL.cmake) - - # not needed if using conda's cudatoolkit package. Uncomment to statically link a new CUDA version that's not available in conda yet - # export ATEN_STATIC_CUDA=1 # links ATen / libcaffe2_gpu.so with static CUDA libs, also sets up special cufft linkage - # export USE_CUDA_STATIC_LINK=1 # links libcaffe2_gpu.so with static CUDA libs. Likely both these flags can be de-duplicated -fi - -fname_with_sha256() { - HASH=$(sha256sum $1 | cut -c1-8) - DIRNAME=$(dirname $1) - BASENAME=$(basename $1) - if [[ $BASENAME == "libnvrtc-builtins.so" || $BASENAME == "libcudnn"* ]]; then - echo $1 - else - INITNAME=$(echo $BASENAME | cut -f1 -d".") - ENDNAME=$(echo $BASENAME | cut -f 2- -d".") - echo "$DIRNAME/$INITNAME-$HASH.$ENDNAME" - fi -} - - - -# install -python setup.py install - -# copy over needed dependent .so files over and tag them with their hash -patched=() -for filepath in "${DEPS_LIST[@]}"; do - filename=$(basename $filepath) - destpath=$SP_DIR/torch/lib/$filename - cp $filepath $destpath - - patchedpath=$(fname_with_sha256 $destpath) - patchedname=$(basename $patchedpath) - if [[ "$destpath" != "$patchedpath" ]]; then - mv $destpath $patchedpath - fi - - patched+=("$patchedname") - echo "Copied $filepath to $patchedpath" -done - -# run patchelf to fix the so names to the hashed names -for ((i=0;i<${#DEPS_LIST[@]};++i)); do - find $SP_DIR/torch -name '*.so*' | while read sofile; do - origname="$(basename ${DEPS_LIST[i]})" - patchedname=${patched[i]} - set +e - patchelf --print-needed $sofile | grep $origname 2>&1 >/dev/null - ERRCODE=$? 
- set -e - if [ "$ERRCODE" -eq "0" ]; then - echo "patching $sofile entry $origname to $patchedname" - patchelf --replace-needed $origname $patchedname $sofile - fi - done -done - -# set RPATH of _C.so and similar to $ORIGIN, $ORIGIN/lib and conda/lib -find $SP_DIR/torch -name "*.so*" -maxdepth 1 -type f | while read sofile; do - echo "Setting rpath of $sofile to " '$ORIGIN:$ORIGIN/lib:$ORIGIN/../../..' - patchelf --set-rpath '$ORIGIN:$ORIGIN/lib:$ORIGIN/../../..' --force-rpath \ - $sofile - patchelf --print-rpath $sofile -done - -# set RPATH of lib/ files to $ORIGIN and conda/lib -find $SP_DIR/torch/lib -name "*.so*" -maxdepth 1 -type f | while read sofile; do - echo "Setting rpath of $sofile to " '$ORIGIN:$ORIGIN/../../../..' - patchelf --set-rpath '$ORIGIN:$ORIGIN/../../../..' --force-rpath $sofile - patchelf --print-rpath $sofile -done diff --git a/conda/pytorch-nightly/conda_build_config.yaml b/conda/pytorch-nightly/conda_build_config.yaml deleted file mode 100644 index 5188bb0eb..000000000 --- a/conda/pytorch-nightly/conda_build_config.yaml +++ /dev/null @@ -1,24 +0,0 @@ -blas_impl: - - mkl # [x86_64] -c_compiler: - - vs2017 # [win] -cxx_compiler: - - vs2017 # [win] -python: - - 3.5 - - 3.6 -# This differs from target_platform in that it determines what subdir the compiler -# will target, not what subdir the compiler package will be itself. -# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32 -# code on win-64 miniconda. -cross_compiler_target_platform: - - win-64 # [win] -target_platform: - - win-64 # [win] -vc: - - 14 -zip_keys: - - # [win] - - vc # [win] - - c_compiler # [win] - - cxx_compiler # [win] diff --git a/conda/pytorch-nightly/meta.yaml b/conda/pytorch-nightly/meta.yaml deleted file mode 100644 index bbbce1bbf..000000000 --- a/conda/pytorch-nightly/meta.yaml +++ /dev/null @@ -1,110 +0,0 @@ -{% set build_variant = environ.get('PYTORCH_BUILD_VARIANT', 'cuda') %} -{% set cross_compile_arm64 = environ.get('CROSS_COMPILE_ARM64', 0) %} - -package: - name: pytorch - version: "{{ environ.get('PYTORCH_BUILD_VERSION') }}" - -source: - path: "{{ environ.get('PYTORCH_GITHUB_ROOT_DIR') }}" - -requirements: - build: - - cmake - - {{ compiler('c') }} # [win] - - pkg-config # [unix] - - libuv # [unix] - - host: - - python - - setuptools=72.1.0 - - pyyaml - {% if cross_compile_arm64 == 0 %} - - mkl-include # [x86_64] - - mkl=2020.2 # [py <= 311 and x86_64 and not win] - - mkl=2023.1 # [py >= 312 and x86_64] - - mkl-devel=2023.1 # [x86_64 and win] - {% endif %} - - typing_extensions - - ninja - - libuv # [win] - - numpy=1.22.3 # [py == 38] - - numpy=2.0.1 # [py >= 39] - - openssl=1.1.1l # [py >= 38 and py <= 310 and linux] - - openssl=1.1.1s # [py == 311 and linux] - - openssl=3.0.12 # [py >= 312 and linux] -{{ environ.get('PYTORCH_LLVM_PACKAGE', ' - llvmdev=9') }} -{{ environ.get('MAGMA_PACKAGE', '') }} - - run: - - python - {% if cross_compile_arm64 == 0 %} - - mkl >=2018 # [x86_64 and not win] - - mkl=2023.1 # [x86_64 and win] - {% endif %} - - libuv # [win] - - intel-openmp # [win] - # llvm-openmp 16 leads to wrong processor affinity for fork child, see #99625. - # Before a decent fix, force llvm-openmp version <16. 
- - llvm-openmp <16 # [linux or arm64] - - typing_extensions - - sympy - - filelock - - networkx - - jinja2 - - pyyaml - {% if cross_compile_arm64 == 0 %} - - blas * mkl # [x86_64] - {% endif %} - - pytorch-mutex 1.0 {{ build_variant }} # [not osx ] -{{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT', '') }} -{{ environ.get('CONDA_TRITON_CONSTRAINT', '') }} - - {% if build_variant == 'cpu' %} - run_constrained: - - cpuonly - {% elif not osx %} - run_constrained: - - cpuonly <0 - {% endif %} - -build: - number: {{ environ.get('PYTORCH_BUILD_NUMBER', '1') }} - detect_binary_files_with_prefix: False - string: "{{ environ.get('PYTORCH_BUILD_STRING') }}" - script_env: - - BUILD_SPLIT_CUDA - - CUDA_VERSION - - CUDNN_VERSION - - CONDA_CUDATOOLKIT_CONSTRAINT - - CONDA_TRITON_CONSTRAINT - - USE_CUDA - - CMAKE_ARGS - - EXTRA_CAFFE2_CMAKE_FLAGS - - DEVELOPER_DIR - - DEBUG - - USE_FBGEMM - - USE_GLOO_WITH_OPENSSL # [unix] - - USE_SCCACHE # [win] - - USE_DISTRIBUTED # [unix] - - CMAKE_OSX_ARCHITECTURES # [unix] - - USE_MKLDNN # [unix] - - USE_NNPACK # [unix] - - USE_QNNPACK # [unix] - - BUILD_TEST # [unix] - - USE_PYTORCH_METAL_EXPORT # [osx] - - USE_COREML_DELEGATE # [osx] - - _GLIBCXX_USE_CXX11_ABI # [unix] - - MAX_JOBS # [unix] - - OVERRIDE_TORCH_CUDA_ARCH_LIST - -test: - imports: - - torch - -about: - home: http://pytorch.org/ - license: BSD 3-Clause - license_family: BSD - license_file: LICENSE - summary: PyTorch is an optimized tensor library for deep learning using GPUs and CPUs. diff --git a/conda/setup_ccache.sh b/conda/setup_ccache.sh deleted file mode 100644 index 85ecbcffe..000000000 --- a/conda/setup_ccache.sh +++ /dev/null @@ -1,30 +0,0 @@ -# sets up ccache inside the docker image (when debugging) -if ! ls ~/ccache/bin/ccache -then - yum install -y automake autoconf - yum install -y asciidoc - mkdir -p ~/ccache - pushd /tmp - rm -rf ccache - git clone https://github.com/colesbury/ccache -b ccbin - pushd ccache - ./autogen.sh - ./configure - make install prefix=~/ccache - popd - popd - - mkdir -p ~/ccache/lib - mkdir -p ~/ccache/cuda - ln -s ~/ccache/bin/ccache ~/ccache/lib/cc - ln -s ~/ccache/bin/ccache ~/ccache/lib/c++ - ln -s ~/ccache/bin/ccache ~/ccache/lib/gcc - ln -s ~/ccache/bin/ccache ~/ccache/lib/g++ - ln -s ~/ccache/bin/ccache ~/ccache/cuda/nvcc - - ~/ccache/bin/ccache -M 25Gi -fi - -export PATH=~/ccache/lib:$PATH -export CUDA_NVCC_EXECUTABLE=~/ccache/cuda/nvcc -export CMAKE_CUDA_COMPILER_LAUNCHER=~/ccache/bin/ccache diff --git a/conda/switch_cuda_version.sh b/conda/switch_cuda_version.sh deleted file mode 100755 index afe3e1fdb..000000000 --- a/conda/switch_cuda_version.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -set -ex -o pipefail - -if [[ "$OSTYPE" == "msys" ]]; then - CUDA_DIR="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v$1" -else - CUDA_DIR="/usr/local/cuda-$1" -fi - -if ! ls "$CUDA_DIR" -then - echo "folder $CUDA_DIR not found to switch" -fi - -echo "Switching symlink to $CUDA_DIR" -mkdir -p /usr/local -rm -fr /usr/local/cuda -ln -s "$CUDA_DIR" /usr/local/cuda - -# Using nvcc instead of deducing from cudart version since it's unreliable (was 110 for cuda11.1 and 11.2) -CUDA_VERSION=$(nvcc --version | sed -n 4p | cut -f5 -d" " | cut -f1 -d",") -if [[ "$OSTYPE" == "msys" ]]; then - CUDNN_VERSION=$(find /usr/local/cuda/bin/cudnn64*.dll | head -1 | tr '._' ' ' | cut -d ' ' -f2) -else - CUDNN_VERSION=$(find /usr/local/cuda/lib64/libcudnn.so.* | sort | tac | head -1 | rev | cut -d"." 
-f -3 | rev) -fi -export CUDNN_VERSION - -ls -alh /usr/local/cuda - -echo "CUDA_VERSION=$CUDA_VERSION" -echo "CUDNN_VERSION=$CUDNN_VERSION" diff --git a/conda/vs2019/activate.bat b/conda/vs2019/activate.bat deleted file mode 100644 index 1967db270..000000000 --- a/conda/vs2019/activate.bat +++ /dev/null @@ -1,49 +0,0 @@ -:: Set env vars that tell distutils to use the compiler that we put on path -SET DISTUTILS_USE_SDK=1 -SET MSSdk=1 - -SET "VS_VERSION=16.0" -SET "VS_MAJOR=16" -SET "VS_YEAR=2019" - -set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out" -set "MSYS2_ENV_CONV_EXCL=CL" - -:: For Python 3.5+, ensure that we link with the dynamic runtime. See -:: http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info -set "PY_VCRUNTIME_REDIST=%PREFIX%\\bin\\vcruntime140.dll" - -if NOT "%VS15INSTALLDIR%" == "" if exist "%VS15INSTALLDIR%\VC\Auxiliary\Build\vcvarsall.bat" ( - set "VSINSTALLDIR=%VS15INSTALLDIR%\" - goto :vswhere -) - -for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [16^,17^) -property installationPath`) do ( - if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( - set "VSINSTALLDIR=%%i\" - goto :vswhere - ) -) - -:vswhere - -:: Shorten PATH to avoid the `input line too long` error. -SET MyPath=%PATH% - -setlocal EnableDelayedExpansion - -SET TempPath="%MyPath:;=";"%" -SET var= -FOR %%a IN (%TempPath%) DO ( - IF EXIST %%~sa ( - SET "var=!var!;%%~sa" - ) -) - -set "TempPath=!var:~1!" -endlocal & set "PATH=%TempPath%" - -:: Shorten current directory too -FOR %%A IN (.) DO CD "%%~sA" - -:: other things added by install_activate.bat at package build time diff --git a/conda/vs2019/conda_build_config.yaml b/conda/vs2019/conda_build_config.yaml deleted file mode 100644 index 358052ec0..000000000 --- a/conda/vs2019/conda_build_config.yaml +++ /dev/null @@ -1,24 +0,0 @@ -blas_impl: - - mkl # [x86_64] -c_compiler: - - vs2019 # [win] -cxx_compiler: - - vs2019 # [win] -python: - - 3.5 - - 3.6 -# This differs from target_platform in that it determines what subdir the compiler -# will target, not what subdir the compiler package will be itself. -# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32 -# code on win-64 miniconda. 
-cross_compiler_target_platform:
-  - win-64  # [win]
-target_platform:
-  - win-64  # [win]
-vc:
-  - 14
-zip_keys:
-  -  # [win]
-    - vc  # [win]
-    - c_compiler  # [win]
-    - cxx_compiler  # [win]
diff --git a/conda/vs2019/install_activate.bat b/conda/vs2019/install_activate.bat
deleted file mode 100644
index 8981af80d..000000000
--- a/conda/vs2019/install_activate.bat
+++ /dev/null
@@ -1,24 +0,0 @@
-set YEAR=2019
-set VER=16
-
-mkdir "%PREFIX%\etc\conda\activate.d"
-copy "%RECIPE_DIR%\activate.bat" "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-
-if "%cross_compiler_target_platform%" == "win-64" (
-    set "target_platform=amd64"
-    echo set "CMAKE_GENERATOR=Visual Studio %VER% %YEAR% Win64" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-    echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-    if "%VSDEVCMD_ARGS%" == "" (
-        echo call "VC\Auxiliary\Build\vcvarsall.bat" x64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-    ) else (
-        echo call "VC\Auxiliary\Build\vcvarsall.bat" x64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-    )
-    echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-) else (
-    set "target_platform=x86"
-    echo set "CMAKE_GENERATOR=Visual Studio %VER% %YEAR%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-    echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-    echo call "VC\Auxiliary\Build\vcvars32.bat" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-    echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
-)
-
diff --git a/conda/vs2019/install_runtime.bat b/conda/vs2019/install_runtime.bat
deleted file mode 100644
index e09a5ccfb..000000000
--- a/conda/vs2019/install_runtime.bat
+++ /dev/null
@@ -1,49 +0,0 @@
-set VC_PATH=x86
-if "%ARCH%"=="64" (
-    set VC_PATH=x64
-)
-
-set MSC_VER=2019
-
-rem :: This should always be present for VC installed with VS. Not sure about VC installed with Visual C++ Build Tools 2015
-rem FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKEY_LOCAL_MACHINE\Software\Microsoft\DevDiv\VC\Servicing\14.0\IDE.x64" /v UpdateVersion`) DO (
-rem     set SP=%%A
-rem )
-
-rem if not "%SP%" == "%PKG_VERSION%" (
-rem     echo "Version detected from registry: %SP%"
-rem     echo "does not match version of package being built (%PKG_VERSION%)"
-rem     echo "Do you have current updates for VS 2015 installed?"
-rem     exit 1
-rem )
-
-
-REM ========== REQUIRES Win 10 SDK be installed, or files otherwise copied to location below!
-robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%LIBRARY_BIN%" *.dll /E -robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%PREFIX%" *.dll /E -if %ERRORLEVEL% GEQ 8 exit 1 - -REM ========== This one comes from visual studio 2019 -set "VC_VER=142" - -for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [16^,17^) -property installationPath`) do ( - if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( - set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat" - goto :eof - ) -) - -@setlocal -call "%VS15VARSALL%" x64 - -set "REDIST_ROOT=%VCToolsRedistDir%%VC_PATH%" - -robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%LIBRARY_BIN%" *.dll /E -if %ERRORLEVEL% LSS 8 exit 0 -robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%PREFIX%" *.dll /E -if %ERRORLEVEL% LSS 8 exit 0 -robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%LIBRARY_BIN%" *.dll /E -if %ERRORLEVEL% LSS 8 exit 0 -robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%PREFIX%" *.dll /E -if %ERRORLEVEL% LSS 8 exit 0 -@endlocal diff --git a/conda/vs2019/meta.yaml b/conda/vs2019/meta.yaml deleted file mode 100644 index 94a0ed4db..000000000 --- a/conda/vs2019/meta.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set vcver="14.2" %} -{% set vcfeature="14" %} -{% set vsyear="2019" %} -{% set fullver="15.4.27004.2010" %} - -package: - name: vs{{ vsyear }} - version: {{ fullver }} - -build: - skip: True [not win] - script_env: - - VSDEVCMD_ARGS # [win] - -outputs: - - name: vs{{ vsyear }}_{{ cross_compiler_target_platform }} - script: install_activate.bat - track_features: - # VS 2019 is binary-compatible with VS 2017/vc 14.1 and 2015/vc14. Tools are "v142". - strong: - - vc{{ vcfeature }} - about: - summary: Activation and version verification of MSVC {{ vcver }} (VS {{ vsyear }}) compiler - license: BSD 3-clause diff --git a/conda/vs2022/activate.bat b/conda/vs2022/activate.bat deleted file mode 100644 index fe18f7723..000000000 --- a/conda/vs2022/activate.bat +++ /dev/null @@ -1,51 +0,0 @@ -:: Set env vars that tell distutils to use the compiler that we put on path -set DISTUTILS_USE_SDK=1 -set MSSdk=1 - -set "VS_VERSION=17.4" -set "VS_MAJOR=17" -set "VC_YEAR=2022" -set "VC_VERSION_LOWER=17" -set "VC_VERSION_UPPER=18" - -set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out" -set "MSYS2_ENV_CONV_EXCL=CL" - -:: For Python 3.5+, ensure that we link with the dynamic runtime. See -:: http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info -set "PY_VCRUNTIME_REDIST=%PREFIX%\\bin\\vcruntime143.dll" - -if not "%VS15INSTALLDIR%" == "" if exist "%VS15INSTALLDIR%\VC\Auxiliary\Build\vcvarsall.bat" ( - set "VSINSTALLDIR=%VS15INSTALLDIR%\" - goto :vswhere -) - -for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do ( - if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( - set "VSINSTALLDIR=%%i\" - goto :vswhere - ) -) - -:vswhere - -:: Shorten PATH to avoid the `input line too long` error. -set MyPath=%PATH% - -setlocal EnableDelayedExpansion - -set TempPath="%MyPath:;=";"%" -set var= -for %%a in (%TempPath%) do ( - if exist %%~sa ( - set "var=!var!;%%~sa" - ) -) - -set "TempPath=!var:~1!" -endlocal & set "PATH=%TempPath%" - -:: Shorten current directory too -for %%A in (.) 
do cd "%%~sA" - -:: other things added by install_activate.bat at package build time diff --git a/conda/vs2022/conda_build_config.yaml b/conda/vs2022/conda_build_config.yaml deleted file mode 100644 index e2a4de3c2..000000000 --- a/conda/vs2022/conda_build_config.yaml +++ /dev/null @@ -1,25 +0,0 @@ -blas_impl: - - mkl # [x86_64] -c_compiler: - - vs2022 # [win] -cxx_compiler: - - vs2022 # [win] -python: - - 3.8 - - 3.9 - - 3.10 -# This differs from target_platform in that it determines what subdir the compiler -# will target, not what subdir the compiler package will be itself. -# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32 -# code on win-64 miniconda. -cross_compiler_target_platform: - - win-64 # [win] -target_platform: - - win-64 # [win] -vc: - - 14 -zip_keys: - - # [win] - - vc # [win] - - c_compiler # [win] - - cxx_compiler # [win] diff --git a/conda/vs2022/install_activate.bat b/conda/vs2022/install_activate.bat deleted file mode 100644 index eb85767d6..000000000 --- a/conda/vs2022/install_activate.bat +++ /dev/null @@ -1,24 +0,0 @@ -set YEAR=2022 -set VER=17 - -mkdir "%PREFIX%\etc\conda\activate.d" -copy "%RECIPE_DIR%\activate.bat" "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" - -if "%cross_compiler_target_platform%" == "win-64" ( - set "target_platform=amd64" - echo set "CMAKE_GENERATOR=Visual Studio %VER% %YEAR% Win64" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" - echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" - if "%VSDEVCMD_ARGS%" == "" ( - echo call "VC\Auxiliary\Build\vcvarsall.bat" x64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" - ) else ( - echo call "VC\Auxiliary\Build\vcvarsall.bat" x64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" - ) - echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" -) else ( - set "target_platform=x86" - echo set "CMAKE_GENERATOR=Visual Studio %VER% %YEAR%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" - echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" - echo call "VC\Auxiliary\Build\vcvars32.bat" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" - echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" -) - diff --git a/conda/vs2022/install_runtime.bat b/conda/vs2022/install_runtime.bat deleted file mode 100644 index bac684dae..000000000 --- a/conda/vs2022/install_runtime.bat +++ /dev/null @@ -1,49 +0,0 @@ -set VC_PATH=x86 -if "%ARCH%"=="64" ( - set VC_PATH=x64 -) - -set MSC_VER=2022 - -rem :: This should always be present for VC installed with VS. Not sure about VC installed with Visual C++ Build Tools 2015 -rem FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKEY_LOCAL_MACHINE\Software\Microsoft\DevDiv\VC\Servicing\14.0\IDE.x64" /v UpdateVersion`) DO ( -rem set SP=%%A -rem ) - -rem if not "%SP%" == "%PKG_VERSION%" ( -rem echo "Version detected from registry: %SP%" -rem echo "does not match version of package being built (%PKG_VERSION%)" -rem echo "Do you have current updates for VS 2015 installed?" -rem exit 1 -rem ) - - -REM ========== REQUIRES Win 10 SDK be installed, or files otherwise copied to location below! 
-robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%LIBRARY_BIN%" *.dll /E -robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%PREFIX%" *.dll /E -if %ERRORLEVEL% GEQ 8 exit 1 - -REM ========== This one comes from visual studio 2022 -set "VC_VER=143" - -for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [17^,18^) -property installationPath`) do ( - if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( - set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat" - goto :eof - ) -) - -@setlocal -call "%VS15VARSALL%" x64 - -set "REDIST_ROOT=%VCToolsRedistDir%%VC_PATH%" - -robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%LIBRARY_BIN%" *.dll /E -if %ERRORLEVEL% LSS 8 exit 0 -robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%PREFIX%" *.dll /E -if %ERRORLEVEL% LSS 8 exit 0 -robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%LIBRARY_BIN%" *.dll /E -if %ERRORLEVEL% LSS 8 exit 0 -robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%PREFIX%" *.dll /E -if %ERRORLEVEL% LSS 8 exit 0 -@endlocal diff --git a/conda/vs2022/meta.yaml b/conda/vs2022/meta.yaml deleted file mode 100644 index 184c4c32d..000000000 --- a/conda/vs2022/meta.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set vcver="17.4" %} -{% set vcfeature="17" %} -{% set vsyear="2022" %} -{% set fullver="17.4.33110.190" %} - -package: - name: vs{{ vsyear }} - version: {{ fullver }} - -build: - skip: True [not win] - script_env: - - VSDEVCMD_ARGS # [win] - -outputs: - - name: vs{{ vsyear }}_{{ cross_compiler_target_platform }} - script: install_activate.bat - track_features: - # VS 2022 is binary-compatible with VS 2019/vc 14.2, VS 2017/vc 14.1 and 2015/vc14. Tools are "v143". - strong: - - vc{{ vcfeature }} - about: - summary: Activation and version verification of MSVC {{ vcver }} (VS {{ vsyear }}) compiler - license: BSD 3-clause