diff --git a/.github/workflows/check-pod.yaml b/.github/workflows/check-pod.yaml deleted file mode 100644 index cbae547af..000000000 --- a/.github/workflows/check-pod.yaml +++ /dev/null @@ -1,58 +0,0 @@ -name: Check Pod - -on: - pull_request: - push: - branches: - - main - -jobs: - check: - runs-on: macos-13 - steps: - - uses: actions/checkout@v2 - - - name: Select Specific Xcode Version (15.1) - run: | - sudo xcode-select -s /Applications/Xcode_15.1.app - echo "Selected Xcode version:" - xcodebuild -version - - # Run the steps we document in the Release Process. - # unzip commands included as proof-of-life for the Carthage output. - - name: Print Ruby version - run: ruby --version - - name: Print Carthage version - run: 'echo -n "carthage version: " && carthage version' - - name: Print CocoaPods version - run: 'echo -n "pod version: " && pod --version --verbose' - - name: Print Make version - run: make --version - - name: Build Carthage dependencies - run: make update - - name: Build Ably framework - run: make carthage_package - - name: Print contents of generated ZIP file - run: | - unzip -l Ably.framework.zip - unzip -l Ably.framework.zip | grep 'Mac/Ably.framework' - unzip -l Ably.framework.zip | grep 'tvOS/Ably.framework' - unzip -l Ably.framework.zip | grep 'iOS/Ably.framework' - - name: Validate pod - run: pod lib lint - # We move Ably.framework.zip into a directory. This is because, by - # default, macOS’s Archive Utility unzips directly-nested zip files, so - # if Ably.framework.zip were at the top level of the zip file that - # actions/upload-artifact creates, then Archive Utility would unzip - # Ably.framework.zip too, which we don’t want, since we want this file - # to be kept intact so that we can upload it to GitHub releases as - # described in CONTRIBUTING.md. 
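# For reference, the proof-of-life checks this (now removed) workflow ran can be
# reproduced locally; a rough sketch only, assuming a macOS shell at the
# repository root with make, Carthage and CocoaPods installed:
#
#   make update             # build Carthage dependencies
#   make carthage_package   # produces Ably.framework.zip
#   for platform in Mac tvOS iOS; do
#     # proof-of-life: each platform slice must be present in the zip
#     unzip -l Ably.framework.zip | grep "${platform}/Ably.framework" || exit 1
#   done
#   pod lib lint            # validate the podspec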
- - name: Prepare built framework for archiving - run: | - mkdir -p carthage-built-framework-artifact-contents/carthage-built-framework - mv Ably.framework.zip carthage-built-framework-artifact-contents/carthage-built-framework - - name: Archive built framework - uses: actions/upload-artifact@v3 - with: - name: carthage-built-framework - path: carthage-built-framework-artifact-contents diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml deleted file mode 100644 index 081cd98fb..000000000 --- a/.github/workflows/docs.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Docs Generation - -on: - pull_request: - push: - branches: - - main - tags: - - '*' - -jobs: - build: - runs-on: macos-latest - - permissions: - deployments: write - id-token: write - - steps: - - uses: actions/checkout@v2 - - - name: Select Specific Xcode Version (15.1) - run: | - sudo xcode-select -s /Applications/Xcode_15.1.app - echo "Selected Xcode version:" - xcodebuild -version - - - name: Install Dependencies - run: | - make submodules - bundle install - make update_carthage_dependencies_macos - - - name: Build Documentation - run: | - ./Scripts/jazzy.sh - ls -al Docs/jazzy - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-region: eu-west-2 - role-to-assume: arn:aws:iam::${{ secrets.ABLY_AWS_ACCOUNT_ID_SDK }}:role/ably-sdk-builds-ably-cocoa - role-session-name: "${{ github.run_id }}-${{ github.run_number }}" - - - name: Upload Documentation - uses: ably/sdk-upload-action@v1 - with: - sourcePath: Docs/jazzy - githubToken: ${{ secrets.GITHUB_TOKEN }} - artifactName: jazzydoc - diff --git a/.github/workflows/examples.yaml b/.github/workflows/examples.yaml deleted file mode 100644 index d418daddb..000000000 --- a/.github/workflows/examples.yaml +++ /dev/null @@ -1,69 +0,0 @@ -name: Examples Test - -on: - pull_request: - push: - branches: - - main - -jobs: - check: - runs-on: macos-13 - - env: - LC_CTYPE: en_US.UTF-8 - LANG: en_US.UTF-8 - ABLY_ENV: sandbox - - steps: - - name: Checkout repo - uses: actions/checkout@v2 - - - name: Select Xcode (15.1) - run: | - sudo xcode-select -s /Applications/Xcode_15.1.app - echo "Selected Xcode version:" - xcodebuild -version - - - name: Environment Info - run: ./Scripts/log-environment-information.sh - - - name: Reset Simulators - run: xcrun simctl erase all - - - name: Install Dependencies - run: | - make submodules - bundle install - make update_carthage_dependencies_ios - - - name: Run Examples Tests - working-directory: ./Examples/Tests - run: | - pod repo update - pod install - bundle exec fastlane scan -s Tests --output-directory "fastlane/test_output/examples/test_iOS17_2" - - - name: Carthage Installation - working-directory: ./Examples/AblyCarthage - run: | - echo 'Installing Carthage dependencies...' - carthage update --use-xcframeworks --platform iOS --no-use-binaries - echo 'Building AblyCarthage example...' 
- xcodebuild build -scheme "AblyCarthage" -destination "platform=iOS Simulator,name=iPhone 15" -configuration "Debug" - - - name: SPM Installation - working-directory: ./ - run: | - echo 'Current Branch: ' $GITHUB_HEAD_REF - echo 'Current Revision (SHA):' $GITHUB_SHA - echo Current Path: $(pwd) - export PACKAGE_URL=file://$(pwd) - export PACKAGE_BRANCH_NAME=$GITHUB_HEAD_REF - export PACKAGE_REVISION=$GITHUB_SHA - swift test --package-path Examples/SPM -v - - - name: Build APNS Example - working-directory: ./Examples/AblyPush - run: | - xcodebuild build -scheme "AblyPushExample" -destination "platform=iOS Simulator,name=iPhone 15" -configuration "Debug" diff --git a/.github/workflows/features.yml b/.github/workflows/features.yml deleted file mode 100644 index d1c123738..000000000 --- a/.github/workflows/features.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: Features - -on: - pull_request: - push: - branches: - - main - -jobs: - build: - uses: ably/features/.github/workflows/sdk-features.yml@main - with: - repository-name: ably-cocoa - secrets: inherit diff --git a/.github/workflows/integration-test-iOS17_2.yaml b/.github/workflows/integration-test-iOS17_2.yaml new file mode 100644 index 000000000..485df62e8 --- /dev/null +++ b/.github/workflows/integration-test-iOS17_2.yaml @@ -0,0 +1,70 @@ +name: "Integration Test: iOS 17.2" + +on: + pull_request: + push: + branches: + - main + +# IMPORTANT NOTES: +# - Changes made to this file needs to replicated across other integration-test-*.yaml files. +# - The Fastlane lane name is duplicated in more than one place within this workflow. + +jobs: + check: + runs-on: macos-13 + + env: + LC_CTYPE: en_US.UTF-8 + LANG: en_US.UTF-8 + ABLY_ENV: sandbox + + steps: + - name: Check out SDK repo + uses: actions/checkout@v2 + + - name: Select Specific Xcode Version (15.1) + run: | + sudo xcode-select -s /Applications/Xcode_15.1.app + echo "Selected Xcode version:" + xcodebuild -version + + - name: Log environment information + run: ./Scripts/log-environment-information.sh + + - name: Check out xcparse repo + uses: actions/checkout@v3 + with: + repository: ably-forks/xcparse + ref: emit-test-case-info + path: xcparse + + - id: get-xcparse-commit-sha + name: Get xcparse commit SHA + run: | + cd xcparse + echo "::set-output name=sha::$(git rev-parse HEAD)" + + - name: "actions/cache@v3 (xcparse binary)" + uses: actions/cache@v3 + with: + path: xcparse/.build/debug/xcparse + key: ${{ runner.os }}-xcparse-${{ steps.get-xcparse-commit-sha.outputs.sha }} + + - name: Install Dependencies and Run Tests Continuously + env: + TEST_OBSERVABILITY_SERVER_AUTH_KEY: ${{ secrets.TEST_OBSERVABILITY_SERVER_AUTH_KEY }} + run: | + brew install xcbeautify + brew install coreutils # for `timeout` + make submodules + bundle install + make update_carthage_dependencies_ios + Scripts/continuously-run-tests-and-upload-results.sh --lane test_iOS17_2 + + - name: Upload .xcresult bundles + uses: actions/upload-artifact@v3 + if: always() + with: + name: xcresult-bundles.tar.gz + path: xcresult-bundles.tar.gz diff --git a/.github/workflows/integration-test.yaml b/.github/workflows/integration-test.yaml deleted file mode 100644 index bbe44efbc..000000000 --- a/.github/workflows/integration-test.yaml +++ /dev/null @@ -1,119 +0,0 @@ -name: "Integration Test" - -on: - pull_request: - push: - branches: - - main - -# IMPORTANT NOTES: -# - Changes made to this file needs to replicated across other integration-test-*.yaml files. 
-# - The Fastlane lane name is duplicated in more than one place within this workflow. - -jobs: - check: - runs-on: macos-13 - - strategy: - fail-fast: false - matrix: - include: - - - platform: iOS - lane: test_iOS17_2 - - - platform: tvOS - lane: test_tvOS17_2 - - - platform: macOS - lane: test_macOS - - env: - LC_CTYPE: en_US.UTF-8 - LANG: en_US.UTF-8 - ABLY_ENV: sandbox - - steps: - - name: Check out repo - uses: actions/checkout@v2 - - - name: Select Xcode (15.1) - run: | - sudo xcode-select -s /Applications/Xcode_15.1.app - echo "Selected Xcode version:" - xcodebuild -version - - - name: Environment Info - run: ./Scripts/log-environment-information.sh - - - name: Check out xcparse repo - uses: actions/checkout@v3 - with: - repository: ably-forks/xcparse - ref: emit-test-case-info - path: xcparse - - - id: get-xcparse-commit-sha - name: Get xcparse commit SHA - run: | - cd xcparse - echo "::set-output name=sha::$(git rev-parse HEAD)" - - - name: "actions/cache@v3 (xcparse binary)" - uses: actions/cache@v3 - with: - path: xcparse/.build/debug/xcparse - key: ${{ runner.os }}-xcparse-${{ steps.get-xcparse-commit-sha.outputs.sha }} - - - name: Reset Simulators - run: xcrun simctl erase all - - - name: Install Dependencies - run: | - brew install xcbeautify - make submodules - bundle install - carthage update --use-xcframeworks --platform ${{ matrix.platform }} --no-use-binaries - - - name: Run Tests - run: bundle exec fastlane ${{ matrix.lane }} - - - name: Check Static Analyzer Output - id: analyzer-output - run: | - if [[ -z $(find ./derived_data -name "report-*.html") ]]; then - echo "Static Analyzer found no issues." - else - echo "Static Analyzer found some issues. HTML report will be available in Artifacts section. Failing build." - exit 1 - fi - - - name: Upload Static Analyzer Reports - if: ${{ failure() && steps.analyzer-output.outcome == 'failure' }} - uses: actions/upload-artifact@v2 - with: - name: static-analyzer-reports-${{ matrix.lane }} - path: ./derived_data/**/report-*.html - - - name: Upload Xcodebuild Logs - if: always() - uses: actions/upload-artifact@v2 - with: - name: xcodebuild-logs - path: ~/Library/Developer/Xcode/DerivedData/*/Logs - - - name: Upload Test Output - if: always() - uses: actions/upload-artifact@v2 - with: - name: test-output - path: fastlane/test_output - - - name: Upload Test Results - if: always() - env: - TEST_OBSERVABILITY_SERVER_AUTH_KEY: ${{ secrets.TEST_OBSERVABILITY_SERVER_AUTH_KEY }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - Scripts/upload_test_results.sh --job-name "check (${{ matrix.platform }}, ${{ matrix.lane }})" - diff --git a/Scripts/continuously-run-tests-and-upload-results.sh b/Scripts/continuously-run-tests-and-upload-results.sh new file mode 100755 index 000000000..2b9346109 --- /dev/null +++ b/Scripts/continuously-run-tests-and-upload-results.sh @@ -0,0 +1,166 @@ +#!/bin/bash + +set -e + +# 1. Check dependencies. + +if ! which timeout > /dev/null +then + echo "You need to install timeout (\`brew install coreutils\` on macOS)." 2>&1 + exit 1 +fi + +# 2. Grab command-line options. + +# https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash +while [[ "$#" -gt 0 ]]; do + case $1 in + -l|--lane) lane="$2"; shift ;; + -u|--upload-server-base-url) upload_server_base_url="$2"; shift ;; + *) echo "Unknown parameter passed: $1"; exit 1 ;; + esac + shift +done + +if [[ -z $lane ]] +then + echo "You need to specify the Fastlane lane to run (-l / --lane)." 2>&1 + exit 1 +fi + +# 3. 
Capture the time at which we started, to make sure we don’t exceed the +# maximum job running time. +started_at=`date +%s` +# https://docs.github.com/en/actions/learn-github-actions/usage-limits-billing-and-administration +let github_job_maximum_execution_seconds=6*60*60 +# We assume that the part of the job that ran before this script took at most 10 minutes, and that uploading the artifacts will take 30 minutes. +let must_end_by=$((started_at + github_job_maximum_execution_seconds - (10 + 30) * 60)) + +echo "We’ll make sure this script ends by `date -r${must_end_by}`." 2>&1 + +# 4. Run the tests in a loop and report the results. + +end_iteration_with_exit_value() { + if [[ -e xcresult-bundles ]] + then + echo "There are `du -d0 -h xcresult-bundles | awk -F '\t' '{print $1}'` of xcresult bundles to be uploaded." + tar --create --gzip xcresult-bundles > xcresult-bundles.tar.gz + echo "The file xcresult-bundles.tar.gz that will be uploaded as an artifact is `du -d0 -h xcresult-bundles.tar.gz | awk -F '\t' '{print $1}'`." + else + echo "There are no xcresult bundles to be uploaded." + fi + + exit $1 +} + +declare -i iteration=1 +while true +do + echo "BEGIN ITERATION ${iteration}" 2>&1 + + rm -rf fastlane/test_output + rm -rf xcodebuild_output + xcrun simctl erase all + + set +e + let allowed_execution_time=$must_end_by-`date +%s` + set -e + + if [[ $allowed_execution_time -le 0 ]]; then + echo "ITERATION ${iteration}: Allowed execution time reached. Exiting." 2>&1 + end_iteration_with_exit_value 0 + fi + + echo "ITERATION ${iteration}: Running fastlane with a timeout of ${allowed_execution_time} seconds." 2>&1 + + set +e + timeout --kill-after=20 ${allowed_execution_time} bundle exec fastlane --verbose $lane + tests_exit_value=$? + set -e + + if [[ tests_exit_value -eq 124 || tests_exit_value -eq 137 ]]; then + # Execution timed out. + echo "ITERATION ${iteration}: Cancelled the execution of fastlane since it exceeded timeout imposed by maximum GitHub running time. Terminating this script." + end_iteration_with_exit_value 0 + fi + + if [[ tests_exit_value -eq 0 ]] + then + echo "ITERATION ${iteration}: Tests passed." + else + echo "ITERATION ${iteration}: Tests failed (exit value ${tests_exit_value})." + fi + + echo "ITERATION ${iteration}: BEGIN xcodebuild raw output." + ls xcodebuild_output + cat xcodebuild_output/** + echo "ITERATION ${iteration}: END xcodebuild raw output." + + echo "ITERATION ${iteration}: Uploading results to observability server." + + # https://unix.stackexchange.com/questions/446847/conditionally-pass-params-to-a-script + optional_params=() + + if [[ ! -z $upload_server_base_url ]] + then + optional_params+=(--upload-server-base-url "${upload_server_base_url}") + fi + + set +e + ./Scripts/upload_test_results.sh \ + --iteration $iteration \ + "${optional_params[@]}" + # We defer failing the script until after copying the .xcresult bundle. + upload_exit_value=$? + set -e + + if [[ upload_exit_value -eq 0 ]] + then + echo "ITERATION ${iteration}: Upload succeeded." + else + echo "ITERATION ${iteration}: Upload failed (exit value ${upload_exit_value}). Will exit after copying result bundle." + fi + + # Find the .xcresult bundle and copy it to the directory that will eventually be saved as an artifact. 
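# (Across iterations this builds up an xcresult-bundles/ directory, which
# end_iteration_with_exit_value tars into xcresult-bundles.tar.gz. As a sketch,
# assuming that artifact has been downloaded from the workflow run, it can be
# inspected with:
#
#   tar --extract --gzip --file xcresult-bundles.tar.gz
#   ls xcresult-bundles                 # one numbered directory per loop iteration
#   open xcresult-bundles/1/*.xcresult  # Xcode opens the first iteration's bundle
# )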
+ + result_bundles=$(find fastlane/test_output/sdk -name '*.xcresult') + if [[ -z $result_bundles ]] + then + number_of_result_bundles=0 + else + number_of_result_bundles=$(echo "${result_bundles}" | wc -l) + fi + + if [[ $number_of_result_bundles -eq 0 ]] + then + echo "ITERATION ${iteration}: No result bundles found." + end_iteration_with_exit_value 1 + fi + + if [[ $number_of_result_bundles -gt 1 ]] + then + echo -e "ITERATION ${iteration}: Multiple result bundles found:\n${result_bundles}" + end_iteration_with_exit_value 1 + fi + + echo "ITERATION ${iteration}: Report bundle found: ${result_bundles}" + + if [[ ! -d xcresult-bundles ]]; then + mkdir xcresult-bundles + fi + + mkdir "xcresult-bundles/${iteration}" + cp -r "${result_bundles}" "xcresult-bundles/${iteration}" + + echo "ITERATION ${iteration}: Copied result bundle to xcresult-bundles/${iteration}." + + if [[ upload_exit_value -ne 0 ]] + then + echo "ITERATION ${iteration}: Terminating due to failed upload." + end_iteration_with_exit_value $upload_exit_value + fi + + echo "END ITERATION ${iteration}" 2>&1 + + iteration+=1 +done diff --git a/Scripts/fetch-test-logs.sh b/Scripts/fetch-test-logs.sh new file mode 100755 index 000000000..60caa5e4b --- /dev/null +++ b/Scripts/fetch-test-logs.sh @@ -0,0 +1,493 @@ +#!/bin/bash + +# Retrieves the raw xcodebuild output for one or more test observability server +# uploads. +# +# Only works for tests that were run using the +# continuously-run-tests-and-upload-results script in this directory. + +# Usage: +# ./fetch-test-logs.sh --repo ably/ably-cocoa --test-case-id --filter [filter] +# +# or +# +# ./fetch-test-logs.sh --repo ably/ably-cocoa --upload-id + +# Options: +# +# -r / --repo : The 'org/name'-formatted name of the GitHub repo, for +# example 'ably/ably-cocoa'. +# +# -t / --test-case-id : The ID of a test case saved on the test +# observability server. Will fetch all uploads that match the filter specified +# using the --filter option, and then save the results inside the directory +# specified by the --output-directory option, in the following hierarchy, where +# uploads are split into those where the test case failed and those where it +# didn’t (which doesn’t necessarily imply that the test case passed; it may not +# have run at all): +# +# +# ├── info.json (contains metadata about the results in this directory) +# ├── upload_logs +# │   ├── failed +# │ │   └── xcodebuild-logs-upload-.txt, ... +# │   └── not_failed +# │ └── xcodebuild-logs-upload-.txt, ... +# └── test-case-logs (unless --no-extract-test-case-logs specified) +# ├── failed +# │   └── xcodebuild-logs-upload-.txt, ... +# └── not_failed +# └── xcodebuild-logs-upload-.txt, ... +# +# The upload_logs directory contains the full logs for that upload, and the +# test_case_logs directory contains just the segments of the logs that +# correspond to the specific test case. +# +# -d / --output-directory : Where to output the logs generated by the +# --test-case-id option to. Defaults to ./xcodebuild-logs-test-case--. +# +# -f / --filter : A URL query string describing a filter to be applied +# to the uploads fetched when using the --test-case-id option. For example, +# "branches[]=main&createdBefore=2022-02-20". +# +# -n / --no-extract-test-case-logs: Will cause the --test-case-id option to not +# attempt to extract the segment of the upload log that corresponds to the test +# case. +# +# -i / --upload-id : The ID of a upload saved on the test observability +# server. 
+# +# -u / --upload-server-base-url : Allows you to specify a URL to use as +# the upload server base URL. Defaults to https://test-observability.herokuapp.com. +# +# -o / --output-file : Where to output the logs generated by the +# --upload-id option to. Defaults to ./xcodebuild-logs-upload-.txt. +# +# -c / --cache-directory : Where to cache the GitHub logs. Defaults to +# ~/Library/Caches/com.ably.testObservabilityLogs. Will be created if doesn’t +# exist. +# +# -a / --no-use-github-auth: Will not prompt the user for an access token to be +# used for making requests to the GitHub API. Useful if all the required GitHub +# job logs are already cached locally. + +set -e + +check_dependencies() { + if ! which jq >/dev/null; then + echo "You need to install jq." 2>&1 + exit 1 + fi +} + +get_github_access_token() { + # https://stackoverflow.com/questions/3980668/how-to-get-a-password-from-a-shell-script-without-echoing#comment4260181_3980904 + read -s -p "Enter your GitHub access token (this will be used to fetch logs from the GitHub API): " github_access_token + + echo + + if [[ -z $github_access_token ]]; then + echo "You need to specify a GitHub access token." 2>&1 + exit 1 + fi + + echo +} + +# Args: +# $1: JSON representation of the test observability server upload +# $2: Path to write the logs to +fetch_and_write_logs_for_upload() { + upload_json=$1 + output_file=$2 + + # (TIL I learned that `echo` will interpret backslash sequences, which we + # don’t want. Appparently in general printf is recommended over echo.) + # https://stackoverflow.com/questions/43528202/prevent-echo-from-interpreting-backslash-escapes + github_repository=$(printf '%s' $upload_json | jq --raw-output '.githubRepository') + github_run_id=$(printf '%s' $upload_json | jq --raw-output '.githubRunId') + github_run_attempt=$(printf '%s' $upload_json | jq --raw-output '.githubRunAttempt') + github_job=$(printf '%s' $upload_json | jq --raw-output '.githubJob') + iteration=$(printf '%s' $upload_json | jq --raw-output '.iteration') + + echo "Upload comes from GitHub repository ${github_repository}. It has GitHub run ID ${github_run_id}, run attempt number ${github_run_attempt}, and job name ${github_job}. It corresponds to loop iteration ${iteration}." + + # Check whether we have a cached log for this job. + # (We cache the job logs because when running the tests continuously, with + # verbose logging enabled, a job log can be ~1.5GB.) + + log_file_name="github-log-${github_repository//\//-}-run-${github_run_id}-attempt-${github_run_attempt}-job-${github_job}" + log_file_path="${cache_directory}/${log_file_name}" + + if [[ -f "${log_file_path}" ]]; then + echo "GitHub job log file already exists at ${log_file_path}. Skipping download." 2>&1 + else + echo "GitHub job log file not yet downloaded." 2>&1 + + # (I wonder if this information that I’m fetching from GitHub is stuff that + # I should have just had in the upload in the first place? Not that + # important right now.) + + github_api_base_url="https://api.github.com" + + # From the GitHub API, fetch the jobs for this workflow run attempt. + # https://docs.github.com/en/rest/reference/actions#list-jobs-for-a-workflow-run-attempt + github_jobs_json=$(curl \ + --fail \ + -H "Accept: application/vnd.github.v3+json" \ + "${github_auth_curl_args[@]}" \ + "${github_api_base_url}/repos/${github_repository}/actions/runs/${github_run_id}/attempts/${github_run_attempt}/jobs") + + # From this list of jobs, find the one that corresponds to our upload. 
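  # (For context on the jq below: the "list jobs for a workflow run attempt"
  # endpoint returns an object of the form
  #   { "jobs": [ { "id": 123456789, "name": "check (iOS, test_iOS17_2)", ... }, ... ] }
  # so selecting the entry whose .name matches the job name recorded with the
  # upload yields the numeric job ID that the log-download endpoint needs.
  # The id and name shown here are illustrative only.)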
+ github_job_id=$(printf "%s" $github_jobs_json | jq \ + --arg jobName "${github_job}" \ + '.jobs[] | select(.name == $jobName) | .id') + + if [[ -z $github_job_id ]]; then + echo "Could not find job with name ${github_job} in attempt ${github_run_attempt} of run ${github_run_id} in GitHub repository ${github_repository}." 2>&1 + exit 1 + fi + + echo "Upload corresponds to GitHub job ID ${github_job_id}. Downloading logs. This may take a while." + + # From the GitHub API, fetch the logs for this job and cache them. + # https://docs.github.com/en/rest/reference/actions#download-job-logs-for-a-workflow-run + + if [[ ! -d "${cache_directory}" ]]; then + mkdir -p "${cache_directory}" + fi + + curl \ + --fail \ + --location \ + -H "Accept: application/vnd.github.v3+json" \ + "${github_auth_curl_args[@]}" \ + "${github_api_base_url}/repos/${github_repository}/actions/jobs/${github_job_id}/logs" >"${log_file_path}.partial" + + mv "${log_file_path}.partial" "${log_file_path}" + + echo "Saved GitHub job logs to ${log_file_path}." + fi + + # Extract the part of the logs that corresponds to the raw xcodebuild output for this iteration. + # https://stackoverflow.com/a/18870500 + + echo "Finding xcodebuild output for iteration ${iteration}." + + xcodebuild_output_start_marker="ITERATION ${iteration}: BEGIN xcodebuild raw output" + xcodebuild_output_start_line_number=$(sed -n "/${xcodebuild_output_start_marker}/=" "${log_file_path}") + + if [[ -z "${xcodebuild_output_start_line_number}" ]]; then + echo "Couldn’t find start of xcodebuild raw output (couldn’t find marker \"${xcodebuild_output_start_marker}\")." 2>&1 + echo "This may be because the GitHub job hasn’t finished yet, or because the tests are not being run in a loop, or it may be an upload created before this functionality was implemented." 2>&1 + echo "You may need to delete the cached log file ${log_file_path}." 2>&1 + exit 1 + fi + + xcodebuild_output_end_marker="ITERATION ${iteration}: END xcodebuild raw output" + xcodebuild_output_end_line_number=$(sed -n "/${xcodebuild_output_end_marker}/=" "${log_file_path}") + + if [[ -z "${xcodebuild_output_end_line_number}" ]]; then + echo "Couldn’t find end of xcodebuild raw output (couldn’t find marker \"${xcodebuild_output_end_marker}\")." 2>&1 + exit 1 + fi + + # Strip the GitHub-added timestamps (which just correspond to the time that `cat` was executed on the log file, and hence aren’t of any use) from the start of each line. + + echo "Stripping GitHub timestamps." + + # https://arkit.co.in/print-given-range-of-lines-using-awk-perl-head-tail-and-python/ + sed -n "${xcodebuild_output_start_line_number},${xcodebuild_output_end_line_number} p" "${log_file_path}" | sed -e 's/^[^ ]* //' >"${output_file}" + + echo "Wrote xcodebuild output to ${output_file}." 2>&1 +} + +default_output_file_for_upload_id() { + echo "xcodebuild-logs-upload-$1.txt" +} + +run_for_test_case() { + # From the test observability server API, fetch the test case and extract its + # properties. + + echo "Fetching test case ${test_case_id} from ${upload_server_base_url}." 
2>&1 + + test_case_json=$(curl --fail --header "Accept: application/json" "${upload_server_base_url}/repos/${repo}/test_cases/${test_case_id}") + + test_class_name=$(printf '%s' $test_case_json | jq --raw-output '.testClassName') + test_case_name=$(printf '%s' $test_case_json | jq --raw-output '.testCaseName') + + printf "Test case ${test_case_id} has test class name ${test_class_name} and test case name ${test_case_name}.\n\n" + + # From the test observability server API, fetch the filtered uploads. + + if [[ -z $filter ]]; then + filter_description="no filter" + filter_query="" + else + filter_description="filter ${filter}" + filter_query="?${filter}" + fi + + echo "Fetching uploads for test case ${test_case_id}, with ${filter_description}, from ${upload_server_base_url}." 2>&1 + + uploads_json=$(curl --fail --header "Accept: application/json" "${upload_server_base_url}/repos/${repo}/test_cases/${test_case_id}/uploads${filter_query}") + + number_of_uploads=$(printf '%s' $uploads_json | jq '. | length') + + if [[ ${number_of_uploads} -eq 1 ]]; then + echo "There is 1 upload". 2>&1 + else + echo "There are ${number_of_uploads} uploads". 2>&1 + fi + + echo + + mkdir "${output_directory}" + mkdir "${output_directory}/upload_logs" + mkdir "${output_directory}/test_case_logs" + + failed_upload_logs_output_directory="${output_directory}/upload_logs/failed" + mkdir "${failed_upload_logs_output_directory}" + not_failed_upload_logs_output_directory="${output_directory}/upload_logs/not_failed" + mkdir "${not_failed_upload_logs_output_directory}" + + failed_test_case_logs_output_directory="${output_directory}/test_case_logs/failed" + mkdir "${failed_test_case_logs_output_directory}" + not_failed_test_case_logs_output_directory="${output_directory}/test_case_logs/not_failed" + mkdir "${not_failed_test_case_logs_output_directory}" + + jq -n \ + --arg testCaseId "${test_case_id}" \ + --arg filter "${filter}" \ + --arg uploadServerBaseUrl "${upload_server_base_url}" \ + '{ fetchedAt: (now | todateiso8601), testCaseId: $testCaseId, filter: $filter, uploadServerBaseUrl: $uploadServerBaseUrl }' \ + >"${output_directory}/info.json" + + for ((i = 0; i < number_of_uploads; i += 1)); do + failed=$(printf '%s' $uploads_json | jq ".[${i}].failed") + upload_json=$(printf '%s' $uploads_json | jq ".[${i}].upload") + + upload_id=$(printf '%s' $upload_json | jq --raw-output '.id') + + echo "[$((i + 1)) of ${number_of_uploads}] Processing upload ${upload_id}." 2>&1 + + output_file_without_directory=$(default_output_file_for_upload_id "${upload_id}") + + if [[ $failed == "true" ]]; then + upload_log_output_file="${failed_upload_logs_output_directory}/${output_file_without_directory}" + test_case_log_output_file="${failed_test_case_logs_output_directory}/${output_file_without_directory}" + else + upload_log_output_file="${not_failed_upload_logs_output_directory}/${output_file_without_directory}" + test_case_log_output_file="${not_failed_test_case_logs_output_directory}/${output_file_without_directory}" + fi + + fetch_and_write_logs_for_upload "${upload_json}" "${upload_log_output_file}" + + if [[ -z "${no_extract_test_case_logs}" ]]; then + extract_logs_for_test_case "${test_class_name}" "${test_case_name}" "${upload_log_output_file}" "${test_case_log_output_file}" + fi + + echo + done +} + +# Args: +# $1: Test class name e.g. RealtimeClientPresenceTests +# $2: Test case name e.g. 
test__037__Presence__update__should_update_the_data_for_the_present_member_with_a_value() +# $3: Path of the xcodebuild logs for the entire test suite run +# $4: Path of where to write the test case logs to. +extract_logs_for_test_case() { + # Extract the part of the logs that corresponds to the raw xcodebuild output for this iteration. (We have similar code in fetch_and_write_logs_for_upload.) + + test_class_name=$1 + test_case_name=$2 + upload_log_file=$3 + output_file=$4 + + # (For some reason, the test case name in the observability server has + # trailing (), but in the xcodebuild logs it doesn’t. So strip them.) + sanitised_test_case_name="${test_case_name//[()]/}" + + echo "Finding logs for test class ${test_class_name}, test case ${test_case_name} in ${upload_log_file}." 2>&1 + + test_case_log_start_marker="Test Case.*${test_class_name} ${sanitised_test_case_name}.*started" + test_case_log_start_line_number=$(sed -n "/${test_case_log_start_marker}/=" "${upload_log_file}") + + if [[ -z "${test_case_log_start_line_number}" ]]; then + echo "Couldn’t find start of test case output (couldn’t find marker \"${test_case_log_start_marker}\")." 2>&1 + exit 1 + fi + + test_case_log_end_marker="Test Case.*${test_class_name} ${sanitised_test_case_name}.*(passed|failed)|Restarting after unexpected exit, crash, or test timeout in ${test_class_name}\/${sanitised_test_case_name}\(\)" + test_case_log_end_line_number=$(sed -En "/${test_case_log_end_marker}/=" "${upload_log_file}") + + if [[ -z "${test_case_log_end_line_number}" ]]; then + echo "Couldn’t find end of test case output (couldn’t find marker \"${test_case_log_end_marker}\")." 2>&1 + exit 1 + fi + + sed -n "${test_case_log_start_line_number},${test_case_log_end_line_number} p" "${upload_log_file}" >"${output_file}" + + echo "Wrote test case log to ${output_file}." 2>&1 +} + +run_for_upload() { + # From the test observability server API, fetch the upload, to find the + # GitHub run ID, attempt number, job name, and iteration. + + echo "Fetching upload ${upload_id} from ${upload_server_base_url}." 2>&1 + + upload_json=$(curl --fail --header "Accept: application/json" "${upload_server_base_url}/repos/${repo}/uploads/${upload_id}") + + fetch_and_write_logs_for_upload "${upload_json}" "${output_file}" +} + +check_dependencies + +# Grab and validate command-line options, and apply defaults. + +# https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash +while [[ "$#" -gt 0 ]]; do + case $1 in + -r | --repo) + repo="$2" + shift + ;; + -t | --test-case-id) + if [[ -z "$2" ]]; then + echo "You must specify a test case ID when using the --test-case-id option." 2>&1 + exit 1 + fi + test_case_id="$2" + shift + ;; + -d | --output-directory) + if [[ -z "$2" ]]; then + echo "You must specify an output directory when using the --output-directory option." 2>&1 + exit 1 + fi + output_directory="$2" + shift + ;; + -f | --filter) + if [[ -z "$2" ]]; then + echo "You must specify a filter when using the --filter option." 2>&1 + exit 1 + fi + filter="$2" + shift + ;; + -n | --no-extract-test-case-logs) no_extract_test_case_logs="1" ;; + -i | --upload-id) + if [[ -z "$2" ]]; then + echo "You must specify an upload ID when using the --upload-id option." 2>&1 + exit 1 + fi + upload_id="$2" + shift + ;; + -u | --upload-server-base-url) + if [[ -z "$2" ]]; then + echo "You must specify a base URL when using the --upload-server-base-url option." 
2>&1 + exit 1 + fi + upload_server_base_url="$2" + shift + ;; + -o | --output-file) + if [[ -z "$2" ]]; then + echo "You must specify an output file when using the --output-file option." 2>&1 + exit 1 + fi + output_file="$2" + shift + ;; + -c | --cache-directory) + if [[ -z "$2" ]]; then + echo "You must specify a cache directory when using the --cache-directory option." 2>&1 + exit 1 + fi + cache_directory="$2" + shift + ;; + -a | --no-use-github-auth) no_use_github_auth="1" ;; + *) + echo "Unknown parameter passed: $1" 2>&1 + exit 1 + ;; + esac + shift +done + +if [[ -z $repo ]]; then + echo "You need to specify a repo (-r / --repo)." 2>&1 + exit 1 +fi + +if [[ -z $test_case_id && -z $upload_id ]]; then + echo "You need to specify the test case ID (-t / --test-case-id) or upload ID (-i / --upload-id)." 2>&1 + exit 1 +fi + +if [[ -n $test_case_id && -n $upload_id ]]; then + echo "You cannot specify both a test case ID and an upload ID." 2>&1 + exit 1 +fi + +if [[ -n $test_case_id && -n $upload_id ]]; then + echo "You cannot specify both a test case ID and an upload ID." 2>&1 + exit 1 +fi + +if [[ -z $test_case_id && -n $output_directory ]]; then + echo "You can only specify an output directory with a test case ID (-t / --test-case-id)." 2>&1 + exit 1 +fi + +if [[ -z $output_directory ]]; then + output_directory="xcodebuild-logs-test-case-${test_case_id}-$(date -Iseconds)" +fi + +if [[ -z $test_case_id && -n $filter ]]; then + echo "You can only specify a filter with a test case ID (-t / --test-case-id)." 2>&1 + exit 1 +fi + +if [[ -z $test_case_id && -n $no_extract_test_case_logs ]]; then + echo "You can only specify the --no-extract-test-case-logs option with a test case ID (-t / --test-case-id)." 2>&1 + exit 1 +fi + +if [[ -z $upload_server_base_url ]]; then + upload_server_base_url="https://test-observability.herokuapp.com" +fi + +if [[ -z $upload_id && -n $output_file ]]; then + echo "You can only specify an output file with an upload ID (-i / --upload-id)." 2>&1 + exit 1 +fi + +if [[ -z $output_file ]]; then + output_file=$(default_output_file_for_upload_id "${upload_id}") +fi + +if [[ -z $cache_directory ]]; then + cache_directory="${HOME}/Library/Caches/com.ably.testObservabilityLogs" +fi + +github_auth_curl_args=() +if [[ -z $no_use_github_auth ]]; then + # Get the GitHub access token from the user. We don’t allow them to specify it on the command line. + github_access_token="" + get_github_access_token + github_auth_curl_args+=(-H "Authorization: token ${github_access_token}") +fi + +# Run the appropriate function based on arguments. + +if [[ -n $test_case_id ]]; then + run_for_test_case +elif [[ -n $upload_id ]]; then + run_for_upload +fi diff --git a/Scripts/set-ci-length-and-parallelism.sh b/Scripts/set-ci-length-and-parallelism.sh new file mode 100755 index 000000000..efd705441 --- /dev/null +++ b/Scripts/set-ci-length-and-parallelism.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +set -e + +# Usage: +# ./set-ci-length-and-parallelism.sh --workflows --jobs-per-workflow + +# Check dependencies. +if ! which yq > /dev/null; then + echo "You need to install yq." 2>&1 + exit 1 +fi + +# Grab and validate command-line options. + +# https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash +while [[ "$#" -gt 0 ]]; do + case $1 in + --workflows) + if [[ -z "$2" ]]; then + echo "You must specify the number of workflows." 
2>&1 + exit 1 + fi + num_workflows="$2" + shift + ;; + --jobs-per-workflow) + if [[ -z "$2" ]]; then + echo "You must specify the number of jobs per workflow." 2>&1 + exit 1 + fi + jobs_per_workflow="$2" + shift + ;; + *) + echo "Unknown parameter passed: $1" 2>&1 + exit 1 + ;; + esac + shift +done + +if [[ -z $num_workflows ]]; then + echo "You need to specify the number of workflows (--workflows)." 2>&1 + exit 1 +fi + +if [[ ! $num_workflows =~ ^-?[0-9]+$ ]]; then + echo "The number of workflows must be a number." 2>&1 + exit 1 +fi + +if [[ $num_workflows -lt 1 ]]; then + echo "The number of workflows must be 1 or more." 2>&1 + exit 1 +fi + +if [[ -z $jobs_per_workflow ]]; then + echo "You need to specify the number of jobs per workflow (--jobs-per-workflow)." 2>&1 + exit 1 +fi + +if [[ ! $jobs_per_workflow =~ ^-?[0-9]+$ ]]; then + echo "The number of jobs per workflow must be a number." 2>&1 + exit 1 +fi + +if [[ $jobs_per_workflow -lt 1 ]]; then + echo "The number of jobs per workflow must be 1 or more." 2>&1 + exit 1 +fi + +workflow_file_without_extension=".github/workflows/integration-test-iOS16_2" +workflow_file_extension=".yaml" + +workflow_file="${workflow_file_without_extension}${workflow_file_extension}" +workflow_name=$(yq .name $workflow_file) + +# First, we apply the number of jobs per workflow. + +yq -i '(.jobs.check | key) = "check-1"' $workflow_file +yq -i "(.jobs.check-1.steps[] | select(.with.path == \"xcresult-bundles.tar.gz\")).with.name = \"xcresult-bundles-1.tar.gz\"" $workflow_file + +for ((i=2; i <= $jobs_per_workflow; i += 1)) +do + yq -i ".jobs.check-${i} = .jobs.check-$(($i-1))" $workflow_file + yq -i ".jobs.check-${i}.needs = [\"check-$(($i-1))\"]" $workflow_file + yq -i "(.jobs.check-${i}.steps[] | select(.with.path == \"xcresult-bundles.tar.gz\")).with.name = \"xcresult-bundles-${i}.tar.gz\"" $workflow_file +done + +# Now, we duplicate the workflow file the requested number of times. + +mv $workflow_file "${workflow_file_without_extension}-1${workflow_file_extension}" + +for ((i=1; i <= $num_workflows; i += 1)) +do + new_workflow_file="${workflow_file_without_extension}-${i}${workflow_file_extension}" + + if [[ $i -gt 1 ]]; then + cp "${workflow_file_without_extension}-$((i-1))${workflow_file_extension}" $new_workflow_file + fi + + yq -i ".name = \"${workflow_name} (workflow ${i})\"" $new_workflow_file +done diff --git a/Source/ARTConnection.m b/Source/ARTConnection.m index f56a71a40..6b3a5dc23 100644 --- a/Source/ARTConnection.m +++ b/Source/ARTConnection.m @@ -198,7 +198,7 @@ - (NSString *)key_nosync { - (NSInteger)maxMessageSize { if (_maxMessageSize) return _maxMessageSize; - return _realtime.options.isProductionEnvironment ? [ARTDefault maxProductionMessageSize] : [ARTDefault maxSandboxMessageSize]; + return [ARTDefault maxMessageSize] ?: (_realtime.options.isProductionEnvironment ? 
[ARTDefault maxProductionMessageSize] : [ARTDefault maxSandboxMessageSize]); } - (ARTRealtimeConnectionState)state_nosync { diff --git a/Test/Tests/AuthTests.swift b/Test/Tests/AuthTests.swift index 9fb811768..d73fff608 100644 --- a/Test/Tests/AuthTests.swift +++ b/Test/Tests/AuthTests.swift @@ -3430,7 +3430,7 @@ class AuthTests: XCTestCase { // RSA10k - func skipped__test__115__authorize__server_time_offset__should_obtain_server_time_once_and_persist_the_offset_from_the_local_clock() throws { + func test__FLAKY__115__authorize__server_time_offset__should_obtain_server_time_once_and_persist_the_offset_from_the_local_clock() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) let rest = ARTRest(options: options) @@ -4058,7 +4058,7 @@ class AuthTests: XCTestCase { expect { try ARTTokenDetails.fromJson("[]" as ARTJsonCompatible) }.to(throwError()) } - func skipped__test__140__JWT_and_realtime__client_initialized_with_a_JWT_token_in_ClientOptions__with_valid_credentials__pulls_stats_successfully() throws { + func test__FLAKY__140__JWT_and_realtime__client_initialized_with_a_JWT_token_in_ClientOptions__with_valid_credentials__pulls_stats_successfully() throws { let test = Test() let options = try AblyTests.clientOptions(for: test) options.token = try getJWTToken(for: test) @@ -4210,7 +4210,7 @@ class AuthTests: XCTestCase { // RSA8g - func skipped__test__146__JWT_and_realtime__when_using_authCallback__with_valid_credentials__pulls_stats_successfully() throws { + func test__FLAKY__146__JWT_and_realtime__when_using_authCallback__with_valid_credentials__pulls_stats_successfully() throws { let test = Test() let options = try AblyTests.clientOptions(for: test) options.authCallback = { _, completion in diff --git a/Test/Tests/PushAdminTests.swift b/Test/Tests/PushAdminTests.swift index 852b45221..3a92d80d7 100644 --- a/Test/Tests/PushAdminTests.swift +++ b/Test/Tests/PushAdminTests.swift @@ -223,7 +223,7 @@ class PushAdminTests: XCTestCase { } } - func skipped__test__002__publish__should_publish_successfully() throws { + func test__FLAKY__002__publish__should_publish_successfully() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) let realtime = ARTRealtime(options: options) @@ -257,7 +257,7 @@ class PushAdminTests: XCTestCase { } } - func skipped__test__003__publish__should_fail_with_a_bad_recipient() throws { + func test__FLAKY__003__publish__should_fail_with_a_bad_recipient() throws { let test = Test() let realtime = ARTRealtime(options: try AblyTests.commonAppSetup(for: test)) defer { realtime.dispose(); realtime.close() } @@ -285,7 +285,7 @@ class PushAdminTests: XCTestCase { } } - func skipped__test__004__publish__should_fail_with_an_empty_recipient() throws { + func test__FLAKY__004__publish__should_fail_with_an_empty_recipient() throws { let test = Test() let realtime = ARTRealtime(options: try AblyTests.commonAppSetup(for: test)) defer { realtime.dispose(); realtime.close() } diff --git a/Test/Tests/RealtimeClientChannelTests.swift b/Test/Tests/RealtimeClientChannelTests.swift index 966ab5479..bead7a373 100644 --- a/Test/Tests/RealtimeClientChannelTests.swift +++ b/Test/Tests/RealtimeClientChannelTests.swift @@ -155,7 +155,7 @@ class RealtimeClientChannelTests: XCTestCase { } // RTL1 - func skipped__test__001__Channel__should_process_all_incoming_messages_and_presence_messages_as_soon_as_a_Channel_becomes_attached() throws { + func 
test__FLAKY__001__Channel__should_process_all_incoming_messages_and_presence_messages_as_soon_as_a_Channel_becomes_attached() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) let client1 = AblyTests.newRealtime(options).client @@ -1236,7 +1236,7 @@ class RealtimeClientChannelTests: XCTestCase { } } - func skipped__test__045__Channel__attach__happens_when_connection_is_CONNECTED_if_it_s_currently__DISCONNECTED() throws { + func test__FLAKY__045__Channel__attach__happens_when_connection_is_CONNECTED_if_it_s_currently__DISCONNECTED() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) let client = ARTRealtime(options: options) @@ -2776,8 +2776,7 @@ class RealtimeClientChannelTests: XCTestCase { XCTAssertEqual(protocolMessages.count, 1) } - // FIXME: Fix flaky presence tests and re-enable. See https://ably-real-time.slack.com/archives/C030C5YLY/p1623172436085700 - func skipped__test__090__Channel__publish__message_bundling__should_only_bundle_messages_when_it_respects_all_of_the_constraints() throws { + func test__FLAKY__090__Channel__publish__message_bundling__should_only_bundle_messages_when_it_respects_all_of_the_constraints() throws { let test = Test() let defaultMaxMessageSize = ARTDefault.maxMessageSize() ARTDefault.setMaxMessageSize(256) diff --git a/Test/Tests/RealtimeClientConnectionTests.swift b/Test/Tests/RealtimeClientConnectionTests.swift index a995144d9..6f0a4e4e2 100644 --- a/Test/Tests/RealtimeClientConnectionTests.swift +++ b/Test/Tests/RealtimeClientConnectionTests.swift @@ -122,7 +122,7 @@ private func expectDataToMatch(_ message: ARTMessage, _ fixtureMessage: Any) { fail("expected NSArray") } case "binary": - XCTAssertEqual(message.data as? NSData, (dictionaryValue["dictionaryValue"] as! String).dataFromHexadecimalString()! as NSData?) + XCTAssertEqual(message.data as? NSData, (dictionaryValue["expectedHexValue"] as! String).dataFromHexadecimalString()! as NSData?) default: fail("unhandled: \(dictionaryValue["expectedType"] as! String)") } @@ -2773,7 +2773,7 @@ class RealtimeClientConnectionTests: XCTestCase { XCTAssertTrue(channel.errorReason === protocolError.error) } - func skipped__test__072__Connection__connection_failures_once_CONNECTED__System_s_response_to_a_resume_request__should_resume_the_connection_after_an_auth_renewal() throws { + func test__FLAKY__072__Connection__connection_failures_once_CONNECTED__System_s_response_to_a_resume_request__should_resume_the_connection_after_an_auth_renewal() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) options.tokenDetails = try getTestTokenDetails(for: test, ttl: 5.0) @@ -2850,10 +2850,9 @@ class RealtimeClientConnectionTests: XCTestCase { } } } - - // FIXME: Fix flaky presence tests and re-enable. 
See https://ably-real-time.slack.com/archives/C030C5YLY/p1623172436085700 + // RTN15d - func skipped__test__065__Connection__connection_failures_once_CONNECTED__should_recover_from_disconnection_and_messages_should_be_delivered_once_the_connection_is_resumed() throws { + func test__FLAKY__065__Connection__connection_failures_once_CONNECTED__should_recover_from_disconnection_and_messages_should_be_delivered_once_the_connection_is_resumed() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) @@ -2995,7 +2994,7 @@ class RealtimeClientConnectionTests: XCTestCase { } // RTN15g RTN15g1 - func skipped__test__074__Connection__connection_failures_once_CONNECTED__when_connection__ttl_plus_idle_interval__period_has_passed_since_last_activity__uses_a_new_connection() throws { + func test__FLAKY__074__Connection__connection_failures_once_CONNECTED__when_connection__ttl_plus_idle_interval__period_has_passed_since_last_activity__uses_a_new_connection() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) // We want this to be > than the sum of customTtlInterval and customIdleInterval @@ -3098,7 +3097,7 @@ class RealtimeClientConnectionTests: XCTestCase { // RTN15h - func skipped__test__077__Connection__connection_failures_once_CONNECTED__DISCONNECTED_message_contains_a_token_error__if_the_token_is_renewable_then_error_should_not_be_emitted() throws { + func test__FLAKY__077__Connection__connection_failures_once_CONNECTED__DISCONNECTED_message_contains_a_token_error__if_the_token_is_renewable_then_error_should_not_be_emitted() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) options.autoConnect = false @@ -3176,7 +3175,7 @@ class RealtimeClientConnectionTests: XCTestCase { } // RTN15h2 - func skipped__test__079__Connection__connection_failures_once_CONNECTED__DISCONNECTED_message_contains_a_token_error__should_transition_to_disconnected_when_the_token_renewal_fails_and_the_error_should_be_emitted() throws { + func test__FLAKY__079__Connection__connection_failures_once_CONNECTED__DISCONNECTED_message_contains_a_token_error__should_transition_to_disconnected_when_the_token_renewal_fails_and_the_error_should_be_emitted() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) options.autoConnect = false @@ -3299,25 +3298,6 @@ class RealtimeClientConnectionTests: XCTestCase { } } } - - // RTN16c - func skipped__test__083__Connection__Connection_recovery__Connection_recoveryKey_should_become_becomes_null_when_a_connection_is_explicitly_CLOSED_or_CLOSED() throws { - let test = Test() - let options = try AblyTests.commonAppSetup(for: test) - let client = ARTRealtime(options: options) - defer { client.dispose(); client.close() } - waitUntil(timeout: testTimeout) { done in - client.connection.once(.connected) { _ in - client.connection.once(.closed) { _ in - XCTAssertNil(client.connection.createRecoveryKey()) - XCTAssertNil(client.connection.key) - XCTAssertNil(client.connection.id) - done() - } - client.close() - } - } - } // RTN16g func test__110__Connection__Connection_recovery__connection_recovery_key_is_correctly_constructed_from_defined_parts() throws { @@ -4009,17 +3989,17 @@ class RealtimeClientConnectionTests: XCTestCase { // RTN17d - func skipped__test__097__Connection__Host_Fallback__should_use_an_alternative_host_when___hostUnreachable() { + func test__FLAKY__097__Connection__Host_Fallback__should_use_an_alternative_host_when___hostUnreachable() { let test = Test() 
testUsesAlternativeHostOnResponse(.hostUnreachable, channelName: test.uniqueChannelName()) } - func skipped__test__098__Connection__Host_Fallback__should_use_an_alternative_host_when___requestTimeout_timeout__0_1_() { + func test__FLAKY__098__Connection__Host_Fallback__should_use_an_alternative_host_when___requestTimeout_timeout__0_1_() { let test = Test() testUsesAlternativeHostOnResponse(.requestTimeout(timeout: 0.1), channelName: test.uniqueChannelName()) } - func skipped__test__099__Connection__Host_Fallback__should_use_an_alternative_host_when___hostInternalError_code__501_() { + func test__FLAKY__099__Connection__Host_Fallback__should_use_an_alternative_host_when___hostInternalError_code__501_() { let test = Test() testUsesAlternativeHostOnResponse(.hostInternalError(code: 501), channelName: test.uniqueChannelName()) } @@ -4615,7 +4595,7 @@ class RealtimeClientConnectionTests: XCTestCase { } // RTN19b - func skipped__test__104__Connection__Transport_disconnected_side_effects__should_resend_the_ATTACH_message_if_there_are_any_pending_channels() throws { + func test__FLAKY__104__Connection__Transport_disconnected_side_effects__should_resend_the_ATTACH_message_if_there_are_any_pending_channels() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) let client = AblyTests.newRealtime(options).client @@ -5207,7 +5187,7 @@ class RealtimeClientConnectionTests: XCTestCase { } // https://github.com/ably/wiki/issues/22 - func skipped__test__111__Connection__with_fixture_messages__should_encode_and_decode_fixture_messages_as_expected() throws { + func test__111__Connection__with_fixture_messages__should_encode_and_decode_fixture_messages_as_expected() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) options.useBinaryProtocol = false @@ -5281,7 +5261,7 @@ class RealtimeClientConnectionTests: XCTestCase { } } - func skipped__test__112__Connection__with_fixture_messages__should_send_messages_through_raw_JSON_POST_and_retrieve_equal_messages_through_MsgPack_and_JSON() throws { + func test__112__Connection__with_fixture_messages__should_send_messages_through_raw_JSON_POST_and_retrieve_equal_messages_through_MsgPack_and_JSON() throws { let test = Test() try setupDependencies(for: test) let restPublishClient = ARTRest(options: jsonOptions) @@ -5335,7 +5315,7 @@ class RealtimeClientConnectionTests: XCTestCase { } } - func skipped__test__113__Connection__with_fixture_messages__should_send_messages_through_MsgPack_and_JSON_and_retrieve_equal_messages_through_raw_JSON_GET() throws { + func test__FLAKY__113__Connection__with_fixture_messages__should_send_messages_through_MsgPack_and_JSON_and_retrieve_equal_messages_through_raw_JSON_GET() throws { let test = Test() try setupDependencies(for: test) let restPublishClientMsgPack = ARTRest(options: msgpackOptions) diff --git a/Test/Tests/RealtimeClientTests.swift b/Test/Tests/RealtimeClientTests.swift index 91cc43519..108f766cc 100644 --- a/Test/Tests/RealtimeClientTests.swift +++ b/Test/Tests/RealtimeClientTests.swift @@ -320,7 +320,7 @@ class RealtimeClientTests: XCTestCase { } // RTC5b - func skipped__test__023__RealtimeClient__stats__should_accept_all_the_same_params_as_RestClient() throws { + func test__FLAKY__023__RealtimeClient__stats__should_accept_all_the_same_params_as_RestClient() throws { let test = Test() let client = ARTRealtime(options: try AblyTests.commonAppSetup(for: test)) defer { client.close() } @@ -1014,7 +1014,7 @@ class RealtimeClientTests: XCTestCase { } // RTC8b1 - part 4 - 
func skipped__test__036__RealtimeClient__Auth_authorize_should_upgrade_the_connection_with_current_token__authorize_call_should_complete_with_an_error_if_the_connection_moves_to_the_CLOSED_state() throws { + func test__FLAKY__036__RealtimeClient__Auth_authorize_should_upgrade_the_connection_with_current_token__authorize_call_should_complete_with_an_error_if_the_connection_moves_to_the_CLOSED_state() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) options.autoConnect = false @@ -1280,13 +1280,12 @@ class RealtimeClientTests: XCTestCase { } } } - - // FIXME: Fix flaky presence tests and re-enable. See https://ably-real-time.slack.com/archives/C030C5YLY/p1623172436085700 + // https://github.com/ably/ably-cocoa/issues/577 - func skipped__test__005__RealtimeClient__background_behaviour() { + func test__FLAKY__005__RealtimeClient__background_behaviour() { let test = Test() waitUntil(timeout: testTimeout) { done in - URLSession.shared.dataTask(with: URL(string: "https://ably.io")!) { _, _, _ in + URLSession.shared.dataTask(with: URL(string: "https://ably.com")!) { _, _, _ in let realtime: ARTRealtime do { @@ -1515,7 +1514,7 @@ class RealtimeClientTests: XCTestCase { } } - func skipped__test__012__RealtimeClient__moves_to_DISCONNECTED_on_an_unexpected_normal_WebSocket_close() throws { + func test__FLAKY__012__RealtimeClient__moves_to_DISCONNECTED_on_an_unexpected_normal_WebSocket_close() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) let client = ARTRealtime(options: options) diff --git a/Test/Tests/RestClientChannelTests.swift b/Test/Tests/RestClientChannelTests.swift index 2afd1fd4b..10513784c 100644 --- a/Test/Tests/RestClientChannelTests.swift +++ b/Test/Tests/RestClientChannelTests.swift @@ -1211,7 +1211,7 @@ class RestClientChannelTests: XCTestCase { // RSP3 - func skipped__test__035__presence__get__should_return_presence_fixture_data() throws { + func test__035__presence__get__should_return_presence_fixture_data() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) options.testOptions.channelNamePrefix = nil diff --git a/Test/Tests/RestClientPresenceTests.swift b/Test/Tests/RestClientPresenceTests.swift index 39f8c4791..6200276c7 100644 --- a/Test/Tests/RestClientPresenceTests.swift +++ b/Test/Tests/RestClientPresenceTests.swift @@ -6,7 +6,7 @@ class RestClientPresenceTests: XCTestCase { // RSP3 // RSP3a - func skipped__test__002__Presence__get__should_return_a_PaginatedResult_page_containing_the_first_page_of_members() throws { + func test__FLAKY__002__Presence__get__should_return_a_PaginatedResult_page_containing_the_first_page_of_members() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) let client = ARTRest(options: options) @@ -222,9 +222,7 @@ class RestClientPresenceTests: XCTestCase { // RSP4b // RSP4b2 - // Disabled because there's something wrong in the Sandbox. 
- // More info at https://ably-real-time.slack.com/archives/C030C5YLY/p1614269570000400 - func skipped__test__007__Presence__history__query_argument__direction_should_change_the_order_of_the_members() throws { + func test__FLAKY__007__Presence__history__query_argument__direction_should_change_the_order_of_the_members() throws { let test = Test() let options = try AblyTests.commonAppSetup(for: test) let client = ARTRest(options: options) diff --git a/Test/Tests/RestClientStatsTests.swift b/Test/Tests/RestClientStatsTests.swift index ff67d4d64..9f17a45d6 100644 --- a/Test/Tests/RestClientStatsTests.swift +++ b/Test/Tests/RestClientStatsTests.swift @@ -99,7 +99,7 @@ class RestClientStatsTests: XCTestCase { statsOptions = try postTestStats(statsFixtures, for: test) } - func skipped__test__001__RestClient__stats__result__should_match_minute_level_inbound_and_outbound_fixture_data__forwards_() throws { + func test__FLAKY__001__RestClient__stats__result__should_match_minute_level_inbound_and_outbound_fixture_data__forwards_() throws { let test = Test() try beforeEach__RestClient__stats__result(for: test) @@ -164,7 +164,7 @@ class RestClientStatsTests: XCTestCase { XCTAssertEqual(totalOutbound, 20 + 10 + 40) } - func skipped__test__004__RestClient__stats__result__should_match_month_level_inbound_and_outbound_fixture_data__forwards_() throws { + func test__FLAKY__004__RestClient__stats__result__should_match_month_level_inbound_and_outbound_fixture_data__forwards_() throws { let test = Test() try beforeEach__RestClient__stats__result(for: test) @@ -183,7 +183,7 @@ class RestClientStatsTests: XCTestCase { XCTAssertEqual(totalOutbound, 20 + 10 + 40) } - func skipped__test__005__RestClient__stats__result__should_contain_only_one_item_when_limit_is_1__backwards() throws { + func test__FLAKY__005__RestClient__stats__result__should_contain_only_one_item_when_limit_is_1__backwards() throws { let test = Test() try beforeEach__RestClient__stats__result(for: test) @@ -269,7 +269,7 @@ class RestClientStatsTests: XCTestCase { XCTAssertEqual((firstPageAgain.items)[0].inbound.all.messages.data, 7000) } - func skipped__test__008__RestClient__stats__result__should_be_paginated_according_to_the_limit__fowards_() throws { + func test__FLAKY__008__RestClient__stats__result__should_be_paginated_according_to_the_limit__fowards_() throws { let test = Test() try beforeEach__RestClient__stats__result(for: test) diff --git a/fastlane/Scanfile b/fastlane/Scanfile index fea35296b..da002c455 100644 --- a/fastlane/Scanfile +++ b/fastlane/Scanfile @@ -1,8 +1,10 @@ open_report false -clean true +clean false skip_slack true ensure_devices_found true output_types "junit" # I'm being explicit about this because I want to make sure it's being used, to make sure that trainer is used to generate the JUnit report xcodebuild_formatter "xcbeautify" result_bundle true +# Just for printing inside these loop jobs +buildlog_path "xcodebuild_output"
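# The new scripts and workflow in this diff are meant to be driven roughly as
# follows. This is a sketch only: the workflow/job counts and the test case ID
# are placeholders, and all commands assume the repository root as the working
# directory.

  # 1. Fan out the template integration-test workflow (the script currently
  #    points at .github/workflows/integration-test-iOS16_2.yaml) into several
  #    workflow files, each chaining a number of back-to-back jobs.
  ./Scripts/set-ci-length-and-parallelism.sh --workflows 3 --jobs-per-workflow 2

  # 2. Each generated job invokes the loop runner, which keeps re-running the
  #    Fastlane lane and uploading each iteration's results until the GitHub
  #    job time budget is nearly exhausted.
  Scripts/continuously-run-tests-and-upload-results.sh --lane test_iOS17_2

  # 3. Afterwards, fetch the raw xcodebuild output for a flaky test case from
  #    the observability server and the cached GitHub job logs.
  ./Scripts/fetch-test-logs.sh --repo ably/ably-cocoa \
    --test-case-id <test-case-id> --filter "branches[]=main"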