Skip to content

Commit

Permalink
Merge pull request #115 from Perfexionists/typing
Browse files Browse the repository at this point in the history
Add typing annotation to Perun
  • Loading branch information
JiriPavela authored Oct 5, 2023
2 parents 0ff4ecf + 8973e4c commit ba11799
Show file tree
Hide file tree
Showing 127 changed files with 3,806 additions and 2,307 deletions.
38 changes: 38 additions & 0 deletions .github/workflows/actions/setup/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
name: Setup Environment
description: Performs setup of Perun, Python and its dependencies
inputs:
python-version:
description: 'Python version to run'
required: true

runs:
using: 'composite'
steps:
- name: Set up Python ${{ inputs.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ inputs.python-version }}
cache: 'pip'

- name: Install Unix dependencies
shell: sh
run: |
sudo apt-get -qq update
sudo apt-get install time libunwind8-dev g++-9 gcc-9 git
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 90 --slave /usr/bin/g++ g++ /usr/bin/g++-9 --slave /usr/bin/gcov gcov /usr/bin/gcov-9
- name: Initialize Git
shell: sh
run: |
git config --global user.email "[email protected]"
git config --global user.name "Perun Allmighty"
- name: Upgrade pip and setuptools dependencies
shell: sh
run: |
python -m pip install --upgrade pip setuptools
- name: Install Tox
shell: sh
run: |
pip install tox
46 changes: 23 additions & 23 deletions .github/workflows/ubuntu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,34 +14,15 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11"]
python-version: ["3.9", "3.10", "3.11"]

steps:
- uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
- name: Setup Python, Ubuntu and Python environment
uses: ./.github/workflows/actions/setup
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'

- name: Install Unix dependencies
run: |
sudo apt-get install time libunwind8-dev g++-9 gcc-9 git
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 90 --slave /usr/bin/g++ g++ /usr/bin/g++-9 --slave /usr/bin/gcov gcov /usr/bin/gcov-9
- name: Initialize Git
run: |
git config --global user.email "[email protected]"
git config --global user.name "Perun Allmighty"
- name: Upgrade pip and setuptools dependencies
run: |
python -m pip install --upgrade pip setuptools
- name: Install Tox
run: |
pip install tox

- name: Execute tests for Python ${{ matrix.python-version }} using Tox
run: tox -e py
Expand All @@ -54,4 +35,23 @@ jobs:
uses: codecov/codecov-action@v3
with:
flags: coverage-${{ matrix.python-version }}
verbose: true
verbose: true

typing:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: ["3.9", "3.10", "3.11"]

steps:
- uses: actions/checkout@v3

- name: Setup Python, Ubuntu and Python environment
uses: ./.github/workflows/actions/setup
with:
python-version: ${{ matrix.python-version }}

- name: Check type correctness for Python ${{ matrix.python-version }} using Tox
run: |
tox -e typing
5 changes: 4 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
test:
python3 -m pytest --cov=./ --cov-report term-missing:skip-covered tests/
python3 -m pytest --durations=10 --cov=./ --cov-report term-missing:skip-covered tests/

check:
mypy perun/

# Setuptools fails for nested requirements file when installed as `pip install .`, so sadly no
# simple "dev" optional dependency
Expand Down
3 changes: 3 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,9 @@ makes it a good store of profiles along with the context.
Installation
------------

Note that we are no longer maintaining support for Python 3.8. Perun may work, but we strongly
advise upgrading your Python to a newer version.

You can install Perun as follows:

git clone https://github.com/tfiedor/perun.git
Expand Down
1 change: 1 addition & 0 deletions codecov.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
ignore:
- "perun/collect/trace"
- "perun/thirdparty"
- "tests/test_tracer"

2 changes: 2 additions & 0 deletions perun/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@
Perun currently exists as CLI application, with GUI application being in development.
"""
from __future__ import annotations

import importlib.metadata

__version__ = importlib.metadata.version(__package__)
19 changes: 12 additions & 7 deletions perun/check/average_amount_threshold.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,19 +34,22 @@
changed from about six seconds to hundred miliseconds. For these detected changes we report no
confidence at all.
"""
from __future__ import annotations

from typing import Any, Iterable

import perun.profile.convert as convert
import perun.check.factory as check
import perun.postprocess.regression_analysis.tools as tools

from perun.utils.structs import DegradationInfo
from perun.utils.structs import DegradationInfo, PerformanceChange
from perun.profile.factory import Profile


DEGRADATION_THRESHOLD = 2.0
OPTIMIZATION_THRESHOLD = 0.5


def get_averages(profile):
def get_averages(profile: Profile) -> dict[str, float]:
"""Retrieves the averages of all amounts grouped by the uid
:param profiles.Profile profile: dictionary representation of profile
Expand All @@ -59,7 +62,9 @@ def get_averages(profile):
return data_frame.groupby('uid').mean(numeric_only=True).to_dict()['amount']


def average_amount_threshold(baseline_profile, target_profile, **_):
def average_amount_threshold(
baseline_profile: Profile, target_profile: Profile, **_: Any
) -> Iterable[DegradationInfo]:
"""Checks between pair of (baseline, target) profiles, whether the can be degradation detected
This is based on simple heuristic, where for the same function models, we only check the order
Expand All @@ -81,11 +86,11 @@ def average_amount_threshold(baseline_profile, target_profile, **_):
if baseline_average is not None:
difference_ratio = tools.safe_division(target_average, baseline_average)
if difference_ratio >= DEGRADATION_THRESHOLD:
change = check.PerformanceChange.Degradation
change = PerformanceChange.Degradation
elif 0.0 < difference_ratio <= OPTIMIZATION_THRESHOLD:
change = check.PerformanceChange.Optimization
change = PerformanceChange.Optimization
else:
change = check.PerformanceChange.NoChange
change = PerformanceChange.NoChange

yield DegradationInfo(
res=change,
Expand Down
17 changes: 11 additions & 6 deletions perun/check/best_model_order_equality.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,11 +39,14 @@
value `1.0` (which would mean, that the model precisely fits the measured values), this signifies
that the best model fit the data tightly and hence the detected optimization is **not spurious**.
"""
from __future__ import annotations

from typing import Any, Iterable

import perun.check.factory as check
import perun.check.general_detection as detection

from perun.utils.structs import DegradationInfo
from perun.utils.structs import DegradationInfo, PerformanceChange
from perun.profile.factory import Profile


CONFIDENCE_THRESHOLD = 0.9
Expand All @@ -57,7 +60,9 @@
]


def best_model_order_equality(baseline_profile, target_profile, **_):
def best_model_order_equality(
baseline_profile: Profile, target_profile: Profile, **_: Any
) -> Iterable[DegradationInfo]:
"""Checks between pair of (baseline, target) profiles, whether the can be degradation detected
This is based on simple heuristic, where for the same function models, we only check the order
Expand All @@ -80,12 +85,12 @@ def best_model_order_equality(baseline_profile, target_profile, **_):
baseline_ordering = MODEL_ORDERING.index(best_baseline_model.type)
target_ordering = MODEL_ORDERING.index(best_model.type)
if baseline_ordering > target_ordering:
change = check.PerformanceChange.Optimization
change = PerformanceChange.Optimization
else:
change = check.PerformanceChange.Degradation
change = PerformanceChange.Degradation
degradation_rate = target_ordering - baseline_ordering
else:
change = check.PerformanceChange.NoChange
change = PerformanceChange.NoChange
degradation_rate = 0

yield DegradationInfo(
Expand Down
25 changes: 13 additions & 12 deletions perun/check/exclusive_time_outliers.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,24 +63,25 @@
reported as the IQR multiple of `110.46`.
"""
from __future__ import annotations

from typing import Optional, Generator, Dict, List, Tuple
from typing import Optional, Iterable, Any
from difflib import get_close_matches

import pandas as pd
import numpy as np
from scipy import stats

from perun.utils.structs import DegradationInfo
from perun.check.factory import PerformanceChange
from perun.profile import convert
from perun.logic import config

from perun.utils.structs import PerformanceChange
from perun.profile.factory import Profile


OldLocMap = Dict[str, str]
NewLocMap = Dict[str, str]
OldLocMap = dict[str, str]
NewLocMap = dict[str, str]


MZS_CORRECTION = 0.6745
Expand All @@ -91,8 +92,8 @@


def exclusive_time_outliers(
baseline_profile: Profile, target_profile: Profile, **_
) -> Generator[DegradationInfo, None, None]:
baseline_profile: Profile, target_profile: Profile, **_: Any
) -> Iterable[DegradationInfo]:
"""Checks the pair of (baseline, target) profiles for changes in function exclusive times.
The method works by detecting 'exclusive time delta' outliers and classifying their severity
Expand Down Expand Up @@ -147,14 +148,14 @@ def __init__(self, baseline_profile: Profile, target_profile: Profile) -> None:
)
if self.location_filter == "*":
self.location_filter = None
self.cut_off: Optional[float] = float(config.lookup_key_recursively(
self.cut_off: float = float(config.lookup_key_recursively(
"degradation.cutoff", "0.0")
)
self.df: pd.DataFrame = self._merge_and_diff(
self._prepare_profile(baseline_profile), self._prepare_profile(target_profile)
)

def detect_changes(self) -> Generator[DegradationInfo, None, None]:
def detect_changes(self) -> Iterable[DegradationInfo]:
"""Detect and report the exclusive time changes and the overall degradation / optimization.
The method runs three outlier detection methods, where z-score flags the most significant
Expand Down Expand Up @@ -217,7 +218,7 @@ def detect_changes(self) -> Generator[DegradationInfo, None, None]:
)

@staticmethod
def _determine_result_and_confidence(row: pd.Series) -> Tuple[PerformanceChange, str, float]:
def _determine_result_and_confidence(row: pd.Series[Any]) -> tuple[PerformanceChange, str, float]:
"""Select the severity, confidence type and confidence rate of the exclusive time change.
:param row: DataFrame row of specific 'uid' (i.e., function) record
Expand Down Expand Up @@ -363,7 +364,7 @@ def _merge_and_diff(
:return: the merged and extended DataFrame
"""
def _delta_exc(row: pd.Series) -> float:
def _delta_exc(row: pd.Series[Any]) -> float:
""" Helper function for properly computing the absolute exclusive time delta even
when the function is new / deleted (in those cases, one of the exclusive times is
nan, which breaks the computation).
Expand Down Expand Up @@ -416,8 +417,8 @@ def _delta_exc(row: pd.Series) -> float:


def _map_similar_names(
strings_old: List[str], strings_new: List[str]
) -> Tuple[OldLocMap, NewLocMap]:
strings_old: list[str], strings_new: list[str]
) -> tuple[OldLocMap, NewLocMap]:
"""Map profile location names that might have slightly changed in different versions.
E.g., due to the names containing the version number (mylib-3.4 vs mylib-3.5).
Expand Down
Loading

0 comments on commit ba11799

Please sign in to comment.