diff --git a/captum/_utils/av.py b/captum/_utils/av.py
index 2ab4ae268..97329a8b1 100644
--- a/captum/_utils/av.py
+++ b/captum/_utils/av.py
@@ -351,8 +351,7 @@ def _compute_and_save_activations(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         identifier: str,
         num_id: str,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         load_from_disk: bool = True,
     ) -> None:
         r"""
diff --git a/captum/_utils/common.py b/captum/_utils/common.py
index 12784bfaf..6459cd8aa 100644
--- a/captum/_utils/common.py
+++ b/captum/_utils/common.py
@@ -273,9 +273,25 @@ def _format_float_or_tensor_into_tuples(
     return inputs
 
 
+@overload
+def _format_additional_forward_args(
+    # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
+    additional_forward_args: Union[Tensor, Tuple]
+    # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
+) -> Tuple: ...
+
+
+@overload
+def _format_additional_forward_args(  # type: ignore
+    additional_forward_args: Optional[object],
+    # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
+) -> Union[None, Tuple]: ...
+
+
 def _format_additional_forward_args(
     additional_forward_args: Optional[object],
-) -> Union[None, Tuple[object, ...]]:
+    # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
+) -> Union[None, Tuple]:
     if additional_forward_args is not None and not isinstance(
         additional_forward_args, tuple
     ):
@@ -284,8 +300,8 @@ def _format_additional_forward_args(
 
 
 def _expand_additional_forward_args(
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any,
+    # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
+    additional_forward_args: Union[None, Tuple],
     n_steps: int,
     expansion_type: ExpansionTypes = ExpansionTypes.repeat,
     # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
@@ -557,8 +573,7 @@ def _run_forward(
     # pyre-fixme[2]: Parameter annotation cannot be `Any`.
     inputs: Any,
     target: TargetType = None,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
 ) -> Union[Tensor, Future[Tensor]]:
     forward_func_args = signature(forward_func).parameters
     if len(forward_func_args) == 0:
diff --git a/captum/_utils/gradient.py b/captum/_utils/gradient.py
index 2dab8154d..69502b744 100644
--- a/captum/_utils/gradient.py
+++ b/captum/_utils/gradient.py
@@ -104,8 +104,7 @@ def compute_gradients(
     forward_fn: Callable,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     target_ind: TargetType = None,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
 ) -> Tuple[Tensor, ...]:
     r"""
     Computes gradients of the output with respect to inputs for an
@@ -175,8 +174,7 @@ def _forward_layer_eval(
     forward_fn: Callable,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     layer: List[Module],
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     device_ids: Union[None, List[int]] = None,
     attribute_to_layer_input: bool = False,
     grad_enabled: bool = False,
@@ -191,8 +189,7 @@ def _forward_layer_eval(
     forward_fn: Callable,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     layer: Module,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     device_ids: Union[None, List[int]] = None,
     attribute_to_layer_input: bool = False,
     grad_enabled: bool = False,
@@ -204,7 +201,7 @@ def _forward_layer_eval(
     forward_fn: Callable,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     layer: ModuleOrModuleList,
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     device_ids: Union[None, List[int]] = None,
     attribute_to_layer_input: bool = False,
     grad_enabled: bool = False,
@@ -233,8 +230,7 @@ def _forward_layer_distributed_eval(
     inputs: Any,
     layer: ModuleOrModuleList,
     target_ind: TargetType = None,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     attribute_to_layer_input: bool = False,
     forward_hook_with_return: Literal[False] = False,
     require_layer_grads: bool = False,
@@ -250,7 +246,7 @@ def _forward_layer_distributed_eval(
     inputs: Any,
     layer: ModuleOrModuleList,
     target_ind: TargetType = None,
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     attribute_to_layer_input: bool = False,
     *,
     forward_hook_with_return: Literal[True],
@@ -264,7 +260,7 @@ def _forward_layer_distributed_eval(
     inputs: Any,
     layer: ModuleOrModuleList,
     target_ind: TargetType = None,
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     attribute_to_layer_input: bool = False,
     forward_hook_with_return: bool = False,
     require_layer_grads: bool = False,
@@ -427,8 +423,7 @@ def _forward_layer_eval_with_neuron_grads(
     forward_fn: Callable,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     layer: Module,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     *,
     # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
     gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
@@ -446,7 +441,7 @@ def _forward_layer_eval_with_neuron_grads(
     forward_fn: Callable,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     layer: List[Module],
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     gradient_neuron_selector: None = None,
     grad_enabled: bool = False,
     device_ids: Union[None, List[int]] = None,
@@ -462,7 +457,7 @@ def _forward_layer_eval_with_neuron_grads(
     forward_fn: Callable,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     layer: Module,
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     gradient_neuron_selector: None = None,
     grad_enabled: bool = False,
     device_ids: Union[None, List[int]] = None,
@@ -475,7 +470,7 @@ def _forward_layer_eval_with_neuron_grads(
     forward_fn: Callable,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     layer: ModuleOrModuleList,
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
     gradient_neuron_selector: Union[
         None, int, Tuple[Union[int, slice], ...], Callable
@@ -549,8 +544,7 @@ def compute_layer_gradients_and_eval(
     layer: Module,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     target_ind: TargetType = None,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     *,
     # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
     gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
@@ -571,7 +565,7 @@ def compute_layer_gradients_and_eval(
     layer: List[Module],
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     target_ind: TargetType = None,
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     gradient_neuron_selector: None = None,
     device_ids: Union[None, List[int]] = None,
     attribute_to_layer_input: bool = False,
@@ -590,7 +584,7 @@ def compute_layer_gradients_and_eval(
     layer: Module,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     target_ind: TargetType = None,
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     gradient_neuron_selector: None = None,
     device_ids: Union[None, List[int]] = None,
     attribute_to_layer_input: bool = False,
@@ -606,7 +600,7 @@ def compute_layer_gradients_and_eval(
     layer: ModuleOrModuleList,
     inputs: Union[Tensor, Tuple[Tensor, ...]],
     target_ind: TargetType = None,
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
     gradient_neuron_selector: Union[
         None, int, Tuple[Union[int, slice], ...], Callable
@@ -792,8 +786,7 @@ def grad_fn(
     forward_fn: Callable,
     inputs: TensorOrTupleOfTensorsGeneric,
     target_ind: TargetType = None,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
 ) -> Tuple[Tensor, ...]:
     _, grads = _forward_layer_eval_with_neuron_grads(
         forward_fn,
diff --git a/captum/attr/_core/deep_lift.py b/captum/attr/_core/deep_lift.py
index 8bd9dbbfe..f909bf79c 100644
--- a/captum/attr/_core/deep_lift.py
+++ b/captum/attr/_core/deep_lift.py
@@ -3,7 +3,7 @@
 # pyre-strict
 import typing
 import warnings
-from typing import Callable, cast, Dict, List, Literal, Tuple, Type, Union
+from typing import Callable, cast, Dict, List, Literal, Optional, Tuple, Type, Union
 
 import torch
 import torch.nn as nn
@@ -117,7 +117,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         *,
         return_convergence_delta: Literal[True],
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
@@ -129,7 +129,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         return_convergence_delta: Literal[False] = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
     ) -> TensorOrTupleOfTensorsGeneric: ...
@@ -140,7 +140,7 @@ def attribute(  # type: ignore
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         return_convergence_delta: bool = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
     ) -> Union[
@@ -370,7 +370,7 @@ def _construct_forward_func(
         forward_func: Callable[..., Tensor],
         inputs: Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
     ) -> Callable[[], Tensor]:
         def forward_fn() -> Tensor:
             model_out = cast(
@@ -604,7 +604,7 @@ def attribute(
             TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
         ],
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         *,
         return_convergence_delta: Literal[True],
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
@@ -618,7 +618,7 @@ def attribute(
             TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
         ],
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         return_convergence_delta: Literal[False] = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
     ) -> TensorOrTupleOfTensorsGeneric: ...
@@ -631,7 +631,7 @@ def attribute(  # type: ignore
             TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
         ],
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         return_convergence_delta: bool = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
     ) -> Union[
@@ -840,7 +840,7 @@ def _expand_inputs_baselines_targets(
         baselines: Tuple[Tensor, ...],
         inputs: Tuple[Tensor, ...],
         target: TargetType,
-        additional_forward_args: object,
+        additional_forward_args: Optional[Tuple[object, ...]],
     ) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], TargetType, object]:
         inp_bsz = inputs[0].shape[0]
         base_bsz = baselines[0].shape[0]
diff --git a/captum/attr/_core/feature_ablation.py b/captum/attr/_core/feature_ablation.py
index abdb7e53f..03e66ed3e 100644
--- a/captum/attr/_core/feature_ablation.py
+++ b/captum/attr/_core/feature_ablation.py
@@ -75,7 +75,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
         perturbations_per_eval: int = 1,
         show_progress: bool = False,
@@ -408,7 +408,7 @@ def attribute_future(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
         perturbations_per_eval: int = 1,
         show_progress: bool = False,
@@ -655,7 +655,7 @@ def _ith_input_ablation_generator(
         self,
         i: int,
         inputs: TensorOrTupleOfTensorsGeneric,
-        additional_args: object,
+        additional_args: Optional[Tuple[object, ...]],
         target: TargetType,
         baselines: BaselineType,
         input_mask: Union[None, Tensor, Tuple[Tensor, ...]],
diff --git a/captum/attr/_core/feature_permutation.py b/captum/attr/_core/feature_permutation.py
index 19287b6ec..79c519602 100644
--- a/captum/attr/_core/feature_permutation.py
+++ b/captum/attr/_core/feature_permutation.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, Tuple, Union
+from typing import Any, Callable, Optional, Tuple, Union
 
 import torch
 from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
@@ -99,7 +99,7 @@ def attribute(  # type: ignore
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
         perturbations_per_eval: int = 1,
         show_progress: bool = False,
@@ -280,7 +280,7 @@ def attribute_future(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
         perturbations_per_eval: int = 1,
         show_progress: bool = False,
diff --git a/captum/attr/_core/gradient_shap.py b/captum/attr/_core/gradient_shap.py
index 51328c7cd..9cf9e85a4 100644
--- a/captum/attr/_core/gradient_shap.py
+++ b/captum/attr/_core/gradient_shap.py
@@ -2,7 +2,7 @@
 # pyre-strict
 
 import typing
-from typing import Callable, Literal, Tuple, Union
+from typing import Callable, Literal, Optional, Tuple, Union
 
 import numpy as np
 import torch
@@ -91,7 +91,7 @@ def attribute(
         n_samples: int = 5,
         stdevs: Union[float, Tuple[float, ...]] = 0.0,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         *,
         return_convergence_delta: Literal[True],
     ) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]: ...
@@ -106,7 +106,7 @@ def attribute(
         n_samples: int = 5,
         stdevs: Union[float, Tuple[float, ...]] = 0.0,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: Literal[False] = False,
     ) -> TensorOrTupleOfTensorsGeneric: ...
 
@@ -122,7 +122,7 @@ def attribute(
         n_samples: int = 5,
         stdevs: Union[float, Tuple[float, ...]] = 0.0,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: bool = False,
     ) -> Union[
         TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
@@ -336,7 +336,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         *,
         return_convergence_delta: Literal[True],
     ) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]: ...
@@ -347,7 +347,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: Literal[False] = False,
     ) -> TensorOrTupleOfTensorsGeneric: ...
@@ -357,7 +357,7 @@ def attribute(  # type: ignore
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: bool = False,
     ) -> Union[
         TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
diff --git a/captum/attr/_core/guided_backprop_deconvnet.py b/captum/attr/_core/guided_backprop_deconvnet.py
index 5dc444e45..35b7f1993 100644
--- a/captum/attr/_core/guided_backprop_deconvnet.py
+++ b/captum/attr/_core/guided_backprop_deconvnet.py
@@ -2,7 +2,7 @@
 # pyre-strict
 
 import warnings
-from typing import Callable, List, Tuple, Union
+from typing import Callable, List, Optional, Tuple, Union
 
 import torch
 import torch.nn.functional as F
@@ -45,7 +45,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
     ) -> TensorOrTupleOfTensorsGeneric:
         r"""
         Computes attribution by overriding relu gradients. Based on constructor
@@ -146,7 +146,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
     ) -> TensorOrTupleOfTensorsGeneric:
         r"""
         Args:
@@ -255,7 +255,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
     ) -> TensorOrTupleOfTensorsGeneric:
         r"""
         Args:
diff --git a/captum/attr/_core/guided_grad_cam.py b/captum/attr/_core/guided_grad_cam.py
index d4d197849..9f89f387d 100644
--- a/captum/attr/_core/guided_grad_cam.py
+++ b/captum/attr/_core/guided_grad_cam.py
@@ -2,7 +2,7 @@
 # pyre-strict
 
 import warnings
-from typing import List, Union
+from typing import List, Optional, Union
 
 import torch
 from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
@@ -72,7 +72,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         interpolate_mode: str = "nearest",
         attribute_to_layer_input: bool = False,
     ) -> TensorOrTupleOfTensorsGeneric:
diff --git a/captum/attr/_core/input_x_gradient.py b/captum/attr/_core/input_x_gradient.py
index bfaa75def..8686a0557 100644
--- a/captum/attr/_core/input_x_gradient.py
+++ b/captum/attr/_core/input_x_gradient.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Callable
+from typing import Callable, Optional
 
 from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
 from captum._utils.gradient import (
@@ -35,7 +35,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
     ) -> TensorOrTupleOfTensorsGeneric:
         r"""
         Args:
diff --git a/captum/attr/_core/integrated_gradients.py b/captum/attr/_core/integrated_gradients.py
index 1abbcc69f..825c2cae6 100644
--- a/captum/attr/_core/integrated_gradients.py
+++ b/captum/attr/_core/integrated_gradients.py
@@ -2,7 +2,7 @@
 # pyre-strict
 
 import typing
-from typing import Any, Callable, List, Literal, Tuple, Union
+from typing import Callable, List, Literal, Optional, Tuple, Union
 
 import torch
 from captum._utils.common import (
@@ -79,7 +79,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
@@ -95,8 +95,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
@@ -109,7 +108,7 @@ def attribute(  # type: ignore
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
@@ -328,7 +327,7 @@ def _attribute(
         inputs: Tuple[Tensor, ...],
         baselines: Tuple[Union[Tensor, int, float], ...],
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         step_sizes_and_alphas: Union[None, Tuple[List[float], List[float]]] = None,
diff --git a/captum/attr/_core/kernel_shap.py b/captum/attr/_core/kernel_shap.py
index 89d22990d..6fdbfcb9b 100644
--- a/captum/attr/_core/kernel_shap.py
+++ b/captum/attr/_core/kernel_shap.py
@@ -2,7 +2,7 @@
 # pyre-strict
 
 
-from typing import Callable, cast, Generator, Tuple, Union
+from typing import Callable, cast, Generator, Optional, Tuple, Union
 
 import torch
 from captum._utils.models.linear_model import SkLearnLinearRegression
@@ -49,7 +49,7 @@ def attribute(  # type: ignore
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
         n_samples: int = 25,
         perturbations_per_eval: int = 1,
diff --git a/captum/attr/_core/layer/grad_cam.py b/captum/attr/_core/layer/grad_cam.py
index 01c14a405..eed639760 100644
--- a/captum/attr/_core/layer/grad_cam.py
+++ b/captum/attr/_core/layer/grad_cam.py
@@ -82,8 +82,7 @@ def attribute(
         self,
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         attribute_to_layer_input: bool = False,
         relu_attributions: bool = False,
         attr_dim_summation: bool = True,
diff --git a/captum/attr/_core/layer/internal_influence.py b/captum/attr/_core/layer/internal_influence.py
index 548d8d228..47b69ffb2 100644
--- a/captum/attr/_core/layer/internal_influence.py
+++ b/captum/attr/_core/layer/internal_influence.py
@@ -72,8 +72,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType = None,
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
@@ -254,8 +253,7 @@ def _attribute(
         inputs: Tuple[Tensor, ...],
         baselines: Tuple[Union[Tensor, int, float], ...],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         attribute_to_layer_input: bool = False,
diff --git a/captum/attr/_core/layer/layer_activation.py b/captum/attr/_core/layer/layer_activation.py
index 9ef5c3866..076323a27 100644
--- a/captum/attr/_core/layer/layer_activation.py
+++ b/captum/attr/_core/layer/layer_activation.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, cast, List, Tuple, Union
+from typing import Callable, cast, List, Optional, Tuple, Union
 
 import torch
 from captum._utils.common import _format_output
@@ -51,8 +51,7 @@ def __init__(
     def attribute(
         self,
         inputs: Union[Tensor, Tuple[Tensor, ...]],
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         attribute_to_layer_input: bool = False,
     ) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
         r"""
diff --git a/captum/attr/_core/layer/layer_conductance.py b/captum/attr/_core/layer/layer_conductance.py
index 54ec6fdb2..1f1a5f467 100644
--- a/captum/attr/_core/layer/layer_conductance.py
+++ b/captum/attr/_core/layer/layer_conductance.py
@@ -80,8 +80,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType = None,
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
@@ -99,7 +98,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
@@ -118,7 +117,7 @@ def attribute(
             None, int, float, Tensor, Tuple[Union[int, float, Tensor], ...]
         ] = None,
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
@@ -333,8 +332,7 @@ def _attribute(
         inputs: Tuple[Tensor, ...],
         baselines: Tuple[Union[Tensor, int, float], ...],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         attribute_to_layer_input: bool = False,
diff --git a/captum/attr/_core/layer/layer_deep_lift.py b/captum/attr/_core/layer/layer_deep_lift.py
index 85d81cd5e..a126971cf 100644
--- a/captum/attr/_core/layer/layer_deep_lift.py
+++ b/captum/attr/_core/layer/layer_deep_lift.py
@@ -101,8 +101,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType = None,
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         *,
         return_convergence_delta: Literal[True],
         attribute_to_layer_input: bool = False,
@@ -116,8 +115,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType = None,
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: Literal[False] = False,
         attribute_to_layer_input: bool = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
@@ -132,7 +130,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: bool = False,
         attribute_to_layer_input: bool = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
@@ -445,8 +443,7 @@ def attribute(
             Tensor, Tuple[Tensor, ...], Callable[..., Union[Tensor, Tuple[Tensor, ...]]]
         ],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         *,
         return_convergence_delta: Literal[True],
         attribute_to_layer_input: bool = False,
@@ -463,8 +460,7 @@ def attribute(
             Tensor, Tuple[Tensor, ...], Callable[..., Union[Tensor, Tuple[Tensor, ...]]]
         ],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         return_convergence_delta: Literal[False] = False,
         attribute_to_layer_input: bool = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
@@ -480,7 +476,7 @@ def attribute(
             Tensor, Tuple[Tensor, ...], Callable[..., Union[Tensor, Tuple[Tensor, ...]]]
         ],
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         return_convergence_delta: bool = False,
         attribute_to_layer_input: bool = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
diff --git a/captum/attr/_core/layer/layer_feature_ablation.py b/captum/attr/_core/layer/layer_feature_ablation.py
index 35233cbc8..c0297d954 100644
--- a/captum/attr/_core/layer/layer_feature_ablation.py
+++ b/captum/attr/_core/layer/layer_feature_ablation.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, cast, Dict, List, Tuple, Type, Union
+from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union
 
 import torch
 from captum._utils.common import (
@@ -69,7 +69,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         layer_baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         layer_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
         attribute_to_layer_input: bool = False,
         perturbations_per_eval: int = 1,
diff --git a/captum/attr/_core/layer/layer_feature_permutation.py b/captum/attr/_core/layer/layer_feature_permutation.py
index aee338fe6..8db7b965d 100644
--- a/captum/attr/_core/layer/layer_feature_permutation.py
+++ b/captum/attr/_core/layer/layer_feature_permutation.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, cast, Dict, List, Tuple, Type, Union
+from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union
 
 import torch
 from captum._utils.common import (
@@ -62,7 +62,7 @@ def attribute(
         self,
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         layer_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
         perturbations_per_eval: int = 1,
     ) -> Union[Tensor, Tuple[Tensor, ...]]:
diff --git a/captum/attr/_core/layer/layer_gradient_shap.py b/captum/attr/_core/layer/layer_gradient_shap.py
index dcfe109fa..c9987eb00 100644
--- a/captum/attr/_core/layer/layer_gradient_shap.py
+++ b/captum/attr/_core/layer/layer_gradient_shap.py
@@ -114,8 +114,7 @@ def attribute(
         n_samples: int = 5,
         stdevs: Union[float, Tuple[float, ...]] = 0.0,
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         *,
         return_convergence_delta: Literal[True],
         attribute_to_layer_input: bool = False,
@@ -132,7 +131,7 @@ def attribute(
         n_samples: int = 5,
         stdevs: Union[float, Tuple[float, ...]] = 0.0,
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: Literal[False] = False,
         attribute_to_layer_input: bool = False,
     ) -> Union[Tensor, Tuple[Tensor, ...]]: ...
@@ -146,7 +145,7 @@ def attribute(
         n_samples: int = 5,
         stdevs: Union[float, Tuple[float, ...]] = 0.0,
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: bool = False,
         attribute_to_layer_input: bool = False,
     ) -> Union[
@@ -392,8 +391,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: Union[Tensor, Tuple[Tensor, ...]],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         *,
         return_convergence_delta: Literal[True],
         attribute_to_layer_input: bool = False,
@@ -406,8 +404,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: Union[Tensor, Tuple[Tensor, ...]],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: Literal[False] = False,
         attribute_to_layer_input: bool = False,
         grad_kwargs: Optional[Dict[str, Any]] = None,
@@ -419,7 +416,7 @@ def attribute(  # type: ignore
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: Union[Tensor, Tuple[Tensor, ...]],
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: bool = False,
         attribute_to_layer_input: bool = False,
         grad_kwargs: Optional[Dict[str, Any]] = None,
diff --git a/captum/attr/_core/layer/layer_gradient_x_activation.py b/captum/attr/_core/layer/layer_gradient_x_activation.py
index ebbb83655..c828a262e 100644
--- a/captum/attr/_core/layer/layer_gradient_x_activation.py
+++ b/captum/attr/_core/layer/layer_gradient_x_activation.py
@@ -77,8 +77,7 @@ def attribute(
         self,
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         attribute_to_layer_input: bool = False,
         grad_kwargs: Optional[Dict[str, Any]] = None,
     ) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
diff --git a/captum/attr/_core/layer/layer_integrated_gradients.py b/captum/attr/_core/layer/layer_integrated_gradients.py
index 5c56e858d..01067ca0d 100644
--- a/captum/attr/_core/layer/layer_integrated_gradients.py
+++ b/captum/attr/_core/layer/layer_integrated_gradients.py
@@ -3,7 +3,7 @@
 # pyre-strict
 import functools
 import warnings
-from typing import Any, Callable, cast, List, Literal, overload, Tuple, Union
+from typing import Callable, cast, List, Literal, Optional, overload, Tuple, Union
 
 import torch
 from captum._utils.common import (
@@ -120,8 +120,7 @@ def _gradient_func(
         forward_fn: Callable,
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         target_ind: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
     ) -> Tuple[Tensor, ...]:
         if self.device_ids is None or len(self.device_ids) == 0:
             scattered_inputs = (inputs,)
@@ -232,8 +231,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType,
         target: TargetType,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any,
+        additional_forward_args: Optional[object],
         n_steps: int,
         method: str,
         internal_batch_size: Union[None, int],
@@ -247,8 +245,7 @@ def attribute(  # type: ignore
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType,
         target: TargetType,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any,
+        additional_forward_args: Optional[object],
         n_steps: int,
         method: str,
         internal_batch_size: Union[None, int],
@@ -267,7 +264,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
@@ -289,7 +286,7 @@ def attribute(
         inputs: Union[Tensor, Tuple[Tensor, ...]],
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
diff --git a/captum/attr/_core/layer/layer_lrp.py b/captum/attr/_core/layer/layer_lrp.py
index cd774d855..ba6a73d70 100644
--- a/captum/attr/_core/layer/layer_lrp.py
+++ b/captum/attr/_core/layer/layer_lrp.py
@@ -2,7 +2,7 @@
 # pyre-strict
 
 import typing
-from typing import Any, cast, List, Literal, Tuple, Union
+from typing import Any, cast, List, Literal, Optional, Tuple, Union
 
 from captum._utils.common import (
     _format_tensor_into_tuples,
@@ -67,8 +67,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         *,
         return_convergence_delta: Literal[True],
         attribute_to_layer_input: bool = False,
@@ -83,8 +82,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: Literal[False] = False,
         attribute_to_layer_input: bool = False,
         verbose: bool = False,
@@ -94,7 +92,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: bool = False,
         attribute_to_layer_input: bool = False,
         verbose: bool = False,
diff --git a/captum/attr/_core/lime.py b/captum/attr/_core/lime.py
index 152bae2c3..8b5a6f86b 100644
--- a/captum/attr/_core/lime.py
+++ b/captum/attr/_core/lime.py
@@ -244,7 +244,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         n_samples: int = 50,
         perturbations_per_eval: int = 1,
         show_progress: bool = False,
@@ -876,7 +876,7 @@ def attribute(  # type: ignore
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
         n_samples: int = 25,
         perturbations_per_eval: int = 1,
@@ -1121,7 +1121,7 @@ def _attribute_kwargs(  # type: ignore
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
         n_samples: int = 25,
         perturbations_per_eval: int = 1,
diff --git a/captum/attr/_core/lrp.py b/captum/attr/_core/lrp.py
index d08b7b4de..c2c0dac74 100644
--- a/captum/attr/_core/lrp.py
+++ b/captum/attr/_core/lrp.py
@@ -4,7 +4,7 @@
 import typing
 
 from collections import defaultdict
-from typing import Any, Callable, cast, Dict, List, Literal, Tuple, Union
+from typing import Any, Callable, cast, Dict, List, Literal, Optional, Tuple, Union
 
 import torch.nn as nn
 from captum._utils.common import (
@@ -72,7 +72,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         *,
         return_convergence_delta: Literal[True],
         verbose: bool = False,
@@ -83,7 +83,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: Literal[False] = False,
         verbose: bool = False,
     ) -> TensorOrTupleOfTensorsGeneric: ...
@@ -95,7 +95,7 @@ def attribute(
         self,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         return_convergence_delta: bool = False,
         verbose: bool = False,
     ) -> Union[
@@ -367,7 +367,7 @@ def _compute_output_and_change_weights(
         self,
         inputs: Tuple[Tensor, ...],
         target: TargetType,
-        additional_forward_args: object,
+        additional_forward_args: Optional[object],
     ) -> Tensor:
         try:
             self._register_weight_hooks()
diff --git a/captum/attr/_core/neuron/neuron_conductance.py b/captum/attr/_core/neuron/neuron_conductance.py
index fcb8bafb5..359736d0c 100644
--- a/captum/attr/_core/neuron/neuron_conductance.py
+++ b/captum/attr/_core/neuron/neuron_conductance.py
@@ -98,8 +98,7 @@ def attribute(
         neuron_selector: Union[int, Tuple[int, ...], Callable],
         baselines: BaselineType = None,
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "riemann_trapezoid",
         internal_batch_size: Union[None, int] = None,
@@ -339,8 +338,7 @@ def _attribute(
         neuron_selector: Union[int, Tuple[int, ...], Callable],
         baselines: Tuple[Union[Tensor, int, float], ...],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "riemann_trapezoid",
         attribute_to_neuron_input: bool = False,
diff --git a/captum/attr/_core/neuron/neuron_deep_lift.py b/captum/attr/_core/neuron/neuron_deep_lift.py
index da7008372..f4648b43b 100644
--- a/captum/attr/_core/neuron/neuron_deep_lift.py
+++ b/captum/attr/_core/neuron/neuron_deep_lift.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, cast, Tuple, Union
+from typing import Callable, cast, Optional, Tuple, Union
 
 from captum._utils.gradient import construct_neuron_grad_fn
 from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
@@ -82,8 +82,7 @@ def attribute(
         # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
         neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
         baselines: BaselineType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         attribute_to_neuron_input: bool = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
     ) -> TensorOrTupleOfTensorsGeneric:
@@ -315,8 +314,7 @@ def attribute(
         baselines: Union[
             TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
         ],
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         attribute_to_neuron_input: bool = False,
         custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
     ) -> TensorOrTupleOfTensorsGeneric:
diff --git a/captum/attr/_core/neuron/neuron_feature_ablation.py b/captum/attr/_core/neuron/neuron_feature_ablation.py
index dfdd9833e..c72cf806a 100644
--- a/captum/attr/_core/neuron/neuron_feature_ablation.py
+++ b/captum/attr/_core/neuron/neuron_feature_ablation.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, cast, List, Tuple, Union
+from typing import Any, Callable, cast, List, Optional, Tuple, Union
 
 import torch
 from captum._utils.common import _verify_select_neuron
@@ -64,8 +64,7 @@ def attribute(
         # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
         neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
         baselines: BaselineType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
         attribute_to_neuron_input: bool = False,
         perturbations_per_eval: int = 1,
diff --git a/captum/attr/_core/neuron/neuron_gradient.py b/captum/attr/_core/neuron/neuron_gradient.py
index fef1f2c19..0e74382d3 100644
--- a/captum/attr/_core/neuron/neuron_gradient.py
+++ b/captum/attr/_core/neuron/neuron_gradient.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, List, Tuple, Union
+from typing import Callable, List, Optional, Tuple, Union
 
 from captum._utils.common import (
     _format_additional_forward_args,
@@ -62,8 +62,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
         neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         attribute_to_neuron_input: bool = False,
     ) -> TensorOrTupleOfTensorsGeneric:
         r"""
diff --git a/captum/attr/_core/neuron/neuron_gradient_shap.py b/captum/attr/_core/neuron/neuron_gradient_shap.py
index 897dea197..18b650723 100644
--- a/captum/attr/_core/neuron/neuron_gradient_shap.py
+++ b/captum/attr/_core/neuron/neuron_gradient_shap.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, List, Tuple, Union
+from typing import Callable, List, Optional, Tuple, Union
 
 from captum._utils.gradient import construct_neuron_grad_fn
 from captum._utils.typing import TensorOrTupleOfTensorsGeneric
@@ -104,8 +104,7 @@ def attribute(
         ],
         n_samples: int = 5,
         stdevs: float = 0.0,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         attribute_to_neuron_input: bool = False,
     ) -> TensorOrTupleOfTensorsGeneric:
         r"""
diff --git a/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.py b/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.py
index d95edf2e3..03f3e1418 100644
--- a/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.py
+++ b/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, List, Tuple, Union
+from typing import Callable, List, Optional, Tuple, Union
 
 from captum._utils.gradient import construct_neuron_grad_fn
 from captum._utils.typing import TensorOrTupleOfTensorsGeneric
@@ -62,8 +62,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
         neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         attribute_to_neuron_input: bool = False,
     ) -> TensorOrTupleOfTensorsGeneric:
         r"""
@@ -218,8 +217,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
         neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         attribute_to_neuron_input: bool = False,
     ) -> TensorOrTupleOfTensorsGeneric:
         r"""
diff --git a/captum/attr/_core/neuron/neuron_integrated_gradients.py b/captum/attr/_core/neuron/neuron_integrated_gradients.py
index 3bef9caa0..8e56221d7 100644
--- a/captum/attr/_core/neuron/neuron_integrated_gradients.py
+++ b/captum/attr/_core/neuron/neuron_integrated_gradients.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, List, Tuple, Union
+from typing import Callable, List, Optional, Tuple, Union
 
 from captum._utils.gradient import construct_neuron_grad_fn
 from captum._utils.typing import TensorOrTupleOfTensorsGeneric
@@ -79,8 +79,7 @@ def attribute(
         # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
         neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
         baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         n_steps: int = 50,
         method: str = "gausslegendre",
         internal_batch_size: Union[None, int] = None,
diff --git a/captum/attr/_core/occlusion.py b/captum/attr/_core/occlusion.py
index 62ac38e84..8361d1058 100644
--- a/captum/attr/_core/occlusion.py
+++ b/captum/attr/_core/occlusion.py
@@ -57,7 +57,7 @@ def attribute(  # type: ignore
         ] = None,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         perturbations_per_eval: int = 1,
         show_progress: bool = False,
     ) -> TensorOrTupleOfTensorsGeneric:
diff --git a/captum/attr/_core/saliency.py b/captum/attr/_core/saliency.py
index 8698099db..f4dce70cd 100644
--- a/captum/attr/_core/saliency.py
+++ b/captum/attr/_core/saliency.py
@@ -2,7 +2,7 @@
 # pyre-strict
 
 
-from typing import Callable
+from typing import Callable, Optional
 
 import torch
 from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
@@ -41,7 +41,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         target: TargetType = None,
         abs: bool = True,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
     ) -> TensorOrTupleOfTensorsGeneric:
         r"""
         Args:
diff --git a/captum/attr/_core/shapley_value.py b/captum/attr/_core/shapley_value.py
index 8f8d79137..83f1811ae 100644
--- a/captum/attr/_core/shapley_value.py
+++ b/captum/attr/_core/shapley_value.py
@@ -5,7 +5,7 @@
 import itertools
 import math
 import warnings
-from typing import Callable, cast, Iterable, Sequence, Tuple, Union
+from typing import Callable, cast, Iterable, Optional, Sequence, Tuple, Union
 
 import torch
 from captum._utils.common import (
@@ -108,7 +108,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[Tuple[object, ...]] = None,
         feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
         n_samples: int = 25,
         perturbations_per_eval: int = 1,
@@ -464,7 +464,7 @@ def attribute_future(self) -> Callable:
     def _perturbation_generator(
         self,
         inputs: Tuple[Tensor, ...],
-        additional_args: object,
+        additional_args: Optional[Tuple[object, ...]],
         target: TargetType,
         baselines: Tuple[Tensor, ...],
         input_masks: TensorOrTupleOfTensorsGeneric,
@@ -627,7 +627,7 @@ def attribute(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: TargetType = None,
-        additional_forward_args: object = None,
+        additional_forward_args: Optional[object] = None,
         feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
         perturbations_per_eval: int = 1,
         show_progress: bool = False,
diff --git a/captum/attr/_utils/attribution.py b/captum/attr/_utils/attribution.py
index 9cb9b297b..04f0b1d24 100644
--- a/captum/attr/_utils/attribution.py
+++ b/captum/attr/_utils/attribution.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
-from typing import Any, Callable, cast, Generic, List, Tuple, Type, Union
+from typing import Callable, cast, Generic, List, Optional, Tuple, Type, Union
 
 import torch
 import torch.nn.functional as F
@@ -209,8 +209,7 @@ def compute_convergence_delta(
         ],
         end_point: Union[Tensor, Tuple[Tensor, ...]],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
     ) -> Tensor:
         r"""
         Here we provide a specific implementation for `compute_convergence_delta`
diff --git a/captum/attr/_utils/batching.py b/captum/attr/_utils/batching.py
index 96e1e20a4..f7bce61ec 100644
--- a/captum/attr/_utils/batching.py
+++ b/captum/attr/_utils/batching.py
@@ -3,7 +3,7 @@
 # pyre-strict
 import typing
 import warnings
-from typing import Any, Callable, Iterator, Tuple, Union
+from typing import Any, Callable, Iterator, Optional, Tuple, Union
 
 import torch
 from captum._utils.common import (
@@ -139,8 +139,7 @@ def _tuple_splice_range(
 # pyre-fixme[3]: Return annotation cannot contain `Any`.
 def _batched_generator(
     inputs: TensorOrTupleOfTensorsGeneric,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     target_ind: TargetType = None,
     internal_batch_size: Union[None, int] = None,
 ) -> Iterator[Tuple[Tuple[Tensor, ...], Any, TargetType]]:
@@ -181,6 +180,9 @@ def _batched_generator(
         )
         # pyre-fixme[7]: Expected `Iterator[Tuple[typing.Tuple[Tensor, ...], typi...
         yield inputs_splice, _tuple_splice_range(
+            # pyre-fixme[6]: In call `_tuple_splice_range`, for 1st positional
+            #  argument, expected `None` but got
+            #  `Optional[typing.Tuple[typing.Any, ...]]`
             additional_forward_args,
             current_total,
             current_total + internal_batch_size,
@@ -195,8 +197,7 @@ def _batched_generator(
 def _batched_operator(
     operator: Callable[..., TupleOrTensorOrBoolGeneric],
     inputs: TensorOrTupleOfTensorsGeneric,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
    target_ind: TargetType = None,
     internal_batch_size: Union[None, int] = None,
     **kwargs: Any,
diff --git a/captum/attr/_utils/common.py b/captum/attr/_utils/common.py
index 09889cd52..2c03acc03 100644
--- a/captum/attr/_utils/common.py
+++ b/captum/attr/_utils/common.py
@@ -3,7 +3,7 @@
 # pyre-strict
 import typing
 from inspect import signature
-from typing import Any, Callable, List, Literal, Tuple, TYPE_CHECKING, Union
+from typing import Callable, List, Literal, Optional, Tuple, TYPE_CHECKING, Union
 
 import torch
 from captum._utils.common import (
@@ -207,8 +207,7 @@ def _compute_conv_delta_and_format_attrs(
     attributions: Tuple[Tensor, ...],
     start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
     end_point: Union[Tensor, Tuple[Tensor, ...]],
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any,
+    additional_forward_args: Optional[object],
     target: TargetType,
     is_inputs_tuple: Literal[True],
 ) -> Union[Tuple[Tensor, ...], Tuple[Tuple[Tensor, ...], Tensor]]: ...
@@ -221,8 +220,7 @@ def _compute_conv_delta_and_format_attrs(
     attributions: Tuple[Tensor, ...],
     start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
     end_point: Union[Tensor, Tuple[Tensor, ...]],
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-    additional_forward_args: Any,
+    additional_forward_args: Optional[object],
     target: TargetType,
     is_inputs_tuple: Literal[False] = False,
 ) -> Union[Tensor, Tuple[Tensor, Tensor]]: ...
@@ -235,7 +233,7 @@ def _compute_conv_delta_and_format_attrs(
     attributions: Tuple[Tensor, ...],
     start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
     end_point: Union[Tensor, Tuple[Tensor, ...]],
-    additional_forward_args: Any,
+    additional_forward_args: Optional[object],
     target: TargetType,
     is_inputs_tuple: bool = False,
 ) -> Union[
@@ -251,7 +249,7 @@ def _compute_conv_delta_and_format_attrs(
     attributions: Tuple[Tensor, ...],
     start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
     end_point: Union[Tensor, Tuple[Tensor, ...]],
-    additional_forward_args: Any,
+    additional_forward_args: Optional[object],
     target: TargetType,
     is_inputs_tuple: bool = False,
 ) -> Union[
diff --git a/captum/concept/_core/tcav.py b/captum/concept/_core/tcav.py
index a81c50079..daea9f939 100644
--- a/captum/concept/_core/tcav.py
+++ b/captum/concept/_core/tcav.py
@@ -589,8 +589,7 @@ def interpret(
         inputs: TensorOrTupleOfTensorsGeneric,
         experimental_sets: List[List[Concept]],
         target: TargetType = None,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         processes: Optional[int] = None,
         **kwargs: Any,
     ) -> Dict[str, Dict[str, Dict[str, Tensor]]]:
diff --git a/captum/metrics/_core/infidelity.py b/captum/metrics/_core/infidelity.py
index 83e770592..c4c4bd061 100644
--- a/captum/metrics/_core/infidelity.py
+++ b/captum/metrics/_core/infidelity.py
@@ -147,7 +147,7 @@ def infidelity(
     inputs: TensorOrTupleOfTensorsGeneric,
     attributions: TensorOrTupleOfTensorsGeneric,
     baselines: BaselineType = None,
-    additional_forward_args: object = None,
+    additional_forward_args: Optional[object] = None,
     target: TargetType = None,
     n_perturb_samples: int = 10,
     max_examples_per_batch: Optional[int] = None,
@@ -571,7 +571,7 @@ def _make_next_infidelity_tensors_func(
     inputs: Tuple[Tensor, ...],
     baselines: BaselineTupleType,
     attributions: Tuple[Tensor, ...],
-    additional_forward_args: object = None,
+    additional_forward_args: Optional[Tuple[object, ...]] = None,
     target: TargetType = None,
     normalize: bool = False,
 ) -> Callable[[int], Union[Tuple[Tensor], Tuple[Tensor, Tensor, Tensor]]]:
diff --git a/captum/robust/_core/fgsm.py b/captum/robust/_core/fgsm.py
index e57deb0b5..af36e25ba 100644
--- a/captum/robust/_core/fgsm.py
+++ b/captum/robust/_core/fgsm.py
@@ -87,8 +87,7 @@ def perturb(
         epsilon: float,
         # pyre-fixme[2]: Parameter annotation cannot be `Any`.
         target: Any,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         targeted: bool = False,
         mask: Optional[TensorOrTupleOfTensorsGeneric] = None,
     ) -> TensorOrTupleOfTensorsGeneric:
diff --git a/captum/robust/_core/metrics/attack_comparator.py b/captum/robust/_core/metrics/attack_comparator.py
index 649fd6283..348e2d69e 100644
--- a/captum/robust/_core/metrics/attack_comparator.py
+++ b/captum/robust/_core/metrics/attack_comparator.py
@@ -259,8 +259,7 @@ def evaluate(
         self,
         # pyre-fixme[2]: Parameter annotation cannot be `Any`.
         inputs: Any,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         perturbations_per_eval: int = 1,
         # pyre-fixme[2]: Parameter must be annotated.
         **kwargs,
diff --git a/captum/robust/_core/metrics/min_param_perturbation.py b/captum/robust/_core/metrics/min_param_perturbation.py
index 1c2d32fae..afca08f1c 100644
--- a/captum/robust/_core/metrics/min_param_perturbation.py
+++ b/captum/robust/_core/metrics/min_param_perturbation.py
@@ -169,8 +169,7 @@ def _evaluate_batch(
         # pyre-fixme[24]: Generic type `list` expects 1 type parameter, use
         #  `typing.List[]` to avoid runtime subscripting errors.
         input_list: List,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any,
+        additional_forward_args: Optional[Tuple[object, ...]],
         correct_fn_kwargs: Optional[Dict[str, Any]],
         target: TargetType,
     ) -> Optional[int]:
@@ -239,8 +238,7 @@ def _linear_search(
         # pyre-fixme[2]: Parameter annotation cannot be `Any`.
         preproc_input: Any,
         attack_kwargs: Optional[Dict[str, Any]],
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any,
+        additional_forward_args: Optional[Tuple[object, ...]],
         # pyre-fixme[2]: Parameter annotation cannot be `Any`.
         expanded_additional_args: Any,
         correct_fn_kwargs: Optional[Dict[str, Any]],
@@ -301,8 +299,7 @@ def _binary_search(
         # pyre-fixme[2]: Parameter annotation cannot be `Any`.
         preproc_input: Any,
         attack_kwargs: Optional[Dict[str, Any]],
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any,
+        additional_forward_args: Optional[Tuple[object, ...]],
         # pyre-fixme[2]: Parameter annotation cannot be `Any`.
         expanded_additional_args: Any,
         correct_fn_kwargs: Optional[Dict[str, Any]],
diff --git a/captum/robust/_core/pgd.py b/captum/robust/_core/pgd.py
index 76d654011..cf49c26ae 100644
--- a/captum/robust/_core/pgd.py
+++ b/captum/robust/_core/pgd.py
@@ -80,8 +80,7 @@ def perturb(
         step_num: int,
         # pyre-fixme[2]: Parameter annotation cannot be `Any`.
         target: Any,
-        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         targeted: bool = False,
         random_start: bool = False,
         norm: str = "Linf",
diff --git a/tests/attr/test_input_x_gradient.py b/tests/attr/test_input_x_gradient.py
index f06bd01c7..68b442ac5 100644
--- a/tests/attr/test_input_x_gradient.py
+++ b/tests/attr/test_input_x_gradient.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-unsafe
-from typing import Any, cast
+from typing import cast, Optional
 
 import torch
 from captum._utils.typing import TensorOrTupleOfTensorsGeneric
@@ -63,7 +63,7 @@ def _input_x_gradient_base_assert(
         model: Module,
         inputs: TensorOrTupleOfTensorsGeneric,
         expected_grads: TensorOrTupleOfTensorsGeneric,
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         nt_type: str = "vanilla",
     ) -> None:
         input_x_grad = InputXGradient(model)
diff --git a/tests/attr/test_integrated_gradients_basic.py b/tests/attr/test_integrated_gradients_basic.py
index 2846e119e..378f7eb49 100644
--- a/tests/attr/test_integrated_gradients_basic.py
+++ b/tests/attr/test_integrated_gradients_basic.py
@@ -3,7 +3,7 @@
 # pyre-strict
 
 import unittest
-from typing import Any, cast, Optional, Tuple, Union
+from typing import cast, Optional, Tuple, Union
 
 import torch
 from captum._utils.common import _zeros
@@ -394,9 +394,7 @@ def _compute_attribution_and_evaluate(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: BaselineType = None,
         target: Union[None, int] = None,
-        # pyre-fixme[2]: Parameter `additional_forward_args` has type `None`
-        #  but type`Any` is specified.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         type: str = "vanilla",
         approximation_method: str = "gausslegendre",
         multiply_by_inputs: bool = True,
@@ -502,9 +500,7 @@ def _compute_attribution_batch_helper_evaluate(
         inputs: TensorOrTupleOfTensorsGeneric,
         baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None,
         target: Optional[int] = None,
-        # pyre-fixme[2]: Parameter `additional_forward_args` has type `None` but type
-        #  `Any` is specified.
-        additional_forward_args: Any = None,
+        additional_forward_args: Optional[object] = None,
         approximation_method: str = "gausslegendre",
     ) -> None:
         ig = IntegratedGradients(model)
diff --git a/tests/attr/test_saliency.py b/tests/attr/test_saliency.py
index 1ecf3872b..e6695eaec 100644
--- a/tests/attr/test_saliency.py
+++ b/tests/attr/test_saliency.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-unsafe
-from typing import Any, cast, Tuple, Union
+from typing import cast, Optional, Tuple, Union
 
 import torch
 from captum._utils.typing import TensorOrTupleOfTensorsGeneric
@@ -120,7 +120,7 @@ def _saliency_base_assert(
     model: Module,
     inputs: TensorOrTupleOfTensorsGeneric,
    expected: TensorOrTupleOfTensorsGeneric,
-    additional_forward_args: Any = None,
+    additional_forward_args: Optional[object] = None,
     nt_type: str = "vanilla",
     n_samples_batch_size=None,
 ) -> Union[Tensor, Tuple[Tensor, ...]]:
diff --git a/tests/metrics/test_sensitivity.py b/tests/metrics/test_sensitivity.py
index 9fafed5e7..14c04fba5 100644
--- a/tests/metrics/test_sensitivity.py
+++ b/tests/metrics/test_sensitivity.py
@@ -3,7 +3,7 @@
 # pyre-strict
 
 import typing
-from typing import Any, Callable, cast, List, Optional, Tuple, Union
+from typing import Callable, cast, List, Optional, Tuple, Union
 
 import torch
 from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
@@ -293,9 +293,7 @@ def sensitivity_max_assert(
     max_examples_per_batch: Optional[int] = None,
     baselines: Optional[BaselineType] = None,
     target: Optional[TargetType] = None,
-    # pyre-fixme[2]: Parameter `additional_forward_args` has type `None`
-    #  but type `Any` is specified.
-    additional_forward_args: Optional[Any] = None,
+    additional_forward_args: Optional[object] = None,
) -> Tensor:
     if baselines is None:
         sens = sensitivity_max(
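
Note: the snippet below is not part of the patch. It is a minimal, self-contained sketch of the typing pattern the captum/_utils/common.py hunk adopts, with the pyre-fixme comments dropped and illustrative asserts added. The runtime behavior is unchanged; the @overload pair only lets type checkers prove that a Tensor or tuple argument yields a tuple, while the Optional[object] fallback still admits None.

from typing import Optional, Tuple, Union, overload

from torch import Tensor


@overload
def _format_additional_forward_args(
    additional_forward_args: Union[Tensor, Tuple],
) -> Tuple: ...


@overload
def _format_additional_forward_args(  # type: ignore
    additional_forward_args: Optional[object],
) -> Union[None, Tuple]: ...


def _format_additional_forward_args(
    additional_forward_args: Optional[object],
) -> Union[None, Tuple]:
    # Non-tuple, non-None values are wrapped in a one-element tuple;
    # None and existing tuples pass through unchanged.
    if additional_forward_args is not None and not isinstance(
        additional_forward_args, tuple
    ):
        additional_forward_args = (additional_forward_args,)
    return additional_forward_args


assert _format_additional_forward_args(None) is None
assert _format_additional_forward_args("mask") == ("mask",)
assert _format_additional_forward_args((1, 2)) == (1, 2)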