forked from VincenzoDalia/AML_Project
-
Notifications
You must be signed in to change notification settings - Fork 0
/
parse_args.py
81 lines (71 loc) · 2.79 KB
/
parse_args.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import ast
from argparse import ArgumentParser
def _clear_args(parsed_args):
parsed_args.experiment_args = eval(parsed_args.experiment_args)
parsed_args.dataset_args = eval(parsed_args.dataset_args)
return parsed_args
def parse_arguments():
    """Define the command-line interface, parse ``sys.argv``, and return
    the resulting namespace with ``experiment_args`` / ``dataset_args``
    converted from literal strings into dicts (via ``_clear_args``).
    """
    parser = ArgumentParser()
    parser.add_argument(
        "--seed", type=int, default=0, help="Seed used for deterministic behavior"
    )
    parser.add_argument(
        "--test_only", action="store_true", help="Whether to skip training"
    )
    parser.add_argument(
        "--cpu", action="store_true", help="Whether to force the usage of CPU"
    )
    parser.add_argument("--experiment", type=str, default="baseline")
    parser.add_argument("--experiment_name", type=str, default="baseline")
    # Passed as Python dict literals on the command line, e.g. "{'k': 1}";
    # parsed into real dicts by _clear_args before being returned.
    parser.add_argument("--experiment_args", type=str, default="{}")
    parser.add_argument("--dataset_args", type=str, default="{}")
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--epochs", type=int, default=30)
    parser.add_argument("--num_workers", type=int, default=5)
    parser.add_argument("--grad_accum_steps", type=int, default=1)
    parser.add_argument(
        "--topK",
        action="store_true",
        help="Whether to adapt activation map to output topKs",
    )
    # NOTE(review): flag name is misspelled ("treshold") but kept for
    # backward compatibility with existing run scripts.
    parser.add_argument(
        "--tk_treshold",
        type=float,
        default=1,
        help="If topK is enabled, this controls K (how many elements will be retained in the activation map)",
    )
    parser.add_argument(
        "--no_binarize",
        action="store_true",
        help="Whether to keep activation mask as it is, without binarizing it",
    )
    parser.add_argument(
        "--mask_ratio",
        type=float,
        default=1,
        help="If the experiment is random_maps, this controls the ratio of 1s in the random mask",
    )
    parser.add_argument(
        "--use_bernoulli",
        action="store_true",
        help="Whether to shape the activation drawing from a mask_ratio bernoulli distribution while performing random maps",
    )
    parser.add_argument(
        "--epsilon",
        type=float,
        default=0,
        help="This sets epsilon instead of 0 while binarizing. Defaults to 0",
    )
    parser.add_argument(
        "--layers",
        nargs="+",
        default=[],
        help="""The layers after which to hook the activation shaping module.
                Must be passed with this pattern: RESNET_LAYER.LEVEL.LAYER_NUM,
                for example: 2.0.1 corresponds to layer2.0.conv1., 2.0.bn1 to layer2.0.bn1
                Invalid layers are ignored.
                To hook a relu layer, use the pattern : 2.0.r, for layer2.0.relu.
                To hook the avgpool, use "avgpool".
                To hook the first convolution, conv1, use : 1
                """,
    )
    return _clear_args(parser.parse_args())