forked from rosinality/stylegan2-pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
apply_factor.py
executable file
·94 lines (81 loc) · 2.59 KB
/
apply_factor.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import argparse
import torch
from torchvision import utils
from model import Generator
if __name__ == "__main__":
    # Inference-only script: disable autograd globally so no graph is built
    # while sampling from the generator.
    torch.set_grad_enabled(False)

    parser = argparse.ArgumentParser(description="Apply closed form factorization")

    parser.add_argument(
        "-i", "--index", type=int, default=0, help="index of eigenvector"
    )
    parser.add_argument(
        "-d",
        "--degree",
        type=float,
        default=5,
        help="scalar factors for moving latent vectors along eigenvector",
    )
    parser.add_argument(
        "--channel_multiplier",
        type=int,
        default=2,
        help='channel multiplier factor. config-f = 2, else = 1',
    )
    parser.add_argument("--ckpt", type=str, required=True, help="stylegan2 checkpoints")
    parser.add_argument(
        "--size", type=int, default=256, help="output image size of the generator"
    )
    parser.add_argument(
        "-n", "--n_sample", type=int, default=7, help="number of samples created"
    )
    parser.add_argument(
        "--truncation", type=float, default=0.7, help="truncation factor"
    )
    parser.add_argument(
        "--device", type=str, default="cuda", help="device to run the model"
    )
    parser.add_argument(
        "--out_prefix",
        type=str,
        default="factor",
        help="filename prefix to result samples",
    )
    parser.add_argument(
        "factor",
        type=str,
        help="name of the closed form factorization result factor file",
    )

    args = parser.parse_args()

    # map_location keeps CUDA-saved checkpoints loadable on a CPU-only host
    # (without it, torch.load raises when the pickled tensors reference CUDA).
    eigvec = torch.load(args.factor, map_location=args.device)["eigvec"].to(args.device)
    ckpt = torch.load(args.ckpt, map_location=args.device)

    g = Generator(args.size, 512, 8, channel_multiplier=args.channel_multiplier).to(args.device)
    # strict=False: checkpoints may carry extra keys (e.g. discriminator state).
    g.load_state_dict(ckpt["g_ema"], strict=False)

    # Mean latent used as the truncation anchor, estimated from 4096 samples.
    trunc = g.mean_latent(4096)

    # Sample n latents in Z and map them to W space.
    latent = torch.randn(args.n_sample, 512, device=args.device)
    latent = g.get_latent(latent)

    # Perturbation along the chosen eigenvector, scaled by --degree;
    # unsqueeze(0) broadcasts the single direction across all samples.
    direction = args.degree * eigvec[:, args.index].unsqueeze(0)

    # Render three rows: original latents, moved +direction, moved -direction.
    img, _ = g(
        [latent],
        truncation=args.truncation,
        truncation_latent=trunc,
        input_is_latent=True,
    )
    img1, _ = g(
        [latent + direction],
        truncation=args.truncation,
        truncation_latent=trunc,
        input_is_latent=True,
    )
    img2, _ = g(
        [latent - direction],
        truncation=args.truncation,
        truncation_latent=trunc,
        input_is_latent=True,
    )

    grid = torch.cat([img1, img, img2], 0)
    out_path = f"{args.out_prefix}_index-{args.index}_degree-{args.degree}.png"
    try:
        # torchvision >= 0.10 renamed save_image's ``range`` keyword to
        # ``value_range``; ``range`` was removed entirely in later releases.
        utils.save_image(
            grid,
            out_path,
            normalize=True,
            value_range=(-1, 1),
            nrow=args.n_sample,
        )
    except TypeError:
        # Fallback for old torchvision versions that only accept ``range``.
        utils.save_image(
            grid,
            out_path,
            normalize=True,
            range=(-1, 1),
            nrow=args.n_sample,
        )