# model.py
import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models

######################################################################
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # Kaiming (He) normal init for convolution layers
        init.kaiming_normal_(m.weight, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm1d') != -1:
        init.normal_(m.weight, 1.0, 0.02)
        init.constant_(m.bias, 0.0)

def weights_init_classifier(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        # small-variance normal init for the final classification layer
        init.normal_(m.weight, std=0.001)
        init.constant_(m.bias, 0.0)
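
# Illustrative note (not in the original file): `nn.Module.apply` visits
# every submodule recursively, so the two functions above act as per-layer
# initializers dispatched on the class name, e.g.:
#
#     block = nn.Sequential(nn.Linear(8, 4), nn.BatchNorm1d(4))
#     block.apply(weights_init_kaiming)   # inits both Linear and BatchNorm1d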

class ft_net(nn.Module):
    def __init__(self, class_num):
        super(ft_net, self).__init__()
        model_ft = models.resnet50(pretrained=True)
        # replace the fixed 7x7 average pooling with global pooling,
        # so inputs of varying spatial size are accepted
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        num_ftrs = model_ft.fc.in_features  # input dim of the original fully connected layer (2048)
        # bottleneck block: linear -> batchnorm -> leaky relu -> dropout
        num_bottleneck = 512
        add_block = []
        add_block += [nn.Linear(num_ftrs, num_bottleneck)]
        add_block += [nn.BatchNorm1d(num_bottleneck)]
        add_block += [nn.LeakyReLU(0.1)]
        add_block += [nn.Dropout(p=0.5)]  # default dropout rate 0.5
        add_block = nn.Sequential(*add_block)
        add_block.apply(weights_init_kaiming)
        model_ft.fc = add_block
        self.model = model_ft
        # final classifier: a single linear layer mapping to class_num classes
        classifier = []
        classifier += [nn.Linear(num_bottleneck, class_num)]
        classifier = nn.Sequential(*classifier)
        classifier.apply(weights_init_classifier)
        self.classifier = classifier

    def forward(self, x):
        x = self.model(x)       # backbone + bottleneck (fc was replaced above)
        x = self.classifier(x)
        return x
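
# Shape walkthrough for ft_net (illustrative, assuming a 224x224 input):
#   (N, 3, 224, 224) -> ResNet-50 conv stages -> (N, 2048, 7, 7)
#   -> AdaptiveAvgPool2d((1, 1)) -> (N, 2048, 1, 1) -> flatten -> (N, 2048)
#   -> bottleneck block -> (N, 512) -> classifier -> (N, class_num)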

class ft_net_dense(nn.Module):
    def __init__(self, class_num):
        super(ft_net_dense, self).__init__()
        model_ft = models.densenet121(pretrained=True)
        # append global pooling to the end of the features Sequential;
        # in the original torchvision version, pooling is done in forward()
        model_ft.features.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # bottleneck block; DenseNet-121 features are 1024-dim (for ResNet-50 it is 2048)
        num_bottleneck = 512
        add_block = []
        add_block += [nn.Linear(1024, num_bottleneck)]
        add_block += [nn.BatchNorm1d(num_bottleneck)]
        add_block += [nn.LeakyReLU(0.1)]
        add_block += [nn.Dropout(p=0.5)]
        add_block = nn.Sequential(*add_block)
        add_block.apply(weights_init_kaiming)
        model_ft.fc = add_block  # DenseNet has no fc; attach it and call it explicitly in forward()
        self.model = model_ft
        classifier = []
        classifier += [nn.Linear(num_bottleneck, class_num)]
        classifier = nn.Sequential(*classifier)
        classifier.apply(weights_init_classifier)
        self.classifier = classifier

    def forward(self, x):
        x = self.model.features(x)   # conv features + the appended global pooling
        x = x.view(x.size(0), -1)    # flatten to (N, 1024)
        x = self.model.fc(x)         # bottleneck block attached above
        x = self.classifier(x)
        return x
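
# Minimal sketch (an illustrative addition, not part of the original model
# definitions): at test time, re-ID pipelines typically use the 512-dim
# bottleneck output rather than the class scores. One way to get it without
# modifying the classes above is to bypass the classifier temporarily; call
# net.eval() first so BatchNorm/Dropout run in inference mode.
def extract_embedding(net, x):
    """Return the (N, 512) bottleneck feature of an ft_net / ft_net_dense."""
    saved = net.classifier
    net.classifier = nn.Sequential()  # an empty Sequential acts as identity
    with torch.no_grad():
        feature = net(x)
    net.classifier = saved
    return feature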

# debug model structure
if __name__ == '__main__':
    net = ft_net(751)
    # net = ft_net_dense(751)
    # print(net)
    x = torch.randn(8, 3, 224, 224)
    output = net(x)
    print('net output size:')
    print(output.shape)