I am trying to train 8 CNN models with the same structure at the same time. After training one of them on a batch, I need to synchronize the weights of the feature-extraction layers in the other 7 models.
Here is the model:
import torch
import torch.nn as nn
from torchvision import models

class GNet(nn.Module):
    def __init__(self, dim_output, dropout=0.5):
        super(GNet, self).__init__()
        self.out_dim = dim_output
        # Load the pretrained AlexNet model
        alexnet = models.alexnet(pretrained=True)
        self.pre_filtering = nn.Sequential(
            alexnet.features[:4]
        )
        # Set requires_grad to False for all parameters in the pre_filtering network
        for param in self.pre_filtering.parameters():
            param.requires_grad = False
        # construct the feature extractor
        # every intermediate feature will be fed to the feature extractor
        # res: 25 x 25
        self.feat_ex1 = nn.Conv2d(192, 128, kernel_size=3, stride=1)
        # res: 25 x 25
        self.feat_ex2 = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Dropout(p=dropout),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        )
        # res: 25 x 25
        self.feat_ex3 = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Dropout(p=dropout),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        )
        # res: 13 x 13
        self.feat_ex4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.Dropout(p=dropout),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        )
        # res: 13 x 13
        self.feat_ex5 = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Dropout(p=dropout),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        )
        # res: 13 x 13
        self.feat_ex6 = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Dropout(p=dropout),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        )
        # res: 13 x 13
        self.feat_ex7 = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Dropout(p=dropout),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1)
        )
        # define the flexible pooling field of each layer
        # use a full convolution layer here to perform flexible pooling
        self.fpf13 = nn.Conv2d(in_channels=448, out_channels=448, kernel_size=13, groups=448)
        self.fpf25 = nn.Conv2d(in_channels=384, out_channels=384, kernel_size=25, groups=384)
        self.linears = {}
        for i in range(self.out_dim):
            self.linears[f'linear_{i+1}'] = nn.Linear(832, 1)
        self.LogTanh = LogTanh()  # custom activation, defined elsewhere in my code
        self.flatten = nn.Flatten()
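For context, the eight models are created as independent GNet instances and stored in a structure indexed 1 through 8, so each one can be looked up by its number; roughly like this (a sketch of my setup, the dim_output value is just a placeholder):

# Sketch: eight independent copies of the same architecture,
# keyed 1..8 to match the indexing used in sync_weights below.
models = {sub: GNet(dim_output=10) for sub in range(1, 9)}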
Here is the function that synchronizes the weights:
def sync_weights(models, current_sub, sync_seqs):
    for sub in range(1, 9):
        if sub != current_sub:
            # Synchronize the specified layers
            with torch.no_grad():
                for seq_name in sync_seqs:
                    reference_layer = getattr(models[current_sub], seq_name)[2]
                    layer = getattr(models[sub], seq_name)[2]
                    layer.weight.data = reference_layer.weight.data.clone()
                    if layer.bias is not None:
                        layer.bias.data = reference_layer.bias.data.clone()
This fails with:
'Conv2d' object is not iterable
which suggests that getattr() is returning a Conv2d object.
But if I remove the [2]:
def sync_weights(models, current_sub, sync_seqs):
    for sub in range(1, 9):
        if sub != current_sub:
            # Synchronize the specified layers
            with torch.no_grad():
                for seq_name in sync_seqs:
                    reference_layer = getattr(models[current_sub], seq_name)
                    layer = getattr(models[sub], seq_name)
                    layer.weight.data = reference_layer.weight.data.clone()
                    if layer.bias is not None:
                        layer.bias.data = reference_layer.bias.data.clone()
I get a different error:
'Sequential' object has no attribute 'weight'
which means getattr() now returns a Sequential. Yet before, it returned a Conv2d object.
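To make the discrepancy concrete, a minimal check like this (a diagnostic sketch; the dim_output value is a placeholder) prints what getattr() actually resolves each name to on a single instance:

# Diagnostic sketch: inspect the concrete type behind each attribute name.
model = GNet(dim_output=10)
for name in ['feat_ex1', 'feat_ex2', 'feat_ex3', 'feat_ex4',
             'feat_ex5', 'feat_ex6', 'feat_ex7']:
    print(name, '->', type(getattr(model, name)).__name__)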
Does anyone know what is going on here?
For your reference, the sync_seqs argument passed to sync_weights is:
sync_seqs = [
    'feat_ex1',
    'feat_ex2',
    'feat_ex3',
    'feat_ex4',
    'feat_ex5',
    'feat_ex6',
    'feat_ex7'
]
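In case it helps, the direction I am experimenting with is the untested sketch below: it takes the attribute itself when it is already a Conv2d, and otherwise searches the Sequential for its single Conv2d. A fixed [2] index would not work uniformly anyway, since in feat_ex4 the Conv2d sits at position 3 after the leading MaxPool2d. conv_in is just a helper name I made up:

import torch
import torch.nn as nn

def conv_in(module):
    # Return the module itself if it is already a Conv2d; otherwise pull
    # the single Conv2d out of the Sequential container.
    if isinstance(module, nn.Conv2d):
        return module
    return next(m for m in module if isinstance(m, nn.Conv2d))

def sync_weights(models, current_sub, sync_seqs):
    for sub in range(1, 9):
        if sub == current_sub:
            continue
        with torch.no_grad():
            for seq_name in sync_seqs:
                reference_layer = conv_in(getattr(models[current_sub], seq_name))
                layer = conv_in(getattr(models[sub], seq_name))
                layer.weight.data = reference_layer.weight.data.clone()
                if layer.bias is not None:
                    layer.bias.data = reference_layer.bias.data.clone()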