From 2b82f1904a88ab4ad7f1fc8daf223505150ace16 Mon Sep 17 00:00:00 2001 From: Joel Tio Date: Thu, 21 May 2020 19:50:31 +0800 Subject: [PATCH 1/8] Remove trailing whitespace --- hardnet.py | 66 +++++++++++++++++++++++++----------------------------- 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/hardnet.py b/hardnet.py index cdb7a13..b51ca37 100644 --- a/hardnet.py +++ b/hardnet.py @@ -16,7 +16,7 @@ def __init__(self, in_channels, out_channels, kernel=1, stride=1, dropout=0.1, b super().__init__() self.add_module('layer1',ConvLayer(in_channels, out_channels, kernel)) self.add_module('layer2',DWConvLayer(out_channels, out_channels, stride=stride)) - + def forward(self, x): return super().forward(x) @@ -24,16 +24,16 @@ class DWConvLayer(nn.Sequential): def __init__(self, in_channels, out_channels, stride=1, bias=False): super().__init__() out_ch = out_channels - + groups = in_channels kernel = 3 #print(kernel, 'x', kernel, 'x', out_channels, 'x', out_channels, 'DepthWise') - + self.add_module('dwconv', nn.Conv2d(groups, groups, kernel_size=3, stride=stride, padding=1, groups=groups, bias=bias)) self.add_module('norm', nn.BatchNorm2d(groups)) def forward(self, x): - return super().forward(x) + return super().forward(x) class ConvLayer(nn.Sequential): def __init__(self, in_channels, out_channels, kernel=3, stride=1, dropout=0.1, bias=False): @@ -41,10 +41,10 @@ def __init__(self, in_channels, out_channels, kernel=3, stride=1, dropout=0.1, b out_ch = out_channels groups = 1 #print(kernel, 'x', kernel, 'x', in_channels, 'x', out_channels) - self.add_module('conv', nn.Conv2d(in_channels, out_ch, kernel_size=kernel, + self.add_module('conv', nn.Conv2d(in_channels, out_ch, kernel_size=kernel, stride=stride, padding=kernel//2, groups=groups, bias=bias)) self.add_module('norm', nn.BatchNorm2d(out_ch)) - self.add_module('relu', nn.ReLU6(True)) + self.add_module('relu', nn.ReLU6(True)) def forward(self, x): return super().forward(x) @@ -86,27 +86,27 @@ def __init__(self, in_channels, growth_rate, grmul, n_layers, keepBase=False, re layers_.append(CombConvLayer(inch, outch)) else: layers_.append(ConvLayer(inch, outch)) - + if (i % 2 == 0) or (i == n_layers - 1): self.out_channels += outch #print("Blk out =",self.out_channels) self.layers = nn.ModuleList(layers_) - + def forward(self, x): layers_ = [x] - + for layer in range(len(self.layers)): link = self.links[layer] tin = [] for i in link: tin.append(layers_[i]) - if len(tin) > 1: + if len(tin) > 1: x = torch.cat(tin, 1) else: x = tin[0] out = self.layers[layer](x) layers_.append(out) - + t = len(layers_) out_ = [] for i in range(t): @@ -115,10 +115,10 @@ def forward(self, x): out_.append(layers_[i]) out = torch.cat(out_, 1) return out - - - - + + + + class HarDNet(nn.Module): def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): super().__init__() @@ -127,13 +127,13 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): max_pool = True grmul = 1.7 drop_rate = 0.1 - + #HarDNet68 ch_list = [ 128, 256, 320, 640, 1024] gr = [ 14, 16, 20, 40,160] n_layers = [ 8, 16, 16, 16, 4] downSamp = [ 1, 0, 1, 1, 0] - + if arch==85: #HarDNet85 first_ch = [48, 96] @@ -150,12 +150,12 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): gr = [ 16, 20, 64, 160] n_layers = [ 4, 16, 8, 4] downSamp = [ 1, 1, 1, 0] - + if depth_wise: second_kernel = 1 max_pool = False drop_rate = 0.05 - + blks = len(n_layers) self.base = nn.ModuleList([]) @@ -163,10 +163,10 @@ def __init__(self, 
depth_wise=False, arch=85, pretrained=True, weight_path=''): self.base.append ( ConvLayer(in_channels=3, out_channels=first_ch[0], kernel=3, stride=2, bias=False) ) - + # Second Layer self.base.append ( ConvLayer(first_ch[0], first_ch[1], kernel=second_kernel) ) - + # Maxpooling or DWConv3x3 downsampling if max_pool: self.base.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) @@ -179,10 +179,10 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): blk = HarDBlock(ch, gr[i], grmul, n_layers[i], dwconv=depth_wise) ch = blk.get_out_ch() self.base.append ( blk ) - + if i == blks-1 and arch == 85: self.base.append ( nn.Dropout(0.1)) - + self.base.append ( ConvLayer(ch, ch_list[i], kernel=1) ) ch = ch_list[i] if downSamp[i] == 1: @@ -190,8 +190,8 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): self.base.append(nn.MaxPool2d(kernel_size=2, stride=2)) else: self.base.append ( DWConvLayer(ch, ch, stride=2) ) - - + + ch = ch_list[blks-1] self.base.append ( nn.Sequential( @@ -199,12 +199,12 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): Flatten(), nn.Dropout(drop_rate), nn.Linear(ch, 1000) )) - + #print(self.base) - + if pretrained: if hasattr(torch, 'hub'): - + if arch == 68 and not depth_wise: checkpoint = 'https://ping-chao.com/hardnet/hardnet68-5d684880.pth' elif arch == 85 and not depth_wise: @@ -217,21 +217,17 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): self.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=False)) else: postfix = 'ds' if depth_wise else '' - weight_file = '%shardnet%d%s.pth'%(weight_path, arch, postfix) + weight_file = '%shardnet%d%s.pth'%(weight_path, arch, postfix) if not os.path.isfile(weight_file): print(weight_file,'is not found') exit(0) weights = torch.load(weight_file) self.load_state_dict(weights) - + postfix = 'DS' if depth_wise else '' print('ImageNet pretrained weights for HarDNet%d%s is loaded'%(arch, postfix)) - + def forward(self, x): for layer in self.base: x = layer(x) return x - - - - From 988535c60aef77394483526ffd793ddb1bc33181 Mon Sep 17 00:00:00 2001 From: Joel Tio Date: Thu, 21 May 2020 19:53:21 +0800 Subject: [PATCH 2/8] Fix indentation to always be 4 spaces --- hardnet.py | 134 ++++++++++++++++++++++++++--------------------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/hardnet.py b/hardnet.py index b51ca37..e33446b 100644 --- a/hardnet.py +++ b/hardnet.py @@ -52,21 +52,21 @@ def forward(self, x): class HarDBlock(nn.Module): def get_link(self, layer, base_ch, growth_rate, grmul): if layer == 0: - return base_ch, 0, [] + return base_ch, 0, [] out_channels = growth_rate link = [] for i in range(10): - dv = 2 ** i - if layer % dv == 0: - k = layer - dv - link.append(k) - if i > 0: - out_channels *= grmul + dv = 2 ** i + if layer % dv == 0: + k = layer - dv + link.append(k) + if i > 0: + out_channels *= grmul out_channels = int(int(out_channels + 1) / 2) * 2 in_channels = 0 for i in link: - ch,_,_ = self.get_link(i, base_ch, growth_rate, grmul) - in_channels += ch + ch,_,_ = self.get_link(i, base_ch, growth_rate, grmul) + in_channels += ch return out_channels, in_channels, link def get_out_ch(self): @@ -79,16 +79,16 @@ def __init__(self, in_channels, growth_rate, grmul, n_layers, keepBase=False, re layers_ = [] self.out_channels = 0 # if upsample else in_channels for i in range(n_layers): - outch, inch, link = self.get_link(i+1, in_channels, growth_rate, grmul) - 
self.links.append(link) - use_relu = residual_out - if dwconv: - layers_.append(CombConvLayer(inch, outch)) - else: - layers_.append(ConvLayer(inch, outch)) - - if (i % 2 == 0) or (i == n_layers - 1): - self.out_channels += outch + outch, inch, link = self.get_link(i+1, in_channels, growth_rate, grmul) + self.links.append(link) + use_relu = residual_out + if dwconv: + layers_.append(CombConvLayer(inch, outch)) + else: + layers_.append(ConvLayer(inch, outch)) + + if (i % 2 == 0) or (i == n_layers - 1): + self.out_channels += outch #print("Blk out =",self.out_channels) self.layers = nn.ModuleList(layers_) @@ -110,9 +110,9 @@ def forward(self, x): t = len(layers_) out_ = [] for i in range(t): - if (i == 0 and self.keepBase) or \ - (i == t-1) or (i%2 == 1): - out_.append(layers_[i]) + if (i == 0 and self.keepBase) or \ + (i == t-1) or (i%2 == 1): + out_.append(layers_[i]) out = torch.cat(out_, 1) return out @@ -135,26 +135,26 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): downSamp = [ 1, 0, 1, 1, 0] if arch==85: - #HarDNet85 - first_ch = [48, 96] - ch_list = [ 192, 256, 320, 480, 720, 1280] - gr = [ 24, 24, 28, 36, 48, 256] - n_layers = [ 8, 16, 16, 16, 16, 4] - downSamp = [ 1, 0, 1, 0, 1, 0] - drop_rate = 0.2 + #HarDNet85 + first_ch = [48, 96] + ch_list = [ 192, 256, 320, 480, 720, 1280] + gr = [ 24, 24, 28, 36, 48, 256] + n_layers = [ 8, 16, 16, 16, 16, 4] + downSamp = [ 1, 0, 1, 0, 1, 0] + drop_rate = 0.2 elif arch==39: - #HarDNet39 - first_ch = [24, 48] - ch_list = [ 96, 320, 640, 1024] - grmul = 1.6 - gr = [ 16, 20, 64, 160] - n_layers = [ 4, 16, 8, 4] - downSamp = [ 1, 1, 1, 0] + #HarDNet39 + first_ch = [24, 48] + ch_list = [ 96, 320, 640, 1024] + grmul = 1.6 + gr = [ 16, 20, 64, 160] + n_layers = [ 4, 16, 8, 4] + downSamp = [ 1, 1, 1, 0] if depth_wise: - second_kernel = 1 - max_pool = False - drop_rate = 0.05 + second_kernel = 1 + max_pool = False + drop_rate = 0.05 blks = len(n_layers) self.base = nn.ModuleList([]) @@ -169,9 +169,9 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): # Maxpooling or DWConv3x3 downsampling if max_pool: - self.base.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + self.base.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) else: - self.base.append ( DWConvLayer(first_ch[1], first_ch[1], stride=2) ) + self.base.append ( DWConvLayer(first_ch[1], first_ch[1], stride=2) ) # Build all HarDNet blocks ch = first_ch[1] @@ -186,10 +186,10 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): self.base.append ( ConvLayer(ch, ch_list[i], kernel=1) ) ch = ch_list[i] if downSamp[i] == 1: - if max_pool: - self.base.append(nn.MaxPool2d(kernel_size=2, stride=2)) - else: - self.base.append ( DWConvLayer(ch, ch, stride=2) ) + if max_pool: + self.base.append(nn.MaxPool2d(kernel_size=2, stride=2)) + else: + self.base.append ( DWConvLayer(ch, ch, stride=2) ) ch = ch_list[blks-1] @@ -203,31 +203,31 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): #print(self.base) if pretrained: - if hasattr(torch, 'hub'): - - if arch == 68 and not depth_wise: - checkpoint = 'https://ping-chao.com/hardnet/hardnet68-5d684880.pth' - elif arch == 85 and not depth_wise: - checkpoint = 'https://ping-chao.com/hardnet/hardnet85-a28faa00.pth' - elif arch == 68 and depth_wise: - checkpoint = 'https://ping-chao.com/hardnet/hardnet68ds-632474d2.pth' + if hasattr(torch, 'hub'): + + if arch == 68 and not depth_wise: + checkpoint = 
'https://ping-chao.com/hardnet/hardnet68-5d684880.pth' + elif arch == 85 and not depth_wise: + checkpoint = 'https://ping-chao.com/hardnet/hardnet85-a28faa00.pth' + elif arch == 68 and depth_wise: + checkpoint = 'https://ping-chao.com/hardnet/hardnet68ds-632474d2.pth' + else: + checkpoint = 'https://ping-chao.com/hardnet/hardnet39ds-0e6c6fa9.pth' + + self.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=False)) else: - checkpoint = 'https://ping-chao.com/hardnet/hardnet39ds-0e6c6fa9.pth' - - self.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=False)) - else: - postfix = 'ds' if depth_wise else '' - weight_file = '%shardnet%d%s.pth'%(weight_path, arch, postfix) - if not os.path.isfile(weight_file): - print(weight_file,'is not found') - exit(0) - weights = torch.load(weight_file) - self.load_state_dict(weights) + postfix = 'ds' if depth_wise else '' + weight_file = '%shardnet%d%s.pth'%(weight_path, arch, postfix) + if not os.path.isfile(weight_file): + print(weight_file,'is not found') + exit(0) + weights = torch.load(weight_file) + self.load_state_dict(weights) - postfix = 'DS' if depth_wise else '' - print('ImageNet pretrained weights for HarDNet%d%s is loaded'%(arch, postfix)) + postfix = 'DS' if depth_wise else '' + print('ImageNet pretrained weights for HarDNet%d%s is loaded'%(arch, postfix)) def forward(self, x): for layer in self.base: - x = layer(x) + x = layer(x) return x From 2a837b4177da9a2b85b560a316111b42ca4ae4a7 Mon Sep 17 00:00:00 2001 From: Joel Tio Date: Thu, 21 May 2020 19:55:23 +0800 Subject: [PATCH 3/8] Remove whitespace before and after method parentheses --- hardnet.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/hardnet.py b/hardnet.py index e33446b..190fb5a 100644 --- a/hardnet.py +++ b/hardnet.py @@ -160,40 +160,40 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): self.base = nn.ModuleList([]) # First Layer: Standard Conv3x3, Stride=2 - self.base.append ( + self.base.append( ConvLayer(in_channels=3, out_channels=first_ch[0], kernel=3, - stride=2, bias=False) ) + stride=2, bias=False)) # Second Layer - self.base.append ( ConvLayer(first_ch[0], first_ch[1], kernel=second_kernel) ) + self.base.append(ConvLayer(first_ch[0], first_ch[1], kernel=second_kernel)) # Maxpooling or DWConv3x3 downsampling if max_pool: self.base.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) else: - self.base.append ( DWConvLayer(first_ch[1], first_ch[1], stride=2) ) + self.base.append(DWConvLayer(first_ch[1], first_ch[1], stride=2)) # Build all HarDNet blocks ch = first_ch[1] for i in range(blks): blk = HarDBlock(ch, gr[i], grmul, n_layers[i], dwconv=depth_wise) ch = blk.get_out_ch() - self.base.append ( blk ) + self.base.append(blk) if i == blks-1 and arch == 85: - self.base.append ( nn.Dropout(0.1)) + self.base.append(nn.Dropout(0.1)) - self.base.append ( ConvLayer(ch, ch_list[i], kernel=1) ) + self.base.append(ConvLayer(ch, ch_list[i], kernel=1)) ch = ch_list[i] if downSamp[i] == 1: if max_pool: self.base.append(nn.MaxPool2d(kernel_size=2, stride=2)) else: - self.base.append ( DWConvLayer(ch, ch, stride=2) ) + self.base.append(DWConvLayer(ch, ch, stride=2)) ch = ch_list[blks-1] - self.base.append ( + self.base.append( nn.Sequential( nn.AdaptiveAvgPool2d((1,1)), Flatten(), From 1c30dcbf15cbef25c0f5280885f3a9e2c74b355c Mon Sep 17 00:00:00 2001 From: Joel Tio Date: Thu, 21 May 2020 21:42:57 +0800 Subject: [PATCH 4/8] Align and reorder hardnet variables --- hardnet.py 
| 47 ++++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/hardnet.py b/hardnet.py index 190fb5a..d280815 100644 --- a/hardnet.py +++ b/hardnet.py @@ -122,34 +122,35 @@ def forward(self, x): class HarDNet(nn.Module): def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): super().__init__() - first_ch = [32, 64] - second_kernel = 3 - max_pool = True + + # HarDNet68 + first_ch = [32, 64] + ch_list = [128, 256, 320, 640, 1024] grmul = 1.7 + gr = [ 14, 16, 20, 40, 160] + n_layers = [ 8, 16, 16, 16, 4] + downSamp = [ 1, 0, 1, 1, 0] drop_rate = 0.1 - #HarDNet68 - ch_list = [ 128, 256, 320, 640, 1024] - gr = [ 14, 16, 20, 40,160] - n_layers = [ 8, 16, 16, 16, 4] - downSamp = [ 1, 0, 1, 1, 0] - - if arch==85: - #HarDNet85 - first_ch = [48, 96] - ch_list = [ 192, 256, 320, 480, 720, 1280] - gr = [ 24, 24, 28, 36, 48, 256] - n_layers = [ 8, 16, 16, 16, 16, 4] - downSamp = [ 1, 0, 1, 0, 1, 0] + if arch == 85: + # HarDNet85 + first_ch = [48, 96] + ch_list = [192, 256, 320, 480, 720, 1280] + gr = [ 24, 24, 28, 36, 48, 256] + n_layers = [ 8, 16, 16, 16, 16, 4] + downSamp = [ 1, 0, 1, 0, 1, 0] drop_rate = 0.2 - elif arch==39: - #HarDNet39 - first_ch = [24, 48] - ch_list = [ 96, 320, 640, 1024] + elif arch == 39: + # HarDNet39 + first_ch = [24, 48] + ch_list = [96, 320, 640, 1024] grmul = 1.6 - gr = [ 16, 20, 64, 160] - n_layers = [ 4, 16, 8, 4] - downSamp = [ 1, 1, 1, 0] + gr = [16, 20, 64, 160] + n_layers = [ 4, 16, 8, 4] + downSamp = [ 1, 1, 1, 0] + + second_kernel = 3 + max_pool = True if depth_wise: second_kernel = 1 From 70ba50d514147673f8aa9c8c51374b7a450585c0 Mon Sep 17 00:00:00 2001 From: Joel Tio Date: Thu, 21 May 2020 21:51:36 +0800 Subject: [PATCH 5/8] Replace exit() with FileNotFoundError --- hardnet.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hardnet.py b/hardnet.py index d280815..dd29880 100644 --- a/hardnet.py +++ b/hardnet.py @@ -1,4 +1,5 @@ import os +import errno import torch import torch.nn as nn import torch.nn.functional as F @@ -220,8 +221,8 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): postfix = 'ds' if depth_wise else '' weight_file = '%shardnet%d%s.pth'%(weight_path, arch, postfix) if not os.path.isfile(weight_file): - print(weight_file,'is not found') - exit(0) + raise FileNotFoundError( + errno.ENOENT, os.strerror(errno.ENOENT), weight_file) weights = torch.load(weight_file) self.load_state_dict(weights) From 000bf77ff925c9a3ce56c54b35d4ce11e35e733e Mon Sep 17 00:00:00 2001 From: Joel Tio Date: Thu, 21 May 2020 22:05:33 +0800 Subject: [PATCH 6/8] Add error handling if architecture is not supported --- hardnet.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/hardnet.py b/hardnet.py index dd29880..4a70997 100644 --- a/hardnet.py +++ b/hardnet.py @@ -124,19 +124,20 @@ class HarDNet(nn.Module): def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): super().__init__() - # HarDNet68 - first_ch = [32, 64] - ch_list = [128, 256, 320, 640, 1024] - grmul = 1.7 - gr = [ 14, 16, 20, 40, 160] - n_layers = [ 8, 16, 16, 16, 4] - downSamp = [ 1, 0, 1, 1, 0] - drop_rate = 0.1 - - if arch == 85: + if arch == 68: + # HarDNet68 + first_ch = [32, 64] + ch_list = [128, 256, 320, 640, 1024] + grmul = 1.7 + gr = [ 14, 16, 20, 40, 160] + n_layers = [ 8, 16, 16, 16, 4] + downSamp = [ 1, 0, 1, 1, 0] + drop_rate = 0.1 + elif arch == 85: # HarDNet85 first_ch = [48, 96] ch_list = [192, 256, 
320, 480, 720, 1280] + grmul = 1.7 gr = [ 24, 24, 28, 36, 48, 256] n_layers = [ 8, 16, 16, 16, 16, 4] downSamp = [ 1, 0, 1, 0, 1, 0] @@ -149,6 +150,8 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): gr = [16, 20, 64, 160] n_layers = [ 4, 16, 8, 4] downSamp = [ 1, 1, 1, 0] + else: + raise ValueError("Architecture type %s is not supported" % arch) second_kernel = 3 max_pool = True From b76ca86a399c35f04f18cf30f16f282d6fa32835 Mon Sep 17 00:00:00 2001 From: Joel Tio Date: Thu, 21 May 2020 22:21:58 +0800 Subject: [PATCH 7/8] Refactor choosing PyTorch hub url from if statements to dictionary --- hardnet.py | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/hardnet.py b/hardnet.py index 4a70997..66d2634 100644 --- a/hardnet.py +++ b/hardnet.py @@ -170,7 +170,7 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): stride=2, bias=False)) # Second Layer - self.base.append(ConvLayer(first_ch[0], first_ch[1], kernel=second_kernel)) + self.base.append(ConvLayer(first_ch[0], first_ch[1], kernel=second_kernel)) # Maxpooling or DWConv3x3 downsampling if max_pool: @@ -208,29 +208,31 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): #print(self.base) if pretrained: - if hasattr(torch, 'hub'): - - if arch == 68 and not depth_wise: - checkpoint = 'https://ping-chao.com/hardnet/hardnet68-5d684880.pth' - elif arch == 85 and not depth_wise: - checkpoint = 'https://ping-chao.com/hardnet/hardnet85-a28faa00.pth' - elif arch == 68 and depth_wise: - checkpoint = 'https://ping-chao.com/hardnet/hardnet68ds-632474d2.pth' - else: - checkpoint = 'https://ping-chao.com/hardnet/hardnet39ds-0e6c6fa9.pth' + # Represent the architecture with a single string + arch_codename = "HarDNet%d" % (arch) + if depth_wise: + arch_codename += "DS" + if hasattr(torch, 'hub'): + checkpoint_urls = { + "HarDNet39_DS": 'https://ping-chao.com/hardnet/hardnet39ds-0e6c6fa9.pth', + "HarDNet68": 'https://ping-chao.com/hardnet/hardnet68-5d684880.pth', + "HarDNet68DS": 'https://ping-chao.com/hardnet/hardnet68ds-632474d2.pth', + "HarDNet85": 'https://ping-chao.com/hardnet/hardnet85-a28faa00.pth', + } + + checkpoint = checkpoint_urls[arch_codename] self.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=False)) else: - postfix = 'ds' if depth_wise else '' - weight_file = '%shardnet%d%s.pth'%(weight_path, arch, postfix) + weight_file = '%s%s.pth' % (weight_path, arch_codename.lower()) if not os.path.isfile(weight_file): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), weight_file) + weights = torch.load(weight_file) self.load_state_dict(weights) - postfix = 'DS' if depth_wise else '' - print('ImageNet pretrained weights for HarDNet%d%s is loaded'%(arch, postfix)) + print('ImageNet pretrained weights for %s is loaded' % arch_codename) def forward(self, x): for layer in self.base: From 980121264803f925a8e04fae09d4ce207594f404 Mon Sep 17 00:00:00 2001 From: Joel Tio Date: Thu, 21 May 2020 22:56:26 +0800 Subject: [PATCH 8/8] Refactor convolution sequential layers to functions --- hardnet.py | 74 +++++++++++++++++++++++------------------------------- 1 file changed, 31 insertions(+), 43 deletions(-) diff --git a/hardnet.py b/hardnet.py index 66d2634..4888333 100644 --- a/hardnet.py +++ b/hardnet.py @@ -3,51 +3,39 @@ import torch import torch.nn as nn import torch.nn.functional as F +from collections import OrderedDict class Flatten(nn.Module): - def __init__(self): - 
super().__init__() def forward(self, x): - return x.view(x.data.size(0),-1) - + return x.view(x.size(0), -1) -class CombConvLayer(nn.Sequential): - def __init__(self, in_channels, out_channels, kernel=1, stride=1, dropout=0.1, bias=False): - super().__init__() - self.add_module('layer1',ConvLayer(in_channels, out_channels, kernel)) - self.add_module('layer2',DWConvLayer(out_channels, out_channels, stride=stride)) +def conv_layer(in_channels, out_channels, kernel=3, stride=1, dropout=0.1, bias=False): + groups = 1 + #print(kernel, 'x', kernel, 'x', in_channels, 'x', out_channels) + return nn.Sequential(OrderedDict([ + ('conv', nn.Conv2d(in_channels, out_channels, kernel_size=kernel, + stride=stride, padding=kernel//2, groups=groups, bias=bias)), + ('norm', nn.BatchNorm2d(out_channels)), + ('relu', nn.ReLU6(inplace=True)), + ])) - def forward(self, x): - return super().forward(x) -class DWConvLayer(nn.Sequential): - def __init__(self, in_channels, out_channels, stride=1, bias=False): - super().__init__() - out_ch = out_channels +def dw_conv_layer(in_channels, out_channels, stride=1, bias=False): + groups = in_channels - groups = in_channels - kernel = 3 - #print(kernel, 'x', kernel, 'x', out_channels, 'x', out_channels, 'DepthWise') + return nn.Sequential(OrderedDict([ + ('dwconv', nn.Conv2d(groups, groups, kernel_size=3, + stride=stride, padding=1, groups=groups, bias=bias)), + ('norm', nn.BatchNorm2d(groups)), + ])) - self.add_module('dwconv', nn.Conv2d(groups, groups, kernel_size=3, - stride=stride, padding=1, groups=groups, bias=bias)) - self.add_module('norm', nn.BatchNorm2d(groups)) - def forward(self, x): - return super().forward(x) -class ConvLayer(nn.Sequential): - def __init__(self, in_channels, out_channels, kernel=3, stride=1, dropout=0.1, bias=False): - super().__init__() - out_ch = out_channels - groups = 1 - #print(kernel, 'x', kernel, 'x', in_channels, 'x', out_channels) - self.add_module('conv', nn.Conv2d(in_channels, out_ch, kernel_size=kernel, - stride=stride, padding=kernel//2, groups=groups, bias=bias)) - self.add_module('norm', nn.BatchNorm2d(out_ch)) - self.add_module('relu', nn.ReLU6(True)) - def forward(self, x): - return super().forward(x) +def comb_conv_layer(in_channels, out_channels, kernel=1, stride=1, dropout=0.1, bias=False): + return nn.Sequential(OrderedDict([ + ('layer1', conv_layer(in_channels, out_channels, kernel)), + ('layer2', dw_conv_layer(out_channels, out_channels, stride=stride)) + ])) class HarDBlock(nn.Module): @@ -84,9 +72,9 @@ def __init__(self, in_channels, growth_rate, grmul, n_layers, keepBase=False, re self.links.append(link) use_relu = residual_out if dwconv: - layers_.append(CombConvLayer(inch, outch)) + layers_.append(comb_conv_layer(inch, outch)) else: - layers_.append(ConvLayer(inch, outch)) + layers_.append(conv_layer(inch, outch)) if (i % 2 == 0) or (i == n_layers - 1): self.out_channels += outch @@ -166,17 +154,17 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): # First Layer: Standard Conv3x3, Stride=2 self.base.append( - ConvLayer(in_channels=3, out_channels=first_ch[0], kernel=3, - stride=2, bias=False)) + conv_layer(in_channels=3, out_channels=first_ch[0], kernel=3, + stride=2, bias=False)) # Second Layer - self.base.append(ConvLayer(first_ch[0], first_ch[1], kernel=second_kernel)) + self.base.append(conv_layer(first_ch[0], first_ch[1], kernel=second_kernel)) # Maxpooling or DWConv3x3 downsampling if max_pool: self.base.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) else: - 
self.base.append(DWConvLayer(first_ch[1], first_ch[1], stride=2)) + self.base.append(dw_conv_layer(first_ch[1], first_ch[1], stride=2)) # Build all HarDNet blocks ch = first_ch[1] @@ -188,13 +176,13 @@ def __init__(self, depth_wise=False, arch=85, pretrained=True, weight_path=''): if i == blks-1 and arch == 85: self.base.append(nn.Dropout(0.1)) - self.base.append(ConvLayer(ch, ch_list[i], kernel=1)) + self.base.append(conv_layer(ch, ch_list[i], kernel=1)) ch = ch_list[i] if downSamp[i] == 1: if max_pool: self.base.append(nn.MaxPool2d(kernel_size=2, stride=2)) else: - self.base.append(DWConvLayer(ch, ch, stride=2)) + self.base.append(dw_conv_layer(ch, ch, stride=2)) ch = ch_list[blks-1]