Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
44 commits
Select commit Hold shift + click to select a range
a583155
# First commit
Ronalmoo Oct 1, 2019
fcb8508
CNN Model
Ronalmoo Oct 2, 2019
3364dc6
add argparse
Ronalmoo Oct 2, 2019
dc55d07
Merge remote-tracking branch 'upstream/develop'
Ronalmoo Oct 3, 2019
ea7a5e8
repository hierarchy reconstructed
Ronalmoo Oct 3, 2019
8ecd211
Re-construct hierarchy (#11)
ssaru Oct 5, 2019
574e269
add ResNet50
Ronalmoo Oct 8, 2019
943e85f
Use GPU
Ronalmoo Oct 8, 2019
28a7fc1
test
Ronalmoo Oct 8, 2019
f2d8c74
minor fixed
Ronalmoo Oct 11, 2019
6de5887
add train_acc
Ronalmoo Oct 11, 2019
b3e85a5
add test.py
Ronalmoo Oct 11, 2019
6433bd1
add validation_set_loader
Ronalmoo Oct 12, 2019
afc72c9
validate
Ronalmoo Oct 12, 2019
5ce21d4
add confusion_matrix
Ronalmoo Oct 14, 2019
00f6e5e
add validation
Ronalmoo Oct 14, 2019
933f67c
minor change
Ronalmoo Oct 14, 2019
702eaa8
minor change
Ronalmoo Oct 14, 2019
693c81d
split data into train, validation, test
Ronalmoo Oct 14, 2019
4539c07
remove metrics.py
Ronalmoo Oct 24, 2019
c9a39af
Merge pull request #35 from chromatices/develop
chromatices Oct 25, 2019
0c22a0f
Merge branch 'master' of https://github.com/DeepBaksuVision/BinaryCon…
Ronalmoo Oct 26, 2019
f119606
Merge branch 'develop' of https://github.com/DeepBaksuVision/BinaryCo…
Ronalmoo Oct 26, 2019
3f24d24
add aug module with albumentations
Ronalmoo Oct 26, 2019
e60db94
Merge remote-tracking branch 'origin/master'
Ronalmoo Oct 26, 2019
6832bd9
fix dataloader
Ronalmoo Oct 26, 2019
83b87c9
변경 사항에 대한 커밋 메시지를 입력하십시오. '#' 문자로 시작하는
Ronalmoo Oct 26, 2019
0ebaacd
delete train
Ronalmoo Oct 26, 2019
fb7e9ac
delete validate
Ronalmoo Oct 26, 2019
b7df5c0
delete resnet
Ronalmoo Oct 26, 2019
53433a5
add BinaryLinear
Ronalmoo Oct 26, 2019
46330c8
add binarized_conv
Ronalmoo Oct 26, 2019
4e0eed0
add binarized_linear
Ronalmoo Oct 26, 2019
d4fa978
add aug module with albumentation
Ronalmoo Oct 26, 2019
3a24004
add aug module with albumentations
Ronalmoo Oct 26, 2019
4b50002
Merge remote-tracking branch 'upstream/develop'
Ronalmoo Oct 28, 2019
142794d
change configurations
Ronalmoo Oct 29, 2019
5035938
resnet with pytorch_lightning
Ronalmoo Oct 30, 2019
6715801
Merge branch 'develop' of https://github.com/DeepBaksuVision/BinaryCo…
Ronalmoo Nov 19, 2019
6235784
ResNet with pytorch lightning
Ronalmoo Nov 19, 2019
f4ff94d
Revert "change configurations"
Ronalmoo Nov 19, 2019
6550c5a
Delete augmetations
Ronalmoo Nov 19, 2019
c47bf11
fixed typo
Ronalmoo Nov 19, 2019
05e54fb
fixed typo
Ronalmoo Nov 19, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion dataloader/data_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import torchvision
import torchvision.transforms as transforms


def data_loader(dataset="CIFAR-10", batch_size = 16):

if dataset == "CIFAR-10":
Expand All @@ -21,4 +22,4 @@ def data_loader(dataset="CIFAR-10", batch_size = 16):
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=2)
classes = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

CIFAR-10인데 classes = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")은 무엇일까요?


return train_loader, test_loader, classes
return train_loader, test_loader, classes
10 changes: 5 additions & 5 deletions layers/BinaryLinear.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

class BinaryLinear(torch.nn.Linear):

def __init__(self, in_features, out_features, bias=True, mode="Stocastic"):
def __init__(self, in_features, out_features, bias=True, mode="Stochastic"):
super().__init__(in_features, out_features, bias)
self.mode = mode
self.bin_weight = self.weight_binarization(self.weight, self.mode)
Expand All @@ -17,8 +17,8 @@ def forward(self, input: torch.Tensor) -> torch.Tensor:

def weight_binarization(self, weight: torch.tensor, mode:str):
with torch.set_grad_enabled(False):
if mode == "Stocastic":
bin_weight = self.stocastic(weight)
if mode == "Stochastic":
bin_weight = self.stochastic(weight)
elif mode == "Deterministic":
bin_weight = self.deterministic(weight)
else:
Expand All @@ -32,7 +32,7 @@ def deterministic(weight: torch.tensor) -> torch.tensor:
return weight.sign()

@staticmethod
def stocastic(weight: torch.tensor) -> torch.tensor:
def stochastic(weight: torch.tensor) -> torch.tensor:
p = torch.sigmoid(weight)
uniform_matrix = torch.empty(p.shape).uniform_(0,1)
bin_weight = (p >= uniform_matrix).type(torch.float32)
Expand All @@ -49,4 +49,4 @@ def clipping_weight(self, weight:torch.tensor) -> torch.tensor:
with torch.set_grad_enabled(False):
weight = torch.clamp(weight, -1, 1)
weight.requires_grad = True
return weight
return weight
2 changes: 1 addition & 1 deletion models/binarized_conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,4 +114,4 @@ def test_dataloader(self):
trainer = Trainer(checkpoint_callback=checkpoint_callback,
max_nb_epochs=1, train_percent_check=0.1)
trainer.fit(model)
trainer.test(model)
trainer.test(model)
8 changes: 4 additions & 4 deletions models/binarized_mlp.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,16 +120,16 @@ def test_dataloader(self):
monitor='val_loss',
mode='min',
prefix='',
save_weights_only= True
save_weights_only=True
)

gpus = torch.cuda.device_count()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Binarized_MLP(device=device, mode="Stochastic")
model.to(device)
model.summary()

trainer = Trainer(checkpoint_callback=checkpoint_callback,
max_nb_epochs=5, train_percent_check=0.1)
trainer.fit(model)
trainer.test(model)
trainer.test(model)
149 changes: 149 additions & 0 deletions models/resnet.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
import os
import torch
import torch.nn as nn
import pytorch_lightning as pl
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer
from torchvision.datasets import CIFAR10, MNIST


def conv3x3(in_channels, out_channels, stride=1):
    """Return a 3x3 convolution with padding 1 and no bias term."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
Comment on lines +12 to +14
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

그냥 호불호인데 이렇게까지 분리 안해도 될 거 같긴한데.. ㅎㅎ



class ResidualBlock(nn.Module):
    """Basic two-convolution residual block (ResNet-18/34 style).

    Computes relu(bn2(conv2(relu(bn1(conv1(x))))) + identity), where the
    identity path is optionally transformed by ``downsample`` so its shape
    matches the main path.
    """

    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        # Main path: two 3x3 convolutions, each followed by batch norm.
        self.conv1 = conv3x3(in_channels, out_channels, stride)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Optional module that projects x to the main path's shape.
        self.downsample = downsample

    def forward(self, x):
        identity = x
        if self.downsample:
            identity = self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        h = h + identity
        return self.relu(h)


class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand.

    The output has ``expansion * planes`` channels. A projection shortcut
    (1x1 conv + batch norm) is used whenever the stride or channel count
    would otherwise make the identity addition impossible.
    """

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        out_planes = self.expansion * planes
        # Reduce, transform, then expand the channel dimension.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        # Identity shortcut unless spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                          bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h = h + self.shortcut(x)
        return F.relu(h)


class ResNet(pl.LightningModule):
    """ResNet for CIFAR-10 wrapped as a pytorch-lightning module.

    Args:
        block: residual block class to stack (e.g. ``Bottleneck``); must
            expose an ``expansion`` class attribute.
        num_blocks: four-element sequence giving the number of blocks per
            stage (64/128/256/512 channels respectively).
        num_classes: size of the final linear classifier (default 10).
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        # CIFAR-style stem: single 3x3 conv, no initial max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 32x32 input is reduced to 4x4 by the three stride-2 stages.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out

    def training_step(self, batch, batch_nb):
        x, y = batch
        y_hat = self.forward(x)
        return {'loss': F.cross_entropy(y_hat, y)}

    def validation_step(self, batch, batch_nb):
        x, y = batch
        y_hat = self.forward(x)
        return {'val_loss': F.cross_entropy(y_hat, y)}

    def validation_end(self, outputs):
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        return {'avg_val_loss': avg_loss}

    def test_step(self, batch, batch_nb):
        x, y = batch
        y_hat = self.forward(x)
        return {'test_loss': F.cross_entropy(y_hat, y)}

    def test_end(self, outputs):
        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
        return {'avg_test_loss': avg_loss}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.02)

    @pl.data_loader
    def train_dataloader(self):
        return DataLoader(CIFAR10(os.getcwd(), train=True, transform=transforms.ToTensor(), download=True), batch_size=128)

    @pl.data_loader
    def val_dataloader(self):
        # NOTE(review): this validates on the *training* split (train=True),
        # same data as train_dataloader — presumably unintended; confirm and
        # carve out a proper held-out split.
        return DataLoader(CIFAR10(os.getcwd(), train=True, transform=transforms.ToTensor(), download=True), batch_size=32)

    @pl.data_loader
    def test_dataloader(self):
        # Fix: the transform was missing, so CIFAR10 yielded PIL images that
        # the default collate function cannot batch and forward() cannot consume.
        return DataLoader(CIFAR10(os.getcwd(), train=False, transform=transforms.ToTensor(), download=True), batch_size=32)


def ResNet50():
    """Build a ResNet-50: Bottleneck blocks with stage depths [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3, 4, 6, 3])


if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # NOTE(review): `device` is computed but never used below — the Lightning
    # Trainer manages device placement itself; confirm before removing.
    net = ResNet50()
    runner = Trainer()
    runner.fit(net)
75 changes: 0 additions & 75 deletions train.py

This file was deleted.