Commit ff3b18f: Add files via upload
Bohao-Lee authored Mar 6, 2021
Parent: e3a7c69
Showing 55 changed files with 6,354 additions and 1 deletion.
25 changes: 24 additions & 1 deletion README.md
@@ -1 +1,24 @@
# CME-Class-Margin-Equilibrium-for-Few-shot-Object-Detection-
# CME
Our code is based on https://github.com/bingykang/Fewshot_Detection and developed with Python 3.6.5 & PyTorch 1.1.0.
## Training our model on VOC
### Dataset preparation
To train our model on the Pascal VOC dataset, please prepare the dataset following https://github.com/bingykang/Fewshot_Detection
### Training the model on VOC split 1
Once the dataset is ready, modify the project directory in train_decoupling_disturbance.py and valid_decoupling.py:
```
sys.path.append("Your Project dir")
```
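
For example, if the repository were cloned to /home/user/CME (a hypothetical path, not one fixed by the project), the line would read:
```
sys.path.append("/home/user/CME")
```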

After that, run
```
bash train_model.sh
```
to train the model and obtain the corresponding results on split 1.
To train on another split, you only need to change
```
SPLIT=(1)
```
to the split you want to train on, as shown below.
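
For example, to train on split 2 instead, set:
```
SPLIT=(2)
```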

You can download the pretrained weights from [Google Drive](https://drive.google.com/file/d/1-0Q2EqYXXb0dDm1J0e4dgyXeoQAesibU/view?usp=sharing) or [BaiduYun](https://pan.baidu.com/s/1WUrF0-dMyaS3InObBQa5zw) with extraction code **CVPR**.
57 changes: 57 additions & 0 deletions layers/batchnorm/Makefile
@@ -0,0 +1,57 @@
GPU=1
CUDNN=0

ARCH= -gencode arch=compute_50,code=[sm_50,compute_50] \
      -gencode arch=compute_52,code=[sm_52,compute_52]

# This is what I use, uncomment if you know your arch and want to specify
# ARCH= -gencode arch=compute_52,code=compute_52
VPATH=./src/
OBJDIR=./obj/

CC=gcc
NVCC=nvcc
OPTS=-Ofast
LDFLAGS= -lm -pthread
COMMON=
CFLAGS=-Wall -Wfatal-errors

CFLAGS+=$(OPTS)

ifeq ($(GPU), 1)
COMMON+= -DGPU -I/usr/local/cuda/include/
CFLAGS+= -DGPU
LDFLAGS+= -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand
endif

ifeq ($(CUDNN), 1)
COMMON+= -DCUDNN
CFLAGS+= -DCUDNN
LDFLAGS+= -lcudnn
endif

OBJ=blas.o cuda.o
ifeq ($(GPU), 1)
LDFLAGS+= -lstdc++
OBJ+=blas_kernels.o
endif

OBJS = $(addprefix $(OBJDIR), $(OBJ))
DEPS = $(wildcard src/*.h) Makefile

all: obj $(OBJS)

$(OBJDIR)%.o: %.c $(DEPS)
	$(CC) $(COMMON) $(CFLAGS) -fPIC -c $< -o $@

$(OBJDIR)%.o: %.cu $(DEPS)
	$(NVCC) $(ARCH) $(COMMON) -Xcompiler -fPIC --compiler-options "$(CFLAGS)" -c $< -o $@

obj:
	mkdir -p obj

.PHONY: clean

clean:
	rm -rf $(OBJS) $(EXEC)

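Since GPU and CUDNN are plain make variables, they can presumably also be overridden from the command line instead of editing the Makefile (standard make behavior, not something the repository documents):
```
make GPU=1 CUDNN=1
```
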
162 changes: 162 additions & 0 deletions layers/batchnorm/bn.py
@@ -0,0 +1,162 @@
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch.autograd import Function
import bn_lib

class BN2dFunc(Function):
    # Legacy stateful autograd Function (pre-PyTorch-1.3 style) wrapping the
    # compiled bn_lib kernels.
    def __init__(self, running_mean, running_var, training, momentum, eps):
        self.running_mean = running_mean
        self.running_var = running_var
        self.training = training
        self.momentum = momentum
        self.eps = eps

    def forward(self, input, weight, bias):
        nB = input.size(0)
        nC = input.size(1)
        nH = input.size(2)
        nW = input.size(3)

        output = input.new(nB, nC, nH, nW)
        self.input = input
        self.weight = weight
        self.bias = bias
        self.x = input.new(nB, nC, nH, nW)
        self.x_norm = input.new(nB, nC, nH, nW)
        self.mean = input.new(nB, nC)
        self.var = input.new(nB, nC)

        if input.is_cuda:
            bn_lib.bn_forward_gpu(input, self.x, self.x_norm, self.mean, self.running_mean, self.var, self.running_var, weight, bias, self.training, output)
        else:
            bn_lib.bn_forward(input, self.x, self.x_norm, self.mean, self.running_mean, self.var, self.running_var, weight, bias, self.training, output)
        return output

    def backward(self, grad_output):
        nB = grad_output.size(0)
        nC = grad_output.size(1)
        nH = grad_output.size(2)
        nW = grad_output.size(3)
        grad_input = grad_output.new(nB, nC, nH, nW)
        grad_mean = grad_output.new(nC)
        grad_var = grad_output.new(nC)
        grad_weight = grad_output.new(nC)
        grad_bias = grad_output.new(nC)

        if grad_output.is_cuda:
            bn_lib.bn_backward_gpu(grad_output, self.input, self.x_norm, self.mean, grad_mean, self.var, grad_var, self.weight, grad_weight, self.bias, grad_bias, self.training, grad_input)
        else:
            bn_lib.bn_backward(grad_output, self.input, self.x_norm, self.mean, grad_mean, self.var, grad_var, self.weight, grad_weight, self.bias, grad_bias, self.training, grad_input)

        return grad_input, grad_weight, grad_bias

class BN2d(nn.Module):
    # BatchNorm2d module backed by the custom bn_lib kernels.
    def __init__(self, num_features, momentum=0.01, eps=1e-5):
        super(BN2d, self).__init__()
        self.num_features = num_features
        self.weight = Parameter(torch.Tensor(num_features))
        self.bias = Parameter(torch.Tensor(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.zeros(num_features))
        self.momentum = momentum
        self.eps = eps

        self.running_mean.zero_()
        self.running_var.fill_(1)
        self.weight.data.uniform_()
        self.bias.data.zero_()

    def forward(self, input):
        #print('------------ BN2d input -------------')
        #print(input.data.storage()[0:10])
        return BN2dFunc(self.running_mean, self.running_var, self.training, self.momentum, self.eps)(input, self.weight, self.bias)

class BN2d_slow(nn.Module):
    # Reference batch norm written with plain autograd ops, used to check the
    # custom kernels against nn.BatchNorm2d.
    def __init__(self, num_features, momentum=0.01):
        super(BN2d_slow, self).__init__()
        self.num_features = num_features
        self.weight = Parameter(torch.Tensor(num_features))
        self.bias = Parameter(torch.Tensor(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.zeros(num_features))
        self.eps = 1e-5
        self.momentum = momentum

        self.running_mean.zero_()
        self.running_var.fill_(1)
        self.weight.data.uniform_()
        self.bias.data.zero_()

    def forward(self, x):
        nB = x.data.size(0)
        nC = x.data.size(1)
        nH = x.data.size(2)
        nW = x.data.size(3)
        samples = nB*nH*nW
        # Flatten to (nB*nH*nW, nC) so per-channel statistics are column stats.
        y = x.view(nB, nC, nH*nW).transpose(1,2).contiguous().view(-1,nC)
        if self.training:
            print('forward in training mode on autograd')
            m = Variable(y.mean(0).data, requires_grad=False)
            v = Variable(y.var(0).data, requires_grad=False)
            self.running_mean = (1-self.momentum)*self.running_mean + self.momentum * m.data.view(-1)
            self.running_var = (1-self.momentum)*self.running_var + self.momentum * v.data.view(-1)
            m = m.repeat(samples, 1)
            # Convert the unbiased variance to the biased estimate used for
            # normalization.
            v = v.repeat(samples, 1)*(samples-1.0)/samples
        else:
            m = Variable(self.running_mean.repeat(samples, 1), requires_grad=False)
            v = Variable(self.running_var.repeat(samples, 1), requires_grad=False)
        w = self.weight.repeat(samples, 1)
        b = self.bias.repeat(samples, 1)
        y = (y - m)/(v+self.eps).sqrt() * w + b
        y = y.view(nB, nH*nW, nC).transpose(1,2).contiguous().view(nB, nC, nH, nW)
        return y


if __name__ == '__main__':
    nB = 64
    nC = 3
    nH = 4
    nW = 4
    a = torch.rand(nB, nC, nH, nW)
    a = Variable(a)
    nn_model = nn.BatchNorm2d(nC)
    dkn_model = BN2d(nC)
    atg_model = BN2d_slow(nC)

    # Use identical affine parameters so all three implementations should agree.
    nn_model.weight.data.fill_(1.0)
    nn_model.bias.data.zero_()
    dkn_model.weight.data.fill_(1.0)
    dkn_model.bias.data.zero_()
    atg_model.weight.data.fill_(1.0)
    atg_model.bias.data.zero_()
    nn_out_cpu = nn_model(a)
    dkn_out_cpu = dkn_model(a)
    atg_out_cpu = atg_model(a)

    a = a.cuda()
    nn_model.cuda()
    dkn_model.cuda()
    atg_model.cuda()

    nn_out_gpu = nn_model(a)
    dkn_out_gpu = dkn_model(a)
    atg_out_gpu = atg_model(a)

    print('--- nn cpu out ---')
    print(nn_out_cpu.data.storage()[0:10])
    print('--- dkn cpu out ---')
    print(dkn_out_cpu.data.storage()[0:10])
    print('--- atg cpu out ---')
    print(atg_out_cpu.data.storage()[0:10])

    print('--- nn gpu out ---')
    print(nn_out_gpu.data.storage()[0:10])
    print('--- dkn gpu out ---')
    print(dkn_out_gpu.data.storage()[0:10])
    print('--- atg gpu out ---')
    print(atg_out_gpu.data.storage()[0:10])
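
For reference, BN2d_slow computes the standard batch-norm transform y = (x - mean) / sqrt(var + eps) * weight + bias. A minimal sketch of the same inference-mode computation with the stock functional API, assuming a reasonably recent PyTorch (this snippet is illustrative and not part of the repository):
```
import torch
import torch.nn.functional as F

x = torch.rand(2, 3, 4, 4)
running_mean = torch.zeros(3)
running_var = torch.ones(3)
weight = torch.ones(3)   # gamma
bias = torch.zeros(3)    # beta

# Inference-mode batch norm:
# y = (x - running_mean) / sqrt(running_var + eps) * weight + bias
y = F.batch_norm(x, running_mean, running_var, weight, bias,
                 training=False, eps=1e-5)
```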
12 changes: 12 additions & 0 deletions layers/batchnorm/bn_lib/__init__.py
@@ -0,0 +1,12 @@

from torch.utils.ffi import _wrap_function
from ._bn_lib import lib as _lib, ffi as _ffi

__all__ = []

def _import_symbols(locals):
    # Wrap each symbol exported by the compiled library and re-export it
    # from this package.
    for symbol in dir(_lib):
        fn = getattr(_lib, symbol)
        locals[symbol] = _wrap_function(fn, _ffi)
        __all__.append(symbol)

_import_symbols(locals())
35 changes: 35 additions & 0 deletions layers/batchnorm/build.py
@@ -0,0 +1,35 @@
import os
import torch
from torch.utils.ffi import create_extension


sources = ['src/batchnorm.c']
headers = ['src/batchnorm.h']
defines = []
with_cuda = False

if torch.cuda.is_available():
    print('Including CUDA code.')
    #sources += ['src/cuda.c']
    #headers += ['src/cuda.h']
    defines += [('WITH_CUDA', None)]
    with_cuda = True

this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
# Link against the objects produced by the Makefile in this directory.
extra_objects = ['obj/blas_kernels.o', 'obj/cuda.o', 'obj/blas.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]

ffi = create_extension(
    'bn_lib',
    headers=headers,
    sources=sources,
    define_macros=defines,
    relative_to=__file__,
    with_cuda=with_cuda,
    extra_objects=extra_objects
)

if __name__ == '__main__':
    ffi.build()
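
Because extra_objects points at the .o files produced by the Makefile above, those objects presumably have to be compiled before the extension is built. A plausible build sequence, assuming the legacy torch.utils.ffi toolchain that this code targets is available:
```
cd layers/batchnorm
make
python build.py
```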
(The remaining 50 changed files are not shown here.)
