
Commit

basic setup to test cuda version of it
liuliu committed Nov 28, 2013
1 parent 2aae4cd commit fe623cb
Showing 10 changed files with 389 additions and 34 deletions.
2 changes: 1 addition & 1 deletion bin/cifar-10.c
@@ -208,7 +208,7 @@ int main(int argc, char** argv)
.max_epoch = 100,
.mini_batch = 128,
.decay = 0.005,
.learn_rate = 0.00005,
.learn_rate = 0.00008,
.momentum = 0.9,
};
ccv_convnet_supervised_train(convnet, categorizeds, tests, params);
74 changes: 74 additions & 0 deletions bin/image-net.c
@@ -0,0 +1,74 @@
#include "ccv.h"
#include <sys/time.h>
#include <ctype.h>

unsigned int get_current_time()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}

int main(int argc, char** argv)
{
ccv_enable_default_cache();
assert(argc == 2);
FILE *r = fopen(argv[1], "r");
char* file = (char*)malloc(1024);
ccv_array_t* categorizeds = ccv_array_new(sizeof(ccv_categorized_t), 64, 0);
size_t len = 1024;
ssize_t read;
while ((read = getline(&file, &len, r)) != -1)
{
while(read > 1 && isspace(file[read - 1]))
read--;
file[read] = 0;
ccv_categorized_t categorized;
categorized.file.filename = (char*)ccmalloc(1024);
strncpy(categorized.file.filename, file, 1024);
ccv_array_push(categorizeds, &categorized);
}
fclose(r);
free(file);
ccv_convnet_param_t params[] = {
{
.type = CCV_CONVNET_CONVOLUTIONAL,
.bias = 0,
.sigma = 0.01,
.input = {
.matrix = {
.rows = 225,
.cols = 225,
.channels = 3,
},
},
.output = {
.convolutional = {
.count = 128,
.strides = 4,
.border = 1,
.rows = 11,
.cols = 11,
.channels = 3,
},
},
},
};
ccv_convnet_t* convnet = ccv_convnet_new(params, 1);
ccv_convnet_train_param_t train_params = {
.max_epoch = 100,
.mini_batch = 128,
.decay = 0.005,
.learn_rate = 0.00008,
.momentum = 0.9,
};
int i;
for (i = 0; i < convnet->layers->wnum; i++)
convnet->layers->w[i] = 1;
for (i = 0; i < convnet->layers->net.convolutional.count; i++)
convnet->layers->bias[i] = 0;
ccv_convnet_supervised_train(convnet, categorizeds, 0, train_params);
ccv_convnet_free(convnet);
ccv_disable_cache();
return 0;
}
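
For reference, with this configuration the single convolutional layer should map the 225x225x3 input to a 55x55 grid of 128 filter responses, assuming the standard output-size rule (rows + 2 * border - filter_rows) / strides + 1 = (225 + 2 * 1 - 11) / 4 + 1 = 55 per spatial dimension. The loops above then overwrite the weights with 1 and the biases with 0, consistent with the commit message that this is a basic setup for testing the CUDA path rather than a real training run.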
2 changes: 1 addition & 1 deletion bin/makefile
@@ -4,7 +4,7 @@ include ../lib/config.mk
LDFLAGS := -L"../lib" -lccv $(LDFLAGS)
CFLAGS := -O3 -Wall -I"../lib" $(CFLAGS)

TARGETS = bbffmt msermatch siftmatch bbfcreate bbfdetect swtcreate swtdetect dpmcreate dpmdetect convert tld icfcreate icfdetect icfoptimize cifar-10
TARGETS = bbffmt msermatch siftmatch bbfcreate bbfdetect swtcreate swtdetect dpmcreate dpmdetect convert tld icfcreate icfdetect icfoptimize cifar-10 image-net

all: libccv.a $(TARGETS)

4 changes: 2 additions & 2 deletions lib/ccv.h
@@ -1158,8 +1158,8 @@ typedef struct {

ccv_convnet_t* __attribute__((warn_unused_result)) ccv_convnet_new(ccv_convnet_param_t params[], int count);
void ccv_convnet_supervised_train(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_array_t* tests, ccv_convnet_train_param_t params);
void ccv_convnet_encode(ccv_convnet_t* convnet, ccv_dense_matrix_t* a, ccv_dense_matrix_t** b, int type);
int ccv_convnet_classify(ccv_convnet_t* convnet, ccv_dense_matrix_t* a);
void ccv_convnet_encode(ccv_convnet_t* convnet, ccv_dense_matrix_t** a, ccv_dense_matrix_t** b, int batch);
void ccv_convnet_classify(ccv_convnet_t* convnet, ccv_dense_matrix_t** a, int* labels, int batch);
void ccv_convnet_free(ccv_convnet_t* convnet);

#endif
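
As a quick illustration of the new batched interface (a minimal sketch, not part of this commit: it assumes a convnet already constructed with ccv_convnet_new and an input image whose rows, cols, and channels already match the network; on the CPU path the batch size must be 1):

#include "ccv.h"

static int classify_one(ccv_convnet_t* convnet, const char* filename)
{
	ccv_dense_matrix_t* image = 0;
	// load as an RGB matrix with ccv's generic reader
	ccv_read(filename, &image, CCV_IO_ANY_FILE | CCV_IO_RGB_COLOR);
	int label = -1;
	// new signature: arrays of input matrices and output labels, plus a batch size
	ccv_convnet_classify(convnet, &image, &label, 1);
	ccv_matrix_free(image);
	return label;
}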
45 changes: 31 additions & 14 deletions lib/ccv_convnet.c
@@ -4,6 +4,9 @@
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#endif
#ifdef HAVE_CUDA
#include "cuda/ccv_convnet.h"
#endif

#ifndef CASE_TESTS

@@ -275,31 +278,34 @@ static void _ccv_convnet_average_pool_forward_propagate(ccv_convnet_layer_t* lay

#ifndef CASE_TESTS

void ccv_convnet_encode(ccv_convnet_t* convnet, ccv_dense_matrix_t* a, ccv_dense_matrix_t** b, int type)
void ccv_convnet_encode(ccv_convnet_t* convnet, ccv_dense_matrix_t** a, ccv_dense_matrix_t** b, int batch)
{
assert(CCV_GET_CHANNEL(a->type) == convnet->channels);
assert(a->rows == convnet->rows);
assert(a->cols == convnet->cols);
#ifdef HAVE_CUDA
ccv_cu_convnet_encode(convnet, a, b, batch);
#else
assert(batch == 1);
assert(CCV_GET_CHANNEL((*a)->type) == convnet->channels);
assert((*a)->rows == convnet->rows);
assert((*a)->cols == convnet->cols);
int i;
// save the last layer of neuron cache in case that we encode to a different matrix
ccv_dense_matrix_t* out_neuron = convnet->acts[convnet->count - 1];
convnet->acts[convnet->count - 1] = *b;
switch(convnet->layers->type)
{
case CCV_CONVNET_CONVOLUTIONAL:
_ccv_convnet_convolutional_forward_propagate(convnet->layers, a, convnet->count > 1 ? convnet->dropouts[0] : 0, convnet->acts);
_ccv_convnet_convolutional_forward_propagate(convnet->layers, *a, convnet->count > 1 ? convnet->dropouts[0] : 0, convnet->acts);
break;
case CCV_CONVNET_FULL_CONNECT:
_ccv_convnet_full_connect_forward_propagate(convnet->layers, a, convnet->count > 1 ? convnet->dropouts[0] : 0, convnet->acts);
_ccv_convnet_full_connect_forward_propagate(convnet->layers, *a, convnet->count > 1 ? convnet->dropouts[0] : 0, convnet->acts);
break;
case CCV_CONVNET_MAX_POOL:
_ccv_convnet_max_pool_forward_propagate(convnet->layers, a, convnet->acts);
_ccv_convnet_max_pool_forward_propagate(convnet->layers, *a, convnet->acts);
break;
case CCV_CONVNET_AVERAGE_POOL:
_ccv_convnet_average_pool_forward_propagate(convnet->layers, a, convnet->acts);
_ccv_convnet_average_pool_forward_propagate(convnet->layers, *a, convnet->acts);
break;
}
assert(type == 0 || CCV_GET_DATA_TYPE(type) == CCV_32F);
for (i = 1; i < convnet->count; i++)
{
ccv_convnet_layer_t* layer = convnet->layers + i;
@@ -326,18 +332,24 @@ void ccv_convnet_encode(ccv_convnet_t* convnet, ccv_dense_matrix_t* a, ccv_dense
// restore the last layer of neuron cache
convnet->acts[convnet->count - 1] = out_neuron;
}
#endif
}

int ccv_convnet_classify(ccv_convnet_t* convnet, ccv_dense_matrix_t* a)
void ccv_convnet_classify(ccv_convnet_t* convnet, ccv_dense_matrix_t** a, int* labels, int batch)
{
ccv_convnet_encode(convnet, a, convnet->acts + convnet->count - 1, 0);
#ifdef HAVE_CUDA
ccv_cu_convnet_classify(convnet, a, labels, batch);
#else
assert(batch == 1);
ccv_convnet_encode(convnet, a, convnet->acts + convnet->count - 1, 1);
int i, c = 0;
ccv_dense_matrix_t* b = convnet->acts[convnet->count - 1];
int maxc = b->data.f32[0];
for (i = 1; i < b->rows; i++)
if (b->data.f32[i] > maxc)
maxc = b->data.f32[i], c = i;
return c;
labels[0] = c;
#endif
}

#endif
@@ -800,6 +812,9 @@ static ccv_convnet_t* _ccv_convnet_update_new(ccv_convnet_t* convnet)

void ccv_convnet_supervised_train(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_array_t* tests, ccv_convnet_train_param_t params)
{
#ifdef HAVE_CUDA
ccv_cu_convnet_supervised_train(convnet, categorizeds, tests, params);
#else
int i, j, t;
gsl_rng_env_setup();
gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
@@ -836,7 +851,7 @@ void ccv_convnet_supervised_train(ccv_convnet_t* convnet, ccv_array_t* categoriz
}
}
ccv_categorized_t* categorized = (ccv_categorized_t*)ccv_array_get(categorizeds, idx[i]);
ccv_convnet_encode(convnet, categorized->matrix, convnet->acts + convnet->count - 1, 0);
ccv_convnet_encode(convnet, &categorized->matrix, convnet->acts + convnet->count - 1, 1);
ccv_dense_matrix_t* softmax = convnet->acts[convnet->count - 1];
float* dloss = softmax->data.f32;
_ccv_convnet_compute_softmax(softmax, &softmax, 0);
@@ -860,7 +875,8 @@ void ccv_convnet_supervised_train(ccv_convnet_t* convnet, ccv_array_t* categoriz
{
FLUSH(" - going through %d / %d for tests", i + 1, tests->rnum);
ccv_categorized_t* test = (ccv_categorized_t*)ccv_array_get(tests, i);
int c = ccv_convnet_classify(convnet, test->matrix);
int c = 0;
ccv_convnet_classify(convnet, &test->matrix, &c, 1);
if (c != test->c)
++miss;
}
@@ -878,6 +894,7 @@ void ccv_convnet_supervised_train(ccv_convnet_t* convnet, ccv_array_t* categoriz
ccv_convnet_free(momentum);
ccv_convnet_free(update_params);
gsl_rng_free(rng);
#endif
}

void ccv_convnet_free(ccv_convnet_t* convnet)
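The CUDA entry points referenced above (ccv_cu_convnet_encode, ccv_cu_convnet_classify, ccv_cu_convnet_supervised_train) come from cuda/ccv_convnet.h, whose new contents are not rendered on this page; judging only from the call sites in lib/ccv_convnet.c, the declarations would presumably look like the following sketch (signatures inferred, not copied from the actual header):

#include "ccv.h"

// Inferred from the HAVE_CUDA call sites above; the real header may differ.
void ccv_cu_convnet_encode(ccv_convnet_t* convnet, ccv_dense_matrix_t** a, ccv_dense_matrix_t** b, int batch);
void ccv_cu_convnet_classify(ccv_convnet_t* convnet, ccv_dense_matrix_t** a, int* labels, int batch);
void ccv_cu_convnet_supervised_train(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_array_t* tests, ccv_convnet_train_param_t params);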
10 changes: 0 additions & 10 deletions lib/cuda/ccv_convnet.cu

This file was deleted.

3 changes: 0 additions & 3 deletions lib/cuda/ccv_convnet.h

This file was deleted.
