Skip to content

Commit

Permalink
probing points.
Browse files Browse the repository at this point in the history
  • Loading branch information
ghimiredhikura committed Nov 20, 2018
1 parent 051c997 commit 92145c8
Show file tree
Hide file tree
Showing 10 changed files with 120 additions and 20 deletions.
Binary file added .vs/yolo_cpu/v14/.suo
Binary file not shown.
Binary file modified bin/yolo_cpu.exe
Binary file not shown.
Binary file removed bin/yolo_cpu.iobj
Binary file not shown.
Binary file removed bin/yolo_cpu.ipdb
Binary file not shown.
Binary file removed bin/yolo_cpu.pdb
Binary file not shown.
110 changes: 101 additions & 9 deletions src/additionally.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,6 @@
// global GPU index: cuda.c
int gpu_index = 0;

int m_dbg = 1;

// im2col.c
float im2col_get_pixel(float *im, int height, int width, int channels,
int row, int col, int channel, int pad)
Expand Down Expand Up @@ -175,12 +173,12 @@ void calculate_binary_weights(network net)
}
}

if(m_dbg)
{
layer l1 = net.layers[j];
if (l1.dontload) continue;
save_convolutional_weights(l1, j);
}
// if(m_dbg)
// {
// layer l1 = net.layers[j];
// if (l1.dontload) continue;
// save_convolutional_weights(l1, j);
// }
}
}
}
Expand Down Expand Up @@ -322,7 +320,6 @@ void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, i
}
}


void float_to_bit(float *src, unsigned char *dst, size_t size)
{
size_t dst_size = size / 8 + 1;
Expand Down Expand Up @@ -1901,6 +1898,9 @@ network parse_network_cfg(char *filename, int batch, int quantized)
params.c = l.out_c;
params.inputs = l.outputs;
}

if (m_dbg) save_layer_param(l, count, lt);

}
free_list(sections);
net.outputs = get_network_output_size(net);
Expand Down Expand Up @@ -2131,6 +2131,7 @@ void save_net_param(network net)
fprintf(stderr, "network init.: writing parameters...");
sprintf(buf, "dbg/network_parameters.txt");
FILE *fp = fopen(buf, "w");
fprintf(fp, "m_net.quantized: %d\n", net.quantized);
fprintf(fp, "m_net.inputs: %d\n", net.inputs);
fprintf(fp, "m_net.h: %d\n", net.h);
fprintf(fp, "m_net.w: %d\n", net.w);
Expand All @@ -2152,7 +2153,12 @@ void save_net_param(network net)
fprintf(fp, "m_net.step[%d] : %d\n", i, net.steps[i]);
fprintf(fp, "m_net.scale[%d] : %f\n", i, net.scales[i]);
}
fprintf(fp, "m_net.input_calibration_size: %d\n", net.input_calibration_size);
for (int i = 0; i < net.input_calibration_size; i++) {
fprintf(fp, "m_net.input_calibration[%d] : %f\n", i, net.input_calibration[i]);
}
fprintf(fp, "m_net.time_steps: %d\n", net.time_steps);
fprintf(fp, "m_net.burn_in: %d\n", net.burn_in);
fprintf(fp, "m_net.policy: %d\n", net.policy);
fprintf(fp, "m_net.gamma: %f\n", net.gamma);
fprintf(fp, "m_net.power: %f\n", net.power);
Expand All @@ -2164,6 +2170,92 @@ void save_net_param(network net)
fprintf(fp, "m_net.burn_in: %d\n", net.burn_in);
fprintf(fp, "m_net.momentum: %f\n", net.momentum);
fprintf(fp, "m_net.subdivisions : %d\n", net.subdivisions);
fclose(fp);
fprintf(stderr, "done.\n");
}

/*
 * Dump a layer's static configuration (geometry, filter count, flags) to
 * dbg/layer_parameters_<count>_<lt>.txt for probing/debugging.
 *
 * l     - the layer whose parameters are written (passed by value; read-only)
 * count - layer index within the network, used in the file name and log line
 * lt    - layer type code (LAYER_TYPE enum value), also part of the file name
 *
 * Best-effort: if the file cannot be opened (e.g. the dbg/ directory is
 * missing), a message is printed to stderr and the function returns without
 * writing anything.
 */
void save_layer_param(layer l, int count, int lt)
{
    char buf[256];
    fprintf(stderr, "layer %d init.: writing parameters...", count);
    /* snprintf: bounded, always NUL-terminated (sprintf had no bound) */
    snprintf(buf, sizeof buf, "dbg/layer_parameters_%02d_%02d.txt", count, lt);
    FILE *fp = fopen(buf, "w");
    if (!fp) {
        /* fprintf on a NULL stream is undefined behavior; bail out instead */
        fprintf(stderr, "failed to open %s\n", buf);
        return;
    }
    fprintf(fp, "layer #%d (type: %d)\n", count, lt);
    fprintf(fp, "l.binary: %d\n", l.binary);
    fprintf(fp, "l.xnor: %d\n", l.xnor);
    fprintf(fp, "l.use_bin_output: %d\n", l.use_bin_output);
    fprintf(fp, "l.size: %d\n", l.size);
    fprintf(fp, "l.stride: %d\n", l.stride);
    fprintf(fp, "l.pad: %d\n", l.pad);
    fprintf(fp, "l.n (# of filters): %d\n", l.n);
    fprintf(fp, "l.batch_normalize: %d\n", l.batch_normalize);
    fprintf(fp, "l.activation(type): %d\n", l.activation);
    fprintf(fp, "l.out_h: %d\n", l.out_h);
    fprintf(fp, "l.out_w: %d\n", l.out_w);
    fprintf(fp, "l.out_c: %d\n", l.out_c);
    fprintf(fp, "l.outputs: %d\n", l.outputs);
    fclose(fp);
    fprintf(stderr, "done.\n");
}

/*
void save_layer_data(layer l, int count)
{
char buf[256];
fprintf(stderr, "layer %d: writing layer data...", count);
sprintf(buf, "dbg/layer_%02d_data_%04d_%04d_%04d_%08d.txt", count, l.out_w, l.out_h, l.out_c, l.outputs);
FILE *fp = fopen(buf, "w");
if (l.out_w > 0) {
for (int x = 0; x < l.out_w; x++)
for (int y = 0; y < l.out_h; y++)
{
for (int c = 0; c < l.out_c; c++) {
fprintf(fp, "%f", l.output[y*l.out_w*l.out_c + x*l.out_c + c]);
if (c < (l.out_c - 1)) fprintf(fp, " ");
}
fprintf(fp, "\n");
}
}
else {
for (int c = 0; c < l.outputs; c++) fprintf(fp, "%f\n", l.output[c]);
}
fclose(fp);
fprintf(stderr, "done.\n");
}
*/


/*
 * Dump a layer's binarized INPUT data (the XNOR-net binary_input buffer) to
 * dbg/layer_<count>_data_<w>_<h>_<c>_<inputs>.txt as 0/1 integers.
 *
 * l     - the layer whose binary input is written (read-only)
 * count - layer index within the network, used in the file name and log line
 *
 * Only layers with l.xnor set produce any data; for other layers an empty
 * file is created. When l.w > 0 the values are written one (x, y) position
 * per line with the l.c channel values space-separated; otherwise the
 * buffer is written flat, one value per line.
 *
 * NOTE(review): the index expression y*l.w*l.c + x*l.c + c assumes an HWC
 * (interleaved-channel) layout of l.binary_input -- confirm against the
 * producer of that buffer before relying on the spatial ordering.
 *
 * Best-effort: if the file cannot be opened, log to stderr and return.
 */
void save_layer_data(layer l, int count)
{
    char buf[256];

    fprintf(stderr, "layer %d: writing layer data...", count);
    /* snprintf: bounded, always NUL-terminated (sprintf had no bound) */
    snprintf(buf, sizeof buf, "dbg/layer_%02d_data_%04d_%04d_%04d_%08d.txt",
             count, l.w, l.h, l.c, l.inputs);
    FILE *fp = fopen(buf, "w");
    if (!fp) {
        /* fprintf on a NULL stream is undefined behavior; bail out instead */
        fprintf(stderr, "failed to open %s\n", buf);
        return;
    }

    if (l.xnor) {
        if (l.w > 0) {
            for (int x = 0; x < l.w; x++) {
                for (int y = 0; y < l.h; y++) {
                    for (int c = 0; c < l.c; c++) {
                        int b_i = l.binary_input[y*l.w*l.c + x*l.c + c];
                        fprintf(fp, "%d", b_i);
                        /* BUG FIX: separator bound must match the loop bound
                         * l.c, not l.out_c (copy-paste leftover from the old
                         * out_c-based version above) */
                        if (c < (l.c - 1)) fprintf(fp, " ");
                    }
                    fprintf(fp, "\n");
                }
            }
        }
        else {
            for (int c = 0; c < l.inputs; c++) {
                int b_i = l.binary_input[c];
                fprintf(fp, "%d\n", b_i);
            }
        }
    }

    fclose(fp);
    fprintf(stderr, "done.\n");
}
9 changes: 4 additions & 5 deletions src/additionally.h
Original file line number Diff line number Diff line change
Expand Up @@ -623,8 +623,6 @@ extern "C" {
// convolutional_layer.c
convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam, int quantized, int use_bin_output);



// -------------- image.c --------------

// image.c
Expand Down Expand Up @@ -697,7 +695,6 @@ extern "C" {
// parser.c
void load_weights_upto_cpu(network *net, char *filename, int cutoff);


// -------------- yolov2_forward_network.c --------------------


Expand All @@ -710,7 +707,7 @@ extern "C" {
// -------------- yolov2_forward_network_quantized.c --------------------

// yolov2_forward_network.c - fp32 is used for 1st and last layers during INT8-quantized inference
void forward_convolutional_layer_cpu(layer l, network_state state);
void forward_convolutional_layer_cpu(layer l, int layer_id, network_state state);

// quantizized
float *network_predict_quantized(network net, float *input);
Expand All @@ -730,11 +727,13 @@ extern "C" {
// additionally.c
void free_detections(detection *dets, int n);


// save probing data
int m_dbg;

void save_convolutional_weights(layer l, int ind);
void save_net_param(network net);
void save_layer_param(layer l, int count, int lt);
void save_layer_data(layer l, int count);

// -------------- gettimeofday for Windows--------------------

Expand Down
1 change: 1 addition & 0 deletions src/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -272,6 +272,7 @@ int main(int argc, char **argv)
}

_mkdir("dbg");
m_dbg = 1;

if (argc < 2) {
fprintf(stderr, "usage: %s <function>\n", argv[0]);
Expand Down
18 changes: 13 additions & 5 deletions src/yolov2_forward_network.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,8 @@ size_t binary_transpose_align_input(int k, int n, float *b, char **t_bit_input,
}

// 4 layers in 1: convolution, batch-normalization, BIAS and activation
void forward_convolutional_layer_cpu(layer l, network_state state)
void forward_convolutional_layer_cpu(layer l, int layer_id, network_state state)
{

int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1
int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width for stride=1 and pad=1
int i, f, j;
Expand All @@ -40,6 +39,14 @@ void forward_convolutional_layer_cpu(layer l, network_state state)
state.input = l.binary_input;
}

if (m_dbg)
{
//if (l.dontload) continue;
save_convolutional_weights(l, layer_id);
save_layer_data(l, layer_id);
}


// l.n - number of filters on this layer
// l.c - channels of input-array
// l.h - height of input-array
Expand Down Expand Up @@ -221,7 +228,7 @@ void yolov2_forward_network_cpu(network net, network_state state)
layer l = net.layers[i];

if (l.type == CONVOLUTIONAL) {
forward_convolutional_layer_cpu(l, state);
forward_convolutional_layer_cpu(l, i, state);
//printf("\n CONVOLUTIONAL \t\t l.size = %d \n", l.size);
}
else if (l.type == MAXPOOL) {
Expand Down Expand Up @@ -255,7 +262,7 @@ void yolov2_forward_network_cpu(network net, network_state state)
else {
printf("\n layer: %d \n", l.type);
}

state.input = l.output;
}
}
Expand All @@ -275,7 +282,8 @@ float *network_predict_cpu(network net, float *input)
//float *out = get_network_output(net);
int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
return net.layers[i].output;

return net.layers[i].output;
}


Expand Down
2 changes: 1 addition & 1 deletion src/yolov2_forward_network_quantized.c
Original file line number Diff line number Diff line change
Expand Up @@ -372,7 +372,7 @@ void yolov2_forward_network_q(network net, network_state state)

if (l.type == CONVOLUTIONAL) {
if (i >= 1 && l.activation != LINEAR) forward_convolutional_layer_q(l, state);
else forward_convolutional_layer_cpu(l, state);
else forward_convolutional_layer_cpu(l, i, state);

printf("\n %d - CONVOLUTIONAL \t\t l.size = %d \n", i, l.size);
}
Expand Down

0 comments on commit 92145c8

Please sign in to comment.