diff --git a/scripts/nnom.py b/scripts/nnom.py
index b390f80..7bfe99e 100644
--- a/scripts/nnom.py
+++ b/scripts/nnom.py
@@ -937,13 +937,13 @@ def gen_weight_tensor(w, per_axis):
         if (cfg['activation'] == 'relu'):
             fp.write('\tlayer[%s] = model.active(act_relu(), layer[%s]);\n' % (id, LI[inp][0]))
         elif (cfg['activation'] == 'tanh'):
-            fp.write('\tlayer[%s] = model.active(act_tanh(%s_OUTPUT_DEC), layer[%s]);\n' % (
+            fp.write('\tlayer[%s] = model.active(act_hard_tanh(%s_OUTPUT_DEC), layer[%s]);\n' % (
                 id, inp.upper(), LI[inp][0]))
         elif (cfg['activation'] == 'sigmoid'):
             fp.write('\tlayer[%s] = model.active(act_sigmoid(%s_OUTPUT_DEC), layer[%s]);\n' % (
                 id, inp.upper(), LI[inp][0]))
         elif (cfg['activation'] == 'hard_sigmoid'):
-            fp.write('\tlayer[%s] = model.active(act_sigmoid(%s_OUTPUT_DEC), layer[%s]);\n' % (
+            fp.write('\tlayer[%s] = model.active(act_hard_sigmoid(%s_OUTPUT_DEC), layer[%s]);\n' % (
                 id, inp.upper(), LI[inp][0]))
         elif (cfg['activation'] == 'softmax'):
             fp.write('\tlayer[%s] = model.hook(Softmax(), layer[%s]);\n' % (id, LI[inp][0]))
diff --git a/src/backends/nnom_local.c b/src/backends/nnom_local.c
index 8c1d510..b4faf5f 100644
--- a/src/backends/nnom_local.c
+++ b/src/backends/nnom_local.c
@@ -1557,7 +1557,7 @@ void local_softmax_q7(const q7_t *vec_in, const uint32_t dim_vec, q7_t *p_out)
 // otherwise y = 0.2 * x + 0.5 (y=0.20315 * x + 0.5)
 void local_hard_sigmoid_q7(q7_t *data, uint32_t size, int16_t dec_bit)
 {
-    int16_t limit = 2.5f * (1<> dec_bit) + offset;
+            data[i] = ((int16_t)(data[i] * mult) >> dec_bit) + offset;
         }
     }
 }
@@ -1587,7 +1587,7 @@ void local_hard_tanh_q7(q7_t *data, uint32_t size, int16_t dec_bit)
     if(dec_bit == 7)
         return;
 
-    // int bit > 0
+    // int bit < 0
     if(int_bit < 0)
         for(int i=0; i<size; i++)
head, output), default_layer_names[output->type]);
+    if (output->type != NNOM_OUTPUT)
+        NNOM_LOG("WARNING: the last layer '%s' is not the Output Layer, please check carefully.\n",
+            default_layer_names[output->type]);
     // get the total (aligned) memory requirement
     buf_size = mem_analysis_result(m);
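
Note: the sketch below is a rough, standalone illustration of the fixed-point arithmetic behind the patched `local_hard_sigmoid_q7` line, mapping a q7 input with `dec_bit` fractional bits through y = 0.2*x + 0.5. The constants `offset = 64` (0.5 * 128) and `mult = 26` (~0.2 * 128), the saturation bound at |x| >= 2.5, and the helper name `hard_sigmoid_q7_example` are assumptions for illustration, not taken verbatim from the patch.

#include <stdint.h>
#include <stdio.h>

typedef int8_t q7_t;

/* Illustration only: hard sigmoid y = 0.2*x + 0.5 on a q7 value whose
 * fractional width is dec_bit, producing a q0.7 result in [0, 127].
 * Assumed constants: offset = 64 ~ 0.5*128, mult = 26 ~ 0.2*128. */
static q7_t hard_sigmoid_q7_example(q7_t x, int16_t dec_bit)
{
    int16_t limit  = (int16_t)(2.5f * (1 << dec_bit)); /* |x| >= 2.5 saturates */
    int16_t offset = 64;
    int16_t mult   = 26;
    int16_t y;

    if (x <= -limit)
        return 0;
    if (x >= limit)
        return 127;
    /* cast before shifting so the q7*q7 product keeps its 16-bit range,
     * mirroring the ((int16_t)(data[i] * mult) >> dec_bit) + offset form */
    y = (int16_t)(((int16_t)(x * mult) >> dec_bit) + offset);
    return (q7_t)(y > 127 ? 127 : y);
}

int main(void)
{
    /* x = 1.0 in Q2.5 (dec_bit = 5) is 32; 0.2*1.0 + 0.5 = 0.7 ~ 90/128 */
    printf("%d\n", hard_sigmoid_q7_example(32, 5));
    return 0;
}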