tests : fix memory leaks (ggerganov#936)
It is annoying to run the tests using the sanitizers
because of all the uninteresting reports about the memory
leaked by the tests themselves.

Signed-off-by: Salvatore Mesoraca <[email protected]>
smeso authored Aug 27, 2024 · 1 parent b1ce8fb · commit 2438d62
Showing 4 changed files with 38 additions and 38 deletions.
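All four fixes follow the same pattern: a buffer allocated with new[] and never freed is replaced by a std::vector, which releases its storage automatically when it goes out of scope, and the raw pointer argument becomes .data(). Below is a minimal before/after sketch of the pattern; the fake_* helpers are stand-ins for the ggml calls, added only so the sketch compiles on its own.

#include <cstring>
#include <vector>

// Stand-ins for ggml_nelements() / ggml_backend_tensor_get(),
// included only so this sketch is self-contained.
static size_t fake_nelements() { return 16; }
static void fake_tensor_get(float * dst, size_t nbytes) { std::memset(dst, 0, nbytes); }

// Before: the buffer is never delete[]'d, so LeakSanitizer reports it.
static void leaky() {
    float * data = new float[fake_nelements()];
    fake_tensor_get(data, fake_nelements() * sizeof(float));
}   // data goes out of scope without delete[] -> leak report

// After: the vector owns the buffer and frees it in its destructor.
static void fixed() {
    std::vector<float> data(fake_nelements());
    fake_tensor_get(data.data(), data.size() * sizeof(float));
}   // storage released on scope exit -> no report

int main() {
    leaky();
    fixed();
    return 0;
}

Note that std::vector<float>(n) also zero-initializes its elements, unlike new float[n]; that is harmless here because the tests overwrite the buffers before reading them. Building with something like -fsanitize=address is the usual way these reports surface (an assumption about the local setup, not something stated in the commit).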
32 changes: 16 additions & 16 deletions tests/test-conv-transpose-1d.cpp
@@ -421,9 +421,9 @@ int main(void)
}
}

-float* conv1d_transpose_data_0 = new float[ggml_nelements(conv1d_transpose_res_0)];
+std::vector<float> conv1d_transpose_data_0(ggml_nelements(conv1d_transpose_res_0));

-ggml_backend_tensor_get(conv1d_transpose_res_0, conv1d_transpose_data_0, 0, ggml_nbytes(conv1d_transpose_res_0));
+ggml_backend_tensor_get(conv1d_transpose_res_0, conv1d_transpose_data_0.data(), 0, ggml_nbytes(conv1d_transpose_res_0));

const int n_conv_transpose_1d_test_0 = 4;

@@ -440,9 +440,9 @@ int main(void)
}
}

-float* conv1d_transpose_data_1 = new float[ggml_nelements(conv1d_transpose_res_1)];
+std::vector<float> conv1d_transpose_data_1(ggml_nelements(conv1d_transpose_res_1));

-ggml_backend_tensor_get(conv1d_transpose_res_1, conv1d_transpose_data_1, 0, ggml_nbytes(conv1d_transpose_res_1));
+ggml_backend_tensor_get(conv1d_transpose_res_1, conv1d_transpose_data_1.data(), 0, ggml_nbytes(conv1d_transpose_res_1));



@@ -462,9 +462,9 @@ int main(void)
}
}

-float* conv1d_transpose_data_2 = new float[ggml_nelements(conv1d_transpose_res_2)];
+std::vector<float> conv1d_transpose_data_2(ggml_nelements(conv1d_transpose_res_2));

-ggml_backend_tensor_get(conv1d_transpose_res_2, conv1d_transpose_data_2, 0, ggml_nbytes(conv1d_transpose_res_2));
+ggml_backend_tensor_get(conv1d_transpose_res_2, conv1d_transpose_data_2.data(), 0, ggml_nbytes(conv1d_transpose_res_2));


const int n_conv_transpose_1d_test_2 = 10;
@@ -481,9 +481,9 @@ int main(void)
}
}

-float* conv1d_transpose_data_3 = new float[ggml_nelements(conv1d_transpose_res_3)];
+std::vector<float> conv1d_transpose_data_3(ggml_nelements(conv1d_transpose_res_3));

-ggml_backend_tensor_get(conv1d_transpose_res_3, conv1d_transpose_data_3, 0, ggml_nbytes(conv1d_transpose_res_3));
+ggml_backend_tensor_get(conv1d_transpose_res_3, conv1d_transpose_data_3.data(), 0, ggml_nbytes(conv1d_transpose_res_3));


const int n_conv_transpose_1d_test_3 = 14;
@@ -501,9 +501,9 @@ int main(void)
}
}

-float* conv1d_transpose_data_4 = new float[ggml_nelements(conv1d_transpose_res_4)];
+std::vector<float> conv1d_transpose_data_4(ggml_nelements(conv1d_transpose_res_4));

-ggml_backend_tensor_get(conv1d_transpose_res_4, conv1d_transpose_data_4, 0, ggml_nbytes(conv1d_transpose_res_4));
+ggml_backend_tensor_get(conv1d_transpose_res_4, conv1d_transpose_data_4.data(), 0, ggml_nbytes(conv1d_transpose_res_4));


const int n_conv_transpose_1d_test_4 = 12;
@@ -522,9 +522,9 @@ int main(void)
}
}

-float* conv1d_transpose_data_5 = new float[ggml_nelements(conv1d_transpose_res_5)];
+std::vector<float> conv1d_transpose_data_5(ggml_nelements(conv1d_transpose_res_5));

-ggml_backend_tensor_get(conv1d_transpose_res_5, conv1d_transpose_data_5, 0, ggml_nbytes(conv1d_transpose_res_5));
+ggml_backend_tensor_get(conv1d_transpose_res_5, conv1d_transpose_data_5.data(), 0, ggml_nbytes(conv1d_transpose_res_5));


const int n_conv_transpose_1d_test_5 = 18;
@@ -543,9 +543,9 @@ int main(void)
}
}

-float* conv1d_transpose_data_6 = new float[ggml_nelements(conv1d_transpose_res_6)];
+std::vector<float> conv1d_transpose_data_6(ggml_nelements(conv1d_transpose_res_6));

-ggml_backend_tensor_get(conv1d_transpose_res_6, conv1d_transpose_data_6, 0, ggml_nbytes(conv1d_transpose_res_6));
+ggml_backend_tensor_get(conv1d_transpose_res_6, conv1d_transpose_data_6.data(), 0, ggml_nbytes(conv1d_transpose_res_6));


const int n_conv_transpose_1d_test_6 = 24;
@@ -565,9 +565,9 @@ int main(void)
}
}

-float* conv1d_transpose_data_7 = new float[ggml_nelements(conv1d_transpose_res_7)];
+std::vector<float> conv1d_transpose_data_7(ggml_nelements(conv1d_transpose_res_7));

-ggml_backend_tensor_get(conv1d_transpose_res_7, conv1d_transpose_data_7, 0, ggml_nbytes(conv1d_transpose_res_7));
+ggml_backend_tensor_get(conv1d_transpose_res_7, conv1d_transpose_data_7.data(), 0, ggml_nbytes(conv1d_transpose_res_7));


const int n_conv_transpose_1d_test_7 = 32*1584;
18 changes: 9 additions & 9 deletions tests/test-conv1d.cpp
@@ -40,17 +40,17 @@ void load_model(test_model & model, bool use_gpu = false) {
int IL = 8, N = 1;

// Initialize adata
-float * adata = new float[K * IC * OC];
+std::vector<float> adata(K * IC * OC);
for (int i = 0; i < K * IC * OC; i++) {
adata[i] = 4.5f;
}

// Convert adata to fp16 format
std::vector<ggml_fp16_t> hadata(K * IC * OC);
-ggml_fp32_to_fp16_row(adata, hadata.data(), K * IC * OC);
+ggml_fp32_to_fp16_row(adata.data(), hadata.data(), K * IC * OC);

// Initialize bdata
-float * bdata = new float[IL * IC * N];
+std::vector<float> bdata(IL * IC * N);
for (int i = 0; i < IL * IC * N; i++) {
bdata[i] = 2.5f;
}
@@ -129,9 +129,9 @@ void load_model(test_model & model, bool use_gpu = false) {
|| ggml_backend_is_metal(model.backend)
#endif
) {
-memcpy(model.b->data, bdata, ggml_nbytes(model.b));
+memcpy(model.b->data, bdata.data(), ggml_nbytes(model.b));
} else {
-ggml_backend_tensor_set(model.b, bdata, 0, ggml_nbytes(model.b));
+ggml_backend_tensor_set(model.b, bdata.data(), 0, ggml_nbytes(model.b));
}
}

@@ -226,11 +226,11 @@ int main(void)
}
}

-uint16_t* im2col_data = new uint16_t[ggml_nelements(im2col_res)];
-float* conv2d_data = new float[ggml_nelements(conv1d_res)];
+std::vector<uint16_t> im2col_data(ggml_nelements(im2col_res));
+std::vector<float> conv2d_data(ggml_nelements(conv1d_res));

-ggml_backend_tensor_get(im2col_res, im2col_data, 0, ggml_nbytes(im2col_res));
-ggml_backend_tensor_get(conv1d_res, conv2d_data, 0, ggml_nbytes(conv1d_res));
+ggml_backend_tensor_get(im2col_res, im2col_data.data(), 0, ggml_nbytes(im2col_res));
+ggml_backend_tensor_get(conv1d_res, conv2d_data.data(), 0, ggml_nbytes(conv1d_res));

const int n_conv1d_test = 80;
const int n_im2col_test = 240;
18 changes: 9 additions & 9 deletions tests/test-conv2d.cpp
@@ -40,17 +40,17 @@ void load_model(test_model & model, bool use_gpu = false) {
int IW = 8, IH = 6, N = 1;

// Initialize adata
-float * adata = new float[KW * KH * IC * OC];
+std::vector<float> adata(KW * KH * IC * OC);
for (int i = 0; i < KW * KH * IC * OC; i++) {
adata[i] = 2.5f;
}

// Convert adata to fp16 format
std::vector<ggml_fp16_t> hadata(KW * KH * IC * OC);
-ggml_fp32_to_fp16_row(adata, hadata.data(), KW * KH * IC * OC);
+ggml_fp32_to_fp16_row(adata.data(), hadata.data(), KW * KH * IC * OC);

// Initialize bdata
-float * bdata = new float[IW * IH * IC * N];
+std::vector<float> bdata(IW * IH * IC * N);
for (int i = 0; i < IW * IH * IC * N; i++) {
bdata[i] = 1.5f;
}
@@ -129,9 +129,9 @@ void load_model(test_model & model, bool use_gpu = false) {
|| ggml_backend_is_metal(model.backend)
#endif
) {
-memcpy(model.b->data, bdata, ggml_nbytes(model.b));
+memcpy(model.b->data, bdata.data(), ggml_nbytes(model.b));
} else {
-ggml_backend_tensor_set(model.b, bdata, 0, ggml_nbytes(model.b));
+ggml_backend_tensor_set(model.b, bdata.data(), 0, ggml_nbytes(model.b));
}
}

@@ -229,11 +229,11 @@ int main(void)
}
}

-uint16_t* im2col_data = new uint16_t[ggml_nelements(im2col_res)];
-float* conv2d_data = new float[ggml_nelements(conv2d_res)];
+std::vector<uint16_t> im2col_data(ggml_nelements(im2col_res));
+std::vector<float> conv2d_data(ggml_nelements(conv2d_res));

-ggml_backend_tensor_get(im2col_res, im2col_data, 0, ggml_nbytes(im2col_res));
-ggml_backend_tensor_get(conv2d_res, conv2d_data, 0, ggml_nbytes(conv2d_res));
+ggml_backend_tensor_get(im2col_res, im2col_data.data(), 0, ggml_nbytes(im2col_res));
+ggml_backend_tensor_get(conv2d_res, conv2d_data.data(), 0, ggml_nbytes(conv2d_res));

const int n_conv2d_test = 480;
const int n_im2col_test = 4320;
8 changes: 4 additions & 4 deletions tests/test-mul-mat.cpp
@@ -232,8 +232,8 @@ static void gemm_f16_out_f32(int m, int n, int k,
void perform_gemm_test(float* a, float* b, float* expected, int M, int N, int K) {
printf("\nPerforming gemm_f16_out_f32 test:\n");

-float* gemm_out = new float[M * N];
-gemm_f16_out_f32(M, N, K, a, b, gemm_out, 0, 1);
+std::vector<float> gemm_out(M * N);
+gemm_f16_out_f32(M, N, K, a, b, gemm_out.data(), 0, 1);

for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
@@ -318,9 +318,9 @@ int main(void)

struct ggml_tensor * result = compute(model, allocr);

-float* out_data = new float[ggml_nelements(result)];
+std::vector<float> out_data(ggml_nelements(result));

-ggml_backend_tensor_get(result, out_data, 0, ggml_nbytes(result));
+ggml_backend_tensor_get(result, out_data.data(), 0, ggml_nbytes(result));

printf("\nPerforming ggml_mul_mat test:\n");

