Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix CodeQL reports on crunch and crnlib #60

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions crnlib/crn_clusterizer.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ class clusterizer {
root.m_total_weight += weight;
root.m_vectors.push_back(i);

ttsum += v.dot(v) * weight;
ttsum += (double)v.dot(v) * (double)weight;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we just disable float/double precision warnings please? It simply doesn't matter. This is a lossy image compression program.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is CodeQL static analysis report, not a compiler warning.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I know that. It should be configurable.

}

root.m_variance = (float)(ttsum - (root.m_centroid.dot(root.m_centroid) / root.m_total_weight));
Expand Down Expand Up @@ -409,7 +409,7 @@ class clusterizer {
double sum = 0;

for (uint j = 0; j < N; j++)
sum += axis[j] * covar[i][j];
sum += static_cast<double>(axis[j]) * static_cast<double>(covar[i][j]);

x[i] = static_cast<float>(sum);

Expand Down Expand Up @@ -629,14 +629,14 @@ class clusterizer {
new_left_child += (v * (float)weight);
left_weight += weight;

left_ttsum += v.dot(v) * weight;
left_ttsum += (double)v.dot(v) * (double)weight;
} else {
m_right_children.push_back(parent_node.m_vectors[i]);

new_right_child += (v * (float)weight);
right_weight += weight;

right_ttsum += v.dot(v) * weight;
right_ttsum += (double)v.dot(v) * (double)weight;
}
}

Expand Down
6 changes: 3 additions & 3 deletions crnlib/crn_comp.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -632,7 +632,7 @@ static void remap_color_endpoints(uint16* remapping, const optimize_color_params
remaining[i].e = unpacked_endpoints[i];
}
crnlib::vector<uint16> chosen(n << 1);
uint remaining_count = n, chosen_front = n, chosen_back = chosen_front;
uint16 remaining_count = n, chosen_front = n, chosen_back = chosen_front;
chosen[chosen_front] = selected;
optimize_color_params::unpacked_endpoint front_e = remaining[selected].e, back_e = front_e;
bool front_updated = true, back_updated = true;
Expand Down Expand Up @@ -883,7 +883,7 @@ static void remap_alpha_endpoints(uint16* remapping, const optimize_alpha_params
const optimize_alpha_params::unpacked_endpoint& e_back = unpacked_endpoints[chosen.back()];
uint16 selected_index = 0;
uint64 best_value = 0, selected_similarity_front = 0, selected_similarity_back = 0;
for (uint16 i = 0; i < remaining.size(); i++) {
for (size_t i = 0; i < remaining.size(); i++) {
uint remaining_index = remaining[i];
const optimize_alpha_params::unpacked_endpoint& e_remaining = unpacked_endpoints[remaining_index];
uint error_front = math::square(e_remaining.low - e_front.low) + math::square(e_remaining.high - e_front.high);
Expand Down Expand Up @@ -912,7 +912,7 @@ static void remap_alpha_endpoints(uint16* remapping, const optimize_alpha_params
chosen.push_back(selected);
}
remaining.erase(remaining.begin() + selected_index);
for (uint16 i = 0; i < remaining.size(); i++)
for (size_t i = 0; i < remaining.size(); i++)
total_frequency[remaining[i]] += frequency[remaining[i]];
}
for (uint16 i = 0; i < n; i++)
Expand Down
6 changes: 3 additions & 3 deletions crnlib/crn_dxt1.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -151,9 +151,9 @@ void dxt1_endpoint_optimizer::compute_pca(vec3F& axis, const vec3F_array& norm_c
double cov[6] = {0, 0, 0, 0, 0, 0};
for (uint i = 0; i < norm_colors.size(); i++) {
const vec3F& v = norm_colors[i];
float r = v[0];
float g = v[1];
float b = v[2];
double r = (double)v[0];
double g = (double)v[1];
double b = (double)v[2];
if (m_unique_colors[i].m_weight > 1) {
const double weight = m_unique_colors[i].m_weight;
cov[0] += r * r * weight;
Expand Down
2 changes: 1 addition & 1 deletion crnlib/crn_dxt_fast.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -413,7 +413,7 @@ static bool refine_endpoints(uint n, const color_quad_u8* pBlock, uint& low16, u
for (uint i = 0; i < n; i++) {
const color_quad_u8& p = pBlock[i];

int e = v[pSelectors[i]] - p[axis];
uint64 e = v[pSelectors[i]] - p[axis];

axis_error += e * e;

Expand Down
6 changes: 3 additions & 3 deletions crnlib/crn_dxt_hc.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -374,10 +374,10 @@ void dxt_hc::determine_tiles_task(uint64 data, void*) {
}
}

for (uint8 c = m_has_color_blocks ? 0 : cAlpha0; c < cAlpha0 + m_num_alpha_blocks; c++) {
for (uint8 e = 0; e < 8; e++) {
for (uint c = m_has_color_blocks ? 0 : cAlpha0; c < cAlpha0 + m_num_alpha_blocks; c++) {
for (uint e = 0; e < 8; e++) {
total_error[c][e] = 0;
for (uint8 t = 0, s = e + 1; s; s >>= 1, t++)
for (uint t = 0, s = e + 1; s; s >>= 1, t++)
total_error[c][e] += tile_error[c][tiles[e][t]];
}
}
Expand Down
4 changes: 2 additions & 2 deletions crnlib/crn_image_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -570,9 +570,9 @@ bool resample_multithreaded(const image_u8& src, image_u8& dst, const resample_p
return false;

p.m_pSrc_pixels = src_samples.get_ptr();
p.m_src_pitch = src_width * resampler_comps * sizeof(float);
p.m_src_pitch = (size_t)src_width * (size_t)resampler_comps * sizeof(float);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Crunch has a maximum image dimension of 4096 so we don't ever need to worry about super-sized memory allocations.

p.m_pDst_pixels = dst_samples.get_ptr();
p.m_dst_pitch = dst_width * resampler_comps * sizeof(float);
p.m_dst_pitch = (size_t)dst_width * (size_t)resampler_comps * sizeof(float);

for (uint src_y = 0; src_y < src_height; src_y++) {
const color_quad_u8* pSrc = src.get_scanline(src_y);
Expand Down
4 changes: 2 additions & 2 deletions crnlib/crn_jpgd.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2431,7 +2431,7 @@ jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block
cb->block_len_x = block_len_x;
cb->block_len_y = block_len_y;
cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t);
cb->pData = (uint8*)alloc(cb->block_size * block_num_x * block_num_y, true);
cb->pData = (uint8*)alloc((size_t)cb->block_size * (size_t)block_num_x * (size_t)block_num_y, true);
return cb;
}

Expand Down Expand Up @@ -2860,7 +2860,7 @@ unsigned char* decompress_jpeg_image_from_stream(jpeg_decoder_stream* pStream, i

const int dst_bpl = image_width * req_comps;

uint8* pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height);
uint8* pImage_data = (uint8*)jpgd_malloc((size_t)dst_bpl * (size_t)image_height);
if (!pImage_data)
return NULL;

Expand Down
2 changes: 1 addition & 1 deletion crnlib/crn_jpge.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -576,7 +576,7 @@ bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels) {
m_image_bpl_mcu = m_image_x_mcu * m_num_components;
m_mcus_per_row = m_image_x_mcu / m_mcu_x;

if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL)
if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc((size_t)m_image_bpl_mcu * (size_t)m_mcu_y))) == NULL)
return false;
for (int i = 1; i < m_mcu_y; i++)
m_mcu_lines[i] = m_mcu_lines[i - 1] + m_image_bpl_mcu;
Expand Down
2 changes: 1 addition & 1 deletion crnlib/crn_mipmapped_texture.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2758,7 +2758,7 @@ bool mipmapped_texture::read_crn_from_memory(const void* pData, uint data_size,

if (!pDXT_image->init(
dxt_fmt, level_width, level_height,
num_blocks_x * num_blocks_y * (tex_info.m_bytes_per_block / sizeof(dxt_image::element)),
(size_t)num_blocks_x * (size_t)num_blocks_y * (tex_info.m_bytes_per_block / sizeof(dxt_image::element)),
reinterpret_cast<dxt_image::element*>(pFaces[f]), true)) {
crnlib_delete(pDXT_image);

Expand Down
2 changes: 1 addition & 1 deletion crnlib/crn_threaded_clusterizer.h
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,7 @@ class threaded_clusterizer {
double sum = 0;

for (uint j = 0; j < N; j++)
sum += axis[j] * covar[i][j];
sum += static_cast<double>(axis[j]) * static_cast<double>(covar[i][j]);

x[i] = static_cast<float>(sum);

Expand Down
8 changes: 4 additions & 4 deletions crnlib/crn_tree_clusterizer.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ class tree_clusterizer {
m_weightedVectors[i] = v * (float)weight;
root.m_centroid += m_weightedVectors[i];
root.m_total_weight += weight;
m_weightedDotProducts[i] = v.dot(v) * weight;
m_weightedDotProducts[i] = (double)v.dot(v) * (double)weight;
ttsum += m_weightedDotProducts[i];
}
root.m_variance = (float)(ttsum - (root.m_centroid.dot(root.m_centroid) / root.m_total_weight));
Expand Down Expand Up @@ -289,7 +289,7 @@ class tree_clusterizer {
double sum = 0;

for (uint j = 0; j < N; j++)
sum += axis[j] * covar[i][j];
sum += (double)axis[j] * (double)covar[i][j];

x[i] = (float)sum;

Expand Down Expand Up @@ -464,7 +464,7 @@ void split_vectors(VectorType (&vectors)[64], uint (&weights)[64], uint size, Ve
weightedVectors[i] = v * (float)weight;
centroid += weightedVectors[i];
total_weight += weight;
weightedDotProducts[i] = v.dot(v) * weight;
weightedDotProducts[i] = (double)v.dot(v) * (double)weight;
ttsum += weightedDotProducts[i];
}
float variance = (float)(ttsum - (centroid.dot(centroid) / total_weight));
Expand Down Expand Up @@ -520,7 +520,7 @@ void split_vectors(VectorType (&vectors)[64], uint (&weights)[64], uint size, Ve
for (uint i = 0; i < N; i++) {
double sum = 0;
for (uint j = 0; j < N; j++)
sum += axis[j] * covar[i][j];
sum += (double)axis[j] * (double)covar[i][j];
x[i] = (float)sum;
max_sum = i ? math::maximum(max_sum, sum) : sum;
}
Expand Down
6 changes: 3 additions & 3 deletions crnlib/crn_vec.h
Original file line number Diff line number Diff line change
Expand Up @@ -475,9 +475,9 @@ class vec : public helpers::rel_ops<vec<N, T> > {
}

inline double normalize(const vec* pDefaultVec = NULL) {
double n = m_s[0] * m_s[0];
double n = (double)m_s[0] * (double)m_s[0];
for (uint i = 1; i < N; i++)
n += m_s[i] * m_s[i];
n += (double)m_s[i] * (double)m_s[i];

if (n != 0)
*this *= static_cast<T>((1.0f / sqrt(n)));
Expand All @@ -489,7 +489,7 @@ class vec : public helpers::rel_ops<vec<N, T> > {
inline double normalize3(const vec* pDefaultVec = NULL) {
CRNLIB_ASSUME(N >= 3);

double n = m_s[0] * m_s[0] + m_s[1] * m_s[1] + m_s[2] * m_s[2];
double n = (double)m_s[0] * (double)m_s[0] + (double)m_s[1] * (double)m_s[1] + (double)m_s[2] * (double)m_s[2];

if (n != 0)
*this *= static_cast<T>((1.0f / sqrt(n)));
Expand Down
16 changes: 8 additions & 8 deletions crnlib/stb_image.h
Original file line number Diff line number Diff line change
Expand Up @@ -1817,7 +1817,7 @@ static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int r
if (req_comp == img_n) return data;
STBI_ASSERT(req_comp >= 1 && req_comp <= 4);

good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2);
good = (stbi__uint16 *) stbi__malloc((size_t)req_comp * x * y * 2);
if (good == NULL) {
STBI_FREE(data);
return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory");
Expand Down Expand Up @@ -4821,7 +4821,7 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r
stbi__create_png_alpha_expand8(dest, dest, x, img_n);
} else if (depth == 8) {
if (img_n == out_n)
memcpy(dest, cur, x*img_n);
memcpy(dest, cur, (size_t)x * (size_t)img_n);
else
stbi__create_png_alpha_expand8(dest, cur, x, img_n);
} else if (depth == 16) {
Expand Down Expand Up @@ -6201,7 +6201,7 @@ static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req
out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0);
ri->bits_per_channel = 16;
} else
out = (stbi_uc *) stbi__malloc(4 * w*h);
out = (stbi_uc *) stbi__malloc(4 * (size_t)w * (size_t)h);

if (!out) return stbi__errpuc("outofmem", "Out of memory");
pixelCount = w*h;
Expand Down Expand Up @@ -6524,7 +6524,7 @@ static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_c
// intermediate buffer is RGBA
result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0);
if (!result) return stbi__errpuc("outofmem", "Out of memory");
memset(result, 0xff, x*y*4);
memset(result, 0xff, (size_t)x * (size_t)y * 4);

if (!stbi__pic_load_core(s,x,y,comp, result)) {
STBI_FREE(result);
Expand Down Expand Up @@ -6833,11 +6833,11 @@ static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, i
}

// background is what out is after the undoing of the previous frame;
memcpy( g->background, g->out, 4 * g->w * g->h );
memcpy( g->background, g->out, 4 * (size_t)g->w * (size_t)g->h );
}

// clear my history;
memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame
memset( g->history, 0x00, (size_t)g->w * (size_t)g->h ); // pixels that were affected previous frame

for (;;) {
int tag = stbi__get8(s);
Expand Down Expand Up @@ -6991,7 +6991,7 @@ static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y,
stride = g.w * g.h * 4;

if (out) {
void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride );
void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, (size_t)layers * (size_t)stride );
if (!tmp)
return stbi__load_gif_main_outofmem(&g, out, delays);
else {
Expand All @@ -7007,7 +7007,7 @@ static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y,
delays_size = layers * sizeof(int);
}
} else {
out = (stbi_uc*)stbi__malloc( layers * stride );
out = (stbi_uc*)stbi__malloc( (size_t)layers * (size_t)stride );
if (!out)
return stbi__load_gif_main_outofmem(&g, out, delays);
out_size = layers * stride;
Expand Down
2 changes: 1 addition & 1 deletion crunch/crunch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -958,7 +958,7 @@ class crunch {
else {
console::info("CRN texture info:");

console::info("Width: %u, Height: %u, Levels: %u, Faces: %u\nBytes per block: %u, User0: 0x%08X, User1: 0x%08X, CRN Format: %u",
console::info("Width: %u, Height: %u, Levels: %u, Faces: %u\nBytes per block: %u, User0: 0x%08X, User1: 0x%08X, CRN Format: %l",
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This one doesn't quite fix the issue... apparently the crn_format enum, having both a -1 value and a 2^32 - 1 value, is a different size depending on the compiler. In MSVC it is 32 bits but in some others it must be 64. We could force it to be a specific type with the C++11 feature enum crn_format: uint32_t or whatever, instead of the old approach of trying to influence the type by adding fake values.

tex_info.m_width,
tex_info.m_height,
tex_info.m_levels,
Expand Down