gpu: nvgpu: MISRA 4.5 fixes to round_up()

MISRA Advisory Directive 4.5 states that identifiers in the same
name space with overlapping visibility should be typographically
unambiguous.

The presence of both the roundup(x,y) and round_up(x,y) macros in
the posix utils.h header violates this directive.

These macros were added to keep nvgpu in sync with the Linux kernel variants.

However, there is a key distinction between how these two macros
work in the Linux kernel: roundup(x,y) can handle any y alignment,
while round_up(x,y) is intended to work only when y is a power of two.

Passing a non-power-of-two alignment to round_up(x,y) silently
yields an incorrect value.

Because all current uses of roundup(x,y) and round_up(x,y) in
nvgpu specify a y value that is a power of two, and the underlying
posix macro implementations assume as much, it is best to remove
roundup(x,y) from nvgpu altogether to avoid any confusion.

So this change converts all uses of roundup(x,y) to round_up(x,y).
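
For illustration (not part of the patch), here is a minimal standalone
sketch of the two Linux-kernel-style macros showing the silent failure
mode; the simplified definitions and sample values are assumptions for
demonstration only, not the exact nvgpu implementations:

    #include <stdio.h>

    /* Simplified, kernel-style definitions (demo only). */
    #define roundup(x, y)    ((((x) + (y) - 1UL) / (y)) * (y))        /* any y */
    #define round_mask(x, y) ((y) - 1UL)
    #define round_up(x, y)   ((((x) - 1UL) | round_mask(x, y)) + 1UL) /* y must be 2^n */

    int main(void)
    {
            /* Power-of-two alignment: both macros agree. */
            printf("%lu %lu\n", roundup(10UL, 8UL), round_up(10UL, 8UL)); /* 16 16 */
            /* Non-power-of-two alignment: round_up() is silently wrong. */
            printf("%lu %lu\n", roundup(10UL, 6UL), round_up(10UL, 6UL)); /* 12 14 */
            return 0;
    }

The wrong answer comes from round_mask(): for y = 6 the mask is 5
(0b101), which is not a contiguous low-bit mask, so OR-ing it in does
not align the value; 12 would be correct, but 14 is produced.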

Jira NVGPU-3178

Change-Id: I0ee974d3e088fa704e251a38f6b7ada5a7600aec
Signed-off-by: Scott Long <scottl@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2271385
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Scott Long on 2019-12-31 10:30:42 -08:00
Committed by Alex Waterman
parent 3f65316312
commit ae44d384f3
11 changed files with 28 additions and 38 deletions

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -229,7 +229,7 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 			sim_escape_read_hdr_size());
 	*sim_msg_param(g, 0) = index;
 	*sim_msg_param(g, 4) = sizeof(u32);
-	data_offset = roundup(0xc + pathlen + 1, sizeof(u32));
+	data_offset = round_up(0xc + pathlen + 1, sizeof(u32));
 	*sim_msg_param(g, 8) = data_offset;
 	strcpy((char *)sim_msg_param(g, 0xc), path);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -195,7 +195,7 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 			sim_escape_read_hdr_size());
 	*sim_msg_param(g, 0) = index;
 	*sim_msg_param(g, 4) = sizeof(u32);
-	data_offset = roundup(pathlen + 1, sizeof(u32));
+	data_offset = round_up(pathlen + 1, sizeof(u32));
 	*sim_msg_param(g, 8) = data_offset + 0xc;
 	strcpy((char *)sim_msg_param(g, 0xc), path);

@@ -1,7 +1,7 @@
 /*
  * GM20B CBC
  *
- * Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020 NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -85,7 +85,7 @@ int gm20b_cbc_alloc_comptags(struct gk20a *g, struct nvgpu_cbc *cbc)
 			ltc_ltcs_ltss_cbc_base_alignment_shift_v();
 	/* must be a multiple of 64KB */
-	compbit_backing_size = roundup(compbit_backing_size,
+	compbit_backing_size = round_up(compbit_backing_size,
 					U32(64) * U32(1024));
 	max_comptag_lines =

@@ -1,7 +1,7 @@
 /*
  * GP10B CBC
  *
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -78,9 +78,9 @@ int gp10b_cbc_alloc_comptags(struct gk20a *g, struct nvgpu_cbc *cbc)
 	}
 	compbit_backing_size =
-		roundup(max_comptag_lines * gobs_per_comptagline_per_slice,
+		round_up(max_comptag_lines * gobs_per_comptagline_per_slice,
 			nvgpu_ltc_get_cacheline_size(g));
-	compbit_backing_size = roundup(
+	compbit_backing_size = round_up(
 		compbit_backing_size * nvgpu_ltc_get_slices_per_ltc(g) *
 		nvgpu_ltc_get_ltc_count(g),
 		g->ops.fb.compressible_page_size(g));
@@ -91,7 +91,7 @@ int gp10b_cbc_alloc_comptags(struct gk20a *g, struct nvgpu_cbc *cbc)
 			ltc_ltcs_ltss_cbc_base_alignment_shift_v();
 	/* must be a multiple of 64KB */
-	compbit_backing_size = roundup(compbit_backing_size,
+	compbit_backing_size = round_up(compbit_backing_size,
 					U32(64) * U32(1024));
 	nvgpu_log_info(g, "compbit backing store size : %d",

@@ -1,7 +1,7 @@
 /*
  * TU104 CBC
  *
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -88,7 +88,7 @@ int tu104_cbc_alloc_comptags(struct gk20a *g, struct nvgpu_cbc *cbc)
 	ctags_per_cacheline = nvgpu_ltc_get_cacheline_size(g) / ctags_size;
 	compbit_backing_size =
-		roundup(max_comptag_lines * ctags_size,
+		round_up(max_comptag_lines * ctags_size,
 			nvgpu_ltc_get_cacheline_size(g));
 	compbit_backing_size =
 		compbit_backing_size * nvgpu_ltc_get_slices_per_ltc(g) *
@@ -99,7 +99,7 @@ int tu104_cbc_alloc_comptags(struct gk20a *g, struct nvgpu_cbc *cbc)
 	compbit_backing_size += amap_swizzle_rounding;
 	/* must be a multiple of 64KB */
-	compbit_backing_size = roundup(compbit_backing_size,
+	compbit_backing_size = round_up(compbit_backing_size,
 					U32(64) * U32(1024));
 	err = nvgpu_cbc_alloc(g, compbit_backing_size, true);

@@ -1,7 +1,7 @@
 /*
  * GV11B FB
  *
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -65,7 +65,7 @@ void gv11b_fb_cbc_configure(struct gk20a *g, struct nvgpu_cbc *cbc)
 				&cbc->compbit_store.mem);
 	}
 	/* must be aligned to 64 KB */
-	compbit_store_iova = roundup(compbit_store_iova, (u64)SZ_64K);
+	compbit_store_iova = round_up(compbit_store_iova, (u64)SZ_64K);
 	compbit_base_post_divide64 = compbit_store_iova >>
 		fb_mmu_cbc_base_address_alignment_shift_v();

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -119,7 +119,7 @@ int gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct nvgpu_tsg *tsg)
 	buffer_size = nvgpu_safe_add_u32(nvgpu_safe_mult_u32((9U + 1U + 3U),
 			g->ops.ce.get_num_pce(g)), 2U);
 	buffer_size = nvgpu_safe_mult_u32((27U * 5U), buffer_size);
-	buffer_size = roundup(buffer_size, page_size);
+	buffer_size = round_up(buffer_size, page_size);
 	nvgpu_log_info(g, "method buffer size in bytes %d", buffer_size);
 	tsg->eng_method_buffers = nvgpu_kzalloc(g,

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -190,22 +190,12 @@
  * @brief Round up the value of its argument \a x.
  *
  * @param x Value to be rounded.
- * @param y Value to be used to round up x.
+ * @param y Value to be used to round up x. Must be power-of-two.
  *
  * @return Rounded up value of \a x.
  */
 #define round_up(x, y)	((((x) - 1U) | round_mask(x, y)) + 1U)
-/**
- * @brief Wrapper define for #round_up.
- *
- * @param x Value to be rounded.
- * @param y Value to be used to round up x.
- *
- * @return Rounded up value of \a x.
- */
-#define roundup(x, y)	round_up(x, y)
 /**
  * @brief Round down the value of its argument \a x.
  *
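
(Since utils.h now exposes only the power-of-two variant, a debug-time
guard could make the constraint explicit. The wrapper below is a
hypothetical sketch for illustration only; it is not part of this
change or of the nvgpu API.)

    #include <assert.h>

    /* Hypothetical wrapper (illustration only): assert that the
     * alignment really is a power of two before using the mask trick. */
    static inline unsigned long round_up_dbg(unsigned long x, unsigned long y)
    {
            assert(y != 0UL && (y & (y - 1UL)) == 0UL); /* power-of-two check */
            return ((x - 1UL) | (y - 1UL)) + 1UL;
    }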

@@ -1,7 +1,7 @@
 /*
  * Color decompression engine support
  *
- * Copyright (c) 2014-2019, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1549,10 +1549,10 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
 	/* Compute per launch parameters */
 	const int xtiles = (width + 7) >> 3;
 	const int ytiles = (height + 7) >> 3;
-	const int gridw_h = roundup(xtiles, xalign) / xalign;
-	const int gridh_h = roundup(ytiles, yalign) / yalign;
-	const int gridw_v = roundup(ytiles, xalign) / xalign;
-	const int gridh_v = roundup(xtiles, yalign) / yalign;
+	const int gridw_h = round_up(xtiles, xalign) / xalign;
+	const int gridh_h = round_up(ytiles, yalign) / yalign;
+	const int gridw_v = round_up(ytiles, xalign) / xalign;
+	const int gridh_v = round_up(xtiles, yalign) / yalign;
 	const int xblocks = (xtiles + 1) >> 1;
 	const int voffset = compbits_voffset - compbits_hoffset;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -201,7 +201,7 @@ int nvgpu_gr_fecs_trace_ring_alloc(struct gk20a *g,
 {
 	struct nvgpu_ctxsw_ring_header *hdr;

-	*size = roundup(*size, PAGE_SIZE);
+	*size = round_up(*size, PAGE_SIZE);
 	hdr = vmalloc_user(*size);
 	if (!hdr)
 		return -ENOMEM;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -604,7 +604,7 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
 		return 0;

 	sched->g = g;
-	sched->bitmap_size = roundup(f->num_channels, 64) / 8;
+	sched->bitmap_size = round_up(f->num_channels, 64) / 8;
 	sched->status = 0;

 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu",