gpu: nvgpu: remove dead code in gm20b_pbdma_acquire_val

Removed BUG_ON statements from gm20b_pbdma_acquire_val, as
the conditions could never be true. The only overflow that
can happen is in nvgpu_safe_mult_u64.

Compute the exponent by shifting the timeout (in units of 1024 ns)
until it fits into the mantissa. This removes the need to
compute the most significant bits, and allows using the hw
definitions for the mantissa and exponent max values.

Jira NVGPU-3694
Jira NVGPU-4673

Change-Id: Iaf4b5aaafe5b4e759d4e447f76f05f81e201a584
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2263650
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Thomas Fleury
2019-12-16 17:39:13 -05:00
committed by Alex Waterman
parent 2e03e88431
commit 5629bd900c

View File

@@ -238,8 +238,6 @@ void gm20b_pbdma_reset_method(struct gk20a *g, u32 pbdma_id,
u32 gm20b_pbdma_acquire_val(u64 timeout)
{
u32 val, exponent, mantissa;
unsigned int val_len;
u64 tmp;
val = pbdma_acquire_retry_man_2_f() |
pbdma_acquire_retry_exp_2_f();
@@ -251,35 +249,19 @@ u32 gm20b_pbdma_acquire_val(u64 timeout)
/* set acquire timeout to 80% of channel wdt, and convert to ns */
timeout = nvgpu_safe_mult_u64(timeout, (1000000UL * 80UL) / 100UL);
do_div(timeout, 1024U); /* in unit of 1024ns */
tmp = nvgpu_fls(timeout >> 32U);
NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 14_4), "Bug 2277532")
NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 15_6), "Bug 2277532")
BUG_ON(tmp > U64(U32_MAX));
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 14_4))
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
val_len = (u32)tmp + 32U;
if (val_len == 32U) {
val_len = nvgpu_safe_cast_u64_to_u32(nvgpu_fls(timeout));
exponent = 0;
while (timeout > pbdma_acquire_timeout_man_max_v() &&
(exponent <= pbdma_acquire_timeout_exp_max_v())) {
timeout >>= 1;
exponent++;
}
if (val_len > 16U + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */
if (exponent > pbdma_acquire_timeout_exp_max_v()) {
exponent = pbdma_acquire_timeout_exp_max_v();
mantissa = pbdma_acquire_timeout_man_max_v();
} else if (val_len > 16U) {
exponent = val_len - 16U;
NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 14_4), "Bug 2277532")
NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 15_6), "Bug 2277532")
BUG_ON((timeout >> exponent) > U64(U32_MAX));
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 14_4))
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
mantissa = (u32)(timeout >> exponent);
} else {
exponent = 0;
NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 14_4), "Bug 2277532")
NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 15_6), "Bug 2277532")
BUG_ON(timeout > U64(U32_MAX));
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 14_4))
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
mantissa = (u32)timeout;
mantissa = nvgpu_safe_cast_u64_to_u32(timeout);
}
val |= pbdma_acquire_timeout_exp_f(exponent) |