gpu: nvgpu: remove round_up usage in safety build

- In the function gv11b_tsg_init_eng_method_buffers(), PAGE_ALIGN can be
  used instead of the round_up macro.
- In the function nvgpu_posix_find_next_bit(), rounding up 'start' does
  not serve any purpose, so the round_up call is removed.

JIRA NVGPU-7057

Change-Id: I4a3a21e95a0f3aa38f7007de1f6959f1d878e511
Signed-off-by: shashank singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2614326
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2672107
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
shashank singh
2021-10-21 14:29:49 +05:30
committed by mobile promotions
parent 6c46173be3
commit 29019dff6e
4 changed files with 8 additions and 10 deletions

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -111,7 +111,6 @@ int gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct nvgpu_tsg *tsg)
int err = 0; int err = 0;
int i; int i;
unsigned int runque, buffer_size; unsigned int runque, buffer_size;
u32 page_size = U32(NVGPU_CPU_PAGE_SIZE);
unsigned int num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); unsigned int num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
if (tsg->eng_method_buffers != NULL) { if (tsg->eng_method_buffers != NULL) {
@@ -122,7 +121,7 @@ int gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct nvgpu_tsg *tsg)
buffer_size = nvgpu_safe_add_u32(nvgpu_safe_mult_u32((9U + 1U + 3U), buffer_size = nvgpu_safe_add_u32(nvgpu_safe_mult_u32((9U + 1U + 3U),
g->ops.ce.get_num_pce(g)), 2U); g->ops.ce.get_num_pce(g)), 2U);
buffer_size = nvgpu_safe_mult_u32((27U * 5U), buffer_size); buffer_size = nvgpu_safe_mult_u32((27U * 5U), buffer_size);
buffer_size = round_up(buffer_size, page_size); buffer_size = PAGE_ALIGN(buffer_size);
nvgpu_log_info(g, "method buffer size in bytes %d", buffer_size); nvgpu_log_info(g, "method buffer size in bytes %d", buffer_size);
tsg->eng_method_buffers = nvgpu_kzalloc(g, tsg->eng_method_buffers = nvgpu_kzalloc(g,

View File

@@ -216,6 +216,7 @@
*/ */
#define round_mask(x, y) ((__typeof__(x))((y) - 1U)) #define round_mask(x, y) ((__typeof__(x))((y) - 1U))
#ifdef CONFIG_NVGPU_NON_FUSA
/** /**
* @brief Round up the value of its argument \a x. * @brief Round up the value of its argument \a x.
* *
@@ -229,6 +230,7 @@
* @return Rounded up value of \a x. * @return Rounded up value of \a x.
*/ */
#define round_up(x, y) ((((x) - 1U) | round_mask(x, y)) + 1U) #define round_up(x, y) ((((x) - 1U) | round_mask(x, y)) + 1U)
#endif
/** /**
* @brief Round down the value of its argument \a x. * @brief Round down the value of its argument \a x.

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -111,8 +111,6 @@ static unsigned long nvgpu_posix_find_next_bit(const unsigned long *address,
idx = start / BITS_PER_LONG; idx = start / BITS_PER_LONG;
w = (base_addr[idx] ^ invert_mask) & start_mask; w = (base_addr[idx] ^ invert_mask) & start_mask;
start = round_up(start, BITS_PER_LONG);
idx_max = (n - 1UL) / BITS_PER_LONG; idx_max = (n - 1UL) / BITS_PER_LONG;
/* /*
@@ -125,8 +123,6 @@ static unsigned long nvgpu_posix_find_next_bit(const unsigned long *address,
return n; return n;
} }
start = nvgpu_safe_add_u64(start, BITS_PER_LONG);
w = base_addr[idx] ^ invert_mask; w = base_addr[idx] ^ invert_mask;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -460,7 +460,7 @@ int test_round_macros(struct unit_module *m,
"round_mask failure %d\n", result); "round_mask failure %d\n", result);
} }
} }
#ifdef CONFIG_NVGPU_NON_FUSA
result = ROUND_BY_VALUE; result = ROUND_BY_VALUE;
for (i = 0; i < ROUND_BY_VALUE; i++) { for (i = 0; i < ROUND_BY_VALUE; i++) {
test1 = (ROUND_DOWN_RESULT + 1U) + i; test1 = (ROUND_DOWN_RESULT + 1U) + i;
@@ -468,6 +468,7 @@ int test_round_macros(struct unit_module *m,
unit_return_fail(m, "round_up failure %d %d\n", test1, i); unit_return_fail(m, "round_up failure %d %d\n", test1, i);
} }
} }
#endif
result = ROUND_BY_VALUE; result = ROUND_BY_VALUE;
for (i = 0; i < ROUND_BY_VALUE; i++) { for (i = 0; i < ROUND_BY_VALUE; i++) {