gpu: nvgpu: MISRA integer fixes

Apply various MISRA integer-related fixes. Some fixes simply required
adding a "U" suffix to integer constants. Other fixes were more
complicated and required breaking up complex composite expressions into
multiple smaller expressions.
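
For illustration, the simpler pattern looks roughly like this (a hypothetical snippet, not taken from the diffs below; "count" is assumed to be a u32):

	/* before: the integer literal 0 is of essentially signed type */
	if (count == 0) {
		return -EINVAL;
	}

	/* after: the "U" suffix makes both operands of the comparison unsigned */
	if (count == 0U) {
		return -EINVAL;
	}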

JIRA NVGPU-3873

Change-Id: Id8a08a17d1cf9e20193bd3e4f2d4104774d81767
Signed-off-by: Adeel Raza <araza@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2262189
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Adeel Raza
Authored:     2019-12-13 15:03:13 -08:00
Committed by: Alex Waterman
Commit:       26af1c2270 (parent eb0b0c78d4)

4 changed files with 13 additions and 11 deletions


@@ -401,7 +401,7 @@ nvgpu_channel_sync_syncpt_create(struct nvgpu_channel *c, bool user_managed)
 			c->chid, syncpt_name);
 	}
 #endif
-	if (sp->id == 0) {
+	if (sp->id == 0U) {
 		nvgpu_kfree(c->g, sp);
 		nvgpu_err(c->g, "failed to get free syncpt");
 		return NULL;


@@ -35,18 +35,21 @@
 u64 gp10b_determine_L2_size_bytes(struct gk20a *g)
 {
-	u32 tmp;
+	u32 reg_val;
+	u32 slice_size;
+	u32 slices_per_l2;
 	u64 ret;
 
 	nvgpu_log_fn(g, " ");
 
-	tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r());
+	reg_val = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r());
+	slice_size = ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(reg_val);
+	slices_per_l2 = ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(reg_val);
 
 	ret = nvgpu_safe_mult_u64(g->ltc->ltc_count,
 		nvgpu_safe_mult_u64(
-			nvgpu_safe_mult_u64(
-				ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp), 1024U),
-			ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp)));
+			nvgpu_safe_mult_u64(U64(slice_size), 1024ULL),
+			U64(slices_per_l2)));
 
 	nvgpu_log(g, gpu_dbg_info, "L2 size: %llu\n", ret);
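
The reworked computation is arithmetically unchanged; the named u32 intermediates and the explicit U64()/ULL widening just make every operand passed to nvgpu_safe_mult_u64() an unambiguous u64. In plain C the result is roughly the following (a sketch with stand-in variable names, slice size in kilobytes):

	/* L2 size = ltc_count * slice_size_kb * 1024 * slices_per_l2 */
	u64 l2_bytes = (u64)ltc_count *
		       ((u64)slice_size_kb * 1024ULL) * (u64)slices_per_l2;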


@@ -38,7 +38,7 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
-	if (vm->syncpt_ro_map_gpu_va) {
+	if (vm->syncpt_ro_map_gpu_va != 0ULL) {
 		return 0;
 	}
@@ -47,7 +47,7 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
 			0, gk20a_mem_flag_read_only,
 			false, APERTURE_SYSMEM);
 
-	if (!vm->syncpt_ro_map_gpu_va) {
+	if (vm->syncpt_ro_map_gpu_va == 0ULL) {
 		nvgpu_err(g, "failed to ro map syncpt buffer");
 		return -ENOMEM;
 	}
@@ -88,7 +88,7 @@ int gv11b_syncpt_alloc_buf(struct nvgpu_channel *c,
 			g->syncpt_size, 0, gk20a_mem_flag_none,
 			false, APERTURE_SYSMEM);
 
-	if (!syncpt_buf->gpu_va) {
+	if (syncpt_buf->gpu_va == 0ULL) {
 		nvgpu_err(g, "failed to map syncpt buffer");
 		nvgpu_dma_free(g, syncpt_buf);
 		err = -ENOMEM;
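
These three hunks follow the same pattern: the GPU virtual addresses are u64 values, and MISRA does not allow a non-Boolean controlling expression in an if statement, so the pointer-style truth tests are replaced with explicit comparisons against 0ULL. A hypothetical sketch of the idiom:

	/* gpu_va holds the result of one of the mapping calls above (sketch) */
	if (gpu_va == 0ULL) {	/* rather than: if (!gpu_va) */
		return -ENOMEM;
	}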


@@ -60,8 +60,7 @@
  */
 static inline u32 u64_hi32(u64 n)
 {
-	return nvgpu_safe_cast_u64_to_u32(nvgpu_safe_cast_u64_to_u32(n >> 32)
-		& ~(u32)0);
+	return nvgpu_safe_cast_u64_to_u32(n >> 32);
 }
 
 /**
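
The simplified u64_hi32() behaves identically: for any u64 n, (n >> 32) always fits in 32 bits, so a single checked cast is enough, and the removed mask with ~(u32)0 (all bits set) changed nothing. A usage sketch with an assumed input value:

	u64 pa = 0x0000001234ABCDEFULL;
	u32 hi = u64_hi32(pa);	/* 0x00000012 */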