gpu: nvgpu: fixing MISRA violations

- MISRA Directive 4.7
  The function "nvgpu_tsg_unbind_channel(tsg, ch, true)" returns error
  information, but it was being called without testing that error
  information.

- MISRA Rule 10.3
  Implicit conversion from essential type "unsigned 64-bit int" to different
  or narrower essential type "unsigned 32-bit int"

- MISRA Rule 5.7
  A tag name shall be a unique identifier
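
  The sketch below illustrates the general shape of each of these fix
  patterns. It is a self-contained, standalone example using hypothetical
  names (do_unbind, rule_10_3_fix, struct unbind_stats); none of it is
  taken from the nvgpu patch itself.

  /*
   * Illustrative only -- hypothetical names, not code from this change.
   */
  #include <stdint.h>
  #include <stdio.h>

  /* Stand-in for a call whose return value carries error information. */
  static int do_unbind(int force)
  {
          return force ? 0 : -1;
  }

  /* MISRA Directive 4.7: error information returned by a function shall be
   * tested. The violation is calling do_unbind(1) and silently dropping the
   * result; the fix captures and checks it. */
  static void dir_4_7_fix(void)
  {
          int err = do_unbind(1);

          if (err != 0) {
                  printf("unbind failed: %d\n", err);
          }
  }

  /* MISRA Rule 10.3: no implicit assignment to a narrower essential type.
   * The violation is "uint32_t lo = wide;" (implicit u64 -> u32); the fix
   * makes the narrowing explicit (real code would also range-check). */
  static uint32_t rule_10_3_fix(uint64_t wide)
  {
          return (uint32_t)(wide & 0xFFFFFFFFULL);
  }

  /* MISRA Rule 5.7: a tag name shall be a unique identifier. If two
   * translation units both declared "struct stats", one tag is renamed so
   * every tag in the program is distinct. */
  struct unbind_stats {           /* was: struct stats */
          uint32_t attempts;
          uint32_t failures;
  };

  int main(void)
  {
          dir_4_7_fix();
          printf("narrowed: %u\n", (unsigned int)rule_10_3_fix(0x1FFFFFFFFULL));
          return 0;
  }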

JIRA NVGPU-5955

Change-Id: I109e0c01848c76a0947848e91cc6bb17d4cf7d24
Signed-off-by: srajum <srajum@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2572776
(cherry picked from commit 073daafe8a11e86806be966711271be51d99c18e)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2678681
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: srajum
Date: 2021-08-08 23:08:34 +05:30
Committed by: mobile promotions
Commit: 8381647662 (parent: 069fe05dca)

15 changed files with 30 additions and 50 deletions

View File

@@ -439,8 +439,6 @@ done:
#define F_CHANNEL_CLOSE_AS_BOUND BIT(14)
#define F_CHANNEL_CLOSE_LAST BIT(15)
-/* nvgpu_tsg_force_unbind_channel always return 0 */
static const char *f_channel_close[] = {
"already_freed",
"force",
@@ -485,7 +483,6 @@ static bool channel_close_pruned(u32 branches, u32 final)
return true;
}
-/* TODO: nvgpu_tsg_force_unbind_channel always returns 0 */
branches &= ~F_CHANNEL_CLOSE_TSG_UNBIND_FAIL;
@@ -1227,7 +1224,7 @@ int test_channel_enable_disable_tsg(struct unit_module *m,
subtest_setup(branches);
-err = nvgpu_tsg_force_unbind_channel(tsg, ch);
+err = nvgpu_tsg_unbind_channel(tsg, ch, true);
unit_assert(err == 0, goto done);
err = nvgpu_channel_enable_tsg(g, ch);
@@ -1648,7 +1645,7 @@ done:
f_channel_suspend_resume));
}
if (ch != NULL) {
-nvgpu_tsg_force_unbind_channel(tsg, ch);
+nvgpu_tsg_unbind_channel(tsg, ch, true);
nvgpu_channel_close(ch);
}
if (tsg != NULL) {
@@ -1747,7 +1744,7 @@ done:
branches_str(branches, f_channel_debug_dump));
}
if (ch != NULL) {
-nvgpu_tsg_force_unbind_channel(tsg, ch);
+nvgpu_tsg_unbind_channel(tsg, ch, true);
nvgpu_channel_close(ch);
}
if (tsg != NULL) {
@@ -2091,7 +2088,7 @@ int test_channel_abort_cleanup(struct unit_module *m, struct gk20a *g,
err = nvgpu_tsg_bind_channel(tsg, ch);
unit_assert(err == 0, goto done);
-err = nvgpu_tsg_force_unbind_channel(tsg, ch);
+err = nvgpu_tsg_unbind_channel(tsg, ch, true);
unit_assert(err == 0, goto done);
nvgpu_channel_close(ch);

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -296,7 +296,7 @@ done:
}
g->ops.fifo.is_preempt_pending =
stub_fifo_is_preempt_pending_pass;
-err = nvgpu_tsg_force_unbind_channel(tsg, ch);
+err = nvgpu_tsg_unbind_channel(tsg, ch, true);
if (err != 0) {
unit_err(m, "Cannot unbind channel\n");
}
@@ -495,7 +495,7 @@ done:
}
g->ops.fifo.is_preempt_pending =
stub_fifo_is_preempt_pending_pass;
-err = nvgpu_tsg_force_unbind_channel(tsg, ch);
+err = nvgpu_tsg_unbind_channel(tsg, ch, true);
if (err != 0) {
unit_err(m, "Cannot unbind channel\n");
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -379,7 +379,7 @@ int test_tsg_bind_channel(struct unit_module *m,
goto done);
unit_assert(nvgpu_tsg_from_ch(ch) == tsg, goto done);
-err = nvgpu_tsg_force_unbind_channel(tsg, ch);
+err = nvgpu_tsg_unbind_channel(tsg, ch, true);
unit_assert(err == 0, goto done);
unit_assert(ch->tsgid == NVGPU_INVALID_TSG_ID,
goto done);
@@ -564,7 +564,7 @@ int test_tsg_unbind_channel(struct unit_module *m,
branches & F_TSG_UNBIND_CHANNEL_ABORT_CLEAN_UP_NULL ?
NULL : gops.channel.abort_clean_up;
-err = nvgpu_tsg_force_unbind_channel(tsg, chA);
+err = nvgpu_tsg_unbind_channel(tsg, chA, true);
if (branches & fail) {
/* check that TSG has been torn down */
@@ -986,7 +986,7 @@ done:
}
if (ch != NULL) {
-nvgpu_tsg_force_unbind_channel(tsg, ch);
+nvgpu_tsg_unbind_channel(tsg, ch, true);
nvgpu_channel_close(ch);
}
if (tsg != NULL) {
@@ -1059,7 +1059,7 @@ int test_tsg_unbind_channel_check_ctx_reload(struct unit_module *m,
if ((branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_SET) &&
(branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_CHID_MATCH)) {
-nvgpu_tsg_force_unbind_channel(tsg, chB);
+nvgpu_tsg_unbind_channel(tsg, chB, true);
unit_assert(stub[0].chid == chB->chid, goto done);
}
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -91,7 +91,7 @@ int test_tsg_open(struct unit_module *m,
* - Allocate channel with nvgpu_channel_open_new.
* - Check that nvgpu_tsg_bind_channel returns 0.
* - Check that TSG's list of channel is not empty.
-* - Unbind channel with nvgpu_tsg_force_unbind_channel.
+* - Unbind channel with nvgpu_tsg_unbind_channel.
* - Check that ch->tsgid is now invalid.
* - Check that tsg can be retrieved from ch using nvgpu_tsg_from_ch.
* - Check TSG bind failure cases:
@@ -121,7 +121,7 @@ int test_tsg_bind_channel(struct unit_module *m,
*
* Test Type: Feature
*
-* Targets: nvgpu_tsg_force_unbind_channel
+* Targets: nvgpu_tsg_unbind_channel
*
* Input: test_fifo_init_support() run for this GPU
*

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -337,7 +337,7 @@ static int gr_test_intr_allocate_ch_tsg(struct unit_module *m,
ch->notifier_wq.initialized = notify_init;
tsg_unbind:
-err = nvgpu_tsg_force_unbind_channel(tsg, ch);
+err = nvgpu_tsg_unbind_channel(tsg, ch, true);
if (err != 0) {
unit_err(m, "failed tsg channel unbind\n");
}

View File

@@ -154,7 +154,7 @@ static int gr_test_setup_unbind_tsg(struct unit_module *m, struct gk20a *g)
goto unbind_tsg;
}
-err = nvgpu_tsg_force_unbind_channel(gr_setup_tsg, gr_setup_ch);
+err = nvgpu_tsg_unbind_channel(gr_setup_tsg, gr_setup_ch, true);
if (err != 0) {
unit_err(m, "failed tsg channel unbind\n");
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -161,7 +161,7 @@ int test_gr_setup_set_preemption_mode(struct unit_module *m,
* must have been executed successfully.
*
* Steps:
-* - Call nvgpu_tsg_force_unbind_channel.
+* - Call nvgpu_tsg_unbind_channel.
* - Call nvgpu_channel_close.
* - Call nvgpu_tsg_release.
*

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -163,7 +163,7 @@ clear_tsg:
int test_rc_deinit(struct unit_module *m, struct gk20a *g, void *args)
{
struct nvgpu_posix_channel *posix_channel = ch->os_priv;
-int ret = nvgpu_tsg_force_unbind_channel(tsg, ch);
+int ret = nvgpu_tsg_unbind_channel(tsg, ch, true);
if (ret != 0) {
ret = UNIT_FAIL;
unit_err(m , "channel already unbound");