gpu: nvgpu: channel MISRA fix for Rule 21.2

Rename functions starting with '_' and '__'; identifiers that begin
with an underscore are reserved, which violates MISRA C:2012 Rule 21.2:
__gk20a_channel_kill -> nvgpu_channel_kill
_gk20a_channel_from_id -> nvgpu_channel_from_id__func
gk20a_channel_from_id -> nvgpu_channel_from_id
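
The "__func" suffix marks the variant that is only meant to be reached
through the nvgpu_channel_from_id() macro, which appends __func__ so the
lookup can record which function asked for the channel reference. A
minimal standalone sketch of the pattern (hypothetical names, not the
nvgpu code) that compiles with any C99 compiler:

  #include <stdio.h>

  struct channel { int chid; };

  /* Takes an extra 'caller' argument for reference debugging. */
  static struct channel *channel_from_id__func(int chid, const char *caller)
  {
          static struct channel ch;

          printf("channel %d looked up from %s()\n", chid, caller);
          ch.chid = chid;
          return &ch;
  }

  /* Public name: callers never pass __func__ by hand. */
  #define channel_from_id(chid) channel_from_id__func(chid, __func__)

  int main(void)
  {
          struct channel *ch = channel_from_id(3);

          return ch == NULL;
  }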

JIRA NVGPU-3388

Change-Id: I3b5f63bf214c5c5e49bc84ba8ef79bd49831c56e
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2114037
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Seema Khowala <seemaj@nvidia.com>, 2019-05-07 13:22:08 -07:00
Committed by: mobile promotions
Commit: 26d13b3b6b (parent: 842c42249d)
10 changed files with 31 additions and 29 deletions

@@ -625,8 +625,8 @@ void _gk20a_channel_put(struct nvgpu_channel *ch, const char *caller)
 	WARN_ON(nvgpu_atomic_read(&ch->ref_count) == 0 && ch->referenceable);
 }
 
-struct nvgpu_channel *_gk20a_channel_from_id(struct gk20a *g, u32 chid,
-		const char *caller)
+struct nvgpu_channel *nvgpu_channel_from_id__func(struct gk20a *g,
+		u32 chid, const char *caller)
 {
 	if (chid == NVGPU_INVALID_CHANNEL_ID) {
 		return NULL;
@@ -645,7 +645,7 @@ void gk20a_channel_close(struct nvgpu_channel *ch)
  * driver is otherwise dying. Ref counts and the like are ignored by this
  * version of the cleanup.
  */
-void __gk20a_channel_kill(struct nvgpu_channel *ch)
+void nvgpu_channel_kill(struct nvgpu_channel *ch)
 {
 	gk20a_free_channel(ch, true);
 }
@@ -1594,7 +1594,7 @@ void nvgpu_channel_wdt_restart_all_channels(struct gk20a *g)
 	u32 chid;
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch != NULL) {
 			if (!gk20a_channel_check_unserviceable(ch)) {
@@ -1693,7 +1693,7 @@ static void nvgpu_channel_poll_wdt(struct gk20a *g)
 	for (chid = 0; chid < g->fifo.num_channels; chid++) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch != NULL) {
 			if (!gk20a_channel_check_unserviceable(ch)) {
@@ -2115,7 +2115,7 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
 	nvgpu_rwsem_down_write(&g->deterministic_busy);
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch == NULL) {
 			continue;
@@ -2153,7 +2153,7 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
 	u32 chid;
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch == NULL) {
 			continue;
@@ -2207,7 +2207,7 @@ void nvgpu_channel_cleanup_sw(struct gk20a *g)
 		 * from gk20a_free_channel() complaining about multiple closes.
 		 */
 		if (ch->referenceable) {
-			__gk20a_channel_kill(ch);
+			nvgpu_channel_kill(ch);
 		}
 
 		nvgpu_channel_destroy(g, ch);
@@ -2356,7 +2356,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch == NULL) {
 			continue;
@@ -2387,7 +2387,8 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 	nvgpu_runlist_reload_ids(g, active_runlist_ids, false);
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+		struct nvgpu_channel *ch =
+			nvgpu_channel_from_id(g, chid);
 
 		if (ch != NULL) {
 			if (gk20a_channel_check_unserviceable(ch)) {
@@ -2416,7 +2417,7 @@ void nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch == NULL) {
 			continue;
@@ -2501,7 +2502,7 @@ struct nvgpu_channel *nvgpu_channel_refch_from_inst_ptr(struct gk20a *g,
 		struct nvgpu_channel *ch;
 		u64 ch_inst_ptr;
 
-		ch = gk20a_channel_from_id(g, ci);
+		ch = nvgpu_channel_from_id(g, ci);
 		/* only alive channels are searched */
 		if (ch == NULL) {
 			continue;
@@ -2554,7 +2555,7 @@ void nvgpu_channel_debug_dump_all(struct gk20a *g,
 	}
 
 	for (chid = 0U; chid < f->num_channels; chid++) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch != NULL) {
 			struct nvgpu_channel_dump_info *info;

@@ -316,7 +316,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
 	}
 
 	if (pbdma_chid != NVGPU_INVALID_CHANNEL_ID) {
-		ch = gk20a_channel_from_id(g, pbdma_chid);
+		ch = nvgpu_channel_from_id(g, pbdma_chid);
 		if (ch != NULL) {
 			err = g->ops.fifo.preempt_channel(g, ch);
 			gk20a_channel_put(ch);
@@ -338,7 +338,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
 	}
 
 	if (engine_chid != NVGPU_INVALID_ENG_ID && engine_chid != pbdma_chid) {
-		ch = gk20a_channel_from_id(g, engine_chid);
+		ch = nvgpu_channel_from_id(g, engine_chid);
 		if (ch != NULL) {
 			err = g->ops.fifo.preempt_channel(g, ch);
 			gk20a_channel_put(ch);

@@ -255,14 +255,14 @@ struct nvgpu_channel *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
 		if (intr->chid_tlb[i].curr_ctx == curr_ctx) {
 			chid = intr->chid_tlb[i].chid;
 			tsgid = intr->chid_tlb[i].tsgid;
-			ret_ch = gk20a_channel_from_id(g, chid);
+			ret_ch = nvgpu_channel_from_id(g, chid);
 			goto unlock;
 		}
 	}
 
 	/* slow path */
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch == NULL) {
 			continue;

@@ -91,7 +91,7 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, struct nvgpu_fifo *f,
 		nvgpu_rc_tsg_and_related_engines(g, tsg, true,
 				RC_TYPE_PBDMA_FAULT);
 	} else if (pbdma_status.id_type == PBDMA_STATUS_ID_TYPE_CHID) {
-		struct nvgpu_channel *ch = gk20a_channel_from_id(g, id);
+		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, id);
 		struct nvgpu_tsg *tsg;
 
 		if (ch == NULL) {
 			nvgpu_err(g, "channel is not referenceable");

@@ -445,7 +445,7 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
 
 int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 {
-	struct nvgpu_channel *ch = gk20a_channel_from_id(g, info->chid);
+	struct nvgpu_channel *ch = nvgpu_channel_from_id(g, info->chid);
 
 	nvgpu_log_fn(g, " ");
 
 	if (!ch) {
@@ -521,7 +521,7 @@ void vgpu_handle_channel_event(struct gk20a *g,
 
 void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
 {
-	struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+	struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 	if (ch == NULL) {
 		nvgpu_err(g, "invalid channel id %d", chid);

@@ -759,7 +759,7 @@ int vgpu_init_gr_support(struct gk20a *g)
 
 int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
 {
-	struct nvgpu_channel *ch = gk20a_channel_from_id(g, info->chid);
+	struct nvgpu_channel *ch = nvgpu_channel_from_id(g, info->chid);
 
 	nvgpu_log_fn(g, " ");

@@ -125,7 +125,7 @@ static void vgpu_handle_channel_event(struct gk20a *g,
 
 static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
 {
-	struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
+	struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 	if (ch == NULL) {
 		nvgpu_err(g, "invalid channel id %d", chid);

@@ -109,7 +109,7 @@ bool gk20a_fifo_handle_ctxsw_timeout(struct gk20a *g)
 		if (is_tsg) {
 			tsg = nvgpu_tsg_check_and_get_from_id(g, id);
 		} else {
-			ch = gk20a_channel_from_id(g, id);
+			ch = nvgpu_channel_from_id(g, id);
 			if (ch != NULL) {
 				tsg = tsg_gk20a_from_ch(ch);
 				gk20a_channel_put(ch);

@@ -81,7 +81,7 @@ static int gp10b_gr_intr_get_cilp_preempt_pending_chid(struct gk20a *g,
 		return ret;
 	}
 
-	ch = gk20a_channel_from_id(g, chid);
+	ch = nvgpu_channel_from_id(g, chid);
 	if (ch == NULL) {
 		return ret;
 	}
@@ -140,7 +140,7 @@ int gp10b_gr_intr_handle_fecs_error(struct gk20a *g,
 		goto clean_up;
 	}
 
-	ch = gk20a_channel_from_id(g, chid);
+	ch = nvgpu_channel_from_id(g, chid);
 	if (ch == NULL) {
 		goto clean_up;
 	}

@@ -412,7 +412,7 @@ void nvgpu_channel_cleanup_sw(struct gk20a *g);
 
 /* must be inside gk20a_busy()..gk20a_idle() */
 void gk20a_channel_close(struct nvgpu_channel *ch);
-void __gk20a_channel_kill(struct nvgpu_channel *ch);
+void nvgpu_channel_kill(struct nvgpu_channel *ch);
 void nvgpu_channel_set_ctx_mmu_error(struct gk20a *g,
 		struct nvgpu_channel *ch);
@@ -456,9 +456,10 @@ void _gk20a_channel_put(struct nvgpu_channel *ch, const char *caller);
 #define gk20a_channel_put(ch) _gk20a_channel_put(ch, __func__)
 
 /* returns NULL if could not take a ref to the channel */
-struct nvgpu_channel *__must_check _gk20a_channel_from_id(struct gk20a *g,
-		u32 chid, const char *caller);
-#define gk20a_channel_from_id(g, chid) _gk20a_channel_from_id(g, chid, __func__)
+struct nvgpu_channel *__must_check nvgpu_channel_from_id__func(
+		struct gk20a *g, u32 chid, const char *caller);
+#define nvgpu_channel_from_id(g, chid) \
+	nvgpu_channel_from_id__func(g, chid, __func__)
 
 int gk20a_wait_channel_idle(struct nvgpu_channel *ch);
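
For context, the renamed helpers pair up in the same get/use/put pattern
seen in the loops above: nvgpu_channel_from_id() returns NULL unless it
could take a reference, and every non-NULL result must be balanced with
gk20a_channel_put(). A sketch of a typical caller, assuming the nvgpu
driver context (not compilable standalone):

  static void visit_all_channels(struct gk20a *g)
  {
          struct nvgpu_fifo *f = &g->fifo;
          u32 chid;

          for (chid = 0U; chid < f->num_channels; chid++) {
                  /* expands to nvgpu_channel_from_id__func(g, chid, __func__) */
                  struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);

                  if (ch == NULL) {
                          continue; /* could not take a reference */
                  }

                  /* ... use ch while the reference is held ... */

                  gk20a_channel_put(ch); /* balance the reference */
          }
  }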