gpu: nvgpu: use refcnt for ch mmu_debug_mode

Replaced ch->mmu_debug_mode_enabled with ch->mmu_debug_mode_refcnt.
If userspace enables MMU debug mode multiple times on a channel, the
ref count is updated accordingly; enable/disable calls are expected
to be balanced when setting a channel's MMU debug mode.
When unbinding the channel, its refcnt is decremented until it
reaches 0.
Also, removed the tsg parameter from nvgpu_tsg_set_mmu_debug_mode, as
it can be retrieved from ch.
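
The scheme in a nutshell (a minimal standalone sketch, not the driver
code: the channel struct and set_mmu_debug_mode() below are simplified
stand-ins for channel_gk20a and nvgpu_tsg_set_mmu_debug_mode, and the
TSG- and FB-level counters are left out):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for channel_gk20a; only the refcnt matters. */
    struct channel {
            unsigned int mmu_debug_mode_refcnt;
    };

    /* Balanced enable/disable: the mode stays on while refcnt > 0. */
    static int set_mmu_debug_mode(struct channel *ch, bool enable)
    {
            if (!enable && ch->mmu_debug_mode_refcnt == 0U) {
                    return -1; /* unbalanced disable */
            }
            if (enable) {
                    ch->mmu_debug_mode_refcnt++;
            } else {
                    ch->mmu_debug_mode_refcnt--;
            }
            printf("mmu debug mode %s (refcnt=%u)\n",
                   ch->mmu_debug_mode_refcnt > 0U ? "on" : "off",
                   ch->mmu_debug_mode_refcnt);
            return 0;
    }

    int main(void)
    {
            struct channel ch = { 0U };

            set_mmu_debug_mode(&ch, true);  /* refcnt 0 -> 1, mode on  */
            set_mmu_debug_mode(&ch, true);  /* refcnt 1 -> 2, still on */
            set_mmu_debug_mode(&ch, false); /* refcnt 2 -> 1, still on */

            /* Unbind path: drain whatever references remain. */
            while (ch.mmu_debug_mode_refcnt > 0U) {
                    set_mmu_debug_mode(&ch, false);
            }
            return 0;
    }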

Bug 2515097
Bug 2713590

Change-Id: If334e374a55bd14ae219edbfd3b1fce5ff25c226
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2184702
(cherry picked from commit f422aee393)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2208772
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Kajetan Dutka <kdutka@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Winnie Hsu <whsu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: Kajetan Dutka <kdutka@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Thomas Fleury <tfleury@nvidia.com>
Date:      2019-08-27 10:09:51 -04:00
Committer: mobile promotions <svcmobile_promotions@nvidia.com>
Commit:    e41fd09031 (parent: e0587aaf4d)

5 changed files with 23 additions and 16 deletions

[File 1 of 5]

@@ -446,13 +446,14 @@ void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg,
 			sm_error_state->hww_warp_esr_report_mask;
 }
 
-int nvgpu_tsg_set_mmu_debug_mode(struct tsg_gk20a *tsg,
-		struct channel_gk20a *ch, bool enable)
+int nvgpu_tsg_set_mmu_debug_mode(struct channel_gk20a *ch, bool enable)
 {
 	struct gk20a *g;
 	int err = 0;
+	u32 ch_refcnt;
 	u32 tsg_refcnt;
 	u32 fb_refcnt;
+	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
 
 	if ((ch == NULL) || (tsg == NULL)) {
 		return -EINVAL;
@@ -465,17 +466,11 @@ int nvgpu_tsg_set_mmu_debug_mode(struct tsg_gk20a *tsg,
 	}
 
 	if (enable) {
-		if (ch->mmu_debug_mode_enabled) {
-			/* already enabled for this channel */
-			return 0;
-		}
+		ch_refcnt = ch->mmu_debug_mode_refcnt + 1U;
 		tsg_refcnt = tsg->mmu_debug_mode_refcnt + 1U;
 		fb_refcnt = g->mmu_debug_mode_refcnt + 1U;
 	} else {
-		if (!ch->mmu_debug_mode_enabled) {
-			/* already disabled for this channel */
-			return 0;
-		}
+		ch_refcnt = ch->mmu_debug_mode_refcnt - 1U;
 		tsg_refcnt = tsg->mmu_debug_mode_refcnt - 1U;
 		fb_refcnt = g->mmu_debug_mode_refcnt - 1U;
 	}
@@ -500,7 +495,7 @@ int nvgpu_tsg_set_mmu_debug_mode(struct tsg_gk20a *tsg,
 		g->ops.fb.set_mmu_debug_mode(g, fb_refcnt > 0U);
 	}
 
-	ch->mmu_debug_mode_enabled = enable;
+	ch->mmu_debug_mode_refcnt = ch_refcnt;
 	tsg->mmu_debug_mode_refcnt = tsg_refcnt;
 	g->mmu_debug_mode_refcnt = fb_refcnt;

[File 2 of 5]

@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics FIFO (gr host)
  *
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -2250,6 +2250,15 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 		goto fail_enable_tsg;
 	}
 
+	while (ch->mmu_debug_mode_refcnt > 0U) {
+		err = nvgpu_tsg_set_mmu_debug_mode(ch, false);
+		if (err != 0) {
+			nvgpu_err(g, "disable mmu debug mode failed ch:%u",
+				ch->chid);
+			break;
+		}
+	}
+
 	/* Remove channel from TSG and re-enable rest of the channels */
 	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	nvgpu_list_del(&ch->ch_entry);

[File 3 of 5]

@@ -328,7 +328,11 @@ struct channel_gk20a {
 	bool has_os_fence_framework_support;
 
 	bool is_privileged_channel;
-	bool mmu_debug_mode_enabled;
+
+	/**
+	 * MMU Debugger Mode is enabled for this channel if refcnt > 0
+	 */
+	u32 mmu_debug_mode_refcnt;
 };
 
 static inline struct channel_gk20a *

[File 4 of 5]

@@ -127,7 +127,6 @@ gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node)
((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node)); ((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node));
}; };
int nvgpu_tsg_set_mmu_debug_mode(struct tsg_gk20a *tsg, int nvgpu_tsg_set_mmu_debug_mode(struct channel_gk20a *ch, bool enable);
struct channel_gk20a *ch, bool enable);
#endif /* TSG_GK20A_H */ #endif /* TSG_GK20A_H */

[File 5 of 5]

@@ -1112,7 +1112,7 @@ static int nvgpu_dbg_gpu_ioctl_set_mmu_debug_mode(
 		goto clean_up;
 	}
 
-	err = nvgpu_tsg_set_mmu_debug_mode(tsg_gk20a_from_ch(ch), ch, enable);
+	err = nvgpu_tsg_set_mmu_debug_mode(ch, enable);
 	if (err) {
 		nvgpu_err(g, "set mmu debug mode failed, err=%d", err);
 	}