mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add refcounting for MMU debug mode
GPC MMU debug mode should be set if at least one channel
in the TSG has requested it. Add refcounting for MMU debug
mode, to make sure debug mode is disabled only when no
channel in the TSG is using it.
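
In isolation, the refcounting rule amounts to the minimal sketch below (stub channel/tsg types for illustration only, not the driver code; the in-tree implementation is nvgpu_tsg_set_mmu_debug_mode() in the diff that follows): a request that matches the channel's current state is a no-op, otherwise the TSG count moves up or down, and the GPC MMU debug bit follows whether that count is non-zero.

#include <stdbool.h>
#include <stdio.h>

/* Stub types standing in for channel_gk20a / tsg_gk20a (illustrative only). */
struct channel { bool mmu_debug_mode_enabled; };
struct tsg { unsigned int mmu_debug_mode_refcnt; };

/* Returns the state the GPC MMU debug bit should be programmed to. */
static bool set_mmu_debug_mode(struct tsg *tsg, struct channel *ch, bool enable)
{
	if (enable && !ch->mmu_debug_mode_enabled)
		tsg->mmu_debug_mode_refcnt++;
	else if (!enable && ch->mmu_debug_mode_enabled)
		tsg->mmu_debug_mode_refcnt--;

	ch->mmu_debug_mode_enabled = enable;

	/* debug mode stays on while any channel in the TSG holds it */
	return tsg->mmu_debug_mode_refcnt > 0U;
}

int main(void)
{
	struct tsg tsg = { 0 };
	struct channel a = { false }, b = { false };

	printf("%d\n", set_mmu_debug_mode(&tsg, &a, true));  /* 1: a enables  */
	printf("%d\n", set_mmu_debug_mode(&tsg, &b, true));  /* 1: b enables  */
	printf("%d\n", set_mmu_debug_mode(&tsg, &a, false)); /* 1: b still on */
	printf("%d\n", set_mmu_debug_mode(&tsg, &b, false)); /* 0: last user  */
	return 0;
}

With two channels, disabling on one leaves debug mode on until the last requesting channel also disables it.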
Bug 2515097
Bug 2713590
Change-Id: Ic5530f93523a9ec2cd3bfebc97adf7b7000531e0
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2123017
(cherry picked from commit a1248d87fe)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2208769
Reviewed-by: Kajetan Dutka <kdutka@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Winnie Hsu <whsu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: Kajetan Dutka <kdutka@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
committed by mobile promotions
parent 41a85b8d2a
commit 9e328ed6b8
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -445,3 +445,49 @@ void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg,
 	tsg_sm_error_states->hww_warp_esr_report_mask =
 			sm_error_state->hww_warp_esr_report_mask;
 }
+
+int nvgpu_tsg_set_mmu_debug_mode(struct tsg_gk20a *tsg,
+		struct channel_gk20a *ch, bool enable)
+{
+	struct gk20a *g;
+	int err = 0;
+	u32 tsg_refcnt;
+
+	if ((ch == NULL) || (tsg == NULL)) {
+		return -EINVAL;
+	}
+	g = ch->g;
+
+	if (g->ops.gr.set_mmu_debug_mode == NULL) {
+		return -ENOSYS;
+	}
+
+	if (enable) {
+		if (ch->mmu_debug_mode_enabled) {
+			/* already enabled for this channel */
+			return 0;
+		}
+		tsg_refcnt = tsg->mmu_debug_mode_refcnt + 1U;
+	} else {
+		if (!ch->mmu_debug_mode_enabled) {
+			/* already disabled for this channel */
+			return 0;
+		}
+		tsg_refcnt = tsg->mmu_debug_mode_refcnt - 1U;
+	}
+
+	/*
+	 * enable GPC MMU debug mode if it was requested for at
+	 * least one channel in the TSG
+	 */
+	err = g->ops.gr.set_mmu_debug_mode(g, ch, tsg_refcnt > 0U);
+	if (err != 0) {
+		nvgpu_err(g, "set mmu debug mode failed, err=%d", err);
+		return err;
+	}
+
+	ch->mmu_debug_mode_enabled = enable;
+	tsg->mmu_debug_mode_refcnt = tsg_refcnt;
+
+	return err;
+}
@@ -1468,10 +1468,15 @@ int gm20b_gr_set_mmu_debug_mode(struct gk20a *g,
 			gr_gpcs_pri_mmu_debug_ctrl_debug_disabled_f(),
 	};
 	int err;
+	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
+
+	if (tsg == NULL) {
+		return enable ? -EINVAL : 0;
+	}
 
 	err = gr_gk20a_exec_ctx_ops(ch, &ctx_ops, 1, 1, 0, NULL);
 	if (err != 0) {
-		nvgpu_err(g, "Failed to access register");
+		nvgpu_err(g, "update MMU debug mode failed");
 	}
 	return err;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -328,6 +328,7 @@ struct channel_gk20a {
 	bool has_os_fence_framework_support;
 
 	bool is_privileged_channel;
+	bool mmu_debug_mode_enabled;
 };
 
 static inline struct channel_gk20a *
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -76,6 +76,9 @@ struct tsg_gk20a {
 	bool tpc_num_initialized;
 	bool in_use;
 
+	/* MMU debug mode enabled if mmu_debug_mode_refcnt > 0 */
+	u32 mmu_debug_mode_refcnt;
+
 	struct nvgpu_tsg_sm_error_state *sm_error_states;
 
 #define NVGPU_SM_EXCEPTION_TYPE_MASK_NONE (0x0U)
@@ -124,4 +127,7 @@ gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node)
 		((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node));
 };
 
+int nvgpu_tsg_set_mmu_debug_mode(struct tsg_gk20a *tsg,
+		struct channel_gk20a *ch, bool enable);
+
 #endif /* TSG_GK20A_H */
@@ -1107,10 +1107,11 @@ static int nvgpu_dbg_gpu_ioctl_set_mmu_debug_mode(
 	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	if (!ch) {
 		nvgpu_err(g, "no bound channel for mmu debug mode");
 		err = -EINVAL;
 		goto clean_up;
 	}
 
-	err = g->ops.gr.set_mmu_debug_mode(g, ch, enable);
+	err = nvgpu_tsg_set_mmu_debug_mode(tsg_gk20a_from_ch(ch), ch, enable);
 	if (err) {
 		nvgpu_err(g, "set mmu debug mode failed, err=%d", err);
 	}