mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-22 17:36:20 +03:00
gpu: nvgpu: add SET_CTX_MMU_DEBUG_MODE ioctl
Added NVGPU_DBG_GPU_IOCTL_SET_CTX_MMU_DEBUG_MODE ioctl to set MMU
debug mode for a given context.
Added gr.set_mmu_debug_mode HAL to change NV_PGPC_PRI_MMU_DEBUG_CTRL
for a given channel. HAL implementation for native case is
gm20b_gr_set_mmu_debug_mode. It internally uses regops, which directly
writes to the register if the context is resident, or writes to
gr context otherwise.
Added NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE to enable the feature.
NV_PGPC_PRI_MMU_DEBUG_CTRL has to be context switched in FECS ucode,
so the feature is only enabled on TU104 for now.
Bug 2515097
Bug 2713590
Change-Id: Ib4efaf06fc47a8539b4474f94c68c20ce225263f
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2110720
(cherry-picked from commit af2ccb811d)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2208767
Reviewed-by: Kajetan Dutka <kdutka@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Winnie Hsu <whsu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: Kajetan Dutka <kdutka@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
6e91ecaae7
commit
dc281d6a9e
@@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* GM20B GPC MMU
|
* GM20B GPC MMU
|
||||||
*
|
*
|
||||||
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -33,6 +33,7 @@
|
|||||||
#include <nvgpu/channel.h>
|
#include <nvgpu/channel.h>
|
||||||
|
|
||||||
#include "gk20a/gr_gk20a.h"
|
#include "gk20a/gr_gk20a.h"
|
||||||
|
#include "gk20a/regops_gk20a.h"
|
||||||
|
|
||||||
#include "gr_gm20b.h"
|
#include "gr_gm20b.h"
|
||||||
#include "pmu_gm20b.h"
|
#include "pmu_gm20b.h"
|
||||||
@@ -1455,6 +1456,26 @@ u32 gr_gm20b_get_pmm_per_chiplet_offset(void)
|
|||||||
return (perf_pmmsys_extent_v() - perf_pmmsys_base_v() + 1);
|
return (perf_pmmsys_extent_v() - perf_pmmsys_base_v() + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int gm20b_gr_set_mmu_debug_mode(struct gk20a *g,
|
||||||
|
struct channel_gk20a *ch, bool enable)
|
||||||
|
{
|
||||||
|
struct nvgpu_dbg_reg_op ctx_ops = {
|
||||||
|
.op = REGOP(WRITE_32),
|
||||||
|
.type = REGOP(TYPE_GR_CTX),
|
||||||
|
.offset = gr_gpcs_pri_mmu_debug_ctrl_r(),
|
||||||
|
.value_lo = enable ?
|
||||||
|
gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_f() :
|
||||||
|
gr_gpcs_pri_mmu_debug_ctrl_debug_disabled_f(),
|
||||||
|
};
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = gr_gk20a_exec_ctx_ops(ch, &ctx_ops, 1, 1, 0, NULL);
|
||||||
|
if (err != 0) {
|
||||||
|
nvgpu_err(g, "Failed to access register");
|
||||||
|
}
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
void gm20b_gr_set_debug_mode(struct gk20a *g, bool enable)
|
void gm20b_gr_set_debug_mode(struct gk20a *g, bool enable)
|
||||||
{
|
{
|
||||||
u32 reg_val, gpc_debug_ctrl;
|
u32 reg_val, gpc_debug_ctrl;
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* GM20B GPC MMU
|
* GM20B GPC MMU
|
||||||
*
|
*
|
||||||
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -125,5 +125,7 @@ int gr_gm20b_get_preemption_mode_flags(struct gk20a *g,
|
|||||||
void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
|
void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
|
||||||
u32 global_esr);
|
u32 global_esr);
|
||||||
u32 gr_gm20b_get_pmm_per_chiplet_offset(void);
|
u32 gr_gm20b_get_pmm_per_chiplet_offset(void);
|
||||||
|
int gm20b_gr_set_mmu_debug_mode(struct gk20a *g,
|
||||||
|
struct channel_gk20a *ch, bool enable);
|
||||||
void gm20b_gr_set_debug_mode(struct gk20a *g, bool enable);
|
void gm20b_gr_set_debug_mode(struct gk20a *g, bool enable);
|
||||||
#endif /* NVGPU_GM20B_GR_GM20B_H */
|
#endif /* NVGPU_GM20B_GR_GM20B_H */
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* GM20B Graphics
|
* GM20B Graphics
|
||||||
*
|
*
|
||||||
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -277,6 +277,7 @@ static const struct gpu_ops gm20b_ops = {
|
|||||||
.get_lrf_tex_ltc_dram_override = NULL,
|
.get_lrf_tex_ltc_dram_override = NULL,
|
||||||
.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
|
.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
|
||||||
.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
|
.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
|
||||||
|
.set_mmu_debug_mode = gm20b_gr_set_mmu_debug_mode,
|
||||||
.record_sm_error_state = gm20b_gr_record_sm_error_state,
|
.record_sm_error_state = gm20b_gr_record_sm_error_state,
|
||||||
.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
|
.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
|
||||||
.suspend_contexts = gr_gk20a_suspend_contexts,
|
.suspend_contexts = gr_gk20a_suspend_contexts,
|
||||||
@@ -753,6 +754,7 @@ int gm20b_init_hal(struct gk20a *g)
|
|||||||
__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
|
__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
|
||||||
__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
|
__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
|
||||||
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, false);
|
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, false);
|
||||||
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE, false);
|
||||||
|
|
||||||
/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
|
/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
|
||||||
if (gops->fuse.check_priv_security(g)) {
|
if (gops->fuse.check_priv_security(g)) {
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* GP10B Tegra HAL interface
|
* GP10B Tegra HAL interface
|
||||||
*
|
*
|
||||||
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -295,6 +295,7 @@ static const struct gpu_ops gp10b_ops = {
|
|||||||
.get_lrf_tex_ltc_dram_override = get_ecc_override_val,
|
.get_lrf_tex_ltc_dram_override = get_ecc_override_val,
|
||||||
.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
|
.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
|
||||||
.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
|
.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
|
||||||
|
.set_mmu_debug_mode = NULL,
|
||||||
.record_sm_error_state = gm20b_gr_record_sm_error_state,
|
.record_sm_error_state = gm20b_gr_record_sm_error_state,
|
||||||
.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
|
.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
|
||||||
.suspend_contexts = gr_gp10b_suspend_contexts,
|
.suspend_contexts = gr_gp10b_suspend_contexts,
|
||||||
@@ -783,6 +784,7 @@ int gp10b_init_hal(struct gk20a *g)
|
|||||||
__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
|
__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
|
||||||
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, false);
|
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, false);
|
||||||
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, false);
|
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, false);
|
||||||
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE, false);
|
||||||
|
|
||||||
/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
|
/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
|
||||||
if (gops->fuse.check_priv_security(g)) {
|
if (gops->fuse.check_priv_security(g)) {
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* GV100 Tegra HAL interface
|
* GV100 Tegra HAL interface
|
||||||
*
|
*
|
||||||
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -404,6 +404,7 @@ static const struct gpu_ops gv100_ops = {
|
|||||||
.get_num_hwpm_perfmon = gr_gv100_get_num_hwpm_perfmon,
|
.get_num_hwpm_perfmon = gr_gv100_get_num_hwpm_perfmon,
|
||||||
.set_pmm_register = gr_gv100_set_pmm_register,
|
.set_pmm_register = gr_gv100_set_pmm_register,
|
||||||
.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
|
.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
|
||||||
|
.set_mmu_debug_mode = NULL,
|
||||||
.init_hwpm_pmm_register = gr_gv100_init_hwpm_pmm_register,
|
.init_hwpm_pmm_register = gr_gv100_init_hwpm_pmm_register,
|
||||||
.record_sm_error_state = gv11b_gr_record_sm_error_state,
|
.record_sm_error_state = gv11b_gr_record_sm_error_state,
|
||||||
.clear_sm_error_state = gv11b_gr_clear_sm_error_state,
|
.clear_sm_error_state = gv11b_gr_clear_sm_error_state,
|
||||||
@@ -1040,6 +1041,7 @@ int gv100_init_hal(struct gk20a *g)
|
|||||||
__nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);
|
||||||
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
|
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
|
||||||
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, false);
|
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, false);
|
||||||
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE, false);
|
||||||
|
|
||||||
/* for now */
|
/* for now */
|
||||||
__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
|
__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
|
||||||
|
|||||||
@@ -356,6 +356,7 @@ static const struct gpu_ops gv11b_ops = {
|
|||||||
.get_num_hwpm_perfmon = gr_gv100_get_num_hwpm_perfmon,
|
.get_num_hwpm_perfmon = gr_gv100_get_num_hwpm_perfmon,
|
||||||
.set_pmm_register = gr_gv100_set_pmm_register,
|
.set_pmm_register = gr_gv100_set_pmm_register,
|
||||||
.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
|
.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
|
||||||
|
.set_mmu_debug_mode = gm20b_gr_set_mmu_debug_mode,
|
||||||
.init_hwpm_pmm_register = gr_gv100_init_hwpm_pmm_register,
|
.init_hwpm_pmm_register = gr_gv100_init_hwpm_pmm_register,
|
||||||
.record_sm_error_state = gv11b_gr_record_sm_error_state,
|
.record_sm_error_state = gv11b_gr_record_sm_error_state,
|
||||||
.clear_sm_error_state = gv11b_gr_clear_sm_error_state,
|
.clear_sm_error_state = gv11b_gr_clear_sm_error_state,
|
||||||
@@ -955,6 +956,7 @@ int gv11b_init_hal(struct gk20a *g)
|
|||||||
__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
|
__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
|
||||||
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
|
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
|
||||||
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, true);
|
__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, true);
|
||||||
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE, false);
|
||||||
|
|
||||||
__nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);
|
||||||
__nvgpu_set_enabled(g, NVGPU_SUPPORT_PLATFORM_ATOMIC, true);
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_PLATFORM_ATOMIC, true);
|
||||||
|
|||||||
@@ -181,10 +181,13 @@ struct gk20a;
|
|||||||
/* PLATFORM_ATOMIC support */
|
/* PLATFORM_ATOMIC support */
|
||||||
#define NVGPU_SUPPORT_PLATFORM_ATOMIC 71
|
#define NVGPU_SUPPORT_PLATFORM_ATOMIC 71
|
||||||
|
|
||||||
|
/* NVGPU_GPU_IOCTL_SET_MMU_DEBUG_MODE is available */
|
||||||
|
#define NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE 72
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Must be greater than the largest bit offset in the above list.
|
* Must be greater than the largest bit offset in the above list.
|
||||||
*/
|
*/
|
||||||
#define NVGPU_MAX_ENABLED_BITS 72
|
#define NVGPU_MAX_ENABLED_BITS 73U
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* nvgpu_is_enabled - Check if the passed flag is enabled.
|
* nvgpu_is_enabled - Check if the passed flag is enabled.
|
||||||
|
|||||||
@@ -529,6 +529,8 @@ struct gpu_ops {
|
|||||||
u32 num_ppcs, u32 reg_list_ppc_count,
|
u32 num_ppcs, u32 reg_list_ppc_count,
|
||||||
u32 *__offset_in_segment);
|
u32 *__offset_in_segment);
|
||||||
void (*set_debug_mode)(struct gk20a *g, bool enable);
|
void (*set_debug_mode)(struct gk20a *g, bool enable);
|
||||||
|
int (*set_mmu_debug_mode)(struct gk20a *g,
|
||||||
|
struct channel_gk20a *ch, bool enable);
|
||||||
} gr;
|
} gr;
|
||||||
struct {
|
struct {
|
||||||
void (*init_hw)(struct gk20a *g);
|
void (*init_hw)(struct gk20a *g);
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2011-2018, NVIDIA Corporation. All rights reserved.
|
* Copyright (c) 2011-2020, NVIDIA Corporation. All rights reserved.
|
||||||
*
|
*
|
||||||
* This program is free software; you can redistribute it and/or modify it
|
* This program is free software; you can redistribute it and/or modify it
|
||||||
* under the terms and conditions of the GNU General Public License,
|
* under the terms and conditions of the GNU General Public License,
|
||||||
@@ -224,6 +224,8 @@ static struct nvgpu_flags_mapping flags_mapping[] = {
|
|||||||
NVGPU_SUPPORT_SCG},
|
NVGPU_SUPPORT_SCG},
|
||||||
{NVGPU_GPU_FLAGS_SUPPORT_VPR,
|
{NVGPU_GPU_FLAGS_SUPPORT_VPR,
|
||||||
NVGPU_SUPPORT_VPR},
|
NVGPU_SUPPORT_VPR},
|
||||||
|
{NVGPU_GPU_FLAGS_SUPPORT_SET_CTX_MMU_DEBUG_MODE,
|
||||||
|
NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE},
|
||||||
};
|
};
|
||||||
|
|
||||||
static u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags(struct gk20a *g)
|
static u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags(struct gk20a *g)
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* Tegra GK20A GPU Debugger/Profiler Driver
|
* Tegra GK20A GPU Debugger/Profiler Driver
|
||||||
*
|
*
|
||||||
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* This program is free software; you can redistribute it and/or modify it
|
* This program is free software; you can redistribute it and/or modify it
|
||||||
* under the terms and conditions of the GNU General Public License,
|
* under the terms and conditions of the GNU General Public License,
|
||||||
@@ -117,6 +117,10 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
|
|||||||
static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
|
static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
|
||||||
struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args);
|
struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args);
|
||||||
|
|
||||||
|
static int nvgpu_dbg_gpu_ioctl_set_mmu_debug_mode(
|
||||||
|
struct dbg_session_gk20a *dbg_s,
|
||||||
|
struct nvgpu_dbg_gpu_set_ctx_mmu_debug_mode_args *args);
|
||||||
|
|
||||||
static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
|
static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
|
||||||
struct dbg_session_gk20a *dbg_s,
|
struct dbg_session_gk20a *dbg_s,
|
||||||
struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);
|
struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);
|
||||||
@@ -1072,6 +1076,51 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int nvgpu_dbg_gpu_ioctl_set_mmu_debug_mode(
|
||||||
|
struct dbg_session_gk20a *dbg_s,
|
||||||
|
struct nvgpu_dbg_gpu_set_ctx_mmu_debug_mode_args *args)
|
||||||
|
{
|
||||||
|
int err;
|
||||||
|
struct gk20a *g = dbg_s->g;
|
||||||
|
struct channel_gk20a *ch;
|
||||||
|
bool enable = (args->mode == NVGPU_DBG_GPU_CTX_MMU_DEBUG_MODE_ENABLED);
|
||||||
|
|
||||||
|
nvgpu_log_fn(g, "mode=%u", args->mode);
|
||||||
|
|
||||||
|
if (args->reserved != 0U) {
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (g->ops.gr.set_mmu_debug_mode == NULL) {
|
||||||
|
return -ENOSYS;
|
||||||
|
}
|
||||||
|
|
||||||
|
err = gk20a_busy(g);
|
||||||
|
if (err) {
|
||||||
|
nvgpu_err(g, "failed to poweron");
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Take the global lock, since we'll be doing global regops */
|
||||||
|
nvgpu_mutex_acquire(&g->dbg_sessions_lock);
|
||||||
|
|
||||||
|
ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
|
||||||
|
if (!ch) {
|
||||||
|
nvgpu_err(g, "no bound channel for mmu debug mode");
|
||||||
|
goto clean_up;
|
||||||
|
}
|
||||||
|
|
||||||
|
err = g->ops.gr.set_mmu_debug_mode(g, ch, enable);
|
||||||
|
if (err) {
|
||||||
|
nvgpu_err(g, "set mmu debug mode failed, err=%d", err);
|
||||||
|
}
|
||||||
|
|
||||||
|
clean_up:
|
||||||
|
nvgpu_mutex_release(&g->dbg_sessions_lock);
|
||||||
|
gk20a_idle(g);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
|
static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
|
||||||
struct dbg_session_gk20a *dbg_s,
|
struct dbg_session_gk20a *dbg_s,
|
||||||
struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
|
struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
|
||||||
@@ -2030,6 +2079,11 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
|
|||||||
(struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args *)buf);
|
(struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args *)buf);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
case NVGPU_DBG_GPU_IOCTL_SET_CTX_MMU_DEBUG_MODE:
|
||||||
|
err = nvgpu_dbg_gpu_ioctl_set_mmu_debug_mode(dbg_s,
|
||||||
|
(struct nvgpu_dbg_gpu_set_ctx_mmu_debug_mode_args *)buf);
|
||||||
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
nvgpu_err(g,
|
nvgpu_err(g,
|
||||||
"unrecognized dbg gpu ioctl cmd: 0x%x",
|
"unrecognized dbg gpu ioctl cmd: 0x%x",
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -234,6 +234,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
|
|||||||
.get_offset_in_gpccs_segment =
|
.get_offset_in_gpccs_segment =
|
||||||
gr_gk20a_get_offset_in_gpccs_segment,
|
gr_gk20a_get_offset_in_gpccs_segment,
|
||||||
.set_debug_mode = gm20b_gr_set_debug_mode,
|
.set_debug_mode = gm20b_gr_set_debug_mode,
|
||||||
|
.set_mmu_debug_mode = NULL,
|
||||||
},
|
},
|
||||||
.fb = {
|
.fb = {
|
||||||
.init_hw = NULL,
|
.init_hw = NULL,
|
||||||
|
|||||||
@@ -44,6 +44,7 @@ int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g)
|
|||||||
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
|
||||||
__nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
|
||||||
__nvgpu_set_enabled(g, NVGPU_SUPPORT_PLATFORM_ATOMIC, true);
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_PLATFORM_ATOMIC, true);
|
||||||
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE, false);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -189,6 +189,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
|
|||||||
.get_hw_accessor_stream_out_mode =
|
.get_hw_accessor_stream_out_mode =
|
||||||
gr_gv100_get_hw_accessor_stream_out_mode,
|
gr_gv100_get_hw_accessor_stream_out_mode,
|
||||||
.update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
|
.update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
|
||||||
|
.set_mmu_debug_mode = NULL,
|
||||||
.record_sm_error_state = gv11b_gr_record_sm_error_state,
|
.record_sm_error_state = gv11b_gr_record_sm_error_state,
|
||||||
.clear_sm_error_state = vgpu_gr_clear_sm_error_state,
|
.clear_sm_error_state = vgpu_gr_clear_sm_error_state,
|
||||||
.suspend_contexts = vgpu_gr_suspend_contexts,
|
.suspend_contexts = vgpu_gr_suspend_contexts,
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -282,6 +282,7 @@ int vgpu_init_gpu_characteristics(struct gk20a *g)
|
|||||||
|
|
||||||
/* features vgpu does not support */
|
/* features vgpu does not support */
|
||||||
__nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);
|
||||||
|
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE, false);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -166,6 +166,8 @@ struct nvgpu_gpu_zbc_query_table_args {
|
|||||||
#define NVGPU_GPU_FLAGS_CAN_RAILGATE (1ULL << 29)
|
#define NVGPU_GPU_FLAGS_CAN_RAILGATE (1ULL << 29)
|
||||||
/* Usermode submit is available */
|
/* Usermode submit is available */
|
||||||
#define NVGPU_GPU_FLAGS_SUPPORT_USERMODE_SUBMIT (1ULL << 30)
|
#define NVGPU_GPU_FLAGS_SUPPORT_USERMODE_SUBMIT (1ULL << 30)
|
||||||
|
/* Set MMU debug mode is available */
|
||||||
|
#define NVGPU_GPU_FLAGS_SUPPORT_SET_CTX_MMU_DEBUG_MODE (1ULL << 32)
|
||||||
/* SM LRF ECC is enabled */
|
/* SM LRF ECC is enabled */
|
||||||
#define NVGPU_GPU_FLAGS_ECC_ENABLED_SM_LRF (1ULL << 60)
|
#define NVGPU_GPU_FLAGS_ECC_ENABLED_SM_LRF (1ULL << 60)
|
||||||
/* SM SHM ECC is enabled */
|
/* SM SHM ECC is enabled */
|
||||||
@@ -1414,8 +1416,20 @@ struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args {
|
|||||||
_IOW(NVGPU_DBG_GPU_IOCTL_MAGIC, 23, \
|
_IOW(NVGPU_DBG_GPU_IOCTL_MAGIC, 23, \
|
||||||
struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args)
|
struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args)
|
||||||
|
|
||||||
|
/* MMU Debug Mode */
|
||||||
|
#define NVGPU_DBG_GPU_CTX_MMU_DEBUG_MODE_DISABLED 0
|
||||||
|
#define NVGPU_DBG_GPU_CTX_MMU_DEBUG_MODE_ENABLED 1
|
||||||
|
|
||||||
|
struct nvgpu_dbg_gpu_set_ctx_mmu_debug_mode_args {
|
||||||
|
__u32 mode;
|
||||||
|
__u32 reserved;
|
||||||
|
};
|
||||||
|
#define NVGPU_DBG_GPU_IOCTL_SET_CTX_MMU_DEBUG_MODE \
|
||||||
|
_IOW(NVGPU_DBG_GPU_IOCTL_MAGIC, 26, \
|
||||||
|
struct nvgpu_dbg_gpu_set_ctx_mmu_debug_mode_args)
|
||||||
|
|
||||||
#define NVGPU_DBG_GPU_IOCTL_LAST \
|
#define NVGPU_DBG_GPU_IOCTL_LAST \
|
||||||
_IOC_NR(NVGPU_DBG_GPU_IOCTL_SET_SM_EXCEPTION_TYPE_MASK)
|
_IOC_NR(NVGPU_DBG_GPU_IOCTL_SET_CTX_MMU_DEBUG_MODE)
|
||||||
|
|
||||||
#define NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE \
|
#define NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE \
|
||||||
sizeof(struct nvgpu_dbg_gpu_access_fb_memory_args)
|
sizeof(struct nvgpu_dbg_gpu_access_fb_memory_args)
|
||||||
|
|||||||
Reference in New Issue
Block a user