gpu: nvgpu: split gr ctxsw fusa/non-fusa hal

Moved debugger/cilp functions from the gr ctxsw prog hal files for various
platforms to the corresponding fusa files, as they are currently enabled in
the safety build. Updated the arch yaml to reflect the non-fusa and
fusa units for gr ctxsw_prog.

JIRA NVGPU-3690

Change-Id: I188d3de223aa65816b5f511b776eb8278e221219
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2156877
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Sagar Kamble
2019-07-19 11:08:58 +05:30
committed by mobile promotions
parent 249ffa0fb0
commit e3686b5c07
8 changed files with 237 additions and 235 deletions

View File

@@ -389,17 +389,19 @@ gr:
hal/gr/ecc/ecc_tu104.c,
hal/gr/ecc/ecc_gp10b.h,
hal/gr/ecc/ecc_tu104.h ]
ctxsw_prog:
ctxsw_prog_fusa:
safe: yes
sources: [ hal/gr/ctxsw_prog/ctxsw_prog_gm20b.c,
hal/gr/ctxsw_prog/ctxsw_prog_gm20b_fusa.c,
sources: [ hal/gr/ctxsw_prog/ctxsw_prog_gm20b_fusa.c,
hal/gr/ctxsw_prog/ctxsw_prog_gm20b.h,
hal/gr/ctxsw_prog/ctxsw_prog_gp10b.c,
hal/gr/ctxsw_prog/ctxsw_prog_gp10b_fusa.c,
hal/gr/ctxsw_prog/ctxsw_prog_gp10b.h,
hal/gr/ctxsw_prog/ctxsw_prog_gv11b.c,
hal/gr/ctxsw_prog/ctxsw_prog_gv11b_fusa.c,
hal/gr/ctxsw_prog/ctxsw_prog_gv11b.h ]
ctxsw_prog:
safe: no
sources: [ hal/gr/ctxsw_prog/ctxsw_prog_gm20b.c,
hal/gr/ctxsw_prog/ctxsw_prog_gp10b.c,
hal/gr/ctxsw_prog/ctxsw_prog_gv11b.c ]
config:
safe: yes
sources: [ hal/gr/config/gr_config_gm20b.c,

View File

@@ -145,9 +145,6 @@ srcs += common/utils/enabled.c \
common/fifo/pbdma_status.c \
common/fifo/userd.c \
common/mc/mc.c \
hal/gr/ctxsw_prog/ctxsw_prog_gm20b.c \
hal/gr/ctxsw_prog/ctxsw_prog_gp10b.c \
hal/gr/ctxsw_prog/ctxsw_prog_gv11b.c \
hal/init/hal_gv11b.c \
hal/init/hal_gv11b_litter.c \
hal/init/hal_init.c \
@@ -314,7 +311,10 @@ srcs += hal/init/hal_gp10b.c \
hal/pmu/pmu_gp106.c \
hal/pmu/pmu_gv11b.c \
hal/top/top_gm20b.c \
hal/top/top_gp106.c
hal/top/top_gp106.c \
hal/gr/ctxsw_prog/ctxsw_prog_gm20b.c \
hal/gr/ctxsw_prog/ctxsw_prog_gp10b.c \
hal/gr/ctxsw_prog/ctxsw_prog_gv11b.c
endif
ifeq ($(CONFIG_NVGPU_CLK_ARB),1)

View File

@@ -65,146 +65,6 @@ bool gm20b_ctxsw_prog_is_zcull_mode_separate_buffer(u32 mode)
}
#endif /* CONFIG_NVGPU_GRAPHICS */
#ifdef CONFIG_NVGPU_DEBUGGER
u32 gm20b_ctxsw_prog_hw_get_gpccs_header_size(void)
{
return ctxsw_prog_gpccs_header_stride_v();
}
u32 gm20b_ctxsw_prog_hw_get_extended_buffer_segments_size_in_bytes(void)
{
return ctxsw_prog_extended_buffer_segments_size_in_bytes_v();
}
u32 gm20b_ctxsw_prog_hw_extended_marker_size_in_bytes(void)
{
return ctxsw_prog_extended_marker_size_in_bytes_v();
}
u32 gm20b_ctxsw_prog_hw_get_perf_counter_control_register_stride(void)
{
return ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v();
}
u32 gm20b_ctxsw_prog_get_main_image_ctx_id(struct gk20a *g,
struct nvgpu_mem *ctx_mem)
{
return nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_context_id_o());
}
void gm20b_ctxsw_prog_set_pm_ptr(struct gk20a *g, struct nvgpu_mem *ctx_mem,
u64 addr)
{
addr = addr >> 8;
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_o(),
u64_lo32(addr));
}
void gm20b_ctxsw_prog_set_pm_mode(struct gk20a *g,
struct nvgpu_mem *ctx_mem, u32 mode)
{
u32 data;
data = nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_pm_o());
data = data & ~ctxsw_prog_main_image_pm_mode_m();
data |= mode;
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_o(), data);
}
void gm20b_ctxsw_prog_set_pm_smpc_mode(struct gk20a *g,
struct nvgpu_mem *ctx_mem, bool enable)
{
u32 data;
data = nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_pm_o());
data = data & ~ctxsw_prog_main_image_pm_smpc_mode_m();
data |= enable ?
ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f() :
ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f();
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_o(), data);
}
u32 gm20b_ctxsw_prog_hw_get_pm_mode_no_ctxsw(void)
{
return ctxsw_prog_main_image_pm_mode_no_ctxsw_f();
}
u32 gm20b_ctxsw_prog_hw_get_pm_mode_ctxsw(void)
{
return ctxsw_prog_main_image_pm_mode_ctxsw_f();
}
void gm20b_ctxsw_prog_set_cde_enabled(struct gk20a *g,
struct nvgpu_mem *ctx_mem)
{
u32 data = nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_ctl_o());
data |= ctxsw_prog_main_image_ctl_cde_enabled_f();
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_ctl_o(), data);
}
void gm20b_ctxsw_prog_set_pc_sampling(struct gk20a *g,
struct nvgpu_mem *ctx_mem, bool enable)
{
u32 data = nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_pm_o());
data &= ~ctxsw_prog_main_image_pm_pc_sampling_m();
data |= ctxsw_prog_main_image_pm_pc_sampling_f(
nvgpu_safe_cast_bool_to_u32(enable));
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_o(), data);
}
bool gm20b_ctxsw_prog_check_main_image_header_magic(u32 *context)
{
u32 magic = *(context + (ctxsw_prog_main_image_magic_value_o() >> 2));
return magic == ctxsw_prog_main_image_magic_value_v_value_v();
}
bool gm20b_ctxsw_prog_check_local_header_magic(u32 *context)
{
u32 magic = *(context + (ctxsw_prog_local_magic_value_o() >> 2));
return magic == ctxsw_prog_local_magic_value_v_value_v();
}
u32 gm20b_ctxsw_prog_get_num_gpcs(u32 *context)
{
return *(context + (ctxsw_prog_main_image_num_gpcs_o() >> 2));
}
u32 gm20b_ctxsw_prog_get_num_tpcs(u32 *context)
{
return *(context + (ctxsw_prog_local_image_num_tpcs_o() >> 2));
}
void gm20b_ctxsw_prog_get_extended_buffer_size_offset(u32 *context,
u32 *size, u32 *offset)
{
u32 data = *(context + (ctxsw_prog_main_extended_buffer_ctl_o() >> 2));
*size = ctxsw_prog_main_extended_buffer_ctl_size_v(data);
*offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data);
}
void gm20b_ctxsw_prog_get_ppc_info(u32 *context, u32 *num_ppcs, u32 *ppc_mask)
{
u32 data = *(context + (ctxsw_prog_local_image_ppc_info_o() >> 2));
*num_ppcs = ctxsw_prog_local_image_ppc_info_num_ppcs_v(data);
*ppc_mask = ctxsw_prog_local_image_ppc_info_ppc_mask_v(data);
}
u32 gm20b_ctxsw_prog_get_local_priv_register_ctl_offset(u32 *context)
{
u32 data = *(context + (ctxsw_prog_local_priv_register_ctl_o() >> 2));
return ctxsw_prog_local_priv_register_ctl_offset_v(data);
}
#endif /* CONFIG_NVGPU_DEBUGGER */
#ifdef CONFIG_NVGPU_FECS_TRACE
u32 gm20b_ctxsw_prog_hw_get_ts_tag_invalid_timestamp(void)
{

View File

@@ -29,6 +29,146 @@
#include <nvgpu/hw/gm20b/hw_ctxsw_prog_gm20b.h>
#ifdef CONFIG_NVGPU_DEBUGGER
u32 gm20b_ctxsw_prog_hw_get_gpccs_header_size(void)
{
return ctxsw_prog_gpccs_header_stride_v();
}
u32 gm20b_ctxsw_prog_hw_get_extended_buffer_segments_size_in_bytes(void)
{
return ctxsw_prog_extended_buffer_segments_size_in_bytes_v();
}
u32 gm20b_ctxsw_prog_hw_extended_marker_size_in_bytes(void)
{
return ctxsw_prog_extended_marker_size_in_bytes_v();
}
u32 gm20b_ctxsw_prog_hw_get_perf_counter_control_register_stride(void)
{
return ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v();
}
u32 gm20b_ctxsw_prog_get_main_image_ctx_id(struct gk20a *g,
struct nvgpu_mem *ctx_mem)
{
return nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_context_id_o());
}
void gm20b_ctxsw_prog_set_pm_ptr(struct gk20a *g, struct nvgpu_mem *ctx_mem,
u64 addr)
{
addr = addr >> 8;
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_o(),
u64_lo32(addr));
}
void gm20b_ctxsw_prog_set_pm_mode(struct gk20a *g,
struct nvgpu_mem *ctx_mem, u32 mode)
{
u32 data;
data = nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_pm_o());
data = data & ~ctxsw_prog_main_image_pm_mode_m();
data |= mode;
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_o(), data);
}
void gm20b_ctxsw_prog_set_pm_smpc_mode(struct gk20a *g,
struct nvgpu_mem *ctx_mem, bool enable)
{
u32 data;
data = nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_pm_o());
data = data & ~ctxsw_prog_main_image_pm_smpc_mode_m();
data |= enable ?
ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f() :
ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f();
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_o(), data);
}
u32 gm20b_ctxsw_prog_hw_get_pm_mode_no_ctxsw(void)
{
return ctxsw_prog_main_image_pm_mode_no_ctxsw_f();
}
u32 gm20b_ctxsw_prog_hw_get_pm_mode_ctxsw(void)
{
return ctxsw_prog_main_image_pm_mode_ctxsw_f();
}
void gm20b_ctxsw_prog_set_cde_enabled(struct gk20a *g,
struct nvgpu_mem *ctx_mem)
{
u32 data = nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_ctl_o());
data |= ctxsw_prog_main_image_ctl_cde_enabled_f();
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_ctl_o(), data);
}
void gm20b_ctxsw_prog_set_pc_sampling(struct gk20a *g,
struct nvgpu_mem *ctx_mem, bool enable)
{
u32 data = nvgpu_mem_rd(g, ctx_mem, ctxsw_prog_main_image_pm_o());
data &= ~ctxsw_prog_main_image_pm_pc_sampling_m();
data |= ctxsw_prog_main_image_pm_pc_sampling_f(
nvgpu_safe_cast_bool_to_u32(enable));
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_o(), data);
}
bool gm20b_ctxsw_prog_check_main_image_header_magic(u32 *context)
{
u32 magic = *(context + (ctxsw_prog_main_image_magic_value_o() >> 2));
return magic == ctxsw_prog_main_image_magic_value_v_value_v();
}
bool gm20b_ctxsw_prog_check_local_header_magic(u32 *context)
{
u32 magic = *(context + (ctxsw_prog_local_magic_value_o() >> 2));
return magic == ctxsw_prog_local_magic_value_v_value_v();
}
u32 gm20b_ctxsw_prog_get_num_gpcs(u32 *context)
{
return *(context + (ctxsw_prog_main_image_num_gpcs_o() >> 2));
}
u32 gm20b_ctxsw_prog_get_num_tpcs(u32 *context)
{
return *(context + (ctxsw_prog_local_image_num_tpcs_o() >> 2));
}
void gm20b_ctxsw_prog_get_extended_buffer_size_offset(u32 *context,
u32 *size, u32 *offset)
{
u32 data = *(context + (ctxsw_prog_main_extended_buffer_ctl_o() >> 2));
*size = ctxsw_prog_main_extended_buffer_ctl_size_v(data);
*offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data);
}
void gm20b_ctxsw_prog_get_ppc_info(u32 *context, u32 *num_ppcs, u32 *ppc_mask)
{
u32 data = *(context + (ctxsw_prog_local_image_ppc_info_o() >> 2));
*num_ppcs = ctxsw_prog_local_image_ppc_info_num_ppcs_v(data);
*ppc_mask = ctxsw_prog_local_image_ppc_info_ppc_mask_v(data);
}
u32 gm20b_ctxsw_prog_get_local_priv_register_ctl_offset(u32 *context)
{
u32 data = *(context + (ctxsw_prog_local_priv_register_ctl_o() >> 2));
return ctxsw_prog_local_priv_register_ctl_offset_v(data);
}
#endif /* CONFIG_NVGPU_DEBUGGER */
u32 gm20b_ctxsw_prog_hw_get_fecs_header_size(void)
{
return ctxsw_prog_fecs_header_v();

View File

@@ -46,67 +46,3 @@ void gp10b_ctxsw_prog_set_full_preemption_ptr(struct gk20a *g,
ctxsw_prog_main_image_full_preemption_ptr_o(), u64_lo32(addr));
}
#endif /* CONFIG_NVGPU_GRAPHICS */
#ifdef CONFIG_NVGPU_CILP
void gp10b_ctxsw_prog_set_compute_preemption_mode_cilp(struct gk20a *g,
struct nvgpu_mem *ctx_mem)
{
nvgpu_mem_wr(g, ctx_mem,
ctxsw_prog_main_image_compute_preemption_options_o(),
ctxsw_prog_main_image_compute_preemption_options_control_cilp_f());
}
#endif
#ifdef CONFIG_NVGPU_DEBUGGER
void gp10b_ctxsw_prog_set_pmu_options_boost_clock_frequencies(struct gk20a *g,
struct nvgpu_mem *ctx_mem, u32 boosted_ctx)
{
u32 data = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f(boosted_ctx);
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pmu_options_o(), data);
}
void gp10b_ctxsw_prog_dump_ctxsw_stats(struct gk20a *g,
struct nvgpu_mem *ctx_mem)
{
nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_magic_value_o()),
ctxsw_prog_main_image_magic_value_v_value_v());
nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_control : %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
nvgpu_err(g, "NUM_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_save_ops_o()));
nvgpu_err(g, "WFI_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_wfi_save_ops_o()));
nvgpu_err(g, "CTA_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_cta_save_ops_o()));
nvgpu_err(g, "GFXP_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_gfxp_save_ops_o()));
nvgpu_err(g, "CILP_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_cilp_save_ops_o()));
nvgpu_err(g,
"image gfx preemption option (GFXP is 1) %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_graphics_preemption_options_o()));
nvgpu_err(g,
"image compute preemption option (CTA is 1) %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_compute_preemption_options_o()));
}
#endif /* CONFIG_NVGPU_DEBUGGER */

View File

@@ -29,6 +29,70 @@
#include <nvgpu/hw/gp10b/hw_ctxsw_prog_gp10b.h>
#ifdef CONFIG_NVGPU_CILP
void gp10b_ctxsw_prog_set_compute_preemption_mode_cilp(struct gk20a *g,
struct nvgpu_mem *ctx_mem)
{
nvgpu_mem_wr(g, ctx_mem,
ctxsw_prog_main_image_compute_preemption_options_o(),
ctxsw_prog_main_image_compute_preemption_options_control_cilp_f());
}
#endif
#ifdef CONFIG_NVGPU_DEBUGGER
void gp10b_ctxsw_prog_set_pmu_options_boost_clock_frequencies(struct gk20a *g,
struct nvgpu_mem *ctx_mem, u32 boosted_ctx)
{
u32 data = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f(boosted_ctx);
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pmu_options_o(), data);
}
void gp10b_ctxsw_prog_dump_ctxsw_stats(struct gk20a *g,
struct nvgpu_mem *ctx_mem)
{
nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_magic_value_o()),
ctxsw_prog_main_image_magic_value_v_value_v());
nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_control : %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
nvgpu_err(g, "NUM_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_save_ops_o()));
nvgpu_err(g, "WFI_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_wfi_save_ops_o()));
nvgpu_err(g, "CTA_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_cta_save_ops_o()));
nvgpu_err(g, "GFXP_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_gfxp_save_ops_o()));
nvgpu_err(g, "CILP_SAVE_OPERATIONS : %d",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_num_cilp_save_ops_o()));
nvgpu_err(g,
"image gfx preemption option (GFXP is 1) %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_graphics_preemption_options_o()));
nvgpu_err(g,
"image compute preemption option (CTA is 1) %x",
nvgpu_mem_rd(g, ctx_mem,
ctxsw_prog_main_image_compute_preemption_options_o()));
}
#endif /* CONFIG_NVGPU_DEBUGGER */
void gp10b_ctxsw_prog_set_compute_preemption_mode_cta(struct gk20a *g,
struct nvgpu_mem *ctx_mem)
{

View File

@@ -64,25 +64,3 @@ void gv11b_ctxsw_prog_set_full_preemption_ptr_veid0(struct gk20a *g,
u64_hi32(addr));
}
#endif /* CONFIG_NVGPU_GRAPHICS */
#ifdef CONFIG_NVGPU_DEBUGGER
void gv11b_ctxsw_prog_set_pm_ptr(struct gk20a *g, struct nvgpu_mem *ctx_mem,
u64 addr)
{
addr = addr >> 8;
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_o(),
u64_lo32(addr));
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_hi_o(),
u64_hi32(addr));
}
u32 gv11b_ctxsw_prog_hw_get_pm_mode_stream_out_ctxsw(void)
{
return ctxsw_prog_main_image_pm_mode_stream_out_ctxsw_f();
}
u32 gv11b_ctxsw_prog_hw_get_perf_counter_register_stride(void)
{
return ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v();
}
#endif /* CONFIG_NVGPU_DEBUGGER */

View File

@@ -29,6 +29,28 @@
#include <nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h>
#ifdef CONFIG_NVGPU_DEBUGGER
void gv11b_ctxsw_prog_set_pm_ptr(struct gk20a *g, struct nvgpu_mem *ctx_mem,
u64 addr)
{
addr = addr >> 8;
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_o(),
u64_lo32(addr));
nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_hi_o(),
u64_hi32(addr));
}
u32 gv11b_ctxsw_prog_hw_get_pm_mode_stream_out_ctxsw(void)
{
return ctxsw_prog_main_image_pm_mode_stream_out_ctxsw_f();
}
u32 gv11b_ctxsw_prog_hw_get_perf_counter_register_stride(void)
{
return ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v();
}
#endif /* CONFIG_NVGPU_DEBUGGER */
void gv11b_ctxsw_prog_set_context_buffer_ptr(struct gk20a *g,
struct nvgpu_mem *ctx_mem, u64 addr)
{