Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: reduce traffic on dbg_fn or dbg_info
Reduce debug logs printed when gpu_dbg_info or gpu_dbg_fn is set.

- Add gpu_dbg_verbose flag for more verbose debug prints. Update prints
  in ga10b_gr_init_wait_idle(), gm20b_gr_init_wait_fe_idle(),
  gv11b_gr_init_write_bundle_veid_state() and
  gv11b_gr_init_load_sw_veid_bundle().
- Add gpu_dbg_hwpm flag for HWPM-specific debug prints. Update print in
  nvgpu_gr_hwpm_map_create().
- Add gpu_dbg_mm flag for MM-specific debug prints. Update prints in
  gm20b_fb_tlb_invalidate(), gk20a_mm_fb_flush(),
  gk20a_mm_l2_invalidate_locked(), gk20a_mm_l2_flush() and
  gv11b_mm_l2_flush().
- Remove the gpu_dbg_fn mask from prints in
  gr_ga10b_create_priv_addr_table(), gr_gk20a_get_pm_ctx_buffer_offsets(),
  gr_gv11b_decode_priv_addr() and gr_gv11b_create_priv_addr_table().

Jira NVGPU-7183

Change-Id: I9842d567047cb95a42e23b5907ae324214eed606
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2602797
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
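The mechanism behind the reduction: every nvgpu_log() call carries a bitmask,
and the message is emitted only when an enabled bit in the per-device log mask
matches, so moving high-traffic prints from the broad gpu_dbg_fn/gpu_dbg_info
bits to narrower ones silences them by default. A minimal standalone sketch of
that pattern, assuming any-bit-matches filtering and a per-device log_mask
field (the struct, function name, and filter semantics below are illustrative
assumptions, not nvgpu's actual definitions; only the flag names and bit
positions come from the diff below):

    #include <stdarg.h>
    #include <stdio.h>

    /* Bit positions for the new masks match the diff below; the filter
     * semantics (any matching bit emits) are an assumption. */
    #define gpu_dbg_mm      (1ULL << 41)
    #define gpu_dbg_hwpm    (1ULL << 42)
    #define gpu_dbg_verbose (1ULL << 43)

    struct gk20a_sketch {
            unsigned long long log_mask;    /* bits enabled at runtime */
    };

    static void nvgpu_log_sketch(struct gk20a_sketch *g,
                    unsigned long long mask, const char *fmt, ...)
    {
            va_list args;

            /* Suppress unless at least one requested bit is enabled. */
            if ((g->log_mask & mask) == 0ULL)
                    return;

            va_start(args, fmt);
            vprintf(fmt, args);
            va_end(args);
            printf("\n");
    }

    int main(void)
    {
            struct gk20a_sketch g = { .log_mask = gpu_dbg_mm };

            /* Emitted: the MM bit is enabled. */
            nvgpu_log_sketch(&g, gpu_dbg_mm, "fb_flush 0x%x", 0x100u);

            /* Suppressed: gpu_dbg_verbose is not enabled. */
            nvgpu_log_sketch(&g, gpu_dbg_verbose, "done");
            return 0;
    }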
committed by: mobile promotions
parent: 1c1fec6d9f
commit: b24f577a5c
@@ -560,10 +560,12 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
 	hwpm_map->count = count;
 	hwpm_map->init = true;
 
-	nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset");
+	nvgpu_log(g, gpu_dbg_hwpm,
+		"Reg Addr => HWPM Ctxt switch buffer offset");
 
 	for (i = 0; i < count; i++) {
-		nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset);
+		nvgpu_log(g, gpu_dbg_hwpm, "%08x => %08x",
+			map[i].addr, map[i].offset);
 	}
 
 	return 0;
@@ -1,7 +1,7 @@
 /*
  * GM20B GPC MMU
  *
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -80,7 +80,7 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 	u32 data;
 	int err = 0;
 
-	nvgpu_log_fn(g, " ");
+	nvgpu_log(g, gpu_dbg_mm, " ");
 
 	/* pagetables are considered sw states which are preserved after
 	   prepare_poweroff. When gk20a deinit releases those pagetables,
@@ -538,7 +538,7 @@ int gr_ga10b_create_priv_addr_table(struct gk20a *g,
 	t = 0U;
 	*num_registers = 0U;
 
-	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
 	err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
 			&gpc_num, &tpc_num, &ppc_num, &be_num,
@@ -528,7 +528,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
 		nvgpu_gr_config_get_max_tpc_per_gpc_count(gr->config) *
 		sm_per_tpc;
 
-	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
 	/* implementation is crossed-up if either of these happen */
 	if (max_offsets > potential_offsets) {
@@ -1750,7 +1750,7 @@ int gr_gv11b_decode_priv_addr(struct gk20a *g, u32 addr,
 {
 	u32 gpc_addr, tpc_addr;
 
-	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
 	/* setup defaults */
 	*addr_type = CTXSW_ADDR_TYPE_SYS;
@@ -1905,7 +1905,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
 	t = 0;
 	*num_registers = 0;
 
-	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
 	err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
 			&gpc_num, &tpc_num, &ppc_num, &rop_num,
@@ -332,7 +332,7 @@ int ga10b_gr_init_wait_idle(struct gk20a *g)
 	bool gr_busy;
 	struct nvgpu_timeout timeout;
 
-	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
+	nvgpu_log(g, gpu_dbg_verbose | gpu_dbg_gr, " ");
 
 	err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			NVGPU_TIMER_CPU_TIMER);
@@ -355,7 +355,7 @@ int ga10b_gr_init_wait_idle(struct gk20a *g)
 				gr_status_state_busy_v()) != 0U;
 
 		if (!gr_busy) {
-			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "done");
+			nvgpu_log(g, gpu_dbg_verbose | gpu_dbg_gr, "done");
 			return 0;
 		}
 
@@ -244,7 +244,7 @@ int gm20b_gr_init_wait_fe_idle(struct gk20a *g)
 	}
 #endif
 
-	nvgpu_log_fn(g, " ");
+	nvgpu_log(g, gpu_dbg_verbose, " ");
 
 	err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			NVGPU_TIMER_CPU_TIMER);
@@ -256,7 +256,7 @@ int gm20b_gr_init_wait_fe_idle(struct gk20a *g)
 		val = nvgpu_readl(g, gr_status_r());
 
 		if (gr_status_fe_method_lower_v(val) == 0U) {
-			nvgpu_log_fn(g, "done");
+			nvgpu_log(g, gpu_dbg_verbose, "done");
 			return 0;
 		}
 
@@ -805,7 +805,8 @@ static int gv11b_gr_init_write_bundle_veid_state(struct gk20a *g, u32 index,
 			nvgpu_gr_get_cur_instance_id(g));
 
 	for (j = 0U; j < num_subctx; j++) {
-		nvgpu_log_fn(g, "write bundle_address_r for subctx: %d", j);
+		nvgpu_log(g, gpu_dbg_verbose,
+			"write bundle_address_r for subctx: %d", j);
 		nvgpu_writel(g, gr_pipe_bundle_address_r(),
 			sw_veid_bundle_init->l[index].addr |
 			gr_pipe_bundle_address_veid_f(j));
@@ -825,7 +826,7 @@ int gv11b_gr_init_load_sw_veid_bundle(struct gk20a *g,
 	int context = 0;
 
 	for (i = 0U; i < sw_veid_bundle_init->count; i++) {
-		nvgpu_log_fn(g, "veid bundle count: %d", i);
+		nvgpu_log(g, gpu_dbg_verbose, "veid bundle count: %d", i);
 		if (!g->ops.gr.init.is_allowed_sw_bundle(g,
 				sw_veid_bundle_init->l[i].addr,
 				sw_veid_bundle_init->l[i].value,
@@ -838,13 +839,13 @@ int gv11b_gr_init_load_sw_veid_bundle(struct gk20a *g,
 			nvgpu_writel(g, gr_pipe_bundle_data_r(),
 				sw_veid_bundle_init->l[i].value);
 			last_bundle_data = sw_veid_bundle_init->l[i].value;
-			nvgpu_log_fn(g, "last_bundle_data : 0x%08x",
+			nvgpu_log(g, gpu_dbg_verbose, "last_bundle_data : 0x%08x",
 				last_bundle_data);
 		}
 
 		if (gr_pipe_bundle_address_value_v(
 			sw_veid_bundle_init->l[i].addr) == GR_GO_IDLE_BUNDLE) {
-			nvgpu_log_fn(g, "go idle bundle");
+			nvgpu_log(g, gpu_dbg_verbose, "go idle bundle");
 			nvgpu_writel(g, gr_pipe_bundle_address_r(),
 				sw_veid_bundle_init->l[i].addr);
 			err = g->ops.gr.init.wait_idle(g);
@@ -39,7 +39,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 	struct nvgpu_timeout timeout;
 	u32 retries;
 
-	nvgpu_log_fn(g, " ");
+	nvgpu_log(g, gpu_dbg_mm, " ");
 
 	gk20a_busy_noresume(g);
 	if (nvgpu_is_powered_off(g)) {
@@ -76,7 +76,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 				flush_fb_flush_outstanding_true_v()) ||
 		    (flush_fb_flush_pending_v(data) ==
 				flush_fb_flush_pending_busy_v())) {
-			nvgpu_log_info(g, "fb_flush 0x%x", data);
+			nvgpu_log(g, gpu_dbg_mm, "fb_flush 0x%x", data);
 			nvgpu_udelay(5);
 		} else {
 			break;
@@ -133,8 +133,8 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
 				flush_l2_system_invalidate_outstanding_true_v()) ||
 		    (flush_l2_system_invalidate_pending_v(data) ==
 				flush_l2_system_invalidate_pending_busy_v())) {
-			nvgpu_log_info(g, "l2_system_invalidate 0x%x",
-					data);
+			nvgpu_log(g, gpu_dbg_mm,
+					"l2_system_invalidate 0x%x", data);
 			nvgpu_udelay(5);
 		} else {
 			break;
@@ -170,7 +170,7 @@ int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 	u32 retries = 2000;
 	int err = -ETIMEDOUT;
 
-	nvgpu_log_fn(g, " ");
+	nvgpu_log(g, gpu_dbg_mm, " ");
 
 	gk20a_busy_noresume(g);
 	if (nvgpu_is_powered_off(g)) {
@@ -203,7 +203,8 @@ int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 				flush_l2_flush_dirty_outstanding_true_v()) ||
 		    (flush_l2_flush_dirty_pending_v(data) ==
 				flush_l2_flush_dirty_pending_busy_v())) {
-			nvgpu_log_info(g, "l2_flush_dirty 0x%x", data);
+			nvgpu_log(g, gpu_dbg_mm, "l2_flush_dirty 0x%x",
+					data);
 			nvgpu_udelay(5);
 		} else {
 			err = 0;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,7 +31,7 @@ int gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
 	int err = 0;
 
-	nvgpu_log(g, gpu_dbg_fn, "gv11b_mm_l2_flush");
+	nvgpu_log(g, gpu_dbg_mm, "gv11b_mm_l2_flush");
 
 	err = g->ops.mm.cache.fb_flush(g);
 	if (err != 0) {
@@ -71,12 +71,15 @@ enum nvgpu_log_type {
 #define gpu_dbg_mem BIT(31) /* memory accesses; very verbose. */
 #define gpu_dbg_device BIT(32) /* Device initialization and
                                   querying. */
-#define gpu_dbg_mig BIT(33) /* MIG info */
+#define gpu_dbg_mig BIT(33) /* MIG info. */
 #define gpu_dbg_rec BIT(34) /* Recovery sequence debugging. */
-#define gpu_dbg_zbc BIT(35) /* Gr ZBC */
-#define gpu_dbg_vab BIT(36) /* VAB */
+#define gpu_dbg_zbc BIT(35) /* Gr ZBC. */
+#define gpu_dbg_vab BIT(36) /* VAB. */
 #define gpu_dbg_runlists BIT(38) /* Runlist related debugging. */
 #define gpu_dbg_cic BIT(39) /* Interrupt Handling debugging. */
 #define gpu_dbg_falcon BIT(40) /* Falcon/NVRISCV debugging */
+#define gpu_dbg_mm BIT(41) /* Memory management debugging. */
+#define gpu_dbg_hwpm BIT(42) /* GPU HWPM. */
+#define gpu_dbg_verbose BIT(43) /* More verbose logs. */
 
 #endif
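Several call sites above OR two masks together (for example
gpu_dbg_verbose | gpu_dbg_gr in ga10b_gr_init_wait_idle()). Under the
any-bit-matches filtering assumed in the sketch after the commit message,
such a print fires when either bit is enabled, so enabling GR debugging alone
still surfaces the wait-idle messages while purely verbose prints stay quiet.
A fragment to drop into that sketch's main(), with a hypothetical bit
position for gpu_dbg_gr (its real value is not shown in this diff):

    #define gpu_dbg_gr (1ULL << 11)         /* hypothetical position */

    /* Enable only GR debugging. */
    g.log_mask = gpu_dbg_gr;

    /* Emitted: gpu_dbg_gr matches even though verbose is off. */
    nvgpu_log_sketch(&g, gpu_dbg_verbose | gpu_dbg_gr, "done");

    /* Suppressed: tagged only with the verbose bit. */
    nvgpu_log_sketch(&g, gpu_dbg_verbose, "veid bundle count: %d", 0);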