gpu: nvgpu: Update pbdma data and header reset functions

Two new fifo HALs are added: read_pbdma_data
and reset_pbdma_header.

On Turing, the raw PB instruction that caused the
interrupt is stored in either the NV_PPBDMA_PB_DATA0
register or the NV_PPBDMA_HDR_SHADOW register; which
one is selected based on the NV_PPBDMA_PB_COUNT value
and the PB_HEADER type.
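
In short, the Turing read path, condensed from the tu104
implementation in this change:

    pb_header = nvgpu_readl(g, pbdma_pb_header_r(pbdma_id));
    pb_count = nvgpu_readl(g, pbdma_pb_count_r(pbdma_id));
    if ((pbdma_pb_count_value_v(pb_count) ==
         pbdma_pb_count_value_zero_f()) ||
        ((pb_header & pbdma_pb_header_type_m()) ==
         pbdma_pb_header_type_immd_f()))
            pb_inst = nvgpu_readl(g, pbdma_data0_r(pbdma_id));
    else
            pb_inst = nvgpu_readl(g, pbdma_hdr_shadow_r(pbdma_id));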

JIRA NVGPU-1240

Change-Id: I54a92e317a6054335439d2d61bced28aff3eecb7
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1990699
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Vinod G
Date: 2019-01-08 17:31:32 -08:00
Committed by: mobile promotions
Parent: 9953b17ae1
Commit: 1ff12f065e
11 changed files with 85 additions and 7 deletions


@@ -2049,7 +2049,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
return handled;
}
-static inline void gk20a_fifo_reset_pbdma_header(struct gk20a *g, u32 pbdma_id)
+void gk20a_fifo_reset_pbdma_header(struct gk20a *g, u32 pbdma_id)
{
gk20a_writel(g, pbdma_pb_header_r(pbdma_id),
pbdma_pb_header_first_true_f() |
@@ -2099,6 +2099,11 @@ static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, u32 pbdma_id,
return false;
}
+u32 gk20a_fifo_read_pbdma_data(struct gk20a *g, u32 pbdma_id)
+{
+	return nvgpu_readl(g, pbdma_hdr_shadow_r(pbdma_id));
+}
unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
u32 pbdma_intr_0, u32 *handled, u32 *error_notifier)
{
@@ -2124,7 +2129,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
"M0: %08x %08x %08x %08x ",
pbdma_id, pbdma_intr_0,
gk20a_readl(g, pbdma_pb_header_r(pbdma_id)),
-gk20a_readl(g, pbdma_hdr_shadow_r(pbdma_id)),
+g->ops.fifo.read_pbdma_data(g, pbdma_id),
gk20a_readl(g, pbdma_gp_shadow_0_r(pbdma_id)),
gk20a_readl(g, pbdma_gp_shadow_1_r(pbdma_id)),
gk20a_readl(g, pbdma_method0_r(pbdma_id)),
@@ -2155,7 +2160,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
}
if ((pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) != 0U) {
-gk20a_fifo_reset_pbdma_header(g, pbdma_id);
+g->ops.fifo.reset_pbdma_header(g, pbdma_id);
gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
rc_type = RC_TYPE_PBDMA_FAULT;
}
@@ -2172,7 +2177,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
}
if ((pbdma_intr_0 & pbdma_intr_0_device_pending_f()) != 0U) {
-gk20a_fifo_reset_pbdma_header(g, pbdma_id);
+g->ops.fifo.reset_pbdma_header(g, pbdma_id);
for (i = 0U; i < 4U; i++) {
if (gk20a_fifo_is_sw_method_subch(g,


@@ -457,4 +457,6 @@ int gk20a_fifo_init_userd(struct gk20a *g, struct channel_gk20a *c);
bool gk20a_fifo_find_pbdma_for_runlist(struct fifo_gk20a *f, u32 runlist_id,
u32 *pbdma_id);
+u32 gk20a_fifo_read_pbdma_data(struct gk20a *g, u32 pbdma_id);
+void gk20a_fifo_reset_pbdma_header(struct gk20a *g, u32 pbdma_id);
#endif /* FIFO_GK20A_H */


@@ -557,6 +557,8 @@ static const struct gpu_ops gm20b_ops = {
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gm20b_fifo_init_ce_engine_info,
+.read_pbdma_data = gk20a_fifo_read_pbdma_data,
+.reset_pbdma_header = gk20a_fifo_reset_pbdma_header,
},
.netlist = {
.get_netlist_name = gm20b_netlist_get_name,


@@ -609,6 +609,8 @@ static const struct gpu_ops gp10b_ops = {
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
+.read_pbdma_data = gk20a_fifo_read_pbdma_data,
+.reset_pbdma_header = gk20a_fifo_reset_pbdma_header,
},
.netlist = {
.get_netlist_name = gp10b_netlist_get_name,


@@ -783,6 +783,8 @@ static const struct gpu_ops gv100_ops = {
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
+.read_pbdma_data = gk20a_fifo_read_pbdma_data,
+.reset_pbdma_header = gk20a_fifo_reset_pbdma_header,
},
.netlist = {
.get_netlist_name = gv100_netlist_get_name,


@@ -738,6 +738,8 @@ static const struct gpu_ops gv11b_ops = {
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
+.read_pbdma_data = gk20a_fifo_read_pbdma_data,
+.reset_pbdma_header = gk20a_fifo_reset_pbdma_header,
},
.netlist = {
.get_netlist_name = gv11b_netlist_get_name,


@@ -882,6 +882,8 @@ struct gpu_ops {
bool (*find_pbdma_for_runlist)(struct fifo_gk20a *f,
u32 runlist_id, u32 *pbdma_id);
int (*init_ce_engine_info)(struct fifo_gk20a *f);
+u32 (*read_pbdma_data)(struct gk20a *g, u32 pbdma_id);
+void (*reset_pbdma_header)(struct gk20a *g, u32 pbdma_id);
struct {
int (*report_host_err)(struct gk20a *g,
u32 hw_id, u32 inst, u32 err_id,


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -158,6 +158,10 @@ static inline u32 pbdma_pb_header_first_true_f(void)
{
return 0x400000U;
}
+static inline u32 pbdma_pb_header_type_m(void)
+{
+	return U32(0x7U) << 29U;
+}
static inline u32 pbdma_pb_header_type_inc_f(void)
{
return 0x20000000U;
@@ -166,6 +170,10 @@ static inline u32 pbdma_pb_header_type_non_inc_f(void)
{
return 0x60000000U;
}
+static inline u32 pbdma_pb_header_type_immd_f(void)
+{
+	return 0x80000000U;
+}
static inline u32 pbdma_hdr_shadow_r(u32 i)
{
return 0x00040118U + i*8192U;
@@ -234,6 +242,18 @@ static inline u32 pbdma_method3_r(u32 i)
{
return 0x000400d8U + i*8192U;
}
+static inline u32 pbdma_pb_count_r(u32 i)
+{
+	return 0x00040088U + i*8192U;
+}
+static inline u32 pbdma_pb_count_value_v(u32 r)
+{
+	return (r >> 0U) & 0x1fffU;
+}
+static inline u32 pbdma_pb_count_value_zero_f(void)
+{
+	return 0x0U;
+}
static inline u32 pbdma_data0_r(u32 i)
{
return 0x000400c4U + i*8192U;
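
For illustration, a minimal usage sketch of the new PB_COUNT
accessors (it mirrors how tu104_fifo_read_pbdma_data below consumes
them; g and pbdma_id are used as elsewhere in the driver):

    u32 pb_count = nvgpu_readl(g, pbdma_pb_count_r(pbdma_id));

    if (pbdma_pb_count_value_v(pb_count) ==
        pbdma_pb_count_value_zero_f()) {
            /* PB count is zero: the raw PB instruction that
             * triggered the interrupt is in NV_PPBDMA_PB_DATA0. */
    }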


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -296,3 +296,40 @@ void tu104_deinit_pdb_cache_war(struct gk20a *g)
nvgpu_dma_free(g, &g->pdb_cache_war_mem);
}
}
+u32 tu104_fifo_read_pbdma_data(struct gk20a *g, u32 pbdma_id)
+{
+	u32 pb_inst;
+	u32 pb_header, pb_header_type;
+	u32 pb_count;
+
+	/*
+	 * In order to determine the location of the PB entry that caused the
+	 * interrupt, NV_PPBDMA_PB_HEADER and NV_PPBDMA_PB_COUNT need to be
+	 * checked. If the TYPE field of the NV_PPBDMA_PB_HEADER is IMMD or the
+	 * VALUE field of the NV_PPBDMA_PB_COUNT is zero, then the raw PB
+	 * instruction stored in NV_PPBDMA_PB_DATA0 is the one that triggered
+	 * the interrupt. Otherwise, the raw PB instruction that triggered the
+	 * interrupt is stored in NV_PPBDMA_HDR_SHADOW and NV_PPBDMA_PB_HEADER
+	 * stores the decoded version.
+	 */
+	pb_header = nvgpu_readl(g, pbdma_pb_header_r(pbdma_id));
+	pb_count = nvgpu_readl(g, pbdma_pb_count_r(pbdma_id));
+	pb_header_type = pb_header & pbdma_pb_header_type_m();
+
+	if ((pbdma_pb_count_value_v(pb_count) == pbdma_pb_count_value_zero_f())
+	    || (pb_header_type == pbdma_pb_header_type_immd_f())) {
+		pb_inst = nvgpu_readl(g, pbdma_data0_r(pbdma_id));
+	} else {
+		pb_inst = nvgpu_readl(g, pbdma_hdr_shadow_r(pbdma_id));
+	}
+
+	return pb_inst;
+}
+
+void tu104_fifo_reset_pbdma_header(struct gk20a *g, u32 pbdma_id)
+{
+	gk20a_fifo_reset_pbdma_header(g, pbdma_id);
+	nvgpu_writel(g, pbdma_data0_r(pbdma_id), 0);
+}
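
Note that the Turing reset variant layers on the generic gk20a header
reset and additionally clears NV_PPBDMA_PB_DATA0, presumably so that a
stale raw PB instruction cannot be mistaken for the faulting one by a
later read_pbdma_data.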


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -41,5 +41,7 @@ u32 tu104_fifo_doorbell_token(struct channel_gk20a *c);
int tu104_init_pdb_cache_war(struct gk20a *g);
void tu104_deinit_pdb_cache_war(struct gk20a *g);
+u32 tu104_fifo_read_pbdma_data(struct gk20a *g, u32 pbdma_id);
+void tu104_fifo_reset_pbdma_header(struct gk20a *g, u32 pbdma_id);
#endif /* NVGPU_FIFO_TU104_H */


@@ -815,6 +815,8 @@ static const struct gpu_ops tu104_ops = {
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
+.read_pbdma_data = tu104_fifo_read_pbdma_data,
+.reset_pbdma_header = tu104_fifo_reset_pbdma_header,
},
.netlist = {
.get_netlist_name = tu104_netlist_get_name,