mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-22 17:36:20 +03:00
gpu: nvgpu: remove support for quad reg_op
quad type reg_ops were only needed on Kepler, and not for any other chip beginning with Maxwell. HAL g->ops.gr.access_smpc_reg() was incorrectly set for Volta and Turing whereas it was only applicable to Kepler. Delete it. There is no register in the quad type whitelist since the type itself is not supported anymore. Remove the empty whitelists for all chips and also delete below HALs: g->ops.regops.get_qctl_whitelist() g->ops.regops.get_qctl_whitelist_count() hal/regops/regops_gv100.* files are not used anymore. Delete the files instead of just deleting quad HALs in these files. Bug 200628391 Change-Id: I4dcc04bef5c24eb4d63d913f492a8c00543163a2 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2366035 Tested-by: mobile promotions <svcmobile_promotions@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
Alex Waterman
parent
73ff4ac334
commit
1ff79b1d2c
@@ -595,8 +595,6 @@ regops:
|
|||||||
hal/regops/regops_gm20b.h,
|
hal/regops/regops_gm20b.h,
|
||||||
hal/regops/regops_gp10b.c,
|
hal/regops/regops_gp10b.c,
|
||||||
hal/regops/regops_gp10b.h,
|
hal/regops/regops_gp10b.h,
|
||||||
hal/regops/regops_gv100.c,
|
|
||||||
hal/regops/regops_gv100.h,
|
|
||||||
hal/regops/regops_gv11b.c,
|
hal/regops/regops_gv11b.c,
|
||||||
hal/regops/regops_gv11b.h,
|
hal/regops/regops_gv11b.h,
|
||||||
hal/regops/regops_tu104.c,
|
hal/regops/regops_tu104.c,
|
||||||
|
|||||||
@@ -720,7 +720,6 @@ nvgpu-$(CONFIG_NVGPU_HAL_NON_FUSA) += \
|
|||||||
hal/priv_ring/priv_ring_gm20b.o \
|
hal/priv_ring/priv_ring_gm20b.o \
|
||||||
hal/regops/regops_gm20b.o \
|
hal/regops/regops_gm20b.o \
|
||||||
hal/regops/regops_gp10b.o \
|
hal/regops/regops_gp10b.o \
|
||||||
hal/regops/regops_gv100.o \
|
|
||||||
hal/regops/regops_tu104.o \
|
hal/regops/regops_tu104.o \
|
||||||
hal/therm/therm_gm20b.o \
|
hal/therm/therm_gm20b.o \
|
||||||
hal/top/top_gm20b.o
|
hal/top/top_gm20b.o
|
||||||
|
|||||||
@@ -384,7 +384,6 @@ srcs += common/debugger.c \
|
|||||||
ifeq ($(CONFIG_NVGPU_HAL_NON_FUSA),1)
|
ifeq ($(CONFIG_NVGPU_HAL_NON_FUSA),1)
|
||||||
srcs += hal/regops/regops_gm20b.c \
|
srcs += hal/regops/regops_gm20b.c \
|
||||||
hal/regops/regops_gp10b.c \
|
hal/regops/regops_gp10b.c \
|
||||||
hal/regops/regops_gv100.c \
|
|
||||||
hal/regops/regops_tu104.c \
|
hal/regops/regops_tu104.c \
|
||||||
hal/perf/perf_gm20b.c
|
hal/perf/perf_gm20b.c
|
||||||
endif
|
endif
|
||||||
|
|||||||
@@ -316,12 +316,6 @@ static bool check_whitelists(struct gk20a *g,
|
|||||||
g->ops.regops.get_runcontrol_whitelist(),
|
g->ops.regops.get_runcontrol_whitelist(),
|
||||||
g->ops.regops.get_runcontrol_whitelist_count());
|
g->ops.regops.get_runcontrol_whitelist_count());
|
||||||
}
|
}
|
||||||
|
|
||||||
} else if (op->type == REGOP(TYPE_GR_CTX_QUAD)) {
|
|
||||||
valid = (g->ops.regops.get_qctl_whitelist != NULL) &&
|
|
||||||
linear_search(offset,
|
|
||||||
g->ops.regops.get_qctl_whitelist(),
|
|
||||||
g->ops.regops.get_qctl_whitelist_count());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return valid;
|
return valid;
|
||||||
@@ -357,9 +351,7 @@ static int validate_reg_op_offset(struct gk20a *g,
|
|||||||
1,
|
1,
|
||||||
&buf_offset_lo,
|
&buf_offset_lo,
|
||||||
&buf_offset_addr,
|
&buf_offset_addr,
|
||||||
&num_offsets,
|
&num_offsets);
|
||||||
op->type == REGOP(TYPE_GR_CTX_QUAD),
|
|
||||||
op->quad);
|
|
||||||
if (err != 0) {
|
if (err != 0) {
|
||||||
err = gr_gk20a_get_pm_ctx_buffer_offsets(g,
|
err = gr_gk20a_get_pm_ctx_buffer_offsets(g,
|
||||||
op->offset,
|
op->offset,
|
||||||
|
|||||||
@@ -251,7 +251,6 @@ bool gk20a_gr_sm_debugger_attached(struct gk20a *g)
|
|||||||
|
|
||||||
static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
|
static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
|
||||||
u32 addr,
|
u32 addr,
|
||||||
bool is_quad, u32 quad,
|
|
||||||
u32 *context_buffer,
|
u32 *context_buffer,
|
||||||
u32 context_buffer_size,
|
u32 context_buffer_size,
|
||||||
u32 *priv_offset);
|
u32 *priv_offset);
|
||||||
@@ -495,8 +494,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
|
|||||||
u32 addr,
|
u32 addr,
|
||||||
u32 max_offsets,
|
u32 max_offsets,
|
||||||
u32 *offsets, u32 *offset_addrs,
|
u32 *offsets, u32 *offset_addrs,
|
||||||
u32 *num_offsets,
|
u32 *num_offsets)
|
||||||
bool is_quad, u32 quad)
|
|
||||||
{
|
{
|
||||||
u32 i;
|
u32 i;
|
||||||
u32 priv_offset = 0;
|
u32 priv_offset = 0;
|
||||||
@@ -549,7 +547,6 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
|
|||||||
for (i = 0; i < num_registers; i++) {
|
for (i = 0; i < num_registers; i++) {
|
||||||
err = gr_gk20a_find_priv_offset_in_buffer(g,
|
err = gr_gk20a_find_priv_offset_in_buffer(g,
|
||||||
priv_registers[i],
|
priv_registers[i],
|
||||||
is_quad, quad,
|
|
||||||
nvgpu_gr_obj_ctx_get_local_golden_image_ptr(
|
nvgpu_gr_obj_ctx_get_local_golden_image_ptr(
|
||||||
g->gr->golden_image),
|
g->gr->golden_image),
|
||||||
nvgpu_gr_obj_ctx_get_golden_image_size(
|
nvgpu_gr_obj_ctx_get_golden_image_size(
|
||||||
@@ -755,7 +752,6 @@ void gk20a_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
|
|||||||
|
|
||||||
static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
|
static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
|
||||||
u32 addr,
|
u32 addr,
|
||||||
bool is_quad, u32 quad,
|
|
||||||
u32 *context_buffer,
|
u32 *context_buffer,
|
||||||
u32 context_buffer_size,
|
u32 context_buffer_size,
|
||||||
u32 *priv_offset)
|
u32 *priv_offset)
|
||||||
@@ -1215,7 +1211,6 @@ int gr_gk20a_get_offset_in_gpccs_segment(struct gk20a *g,
|
|||||||
*/
|
*/
|
||||||
static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
|
static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
|
||||||
u32 addr,
|
u32 addr,
|
||||||
bool is_quad, u32 quad,
|
|
||||||
u32 *context_buffer,
|
u32 *context_buffer,
|
||||||
u32 context_buffer_size,
|
u32 context_buffer_size,
|
||||||
u32 *priv_offset)
|
u32 *priv_offset)
|
||||||
@@ -1264,17 +1259,13 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
|
|||||||
g->ops.gr.ctxsw_prog.get_local_priv_register_ctl_offset(context);
|
g->ops.gr.ctxsw_prog.get_local_priv_register_ctl_offset(context);
|
||||||
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset);
|
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset);
|
||||||
|
|
||||||
/* If found in Ext buffer, ok.
|
/* If found in Ext buffer, ok. If not, continue on. */
|
||||||
* If it failed and we expected to find it there (quad offset)
|
|
||||||
* then return the error. Otherwise continue on.
|
|
||||||
*/
|
|
||||||
err = gr_gk20a_find_priv_offset_in_ext_buffer(g,
|
err = gr_gk20a_find_priv_offset_in_ext_buffer(g,
|
||||||
addr, is_quad, quad, context_buffer,
|
addr, context_buffer,
|
||||||
context_buffer_size, priv_offset);
|
context_buffer_size, priv_offset);
|
||||||
if ((err == 0) || ((err != 0) && is_quad)) {
|
if (err == 0) {
|
||||||
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
|
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
|
||||||
"err = %d, is_quad = %s",
|
"offset found in Ext buffer");
|
||||||
err, is_quad ? "true" : "false");
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1463,13 +1454,6 @@ static int gr_exec_ctx_ops(struct nvgpu_channel *ch,
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* if this is a quad access, setup for special access*/
|
|
||||||
if ((ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD))
|
|
||||||
&& (g->ops.gr.access_smpc_reg != NULL)) {
|
|
||||||
g->ops.gr.access_smpc_reg(g,
|
|
||||||
ctx_ops[i].quad,
|
|
||||||
ctx_ops[i].offset);
|
|
||||||
}
|
|
||||||
offset = ctx_ops[i].offset;
|
offset = ctx_ops[i].offset;
|
||||||
|
|
||||||
if (pass == 0) { /* write pass */
|
if (pass == 0) { /* write pass */
|
||||||
@@ -1558,9 +1542,7 @@ static int gr_exec_ctx_ops(struct nvgpu_channel *ch,
|
|||||||
ctx_ops[i].offset,
|
ctx_ops[i].offset,
|
||||||
max_offsets,
|
max_offsets,
|
||||||
offsets, offset_addrs,
|
offsets, offset_addrs,
|
||||||
&num_offsets,
|
&num_offsets);
|
||||||
ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD),
|
|
||||||
ctx_ops[i].quad);
|
|
||||||
if (err == 0) {
|
if (err == 0) {
|
||||||
if (!gr_ctx_ready) {
|
if (!gr_ctx_ready) {
|
||||||
gr_ctx_ready = true;
|
gr_ctx_ready = true;
|
||||||
@@ -1593,13 +1575,6 @@ static int gr_exec_ctx_ops(struct nvgpu_channel *ch,
|
|||||||
current_mem = nvgpu_gr_ctx_get_pm_ctx_mem(gr_ctx);
|
current_mem = nvgpu_gr_ctx_get_pm_ctx_mem(gr_ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* if this is a quad access, setup for special access*/
|
|
||||||
if ((ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD)) &&
|
|
||||||
(g->ops.gr.access_smpc_reg != NULL)) {
|
|
||||||
g->ops.gr.access_smpc_reg(g, ctx_ops[i].quad,
|
|
||||||
ctx_ops[i].offset);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (j = 0; j < num_offsets; j++) {
|
for (j = 0; j < num_offsets; j++) {
|
||||||
/* sanity check gr ctxt offsets,
|
/* sanity check gr ctxt offsets,
|
||||||
* don't write outside, worst case
|
* don't write outside, worst case
|
||||||
|
|||||||
@@ -45,7 +45,7 @@ int gr_gk20a_exec_ctx_ops(struct nvgpu_channel *ch,
|
|||||||
int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
|
int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
|
||||||
u32 addr, u32 max_offsets,
|
u32 addr, u32 max_offsets,
|
||||||
u32 *offsets, u32 *offset_addrs,
|
u32 *offsets, u32 *offset_addrs,
|
||||||
u32 *num_offsets, bool is_quad, u32 quad);
|
u32 *num_offsets);
|
||||||
int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
|
int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
|
||||||
u32 addr, u32 max_offsets,
|
u32 addr, u32 max_offsets,
|
||||||
u32 *offsets, u32 *offset_addrs,
|
u32 *offsets, u32 *offset_addrs,
|
||||||
|
|||||||
@@ -1442,49 +1442,6 @@ void gv11b_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
|
|||||||
*ovr_perf_regs = _ovr_perf_regs;
|
*ovr_perf_regs = _ovr_perf_regs;
|
||||||
}
|
}
|
||||||
|
|
||||||
void gv11b_gr_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset)
|
|
||||||
{
|
|
||||||
u32 reg_val;
|
|
||||||
u32 quad_ctrl;
|
|
||||||
u32 half_ctrl;
|
|
||||||
u32 tpc, gpc;
|
|
||||||
u32 gpc_tpc_addr;
|
|
||||||
u32 gpc_tpc_stride;
|
|
||||||
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
|
|
||||||
u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
|
|
||||||
GPU_LIT_TPC_IN_GPC_STRIDE);
|
|
||||||
|
|
||||||
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "offset=0x%x", offset);
|
|
||||||
|
|
||||||
gpc = pri_get_gpc_num(g, offset);
|
|
||||||
gpc_tpc_addr = pri_gpccs_addr_mask(offset);
|
|
||||||
tpc = nvgpu_gr_get_tpc_num(g, gpc_tpc_addr);
|
|
||||||
|
|
||||||
quad_ctrl = quad & 0x1U; /* first bit tells us quad */
|
|
||||||
half_ctrl = (quad >> 1) & 0x1U; /* second bit tells us half */
|
|
||||||
|
|
||||||
gpc_tpc_stride = gpc * gpc_stride + tpc * tpc_in_gpc_stride;
|
|
||||||
gpc_tpc_addr = gr_gpc0_tpc0_sm_halfctl_ctrl_r() + gpc_tpc_stride;
|
|
||||||
|
|
||||||
/* read from unicast reg */
|
|
||||||
reg_val = gk20a_readl(g, gpc_tpc_addr);
|
|
||||||
reg_val = set_field(reg_val,
|
|
||||||
gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(),
|
|
||||||
gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(quad_ctrl));
|
|
||||||
|
|
||||||
/* write to broadcast reg */
|
|
||||||
gk20a_writel(g, gr_gpcs_tpcs_sm_halfctl_ctrl_r(), reg_val);
|
|
||||||
|
|
||||||
gpc_tpc_addr = gr_gpc0_tpc0_sm_debug_sfe_control_r() + gpc_tpc_stride;
|
|
||||||
reg_val = gk20a_readl(g, gpc_tpc_addr);
|
|
||||||
reg_val = set_field(reg_val,
|
|
||||||
gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(),
|
|
||||||
gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(half_ctrl));
|
|
||||||
|
|
||||||
/* write to broadcast reg */
|
|
||||||
gk20a_writel(g, gr_gpcs_tpcs_sm_debug_sfe_control_r(), reg_val);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool pri_is_egpc_addr_shared(struct gk20a *g, u32 addr)
|
static bool pri_is_egpc_addr_shared(struct gk20a *g, u32 addr)
|
||||||
{
|
{
|
||||||
u32 egpc_shared_base = EGPC_PRI_SHARED_BASE;
|
u32 egpc_shared_base = EGPC_PRI_SHARED_BASE;
|
||||||
|
|||||||
@@ -77,7 +77,6 @@ void gv11b_gr_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
|
|||||||
u32 *ctrl_register_stride);
|
u32 *ctrl_register_stride);
|
||||||
void gv11b_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
|
void gv11b_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
|
||||||
u32 **ovr_perf_regs);
|
u32 **ovr_perf_regs);
|
||||||
void gv11b_gr_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset);
|
|
||||||
bool gv11b_gr_pri_is_egpc_addr(struct gk20a *g, u32 addr);
|
bool gv11b_gr_pri_is_egpc_addr(struct gk20a *g, u32 addr);
|
||||||
bool gv11b_gr_pri_is_etpc_addr(struct gk20a *g, u32 addr);
|
bool gv11b_gr_pri_is_etpc_addr(struct gk20a *g, u32 addr);
|
||||||
void gv11b_gr_get_egpc_etpc_num(struct gk20a *g, u32 addr,
|
void gv11b_gr_get_egpc_etpc_num(struct gk20a *g, u32 addr,
|
||||||
|
|||||||
@@ -960,8 +960,6 @@ static const struct gpu_ops gm20b_ops = {
|
|||||||
.get_runcontrol_whitelist = gm20b_get_runcontrol_whitelist,
|
.get_runcontrol_whitelist = gm20b_get_runcontrol_whitelist,
|
||||||
.get_runcontrol_whitelist_count =
|
.get_runcontrol_whitelist_count =
|
||||||
gm20b_get_runcontrol_whitelist_count,
|
gm20b_get_runcontrol_whitelist_count,
|
||||||
.get_qctl_whitelist = gm20b_get_qctl_whitelist,
|
|
||||||
.get_qctl_whitelist_count = gm20b_get_qctl_whitelist_count,
|
|
||||||
},
|
},
|
||||||
#endif
|
#endif
|
||||||
.mc = {
|
.mc = {
|
||||||
|
|||||||
@@ -1062,8 +1062,6 @@ static const struct gpu_ops gp10b_ops = {
|
|||||||
.get_runcontrol_whitelist = gp10b_get_runcontrol_whitelist,
|
.get_runcontrol_whitelist = gp10b_get_runcontrol_whitelist,
|
||||||
.get_runcontrol_whitelist_count =
|
.get_runcontrol_whitelist_count =
|
||||||
gp10b_get_runcontrol_whitelist_count,
|
gp10b_get_runcontrol_whitelist_count,
|
||||||
.get_qctl_whitelist = gp10b_get_qctl_whitelist,
|
|
||||||
.get_qctl_whitelist_count = gp10b_get_qctl_whitelist_count,
|
|
||||||
},
|
},
|
||||||
#endif
|
#endif
|
||||||
.mc = {
|
.mc = {
|
||||||
|
|||||||
@@ -356,7 +356,6 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
|
|||||||
.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
|
.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
|
||||||
.get_egpc_base = gv11b_gr_get_egpc_base,
|
.get_egpc_base = gv11b_gr_get_egpc_base,
|
||||||
.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
|
.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
|
||||||
.access_smpc_reg = gv11b_gr_access_smpc_reg,
|
|
||||||
.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
|
.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
|
||||||
.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
|
.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
|
||||||
.decode_priv_addr = gr_gv11b_decode_priv_addr,
|
.decode_priv_addr = gr_gv11b_decode_priv_addr,
|
||||||
@@ -1317,8 +1316,6 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
|
|||||||
.get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist,
|
.get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist,
|
||||||
.get_runcontrol_whitelist_count =
|
.get_runcontrol_whitelist_count =
|
||||||
gv11b_get_runcontrol_whitelist_count,
|
gv11b_get_runcontrol_whitelist_count,
|
||||||
.get_qctl_whitelist = gv11b_get_qctl_whitelist,
|
|
||||||
.get_qctl_whitelist_count = gv11b_get_qctl_whitelist_count,
|
|
||||||
},
|
},
|
||||||
#endif
|
#endif
|
||||||
.mc = {
|
.mc = {
|
||||||
|
|||||||
@@ -397,7 +397,6 @@ static const struct gpu_ops tu104_ops = {
|
|||||||
.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
|
.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
|
||||||
.get_egpc_base = gv11b_gr_get_egpc_base,
|
.get_egpc_base = gv11b_gr_get_egpc_base,
|
||||||
.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
|
.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
|
||||||
.access_smpc_reg = gv11b_gr_access_smpc_reg,
|
|
||||||
.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
|
.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
|
||||||
.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
|
.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
|
||||||
.decode_priv_addr = gr_gv11b_decode_priv_addr,
|
.decode_priv_addr = gr_gv11b_decode_priv_addr,
|
||||||
@@ -1354,8 +1353,6 @@ static const struct gpu_ops tu104_ops = {
|
|||||||
.get_runcontrol_whitelist = tu104_get_runcontrol_whitelist,
|
.get_runcontrol_whitelist = tu104_get_runcontrol_whitelist,
|
||||||
.get_runcontrol_whitelist_count =
|
.get_runcontrol_whitelist_count =
|
||||||
tu104_get_runcontrol_whitelist_count,
|
tu104_get_runcontrol_whitelist_count,
|
||||||
.get_qctl_whitelist = tu104_get_qctl_whitelist,
|
|
||||||
.get_qctl_whitelist_count = tu104_get_qctl_whitelist_count,
|
|
||||||
},
|
},
|
||||||
#endif
|
#endif
|
||||||
.mc = {
|
.mc = {
|
||||||
|
|||||||
@@ -364,12 +364,6 @@ static const u32 gm20b_runcontrol_whitelist[] = {
|
|||||||
static const u64 gm20b_runcontrol_whitelist_count =
|
static const u64 gm20b_runcontrol_whitelist_count =
|
||||||
ARRAY_SIZE(gm20b_runcontrol_whitelist);
|
ARRAY_SIZE(gm20b_runcontrol_whitelist);
|
||||||
|
|
||||||
/* quad ctl */
|
|
||||||
static const u32 gm20b_qctl_whitelist[] = {
|
|
||||||
};
|
|
||||||
static const u64 gm20b_qctl_whitelist_count =
|
|
||||||
ARRAY_SIZE(gm20b_qctl_whitelist);
|
|
||||||
|
|
||||||
const struct regop_offset_range *gm20b_get_global_whitelist_ranges(void)
|
const struct regop_offset_range *gm20b_get_global_whitelist_ranges(void)
|
||||||
{
|
{
|
||||||
return gm20b_global_whitelist_ranges;
|
return gm20b_global_whitelist_ranges;
|
||||||
@@ -399,13 +393,3 @@ u64 gm20b_get_runcontrol_whitelist_count(void)
|
|||||||
{
|
{
|
||||||
return gm20b_runcontrol_whitelist_count;
|
return gm20b_runcontrol_whitelist_count;
|
||||||
}
|
}
|
||||||
|
|
||||||
const u32 *gm20b_get_qctl_whitelist(void)
|
|
||||||
{
|
|
||||||
return gm20b_qctl_whitelist;
|
|
||||||
}
|
|
||||||
|
|
||||||
u64 gm20b_get_qctl_whitelist_count(void)
|
|
||||||
{
|
|
||||||
return gm20b_qctl_whitelist_count;
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -35,10 +35,6 @@ const u32 *gm20b_get_runcontrol_whitelist(void);
|
|||||||
u64 gm20b_get_runcontrol_whitelist_count(void);
|
u64 gm20b_get_runcontrol_whitelist_count(void);
|
||||||
const struct regop_offset_range *gm20b_get_runcontrol_whitelist_ranges(void);
|
const struct regop_offset_range *gm20b_get_runcontrol_whitelist_ranges(void);
|
||||||
u64 gm20b_get_runcontrol_whitelist_ranges_count(void);
|
u64 gm20b_get_runcontrol_whitelist_ranges_count(void);
|
||||||
const u32 *gm20b_get_qctl_whitelist(void);
|
|
||||||
u64 gm20b_get_qctl_whitelist_count(void);
|
|
||||||
const struct regop_offset_range *gm20b_get_qctl_whitelist_ranges(void);
|
|
||||||
u64 gm20b_get_qctl_whitelist_ranges_count(void);
|
|
||||||
|
|
||||||
#endif /* CONFIG_NVGPU_DEBUGGER */
|
#endif /* CONFIG_NVGPU_DEBUGGER */
|
||||||
#endif /* NVGPU_REGOPS_GM20B_H */
|
#endif /* NVGPU_REGOPS_GM20B_H */
|
||||||
|
|||||||
@@ -391,12 +391,6 @@ static const u32 gp10b_runcontrol_whitelist[] = {
|
|||||||
static const u64 gp10b_runcontrol_whitelist_count =
|
static const u64 gp10b_runcontrol_whitelist_count =
|
||||||
ARRAY_SIZE(gp10b_runcontrol_whitelist);
|
ARRAY_SIZE(gp10b_runcontrol_whitelist);
|
||||||
|
|
||||||
/* quad ctl */
|
|
||||||
static const u32 gp10b_qctl_whitelist[] = {
|
|
||||||
};
|
|
||||||
static const u64 gp10b_qctl_whitelist_count =
|
|
||||||
ARRAY_SIZE(gp10b_qctl_whitelist);
|
|
||||||
|
|
||||||
const struct regop_offset_range *gp10b_get_global_whitelist_ranges(void)
|
const struct regop_offset_range *gp10b_get_global_whitelist_ranges(void)
|
||||||
{
|
{
|
||||||
return gp10b_global_whitelist_ranges;
|
return gp10b_global_whitelist_ranges;
|
||||||
@@ -426,13 +420,3 @@ u64 gp10b_get_runcontrol_whitelist_count(void)
|
|||||||
{
|
{
|
||||||
return gp10b_runcontrol_whitelist_count;
|
return gp10b_runcontrol_whitelist_count;
|
||||||
}
|
}
|
||||||
|
|
||||||
const u32 *gp10b_get_qctl_whitelist(void)
|
|
||||||
{
|
|
||||||
return gp10b_qctl_whitelist;
|
|
||||||
}
|
|
||||||
|
|
||||||
u64 gp10b_get_qctl_whitelist_count(void)
|
|
||||||
{
|
|
||||||
return gp10b_qctl_whitelist_count;
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -35,10 +35,6 @@ const u32 *gp10b_get_runcontrol_whitelist(void);
|
|||||||
u64 gp10b_get_runcontrol_whitelist_count(void);
|
u64 gp10b_get_runcontrol_whitelist_count(void);
|
||||||
const struct regop_offset_range *gp10b_get_runcontrol_whitelist_ranges(void);
|
const struct regop_offset_range *gp10b_get_runcontrol_whitelist_ranges(void);
|
||||||
u64 gp10b_get_runcontrol_whitelist_ranges_count(void);
|
u64 gp10b_get_runcontrol_whitelist_ranges_count(void);
|
||||||
const u32 *gp10b_get_qctl_whitelist(void);
|
|
||||||
u64 gp10b_get_qctl_whitelist_count(void);
|
|
||||||
const struct regop_offset_range *gp10b_get_qctl_whitelist_ranges(void);
|
|
||||||
u64 gp10b_get_qctl_whitelist_ranges_count(void);
|
|
||||||
|
|
||||||
#endif /* CONFIG_NVGPU_DEBUGGER */
|
#endif /* CONFIG_NVGPU_DEBUGGER */
|
||||||
#endif /* NVGPU_REGOPS_GP10B_H */
|
#endif /* NVGPU_REGOPS_GP10B_H */
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,44 +0,0 @@
|
|||||||
/*
|
|
||||||
*
|
|
||||||
* Tegra GV100 GPU Driver Register Ops
|
|
||||||
*
|
|
||||||
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
|
||||||
* to deal in the Software without restriction, including without limitation
|
|
||||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
||||||
* and/or sell copies of the Software, and to permit persons to whom the
|
|
||||||
* Software is furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in
|
|
||||||
* all copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
||||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
||||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
||||||
* DEALINGS IN THE SOFTWARE.
|
|
||||||
*/
|
|
||||||
#ifndef NVGPU_REGOPS_GV100_H
|
|
||||||
#define NVGPU_REGOPS_GV100_H
|
|
||||||
|
|
||||||
#ifdef CONFIG_NVGPU_DEBUGGER
|
|
||||||
|
|
||||||
const struct regop_offset_range *gv100_get_global_whitelist_ranges(void);
|
|
||||||
u64 gv100_get_global_whitelist_ranges_count(void);
|
|
||||||
const struct regop_offset_range *gv100_get_context_whitelist_ranges(void);
|
|
||||||
u64 gv100_get_context_whitelist_ranges_count(void);
|
|
||||||
const u32 *gv100_get_runcontrol_whitelist(void);
|
|
||||||
u64 gv100_get_runcontrol_whitelist_count(void);
|
|
||||||
const struct regop_offset_range *gv100_get_runcontrol_whitelist_ranges(void);
|
|
||||||
u64 gv100_get_runcontrol_whitelist_ranges_count(void);
|
|
||||||
const u32 *gv100_get_qctl_whitelist(void);
|
|
||||||
u64 gv100_get_qctl_whitelist_count(void);
|
|
||||||
const struct regop_offset_range *gv100_get_qctl_whitelist_ranges(void);
|
|
||||||
u64 gv100_get_qctl_whitelist_ranges_count(void);
|
|
||||||
|
|
||||||
#endif /* CONFIG_NVGPU_DEBUGGER */
|
|
||||||
#endif /* NVGPU_REGOPS_GV100_H */
|
|
||||||
@@ -1452,12 +1452,6 @@ static const u32 gv11b_runcontrol_whitelist[] = {
|
|||||||
static const u64 gv11b_runcontrol_whitelist_count =
|
static const u64 gv11b_runcontrol_whitelist_count =
|
||||||
ARRAY_SIZE(gv11b_runcontrol_whitelist);
|
ARRAY_SIZE(gv11b_runcontrol_whitelist);
|
||||||
|
|
||||||
/* quad ctl */
|
|
||||||
static const u32 gv11b_qctl_whitelist[] = {
|
|
||||||
};
|
|
||||||
static const u64 gv11b_qctl_whitelist_count =
|
|
||||||
ARRAY_SIZE(gv11b_qctl_whitelist);
|
|
||||||
|
|
||||||
const struct regop_offset_range *gv11b_get_global_whitelist_ranges(void)
|
const struct regop_offset_range *gv11b_get_global_whitelist_ranges(void)
|
||||||
{
|
{
|
||||||
return gv11b_global_whitelist_ranges;
|
return gv11b_global_whitelist_ranges;
|
||||||
@@ -1487,13 +1481,3 @@ u64 gv11b_get_runcontrol_whitelist_count(void)
|
|||||||
{
|
{
|
||||||
return gv11b_runcontrol_whitelist_count;
|
return gv11b_runcontrol_whitelist_count;
|
||||||
}
|
}
|
||||||
|
|
||||||
const u32 *gv11b_get_qctl_whitelist(void)
|
|
||||||
{
|
|
||||||
return gv11b_qctl_whitelist;
|
|
||||||
}
|
|
||||||
|
|
||||||
u64 gv11b_get_qctl_whitelist_count(void)
|
|
||||||
{
|
|
||||||
return gv11b_qctl_whitelist_count;
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -35,10 +35,6 @@ const u32 *gv11b_get_runcontrol_whitelist(void);
|
|||||||
u64 gv11b_get_runcontrol_whitelist_count(void);
|
u64 gv11b_get_runcontrol_whitelist_count(void);
|
||||||
const struct regop_offset_range *gv11b_get_runcontrol_whitelist_ranges(void);
|
const struct regop_offset_range *gv11b_get_runcontrol_whitelist_ranges(void);
|
||||||
u64 gv11b_get_runcontrol_whitelist_ranges_count(void);
|
u64 gv11b_get_runcontrol_whitelist_ranges_count(void);
|
||||||
const u32 *gv11b_get_qctl_whitelist(void);
|
|
||||||
u64 gv11b_get_qctl_whitelist_count(void);
|
|
||||||
const struct regop_offset_range *gv11b_get_qctl_whitelist_ranges(void);
|
|
||||||
u64 gv11b_get_qctl_whitelist_ranges_count(void);
|
|
||||||
|
|
||||||
#endif /* CONFIG_NVGPU_DEBUGGER */
|
#endif /* CONFIG_NVGPU_DEBUGGER */
|
||||||
#endif /* NVGPU_REGOPS_GV11B_H */
|
#endif /* NVGPU_REGOPS_GV11B_H */
|
||||||
|
|||||||
@@ -6608,12 +6608,6 @@ static const u32 tu104_runcontrol_whitelist[] = {
|
|||||||
static const u64 tu104_runcontrol_whitelist_count =
|
static const u64 tu104_runcontrol_whitelist_count =
|
||||||
ARRAY_SIZE(tu104_runcontrol_whitelist);
|
ARRAY_SIZE(tu104_runcontrol_whitelist);
|
||||||
|
|
||||||
/* quad ctl */
|
|
||||||
static const u32 tu104_qctl_whitelist[] = {
|
|
||||||
};
|
|
||||||
static const u64 tu104_qctl_whitelist_count =
|
|
||||||
ARRAY_SIZE(tu104_qctl_whitelist);
|
|
||||||
|
|
||||||
const struct regop_offset_range *tu104_get_global_whitelist_ranges(void)
|
const struct regop_offset_range *tu104_get_global_whitelist_ranges(void)
|
||||||
{
|
{
|
||||||
return tu104_global_whitelist_ranges;
|
return tu104_global_whitelist_ranges;
|
||||||
@@ -6643,13 +6637,3 @@ u64 tu104_get_runcontrol_whitelist_count(void)
|
|||||||
{
|
{
|
||||||
return tu104_runcontrol_whitelist_count;
|
return tu104_runcontrol_whitelist_count;
|
||||||
}
|
}
|
||||||
|
|
||||||
const u32 *tu104_get_qctl_whitelist(void)
|
|
||||||
{
|
|
||||||
return tu104_qctl_whitelist;
|
|
||||||
}
|
|
||||||
|
|
||||||
u64 tu104_get_qctl_whitelist_count(void)
|
|
||||||
{
|
|
||||||
return tu104_qctl_whitelist_count;
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -32,10 +32,6 @@ const u32 *tu104_get_runcontrol_whitelist(void);
|
|||||||
u64 tu104_get_runcontrol_whitelist_count(void);
|
u64 tu104_get_runcontrol_whitelist_count(void);
|
||||||
const struct regop_offset_range *tu104_get_runcontrol_whitelist_ranges(void);
|
const struct regop_offset_range *tu104_get_runcontrol_whitelist_ranges(void);
|
||||||
u64 tu104_get_runcontrol_whitelist_ranges_count(void);
|
u64 tu104_get_runcontrol_whitelist_ranges_count(void);
|
||||||
const u32 *tu104_get_qctl_whitelist(void);
|
|
||||||
u64 tu104_get_qctl_whitelist_count(void);
|
|
||||||
const struct regop_offset_range *tu104_get_qctl_whitelist_ranges(void);
|
|
||||||
u64 tu104_get_qctl_whitelist_ranges_count(void);
|
|
||||||
|
|
||||||
#endif /* CONFIG_NVGPU_DEBUGGER */
|
#endif /* CONFIG_NVGPU_DEBUGGER */
|
||||||
#endif /* NVGPU_REGOPS_TU104_H */
|
#endif /* NVGPU_REGOPS_TU104_H */
|
||||||
|
|||||||
@@ -742,8 +742,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
|
|||||||
.get_runcontrol_whitelist = gp10b_get_runcontrol_whitelist,
|
.get_runcontrol_whitelist = gp10b_get_runcontrol_whitelist,
|
||||||
.get_runcontrol_whitelist_count =
|
.get_runcontrol_whitelist_count =
|
||||||
gp10b_get_runcontrol_whitelist_count,
|
gp10b_get_runcontrol_whitelist_count,
|
||||||
.get_qctl_whitelist = gp10b_get_qctl_whitelist,
|
|
||||||
.get_qctl_whitelist_count = gp10b_get_qctl_whitelist_count,
|
|
||||||
},
|
},
|
||||||
#endif
|
#endif
|
||||||
.mc = {
|
.mc = {
|
||||||
|
|||||||
@@ -276,7 +276,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
|
|||||||
.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
|
.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
|
||||||
.get_egpc_base = gv11b_gr_get_egpc_base,
|
.get_egpc_base = gv11b_gr_get_egpc_base,
|
||||||
.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
|
.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
|
||||||
.access_smpc_reg = gv11b_gr_access_smpc_reg,
|
|
||||||
.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
|
.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
|
||||||
.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
|
.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
|
||||||
.decode_priv_addr = gr_gv11b_decode_priv_addr,
|
.decode_priv_addr = gr_gv11b_decode_priv_addr,
|
||||||
@@ -868,8 +867,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
|
|||||||
.get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist,
|
.get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist,
|
||||||
.get_runcontrol_whitelist_count =
|
.get_runcontrol_whitelist_count =
|
||||||
gv11b_get_runcontrol_whitelist_count,
|
gv11b_get_runcontrol_whitelist_count,
|
||||||
.get_qctl_whitelist = gv11b_get_qctl_whitelist,
|
|
||||||
.get_qctl_whitelist_count = gv11b_get_qctl_whitelist_count,
|
|
||||||
},
|
},
|
||||||
#endif
|
#endif
|
||||||
.mc = {
|
.mc = {
|
||||||
|
|||||||
@@ -422,8 +422,6 @@ struct gpu_ops {
|
|||||||
u64 (*get_context_whitelist_ranges_count)(void);
|
u64 (*get_context_whitelist_ranges_count)(void);
|
||||||
const u32* (*get_runcontrol_whitelist)(void);
|
const u32* (*get_runcontrol_whitelist)(void);
|
||||||
u64 (*get_runcontrol_whitelist_count)(void);
|
u64 (*get_runcontrol_whitelist_count)(void);
|
||||||
const u32* (*get_qctl_whitelist)(void);
|
|
||||||
u64 (*get_qctl_whitelist_count)(void);
|
|
||||||
} regops;
|
} regops;
|
||||||
#endif
|
#endif
|
||||||
struct gops_mc mc;
|
struct gops_mc mc;
|
||||||
|
|||||||
@@ -1067,7 +1067,6 @@ struct gops_gr {
|
|||||||
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
|
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
|
||||||
#ifdef CONFIG_NVGPU_DEBUGGER
|
#ifdef CONFIG_NVGPU_DEBUGGER
|
||||||
u32 (*get_gr_status)(struct gk20a *g);
|
u32 (*get_gr_status)(struct gk20a *g);
|
||||||
void (*access_smpc_reg)(struct gk20a *g, u32 quad, u32 offset);
|
|
||||||
void (*set_alpha_circular_buffer_size)(struct gk20a *g,
|
void (*set_alpha_circular_buffer_size)(struct gk20a *g,
|
||||||
u32 data);
|
u32 data);
|
||||||
void (*set_circular_buffer_size)(struct gk20a *g, u32 data);
|
void (*set_circular_buffer_size)(struct gk20a *g, u32 data);
|
||||||
@@ -1193,8 +1192,7 @@ struct gops_gr {
|
|||||||
u32 addr,
|
u32 addr,
|
||||||
u32 max_offsets,
|
u32 max_offsets,
|
||||||
u32 *offsets, u32 *offset_addrs,
|
u32 *offsets, u32 *offset_addrs,
|
||||||
u32 *num_offsets,
|
u32 *num_offsets);
|
||||||
bool is_quad, u32 quad);
|
|
||||||
int (*process_context_buffer_priv_segment)(struct gk20a *g,
|
int (*process_context_buffer_priv_segment)(struct gk20a *g,
|
||||||
enum ctxsw_addr_type addr_type,
|
enum ctxsw_addr_type addr_type,
|
||||||
u32 pri_addr,
|
u32 pri_addr,
|
||||||
|
|||||||
Reference in New Issue
Block a user