gpu: nvgpu: gv11b: skip clk gating prog for pre-si

For pre-silicon platforms, clock gating
should be skipped as it is not supported.
Added new flags "can_slcg", "can_blcg" and "can_elcg" to check platform
capability before programming SLCG, BLCG and ELCG.

Bug 200314250

Change-Id: Iec7564b00b988cdd50a02f3130662727839c5047
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1566251
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak Goyal
2017-09-22 15:36:36 +05:30
committed by mobile promotions
parent f63f96866d
commit 192afccf7c
4 changed files with 38 additions and 24 deletions

View File

@@ -2352,6 +2352,9 @@ void gr_gv11b_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
{
u32 gate_ctrl;
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG))
return;
gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
switch (mode) {

View File

@@ -27,6 +27,7 @@
#include <linux/types.h>
#include "gv11b_gating_reglist.h"
#include <nvgpu/enabled.h>
struct gating_desc {
u32 addr;
@@ -282,7 +283,7 @@ void gv11b_slcg_bus_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_bus) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -301,7 +302,7 @@ void gv11b_slcg_ce2_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_ce2) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -320,7 +321,7 @@ void gv11b_slcg_chiplet_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_chiplet) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -344,7 +345,7 @@ void gv11b_slcg_fb_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_fb) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -363,7 +364,7 @@ void gv11b_slcg_fifo_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_fifo) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -382,7 +383,7 @@ void gr_gv11b_slcg_gr_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_gr) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -401,7 +402,7 @@ void ltc_gv11b_slcg_ltc_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_ltc) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -420,7 +421,7 @@ void gv11b_slcg_perf_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_perf) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -439,7 +440,7 @@ void gv11b_slcg_priring_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_priring) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -458,7 +459,7 @@ void gv11b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_pwr_csb) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -477,7 +478,7 @@ void gv11b_slcg_pmu_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_pmu) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -496,7 +497,7 @@ void gv11b_slcg_therm_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_therm) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -515,7 +516,7 @@ void gv11b_slcg_xbar_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_slcg_xbar) / sizeof(struct gating_desc);
if (!g->slcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
return;
for (i = 0; i < size; i++) {
@@ -534,7 +535,7 @@ void gv11b_blcg_bus_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_bus) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -553,7 +554,7 @@ void gv11b_blcg_ce_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_ce) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -572,7 +573,7 @@ void gv11b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -591,7 +592,7 @@ void gv11b_blcg_fb_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_fb) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -610,7 +611,7 @@ void gv11b_blcg_fifo_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_fifo) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -629,7 +630,7 @@ void gv11b_blcg_gr_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_gr) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -648,7 +649,7 @@ void gv11b_blcg_ltc_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_ltc) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -667,7 +668,7 @@ void gv11b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_pwr_csb) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -686,7 +687,7 @@ void gv11b_blcg_pmu_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_pmu) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -705,7 +706,7 @@ void gv11b_blcg_xbar_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_blcg_xbar) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {
@@ -724,7 +725,7 @@ void gr_gv11b_pg_gr_load_gating_prod(struct gk20a *g,
u32 i;
u32 size = sizeof(gv11b_pg_gr) / sizeof(struct gating_desc);
if (!g->blcg_enabled)
if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
return;
for (i = 0; i < size; i++) {

View File

@@ -133,6 +133,13 @@ struct gk20a_platform t19x_gpu_tegra_platform = {
.probe = gv11b_tegra_probe,
.remove = gv11b_tegra_remove,
.enable_slcg = false,
.enable_blcg = false,
.enable_elcg = false,
.can_slcg = false,
.can_blcg = false,
.can_elcg = false,
/* power management callbacks */
.suspend = gv11b_tegra_suspend,
.railgate = gv11b_tegra_railgate,

View File

@@ -72,6 +72,9 @@ struct gk20a_platform gv11b_vgpu_tegra_platform = {
.enable_elcg = false,
.enable_elpg = false,
.enable_aelpg = false,
.can_slcg = false,
.can_blcg = false,
.can_elcg = false,
.ch_wdt_timeout_ms = 5000,