mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-23 18:16:01 +03:00
gpu: nvgpu: skip clk gating prog for sim/emu.
For Simulation/Emulation platforms, clock gating should be skipped as it is not supported. Added new flags "can_<x>lcg" (can_slcg, can_blcg, can_elcg) to check platform capability before doing SLCG, BLCG and ELCG.

Bug 200314250

Change-Id: I4124d444a77a4c06df8c1d82c6038bfd457f3db0
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1566049
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent edb1166613
commit 0e8aee1c1a
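The diff below applies one pattern throughout: each platform latches its can_slcg/can_blcg/can_elcg capability at probe time (forced to false when not running on silicon), and every gating-reglist loader checks the corresponding NVGPU_GPU_CAN_* flag before touching any registers. The following is a minimal stand-alone C sketch of that flow; struct gpu, init_pm_vars and slcg_load_gating_prod are simplified hypothetical stand-ins for the nvgpu types, not the actual driver API.

/*
 * Sketch of the capability-gated clock-gating pattern: the capability
 * is latched once at probe, and loaders bail out early when it is clear.
 */
#include <stdbool.h>
#include <stdio.h>

enum { CAN_SLCG, CAN_BLCG, CAN_ELCG, NUM_CAPS };

struct gpu {
	bool is_silicon;        /* false on simulation/emulation platforms */
	bool enabled[NUM_CAPS]; /* models the __nvgpu_set_enabled() flag state */
};

/* Probe-time init: the platform capability is only honored on silicon. */
static void init_pm_vars(struct gpu *g, bool plat_can_slcg)
{
	g->enabled[CAN_SLCG] = g->is_silicon ? plat_can_slcg : false;
}

/* Loader: skip the whole register list if SLCG is not supported. */
static void slcg_load_gating_prod(struct gpu *g)
{
	if (!g->enabled[CAN_SLCG])
		return; /* sim/emu: clock gating programming skipped */
	printf("programming SLCG prod values\n");
}

int main(void)
{
	struct gpu sim = { .is_silicon = false };
	init_pm_vars(&sim, true);
	slcg_load_gating_prod(&sim); /* prints nothing: gating skipped */

	struct gpu si = { .is_silicon = true };
	init_pm_vars(&si, true);
	slcg_load_gating_prod(&si);  /* programs SLCG */
	return 0;
}

Keeping the check inside each loader, rather than at every call site, means a platform that cannot gate clocks simply turns these functions into no-ops.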
@@ -116,6 +116,14 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 		nvgpu_platform_is_silicon(g) ? platform->enable_mscg : false;
 	g->can_elpg =
 		nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;
+
+	__nvgpu_set_enabled(g, NVGPU_GPU_CAN_ELCG,
+		nvgpu_platform_is_silicon(g) ? platform->can_elcg : false);
+	__nvgpu_set_enabled(g, NVGPU_GPU_CAN_SLCG,
+		nvgpu_platform_is_silicon(g) ? platform->can_slcg : false);
+	__nvgpu_set_enabled(g, NVGPU_GPU_CAN_BLCG,
+		nvgpu_platform_is_silicon(g) ? platform->can_blcg : false);
+
 	g->default_pri_timeout = platform->default_pri_timeout;
 	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
 	g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
@@ -79,6 +79,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_mscg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.default_pri_timeout = 0x3ff,

 	.disable_aspm = true,
@@ -112,6 +115,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_mscg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.default_pri_timeout = 0x3ff,

 	.disable_aspm = true,
@@ -145,6 +151,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_mscg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.default_pri_timeout = 0x3ff,

 	.disable_aspm = true,
@@ -178,6 +187,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_mscg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.default_pri_timeout = 0x3ff,

 	.disable_aspm = true,
@@ -211,6 +223,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = false,
 	.enable_blcg = false,
 	.enable_mscg = false,
+	.can_slcg = false,
+	.can_blcg = false,
+	.can_elcg = false,
 	.default_pri_timeout = 0x3ff,

 	.disable_aspm = true,
@@ -241,6 +256,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = false,
 	.enable_blcg = false,
 	.enable_mscg = false,
+	.can_slcg = false,
+	.can_blcg = false,
+	.can_elcg = false,
 	.default_pri_timeout = 0x3ff,

 	.disable_aspm = true,
@@ -911,6 +911,9 @@ struct gk20a_platform gm20b_tegra_platform = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_elcg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.enable_elpg = true,
 	.enable_aelpg = true,
 	.enable_perfmon = true,
@@ -373,6 +373,9 @@ struct gk20a_platform gp10b_tegra_platform = {
 	.enable_blcg = true,
 	.enable_slcg = true,
 	.enable_elcg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.enable_aelpg = true,
 	.enable_perfmon = true,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2015, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2012-2017, NVIDIA Corporation. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 #define __gk20a_gating_reglist_h__

 #include "gk20a_gating_reglist.h"
+#include <nvgpu/enabled.h>

 struct gating_desc {
 	u32 addr;
@@ -305,6 +306,10 @@ void gr_gk20a_slcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_slcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_slcg_gr[i].addr,
@@ -325,6 +330,10 @@ void gr_gk20a_slcg_perf_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_slcg_perf) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_slcg_perf[i].addr,
@@ -340,6 +349,10 @@ void gr_gk20a_blcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_blcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_blcg_gr[i].addr,
@@ -355,6 +368,10 @@ void gr_gk20a_pg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_pg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_pg_gr[i].addr,
@@ -370,6 +387,10 @@ void gr_gk20a_slcg_therm_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_slcg_therm) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_slcg_therm[i].addr,
@@ -4067,6 +4067,9 @@ void gr_gk20a_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine)
 {
 	u32 gate_ctrl;

+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));

 	switch (mode) {
@@ -4095,6 +4098,9 @@ void gr_gk20a_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)

 	gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));

+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG))
+		return;
+
 	switch (mode) {
 	case ELCG_RUN:
 		gate_ctrl = set_field(gate_ctrl,
@@ -91,6 +91,15 @@ struct gk20a_platform {
 	/* Engine Level Clock Gating: true = enable flase = disable */
 	bool enable_elcg;

+	/* Should be populated at probe. */
+	bool can_slcg;
+
+	/* Should be populated at probe. */
+	bool can_blcg;
+
+	/* Should be populated at probe. */
+	bool can_elcg;
+
 	/* Engine Level Power Gating: true = enable flase = disable */
 	bool enable_elpg;
@@ -57,6 +57,9 @@ struct gk20a_platform vgpu_tegra_platform = {
 	.enable_elcg = false,
 	.enable_elpg = false,
 	.enable_aelpg = false,
+	.can_slcg = false,
+	.can_blcg = false,
+	.can_elcg = false,

 	.ch_wdt_timeout_ms = 5000,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 #define __gm20b_gating_reglist_h__

 #include "gm20b_gating_reglist.h"
+#include <nvgpu/enabled.h>

 struct gating_desc {
 	u32 addr;
@@ -290,6 +291,10 @@ void gm20b_slcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_bus[i].addr,
@@ -305,6 +310,10 @@ void gm20b_slcg_ce2_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_ce2) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_ce2[i].addr,
@@ -320,6 +329,10 @@ void gm20b_slcg_chiplet_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_chiplet) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_chiplet[i].addr,
@@ -340,6 +353,10 @@ void gm20b_slcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_fb[i].addr,
@@ -355,6 +372,10 @@ void gm20b_slcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_fifo[i].addr,
@@ -370,6 +391,10 @@ void gr_gm20b_slcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_gr[i].addr,
@@ -385,6 +410,10 @@ void ltc_gm20b_slcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_ltc[i].addr,
@@ -400,6 +429,10 @@ void gm20b_slcg_perf_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_perf) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_perf[i].addr,
@@ -415,6 +448,10 @@ void gm20b_slcg_priring_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_priring) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_priring[i].addr,
@@ -430,6 +467,10 @@ void gm20b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_pwr_csb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_pwr_csb[i].addr,
@@ -445,6 +486,10 @@ void gm20b_slcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_pmu[i].addr,
@@ -460,6 +505,10 @@ void gm20b_slcg_therm_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_therm) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_therm[i].addr,
@@ -475,6 +524,10 @@ void gm20b_slcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_xbar[i].addr,
@@ -490,6 +543,10 @@ void gm20b_blcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_bus[i].addr,
@@ -505,6 +562,10 @@ void gm20b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_ctxsw_prog[i].addr,
@@ -520,6 +581,10 @@ void gm20b_blcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_fb[i].addr,
@@ -535,6 +600,10 @@ void gm20b_blcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_fifo[i].addr,
@@ -550,6 +619,10 @@ void gm20b_blcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_gr[i].addr,
@@ -565,6 +638,10 @@ void gm20b_blcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_ltc[i].addr,
@@ -580,6 +657,10 @@ void gm20b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_pwr_csb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_pwr_csb[i].addr,
@@ -595,6 +676,10 @@ void gm20b_blcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_pmu[i].addr,
@@ -610,6 +695,10 @@ void gm20b_blcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_xbar[i].addr,
@@ -625,6 +714,10 @@ void gr_gm20b_pg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_pg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_pg_gr[i].addr,
@@ -26,6 +26,7 @@
 #define __gp106_gating_reglist_h__

 #include "gp106_gating_reglist.h"
+#include <nvgpu/enabled.h>

 struct gating_desc {
 	u32 addr;
@@ -276,6 +277,10 @@ void gp106_slcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_bus[i].addr,
@@ -291,6 +296,10 @@ void gp106_slcg_ce2_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_ce2) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_ce2[i].addr,
@@ -306,6 +315,10 @@ void gp106_slcg_chiplet_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_chiplet) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_chiplet[i].addr,
@@ -326,6 +339,10 @@ void gp106_slcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_fb[i].addr,
@@ -341,6 +358,10 @@ void gp106_slcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_fifo[i].addr,
@@ -356,6 +377,10 @@ void gr_gp106_slcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_gr[i].addr,
@@ -371,6 +396,10 @@ void ltc_gp106_slcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_ltc[i].addr,
@@ -386,6 +415,10 @@ void gp106_slcg_perf_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_perf) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_perf[i].addr,
@@ -401,6 +434,10 @@ void gp106_slcg_priring_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_priring) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_priring[i].addr,
@@ -416,6 +453,10 @@ void gp106_slcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_pmu[i].addr,
@@ -431,6 +472,10 @@ void gp106_slcg_therm_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_therm) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_therm[i].addr,
@@ -446,6 +491,10 @@ void gp106_slcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_xbar[i].addr,
@@ -461,6 +510,10 @@ void gp106_blcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_bus[i].addr,
@@ -476,6 +529,10 @@ void gp106_blcg_ce_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_ce) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_ce[i].addr,
@@ -491,6 +548,10 @@ void gp106_blcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_fb[i].addr,
@@ -506,6 +567,10 @@ void gp106_blcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_fifo[i].addr,
@@ -521,6 +586,10 @@ void gp106_blcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_gr[i].addr,
@@ -536,6 +605,10 @@ void gp106_blcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_ltc[i].addr,
@@ -551,6 +624,10 @@ void gp106_blcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_pmu[i].addr,
@@ -566,6 +643,10 @@ void gp106_blcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_xbar[i].addr,
@@ -581,6 +662,10 @@ void gr_gp106_pg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_pg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_pg_gr[i].addr,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 #define __gp10b_gating_reglist_h__

 #include "gp10b_gating_reglist.h"
+#include <nvgpu/enabled.h>

 struct gating_desc {
 	u32 addr;
@@ -281,6 +282,10 @@ void gp10b_slcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_bus[i].addr,
@@ -296,6 +301,10 @@ void gp10b_slcg_ce2_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_ce2) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_ce2[i].addr,
@@ -311,6 +320,10 @@ void gp10b_slcg_chiplet_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_chiplet) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_chiplet[i].addr,
@@ -331,6 +344,10 @@ void gp10b_slcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_fb[i].addr,
@@ -346,6 +363,10 @@ void gp10b_slcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_fifo[i].addr,
@@ -361,6 +382,10 @@ void gr_gp10b_slcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_gr[i].addr,
@@ -376,6 +401,10 @@ void ltc_gp10b_slcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_ltc[i].addr,
@@ -391,6 +420,10 @@ void gp10b_slcg_perf_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_perf) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_perf[i].addr,
@@ -406,6 +439,10 @@ void gp10b_slcg_priring_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_priring) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_priring[i].addr,
@@ -421,6 +458,10 @@ void gp10b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_pwr_csb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_pwr_csb[i].addr,
@@ -436,6 +477,10 @@ void gp10b_slcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_pmu[i].addr,
@@ -451,6 +496,10 @@ void gp10b_slcg_therm_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_therm) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_therm[i].addr,
@@ -466,6 +515,10 @@ void gp10b_slcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_xbar[i].addr,
@@ -481,6 +534,10 @@ void gp10b_blcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_bus[i].addr,
@@ -496,6 +553,10 @@ void gp10b_blcg_ce_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_ce) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_ce[i].addr,
@@ -511,6 +572,10 @@ void gp10b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_ctxsw_prog[i].addr,
@@ -526,6 +591,10 @@ void gp10b_blcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_fb[i].addr,
@@ -541,6 +610,10 @@ void gp10b_blcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_fifo[i].addr,
@@ -556,6 +629,10 @@ void gp10b_blcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_gr[i].addr,
@@ -571,6 +648,10 @@ void gp10b_blcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_ltc[i].addr,
@@ -586,6 +667,10 @@ void gp10b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_pwr_csb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_pwr_csb[i].addr,
@@ -601,6 +686,10 @@ void gp10b_blcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_pmu[i].addr,
@@ -616,6 +705,10 @@ void gp10b_blcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_xbar[i].addr,
@@ -631,6 +724,10 @@ void gr_gp10b_pg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_pg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_pg_gr[i].addr,
@@ -58,6 +58,9 @@ struct gk20a;
 #define NVGPU_PMU_PSTATE 49
 #define NVGPU_PMU_ZBC_SAVE 50
 #define NVGPU_PMU_FECS_BOOTSTRAP_DONE 51
+#define NVGPU_GPU_CAN_BLCG 52
+#define NVGPU_GPU_CAN_SLCG 53
+#define NVGPU_GPU_CAN_ELCG 54

 /* whether to run PREOS binary on dGPUs */
 #define NVGPU_PMU_RUN_PREOS 52