gpu: nvgpu: add CONFIG_NVGPU_TEGRA_FUSE

Encapsulate the tegra fuse functionality under the config flag
CONFIG_NVGPU_TEGRA_FUSE.

Bug 2834141

Change-Id: I54c9e82360e8a24008ea14eb55af80f81d325cdc
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2306432
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Sagar Kamble
2020-02-28 21:25:59 +05:30
committed by Alex Waterman
parent 3748be5792
commit 59c6947fc6
30 changed files with 209 additions and 36 deletions

View File

@@ -225,3 +225,10 @@ config NVGPU_VPR
default y default y
help help
Support for NVGPU VPR Support for NVGPU VPR
config NVGPU_TEGRA_FUSE
bool "Tegra fuse Support"
depends on GK20A
default y
help
	  Support for Tegra fuse

View File

@@ -428,12 +428,15 @@ endif
nvgpu-$(CONFIG_TEGRA_GK20A) += \ nvgpu-$(CONFIG_TEGRA_GK20A) += \
os/linux/module.o \ os/linux/module.o \
os/linux/module_usermode.o \ os/linux/module_usermode.o \
os/linux/soc.o \
os/linux/fuse.o \
os/linux/platform_gk20a_tegra.o \ os/linux/platform_gk20a_tegra.o \
os/linux/platform_gp10b_tegra.o \ os/linux/platform_gp10b_tegra.o \
os/linux/platform_gv11b_tegra.o os/linux/platform_gv11b_tegra.o
ifeq ($(CONFIG_TEGRA_GK20A),y)
nvgpu-$(CONFIG_NVGPU_TEGRA_FUSE) += os/linux/fuse.o \
os/linux/soc.o
endif
nvgpu-$(CONFIG_SYNC) += \ nvgpu-$(CONFIG_SYNC) += \
os/linux/sync_sema_android.o \ os/linux/sync_sema_android.o \
os/linux/os_fence_android.o \ os/linux/os_fence_android.o \

View File

@@ -84,6 +84,9 @@ NVGPU_COMMON_CFLAGS += \
endif endif
CONFIG_NVGPU_TEGRA_FUSE := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_TEGRA_FUSE
# #
# Flags enabled only for safety debug and regular build profile. # Flags enabled only for safety debug and regular build profile.
# #

View File

@@ -31,7 +31,6 @@ srcs += os/posix/nvgpu.c \
os/posix/firmware.c \ os/posix/firmware.c \
os/posix/soc.c \ os/posix/soc.c \
os/posix/error_notifier.c \ os/posix/error_notifier.c \
os/posix/fuse.c \
os/posix/posix-channel.c \ os/posix/posix-channel.c \
os/posix/posix-tsg.c \ os/posix/posix-tsg.c \
os/posix/stubs.c \ os/posix/stubs.c \
@@ -43,6 +42,10 @@ ifdef CONFIG_NVGPU_VPR
srcs += os/posix/posix-vpr.c srcs += os/posix/posix-vpr.c
endif endif
ifdef CONFIG_NVGPU_TEGRA_FUSE
srcs += os/posix/fuse.c
endif
ifdef CONFIG_NVGPU_FECS_TRACE ifdef CONFIG_NVGPU_FECS_TRACE
srcs += os/posix/fecs_trace_posix.c srcs += os/posix/fecs_trace_posix.c
endif endif

View File

@@ -2056,6 +2056,7 @@ PREDEFINED += CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
PREDEFINED += NVGPU_IGPU_ISOLATION_SUPPORT PREDEFINED += NVGPU_IGPU_ISOLATION_SUPPORT
PREDEFINED += CONFIG_NVGPU_ISOLATION_SUPPORT PREDEFINED += CONFIG_NVGPU_ISOLATION_SUPPORT
PREDEFINED += NVCPU_IS_AARCH64 PREDEFINED += NVCPU_IS_AARCH64
PREDEFINED += CONFIG_NVGPU_TEGRA_FUSE
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The # tag can be used to specify a list of macro names that should be expanded. The

View File

@@ -301,13 +301,6 @@ static int nvgpu_fuse_calib_gpcpll_get_adc(struct gk20a *g,
return 0; return 0;
} }
#ifdef CONFIG_TEGRA_USE_NA_GPCPLL
static bool nvgpu_fuse_can_use_na_gpcpll(struct gk20a *g)
{
return nvgpu_tegra_get_gpu_speedo_id(g);
}
#endif
/* /*
 * Read ADC characteristic parameters from fuses.	 * Read ADC characteristic parameters from fuses.
 * Determine calibration settings.	 * Determine calibration settings.
@@ -1179,11 +1172,37 @@ struct pll_parms *gm20b_get_gpc_pll_parms(void)
return &gpc_pll_params; return &gpc_pll_params;
} }
#ifdef CONFIG_TEGRA_USE_NA_GPCPLL
static int nvgpu_fuse_can_use_na_gpcpll(struct gk20a *g, int *id)
{
return nvgpu_tegra_get_gpu_speedo_id(g, id);
}
static int nvgpu_clk_set_na_gpcpll(struct gk20a *g)
{
struct clk_gk20a *clk = &g->clk;
int speedo_id;
int err;
err = nvgpu_fuse_can_use_na_gpcpll(g, &speedo_id);
if (err == 0) {
/* NA mode is supported only at max update rate 38.4 MHz */
if (speedo_id) {
WARN_ON(clk->gpc_pll.clk_in != gpc_pll_params.max_u);
clk->gpc_pll.mode = GPC_PLL_MODE_DVFS;
gpc_pll_params.min_u = gpc_pll_params.max_u;
}
}
return err;
}
#endif
int gm20b_init_clk_setup_sw(struct gk20a *g) int gm20b_init_clk_setup_sw(struct gk20a *g)
{ {
struct clk_gk20a *clk = &g->clk; struct clk_gk20a *clk = &g->clk;
unsigned long safe_rate; unsigned long safe_rate;
int err; int err = 0;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -1236,11 +1255,10 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
*/ */
clk_config_calibration_params(g); clk_config_calibration_params(g);
#ifdef CONFIG_TEGRA_USE_NA_GPCPLL #ifdef CONFIG_TEGRA_USE_NA_GPCPLL
if (nvgpu_fuse_can_use_na_gpcpll(g)) { err = nvgpu_clk_set_na_gpcpll(g);
/* NA mode is supported only at max update rate 38.4 MHz */ if (err != 0) {
BUG_ON(clk->gpc_pll.clk_in != gpc_pll_params.max_u); nvgpu_err(g, "NA GPCPLL fuse info. not available");
clk->gpc_pll.mode = GPC_PLL_MODE_DVFS; goto fail;
gpc_pll_params.min_u = gpc_pll_params.max_u;
} }
#endif #endif

View File

@@ -1,7 +1,7 @@
/* /*
* GM20B GPC MMU * GM20B GPC MMU
* *
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -214,6 +214,7 @@ void gr_gm20b_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
g->ops.gr.ctxsw_prog.hw_get_perf_counter_control_register_stride(); g->ops.gr.ctxsw_prog.hw_get_perf_counter_control_register_stride();
} }
#ifdef CONFIG_NVGPU_TEGRA_FUSE
void gr_gm20b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index) void gr_gm20b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
{ {
nvgpu_tegra_fuse_write_bypass(g, 0x1); nvgpu_tegra_fuse_write_bypass(g, 0x1);
@@ -231,6 +232,7 @@ void gr_gm20b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
nvgpu_tegra_fuse_write_opt_gpu_tpc1_disable(g, 0x0); nvgpu_tegra_fuse_write_opt_gpu_tpc1_disable(g, 0x0);
} }
} }
#endif
static bool gr_gm20b_is_tpc_addr_shared(struct gk20a *g, u32 addr) static bool gr_gm20b_is_tpc_addr_shared(struct gk20a *g, u32 addr)
{ {

View File

@@ -1,7 +1,7 @@
/* /*
* GM20B GPC MMU * GM20B GPC MMU
* *
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -48,7 +48,9 @@ void gr_gm20b_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
u32 *num_sm_dsm_perf_ctrl_regs, u32 *num_sm_dsm_perf_ctrl_regs,
u32 **sm_dsm_perf_ctrl_regs, u32 **sm_dsm_perf_ctrl_regs,
u32 *ctrl_register_stride); u32 *ctrl_register_stride);
#ifdef CONFIG_NVGPU_TEGRA_FUSE
void gr_gm20b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index); void gr_gm20b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
#endif
bool gr_gm20b_is_tpc_addr(struct gk20a *g, u32 addr); bool gr_gm20b_is_tpc_addr(struct gk20a *g, u32 addr);
u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr); u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr);
int gr_gm20b_dump_gr_status_regs(struct gk20a *g, int gr_gm20b_dump_gr_status_regs(struct gk20a *g,

View File

@@ -1,7 +1,7 @@
/* /*
* GP10B GPU GR * GP10B GPU GR
* *
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -365,6 +365,7 @@ int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
return 0; return 0;
} }
#ifdef CONFIG_NVGPU_TEGRA_FUSE
void gr_gp10b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index) void gr_gp10b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
{ {
nvgpu_tegra_fuse_write_bypass(g, 0x1); nvgpu_tegra_fuse_write_bypass(g, 0x1);
@@ -379,6 +380,7 @@ void gr_gp10b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, 0x0); nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, 0x0);
} }
} }
#endif
static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct nvgpu_channel *fault_ch) static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct nvgpu_channel *fault_ch)
{ {

View File

@@ -1,7 +1,7 @@
/* /*
* GP10B GPU GR * GP10B GPU GR
* *
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -46,7 +46,9 @@ void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);
void gr_gp10b_set_circular_buffer_size(struct gk20a *g, u32 data); void gr_gp10b_set_circular_buffer_size(struct gk20a *g, u32 data);
int gr_gp10b_dump_gr_status_regs(struct gk20a *g, int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
struct nvgpu_debug_context *o); struct nvgpu_debug_context *o);
#ifdef CONFIG_NVGPU_TEGRA_FUSE
void gr_gp10b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index); void gr_gp10b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
#endif
int gr_gp10b_pre_process_sm_exception(struct gk20a *g, int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr, u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
bool sm_debugger_attached, struct nvgpu_channel *fault_ch, bool sm_debugger_attached, struct nvgpu_channel *fault_ch,

View File

@@ -1,7 +1,7 @@
/* /*
* GV100 GPU GR * GV100 GPU GR
* *
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -40,9 +40,11 @@
#include <nvgpu/hw/gv100/hw_gr_gv100.h> #include <nvgpu/hw/gv100/hw_gr_gv100.h>
#include <nvgpu/hw/gv100/hw_perf_gv100.h> #include <nvgpu/hw/gv100/hw_perf_gv100.h>
#ifdef CONFIG_NVGPU_TEGRA_FUSE
void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index) void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
{ {
} }
#endif
static u32 gr_gv100_get_active_fbpa_mask(struct gk20a *g) static u32 gr_gv100_get_active_fbpa_mask(struct gk20a *g)
{ {

View File

@@ -1,7 +1,7 @@
/* /*
* GV100 GPU GR * GV100 GPU GR
* *
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -31,7 +31,9 @@
struct gk20a; struct gk20a;
#ifdef CONFIG_NVGPU_TEGRA_FUSE
void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index); void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
#endif
void gr_gv100_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr, void gr_gv100_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
u32 num_fbpas, u32 num_fbpas,
u32 *priv_addr_table, u32 *t); u32 *priv_addr_table, u32 *t);

View File

@@ -413,6 +413,7 @@ int gr_gv11b_dump_gr_status_regs(struct gk20a *g,
return 0; return 0;
} }
#ifdef CONFIG_NVGPU_TEGRA_FUSE
void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index) void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
{ {
u32 fuse_val; u32 fuse_val;
@@ -437,6 +438,7 @@ void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, fuse_val); nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, fuse_val);
} }
#endif
#ifdef CONFIG_NVGPU_DEBUGGER #ifdef CONFIG_NVGPU_DEBUGGER
static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g, static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g,

View File

@@ -1,7 +1,7 @@
/* /*
* GV11B GPU GR * GV11B GPU GR
* *
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -35,7 +35,9 @@ void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);
void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data); void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data);
int gr_gv11b_dump_gr_status_regs(struct gk20a *g, int gr_gv11b_dump_gr_status_regs(struct gk20a *g,
struct nvgpu_debug_context *o); struct nvgpu_debug_context *o);
#ifdef CONFIG_NVGPU_TEGRA_FUSE
void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index); void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
#endif
int gr_gv11b_pre_process_sm_exception(struct gk20a *g, int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr, u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
bool sm_debugger_attached, struct nvgpu_channel *fault_ch, bool sm_debugger_attached, struct nvgpu_channel *fault_ch,

View File

@@ -200,7 +200,9 @@ static const struct gpu_ops gm20b_ops = {
.set_circular_buffer_size = gr_gm20b_set_circular_buffer_size, .set_circular_buffer_size = gr_gm20b_set_circular_buffer_size,
.get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs, .get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs, .get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs,
#ifdef CONFIG_NVGPU_TEGRA_FUSE
.set_gpc_tpc_mask = gr_gm20b_set_gpc_tpc_mask, .set_gpc_tpc_mask = gr_gm20b_set_gpc_tpc_mask,
#endif
.is_tpc_addr = gr_gm20b_is_tpc_addr, .is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num, .get_tpc_num = gr_gm20b_get_tpc_num,
.dump_gr_regs = gr_gm20b_dump_gr_status_regs, .dump_gr_regs = gr_gm20b_dump_gr_status_regs,

View File

@@ -248,7 +248,9 @@ static const struct gpu_ops gp10b_ops = {
.set_circular_buffer_size = gr_gp10b_set_circular_buffer_size, .set_circular_buffer_size = gr_gp10b_set_circular_buffer_size,
.get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs, .get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs, .get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs,
#ifdef CONFIG_NVGPU_TEGRA_FUSE
.set_gpc_tpc_mask = gr_gp10b_set_gpc_tpc_mask, .set_gpc_tpc_mask = gr_gp10b_set_gpc_tpc_mask,
#endif
.is_tpc_addr = gr_gm20b_is_tpc_addr, .is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num, .get_tpc_num = gr_gm20b_get_tpc_num,
.dump_gr_regs = gr_gp10b_dump_gr_status_regs, .dump_gr_regs = gr_gp10b_dump_gr_status_regs,

View File

@@ -310,7 +310,9 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
.set_circular_buffer_size = gr_gv11b_set_circular_buffer_size, .set_circular_buffer_size = gr_gv11b_set_circular_buffer_size,
.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs, .get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs, .get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
#ifdef CONFIG_NVGPU_TEGRA_FUSE
.set_gpc_tpc_mask = gr_gv11b_set_gpc_tpc_mask, .set_gpc_tpc_mask = gr_gv11b_set_gpc_tpc_mask,
#endif
.is_tpc_addr = gr_gm20b_is_tpc_addr, .is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num, .get_tpc_num = gr_gm20b_get_tpc_num,
.dump_gr_regs = gr_gv11b_dump_gr_status_regs, .dump_gr_regs = gr_gv11b_dump_gr_status_regs,

View File

@@ -333,7 +333,9 @@ static const struct gpu_ops tu104_ops = {
.set_circular_buffer_size = gr_gv11b_set_circular_buffer_size, .set_circular_buffer_size = gr_gv11b_set_circular_buffer_size,
.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs, .get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gr_tu104_get_sm_dsm_perf_ctrl_regs, .get_sm_dsm_perf_ctrl_regs = gr_tu104_get_sm_dsm_perf_ctrl_regs,
#ifdef CONFIG_NVGPU_TEGRA_FUSE
.set_gpc_tpc_mask = gr_gv100_set_gpc_tpc_mask, .set_gpc_tpc_mask = gr_gv100_set_gpc_tpc_mask,
#endif
.is_tpc_addr = gr_gm20b_is_tpc_addr, .is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num, .get_tpc_num = gr_gm20b_get_tpc_num,
.dump_gr_regs = gr_gv11b_dump_gr_status_regs, .dump_gr_regs = gr_gv11b_dump_gr_status_regs,

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -181,7 +181,9 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.set_circular_buffer_size = NULL, .set_circular_buffer_size = NULL,
.get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs, .get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs, .get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs,
#ifdef CONFIG_NVGPU_TEGRA_FUSE
.set_gpc_tpc_mask = NULL, .set_gpc_tpc_mask = NULL,
#endif
.is_tpc_addr = gr_gm20b_is_tpc_addr, .is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num, .get_tpc_num = gr_gm20b_get_tpc_num,
.dump_gr_regs = NULL, .dump_gr_regs = NULL,

View File

@@ -237,7 +237,9 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.set_circular_buffer_size = NULL, .set_circular_buffer_size = NULL,
.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs, .get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs, .get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
#ifdef CONFIG_NVGPU_TEGRA_FUSE
.set_gpc_tpc_mask = NULL, .set_gpc_tpc_mask = NULL,
#endif
.is_tpc_addr = gr_gm20b_is_tpc_addr, .is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num, .get_tpc_num = gr_gm20b_get_tpc_num,
.dump_gr_regs = NULL, .dump_gr_regs = NULL,

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -30,10 +30,14 @@
struct gk20a; struct gk20a;
#include <nvgpu/types.h> #include <nvgpu/types.h>
#include <nvgpu/errno.h>
#ifdef CONFIG_NVGPU_TEGRA_FUSE
#ifdef CONFIG_NVGPU_NON_FUSA #ifdef CONFIG_NVGPU_NON_FUSA
int nvgpu_tegra_get_gpu_speedo_id(struct gk20a *g); int nvgpu_tegra_get_gpu_speedo_id(struct gk20a *g, int *id);
#endif int nvgpu_tegra_fuse_read_reserved_calib(struct gk20a *g, u32 *val);
#endif /* CONFIG_NVGPU_NON_FUSA */
/** /**
* @brief - Write Fuse bypass register which controls fuse bypass. * @brief - Write Fuse bypass register which controls fuse bypass.
@@ -97,7 +101,50 @@ void nvgpu_tegra_fuse_write_opt_gpu_tpc1_disable(struct gk20a *g, u32 val);
*/ */
int nvgpu_tegra_fuse_read_gcplex_config_fuse(struct gk20a *g, u32 *val); int nvgpu_tegra_fuse_read_gcplex_config_fuse(struct gk20a *g, u32 *val);
#else /* CONFIG_NVGPU_TEGRA_FUSE */
#ifdef CONFIG_NVGPU_NON_FUSA #ifdef CONFIG_NVGPU_NON_FUSA
int nvgpu_tegra_fuse_read_reserved_calib(struct gk20a *g, u32 *val); static inline int nvgpu_tegra_get_gpu_speedo_id(struct gk20a *g, int *id)
#endif {
return -EINVAL;
}
static inline int nvgpu_tegra_fuse_read_reserved_calib(struct gk20a *g,
u32 *val)
{
return -EINVAL;
}
#endif /* CONFIG_NVGPU_NON_FUSA */
static inline void nvgpu_tegra_fuse_write_bypass(struct gk20a *g, u32 val)
{
}
static inline void nvgpu_tegra_fuse_write_access_sw(struct gk20a *g, u32 val)
{
}
static inline void nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(struct gk20a *g,
u32 val)
{
}
static inline void nvgpu_tegra_fuse_write_opt_gpu_tpc1_disable(struct gk20a *g,
u32 val)
{
}
static inline int nvgpu_tegra_fuse_read_gcplex_config_fuse(struct gk20a *g,
u32 *val)
{
/*
* Setting gcplex_config fuse to wpr_enabled/vpr_auto_fetch_disable
* by default that is expected on the production chip.
*/
*val = 0x4;
return 0;
}
#endif /* CONFIG_NVGPU_TEGRA_FUSE */
#endif /* NVGPU_FUSE_H */ #endif /* NVGPU_FUSE_H */

View File

@@ -1065,7 +1065,9 @@ struct gops_gr {
void (*get_ovr_perf_regs)(struct gk20a *g, void (*get_ovr_perf_regs)(struct gk20a *g,
u32 *num_ovr_perf_regs, u32 *num_ovr_perf_regs,
u32 **ovr_perf_regsr); u32 **ovr_perf_regsr);
#ifdef CONFIG_NVGPU_TEGRA_FUSE
void (*set_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index); void (*set_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index);
#endif
int (*decode_egpc_addr)(struct gk20a *g, int (*decode_egpc_addr)(struct gk20a *g,
u32 addr, enum ctxsw_addr_type *addr_type, u32 addr, enum ctxsw_addr_type *addr_type,
u32 *gpc_num, u32 *tpc_num, u32 *gpc_num, u32 *tpc_num,

View File

@@ -26,6 +26,7 @@
struct gk20a; struct gk20a;
#ifdef CONFIG_NVGPU_TEGRA_FUSE
/** /**
* @brief Check whether running on silicon or not. * @brief Check whether running on silicon or not.
* *
@@ -114,4 +115,42 @@ bool nvgpu_is_soc_t194_a01(struct gk20a *g);
*/ */
int nvgpu_init_soc_vars(struct gk20a *g); int nvgpu_init_soc_vars(struct gk20a *g);
#else /* CONFIG_NVGPU_TEGRA_FUSE */
static inline bool nvgpu_platform_is_silicon(struct gk20a *g)
{
return true;
}
static inline bool nvgpu_platform_is_simulation(struct gk20a *g)
{
return false;
}
static inline bool nvgpu_platform_is_fpga(struct gk20a *g)
{
return false;
}
static inline bool nvgpu_is_hypervisor_mode(struct gk20a *g)
{
return false;
}
static inline bool nvgpu_is_bpmp_running(struct gk20a *g)
{
return false;
}
static inline bool nvgpu_is_soc_t194_a01(struct gk20a *g)
{
return false;
}
static inline int nvgpu_init_soc_vars(struct gk20a *g)
{
return 0;
}
#endif /* CONFIG_NVGPU_TEGRA_FUSE */
#endif /* NVGPU_SOC_H */ #endif /* NVGPU_SOC_H */

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -15,9 +15,11 @@
#include <nvgpu/fuse.h> #include <nvgpu/fuse.h>
int nvgpu_tegra_get_gpu_speedo_id(struct gk20a *g) int nvgpu_tegra_get_gpu_speedo_id(struct gk20a *g, int *id)
{ {
return tegra_sku_info.gpu_speedo_id; *id = tegra_sku_info.gpu_speedo_id;
return 0;
} }
/* /*

View File

@@ -31,11 +31,13 @@
#include <linux/of_gpio.h> #include <linux/of_gpio.h>
#include <uapi/linux/nvgpu.h> #include <uapi/linux/nvgpu.h>
#ifdef CONFIG_NVGPU_TEGRA_FUSE
#include <dt-bindings/soc/gm20b-fuse.h> #include <dt-bindings/soc/gm20b-fuse.h>
#include <dt-bindings/soc/gp10b-fuse.h> #include <dt-bindings/soc/gp10b-fuse.h>
#include <dt-bindings/soc/gv11b-fuse.h> #include <dt-bindings/soc/gv11b-fuse.h>
#include <soc/tegra/fuse.h> #include <soc/tegra/fuse.h>
#endif /* CONFIG_NVGPU_TEGRA_FUSE */
#include <nvgpu/hal_init.h> #include <nvgpu/hal_init.h>
#include <nvgpu/dma.h> #include <nvgpu/dma.h>
@@ -1134,9 +1136,12 @@ static int gk20a_pm_railgate(struct device *dev)
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
g->pstats.last_rail_gate_complete = jiffies; g->pstats.last_rail_gate_complete = jiffies;
#endif #endif
#ifdef CONFIG_NVGPU_TEGRA_FUSE
ret = tegra_fuse_clock_disable(); ret = tegra_fuse_clock_disable();
if (ret) if (ret)
nvgpu_err(g, "failed to disable tegra fuse clock, err=%d", ret); nvgpu_err(g, "failed to disable tegra fuse clock, err=%d", ret);
#endif
return ret; return ret;
} }
@@ -1151,11 +1156,14 @@ static int gk20a_pm_unrailgate(struct device *dev)
if (!platform->unrailgate) if (!platform->unrailgate)
return 0; return 0;
#ifdef CONFIG_NVGPU_TEGRA_FUSE
ret = tegra_fuse_clock_enable(); ret = tegra_fuse_clock_enable();
if (ret) { if (ret) {
nvgpu_err(g, "failed to enable tegra fuse clock, err=%d", ret); nvgpu_err(g, "failed to enable tegra fuse clock, err=%d", ret);
return ret; return ret;
} }
#endif
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
g->pstats.last_rail_ungate_start = jiffies; g->pstats.last_rail_ungate_start = jiffies;
if (g->pstats.railgating_cycle_count >= 1) if (g->pstats.railgating_cycle_count >= 1)
@@ -1505,6 +1513,7 @@ static inline void set_gk20a(struct platform_device *pdev, struct gk20a *gk20a)
static int nvgpu_read_fuse_overrides(struct gk20a *g) static int nvgpu_read_fuse_overrides(struct gk20a *g)
{ {
#ifdef CONFIG_NVGPU_TEGRA_FUSE
struct device_node *np = nvgpu_get_node(g); struct device_node *np = nvgpu_get_node(g);
struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g)); struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));
u32 *fuses; u32 *fuses;
@@ -1545,7 +1554,7 @@ static int nvgpu_read_fuse_overrides(struct gk20a *g)
} }
nvgpu_kfree(g, fuses); nvgpu_kfree(g, fuses);
#endif
return 0; return 0;
} }

View File

@@ -36,7 +36,9 @@
#endif #endif
#include <linux/platform/tegra/tegra_emc.h> #include <linux/platform/tegra/tegra_emc.h>
#ifdef CONFIG_NVGPU_TEGRA_FUSE
#include <soc/tegra/chip-id.h> #include <soc/tegra/chip-id.h>
#endif
#include <nvgpu/kmem.h> #include <nvgpu/kmem.h>
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
@@ -830,8 +832,10 @@ static int gk20a_tegra_probe(struct device *dev)
dev_warn(dev, "board does not support scaling"); dev_warn(dev, "board does not support scaling");
} }
platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_B1; platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_B1;
#ifdef CONFIG_NVGPU_TEGRA_FUSE
if (tegra_chip_get_revision() > TEGRA210_REVISION_A04p) if (tegra_chip_get_revision() > TEGRA210_REVISION_A04p)
platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_C1; platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_C1;
#endif
} }
if (platform->platform_chip_id == TEGRA_132) if (platform->platform_chip_id == TEGRA_132)

View File

@@ -18,7 +18,6 @@
#include <linux/devfreq.h> #include <linux/devfreq.h>
#include <linux/export.h> #include <linux/export.h>
#include <soc/tegra/chip-id.h>
#include <linux/pm_qos.h> #include <linux/pm_qos.h>
#include <governor.h> #include <governor.h>

View File

@@ -890,6 +890,7 @@ static DEVICE_ATTR(tpc_pg_mask, ROOTRW, tpc_pg_mask_read, tpc_pg_mask_store);
static ssize_t tpc_fs_mask_store(struct device *dev, static ssize_t tpc_fs_mask_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count) struct device_attribute *attr, const char *buf, size_t count)
{ {
#ifdef CONFIG_NVGPU_TEGRA_FUSE
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(dev);
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g); struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image = struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image =
@@ -924,6 +925,9 @@ static ssize_t tpc_fs_mask_store(struct device *dev,
} }
return count; return count;
#else
return -ENODEV;
#endif
} }
static ssize_t tpc_fs_mask_read(struct device *dev, static ssize_t tpc_fs_mask_read(struct device *dev,

View File

@@ -22,7 +22,9 @@
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/pm_qos.h> #include <linux/pm_qos.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#ifdef CONFIG_NVGPU_TEGRA_FUSE
#include <soc/tegra/chip-id.h> #include <soc/tegra/chip-id.h>
#endif
#include <nvgpu/kmem.h> #include <nvgpu/kmem.h>
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
@@ -357,8 +359,10 @@ int vgpu_probe(struct platform_device *pdev)
} }
l->dev = dev; l->dev = dev;
#ifdef CONFIG_NVGPU_TEGRA_FUSE
if (tegra_platform_is_vdk()) if (tegra_platform_is_vdk())
nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true); nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);
#endif
gk20a->is_virtual = true; gk20a->is_virtual = true;

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -27,7 +27,7 @@
#include <nvgpu/posix/soc_fuse.h> #include <nvgpu/posix/soc_fuse.h>
#ifdef CONFIG_NVGPU_NON_FUSA #ifdef CONFIG_NVGPU_NON_FUSA
int nvgpu_tegra_get_gpu_speedo_id(struct gk20a *g) int nvgpu_tegra_get_gpu_speedo_id(struct gk20a *g, int *id)
{ {
return 0; return 0;
} }