gpu: nvgpu: rename feature Make and C flags

Name the Make and C flag variables consistently with the syntax:
CONFIG_NVGPU_<feature name>

s/NVGPU_DEBUGGER/CONFIG_NVGPU_DEBUGGER
s/NVGPU_CYCLESTATS/CONFIG_NVGPU_CYCLESTATS
s/NVGPU_USERD/CONFIG_NVGPU_USERD
s/NVGPU_CHANNEL_WDT/CONFIG_NVGPU_CHANNEL_WDT
s/NVGPU_FEATURE_CE/CONFIG_NVGPU_CE
s/NVGPU_GRAPHICS/CONFIG_NVGPU_GRAPHICS
s/NVGPU_ENGINE/CONFIG_NVGPU_FIFO_ENGINE_ACTIVITY
s/NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING/CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
s/NVGPU_FEATURE_CHANNEL_TSG_CONTROL/CONFIG_NVGPU_CHANNEL_TSG_CONTROL
s/NVGPU_FEATURE_ENGINE_QUEUE/CONFIG_NVGPU_ENGINE_QUEUE
s/GK20A_CTXSW_TRACE/CONFIG_NVGPU_FECS_TRACE
s/IGPU_VIRT_SUPPORT/CONFIG_NVGPU_IGPU_VIRT
s/CONFIG_TEGRA_NVLINK/CONFIG_NVGPU_NVLINK
s/NVGPU_DGPU_SUPPORT/CONFIG_NVGPU_DGPU
s/NVGPU_VPR/CONFIG_NVGPU_VPR
s/NVGPU_REPLAYABLE_FAULT/CONFIG_NVGPU_REPLAYABLE_FAULT
s/NVGPU_FEATURE_LS_PMU/CONFIG_NVGPU_LS_PMU
s/NVGPU_FEATURE_POWER_PG/CONFIG_NVGPU_POWER_PG
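
As a minimal sketch of the convention in practice (the Make fragment mirrors
the hunks below; the guarded helper function is hypothetical, added only for
illustration):

/*
 * Make side: defining a feature also exports the matching C flag, e.g.
 *
 *   CONFIG_NVGPU_VPR := 1
 *   NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_VPR
 *
 * C side: feature code is then guarded by the same CONFIG_NVGPU_* name.
 */
#ifdef CONFIG_NVGPU_VPR
static bool channel_setup_bind_wants_vpr(u32 flags)
{
        /* Same test nvgpu_channel_setup_bind() performs in the hunks below. */
        return (flags & NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR) != 0U;
}
#endif /* CONFIG_NVGPU_VPR */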

JIRA NVGPU-3624

Change-Id: I8b2492b085095fc6ee95926d8f8c3929702a1773
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2130290
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:     Sagar Kamble
Date:       2019-06-09 15:31:49 +05:30
Committed:  mobile promotions
Parent:     cc3b0467db
Commit:     3f08cf8a48

151 changed files with 706 additions and 699 deletions

--- next file ---

@@ -58,19 +58,19 @@ config NVGPU_TRACK_MEM_USAGE
           to other OSes which do not have Linux' kmem_leak.

-config GK20A_CYCLE_STATS
-        bool "Support GK20A GPU CYCLE STATS"
+config NVGPU_CYCLESTATS
+        bool "Support GPU CYCLE STATS"
         depends on GK20A
         default y
         help
           Say Y here to enable the cycle stats debugging features.

-config GK20A_CTXSW_TRACE
-        bool "Support GK20A Context Switch tracing"
+config NVGPU_FECS_TRACE
+        bool "Support NVGPU FECS Context Switch tracing"
         depends on GK20A
         default y
         help
-          Enable support for the GK20A Context Switch Tracing. In this mode,
+          Enable support for the NVGPU Context Switch Tracing. In this mode,
           FECS collects timestamps for contexts loaded on GR engine. This
           allows tracking context switches on GR engine, as well as
           identifying processes that submitted work.

@@ -168,7 +168,7 @@ config NVGPU_DEBUGGER
         help
           Support for debugger APIs

-config NVGPU_FEATURE_LS_PMU
+config NVGPU_LS_PMU
         bool "LS PMU support"
         depends on GK20A
         default y

--- next file ---

@@ -18,23 +18,27 @@ endif
 ccflags-y += -D__NVGPU_PREVENT_UNTRUSTED_SPECULATION

 ifeq ($(CONFIG_NVGPU_DEBUGGER),y)
-ccflags-y += -DNVGPU_DEBUGGER
+ccflags-y += -DCONFIG_NVGPU_DEBUGGER
 endif

-ccflags-y += -DNVGPU_FEATURE_ACR_LEGACY
-ccflags-y += -DNVGPU_FEATURE_ENGINE_QUEUE
-ccflags-y += -DNVGPU_ENGINE
-ccflags-y += -DNVGPU_USERD
-ccflags-y += -DNVGPU_CHANNEL_WDT
-ccflags-y += -DNVGPU_FEATURE_LS_PMU
-ccflags-y += -DNVGPU_DGPU_SUPPORT
-ccflags-y += -DNVGPU_VPR
-ccflags-y += -DNVGPU_REPLAYABLE_FAULT
-ccflags-y += -DNVGPU_GRAPHICS
-ccflags-y += -DNVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
-ccflags-y += -DNVGPU_FEATURE_CHANNEL_TSG_CONTROL
-ccflags-y += -DNVGPU_FEATURE_POWER_PG
-ccflags-y += -DNVGPU_FEATURE_CE
+ifeq ($(CONFIG_TEGRA_NVLINK),y)
+ccflags-y += -DCONFIG_NVGPU_NVLINK
+endif
+
+ccflags-y += -DCONFIG_NVGPU_ACR_LEGACY
+ccflags-y += -DCONFIG_NVGPU_ENGINE_QUEUE
+ccflags-y += -DCONFIG_NVGPU_FIFO_ENGINE_ACTIVITY
+ccflags-y += -DCONFIG_NVGPU_USERD
+ccflags-y += -DCONFIG_NVGPU_CHANNEL_WDT
+ccflags-y += -DCONFIG_NVGPU_LS_PMU
+ccflags-y += -DCONFIG_NVGPU_DGPU
+ccflags-y += -DCONFIG_NVGPU_VPR
+ccflags-y += -DCONFIG_NVGPU_REPLAYABLE_FAULT
+ccflags-y += -DCONFIG_NVGPU_GRAPHICS
+ccflags-y += -DCONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
+ccflags-y += -DCONFIG_NVGPU_CHANNEL_TSG_CONTROL
+ccflags-y += -DCONFIG_NVGPU_POWER_PG
+ccflags-y += -DCONFIG_NVGPU_CE

 obj-$(CONFIG_GK20A) := nvgpu.o

@@ -438,13 +442,13 @@ nvgpu-$(CONFIG_DEBUG_FS) += \
         os/linux/debug_kmem.o
 endif

-nvgpu-$(CONFIG_GK20A_CTXSW_TRACE) += \
+nvgpu-$(CONFIG_NVGPU_FECS_TRACE) += \
         common/gr/fecs_trace.o \
         hal/gr/fecs_trace/fecs_trace_gm20b.o \
         hal/gr/fecs_trace/fecs_trace_gv11b.o \
         os/linux/fecs_trace_linux.o

-ifeq ($(CONFIG_GK20A_CTXSW_TRACE),y)
+ifeq ($(CONFIG_NVGPU_FECS_TRACE),y)
 nvgpu-$(CONFIG_DEBUG_FS) += \
         os/linux/debug_fecs_trace.o
 endif

@@ -487,7 +491,7 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
         os/linux/vgpu/vgpu_linux.o \
         os/linux/vgpu/gv11b/platform_gv11b_vgpu_tegra.o

-ifeq ($(CONFIG_GK20A_CTXSW_TRACE),y)
+ifeq ($(CONFIG_NVGPU_FECS_TRACE),y)
 nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
         os/linux/vgpu/fecs_trace_vgpu_linux.o
 endif

@@ -610,11 +614,11 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
         common/vgpu/gv11b/vgpu_hal_gv11b.o \
         common/vgpu/gv11b/vgpu_tsg_gv11b.o \

-nvgpu-$(CONFIG_GK20A_CYCLE_STATS) += \
+nvgpu-$(CONFIG_NVGPU_CYCLESTATS) += \
         common/perf/cyclestats_snapshot.o \
         common/cyclestats/cyclestats.o

 ifeq ($(CONFIG_TEGRA_GR_VIRTUALIZATION),y)
-nvgpu-$(CONFIG_GK20A_CYCLE_STATS) += \
+nvgpu-$(CONFIG_NVGPU_CYCLESTATS) += \
         common/vgpu/perf/cyclestats_snapshot_vgpu.o
 endif

--- next file ---

@@ -56,73 +56,73 @@ NVGPU_COMMON_CFLAGS += \
         -DCONFIG_PCI_MSI

 # Enable debugger APIs for safety build until devctl whitelisting is done
-NVGPU_DEBUGGER := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_DEBUGGER
+CONFIG_NVGPU_DEBUGGER := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_DEBUGGER

 # Enable cyclestats APIs for safety build until complete debugger support is enabled
-NVGPU_CYCLESTATS_SUPPORT := 1
-NVGPU_COMMON_CFLAGS += -DCONFIG_GK20A_CYCLE_STATS
+CONFIG_NVGPU_CYCLESTATS := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_CYCLESTATS

 # Enable USERD for safety build until we switch to user mode submits only
-NVGPU_COMMON_CFLAGS += -DNVGPU_USERD
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_USERD

 # Enable Channel WDT for safety build until we switch to user mode submits only
-NVGPU_COMMON_CFLAGS += -DNVGPU_CHANNEL_WDT
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_CHANNEL_WDT

 # Enable CE support for safety build until we remove Vidmem clear support.
-NVGPU_FEATURE_CE := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_CE
+CONFIG_NVGPU_CE := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_CE

 # Enable Grpahics support for safety build until we switch to compute only
-NVGPU_GRAPHICS := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_GRAPHICS
+CONFIG_NVGPU_GRAPHICS := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_GRAPHICS

-NVGPU_COMMON_CFLAGS += -DNVGPU_ENGINE
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_FIFO_ENGINE_ACTIVITY

 # Enable Channel/TSG Scheduling for safety build until devctl whitelisting is done
-NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_CHANNEL_TSG_SCHEDULING

 # Enable Channel/TSG Control for safety build until devctl whitelisting is done
-NVGPU_FEATURE_CHANNEL_TSG_CONTROL := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_CHANNEL_TSG_CONTROL
+CONFIG_NVGPU_CHANNEL_TSG_CONTROL := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_CHANNEL_TSG_CONTROL

 #
 # Flags enabled for only the regular build profile.
 #
 ifneq ($(profile),safety)
 # ACR feature to enable old tegra ACR profile support
-NVGPU_FEATURE_ACR_LEGACY := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_ACR_LEGACY
+CONFIG_NVGPU_ACR_LEGACY := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_ACR_LEGACY

-NVGPU_FEATURE_ENGINE_QUEUE := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_ENGINE_QUEUE
+CONFIG_NVGPU_ENGINE_QUEUE := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_ENGINE_QUEUE

-NVGPU_FECS_TRACE_SUPPORT := 1
-NVGPU_COMMON_CFLAGS += -DCONFIG_GK20A_CTXSW_TRACE
+CONFIG_NVGPU_FECS_TRACE := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_FECS_TRACE

-IGPU_VIRT_SUPPORT := 1
-NVGPU_COMMON_CFLAGS += -DIGPU_VIRT_SUPPORT
+CONFIG_NVGPU_IGPU_VIRT := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_IGPU_VIRT

 # Enable nvlink support for normal build.
-NVGPU_NVLINK_SUPPORT := 1
-NVGPU_COMMON_CFLAGS += -DCONFIG_TEGRA_NVLINK
+CONFIG_NVGPU_NVLINK := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_NVLINK

 # Enable dgpu support for normal build.
-NVGPU_DGPU_SUPPORT := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_DGPU_SUPPORT
+CONFIG_NVGPU_DGPU := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_DGPU

-NVGPU_VPR := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_VPR
+CONFIG_NVGPU_VPR := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_VPR

-NVGPU_REPLAYABLE_FAULT := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_REPLAYABLE_FAULT
+CONFIG_NVGPU_REPLAYABLE_FAULT := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_REPLAYABLE_FAULT

 # Enable LS PMU support for normal build
-NVGPU_FEATURE_LS_PMU := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_LS_PMU
+CONFIG_NVGPU_LS_PMU := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_LS_PMU

 # Enable elpg support for normal build
-NVGPU_FEATURE_POWER_PG := 1
-NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_POWER_PG
+CONFIG_NVGPU_POWER_PG := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_POWER_PG
 endif
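
The practical effect of the ifneq ($(profile),safety) block above: in the
safety profile these flags stay undefined, so code guarded by them compiles
out entirely. A hedged sketch (the wrapper function is hypothetical; the two
callees appear in the blob-allocation hunks further below):

static int acr_alloc_blob_space_example(struct gk20a *g, size_t size,
                struct nvgpu_mem *mem)
{
#ifdef CONFIG_NVGPU_DGPU
        /* Regular profile: the vidmem path is compiled in. */
        return nvgpu_acr_alloc_blob_space_vid(g, size, mem);
#else
        /* Safety profile: the flag is never defined, so only sysmem remains. */
        return nvgpu_acr_alloc_blob_space_sys(g, size, mem);
#endif
}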

--- next file ---

@@ -45,7 +45,7 @@ srcs += os/posix/nvgpu.c \
         os/posix/posix-vidmem.c \
         os/posix/fecs_trace_posix.c

-ifdef NVGPU_NVLINK_SUPPORT
+ifdef CONFIG_NVGPU_NVLINK
 srcs += os/posix/posix-nvlink.c
 endif
 endif

@@ -274,21 +274,21 @@ srcs += common/utils/enabled.c \
         hal/top/top_gp106.c \
         hal/top/top_gv11b.c

-ifeq ($(NVGPU_FEATURE_ACR_LEGACY),1)
+ifeq ($(CONFIG_NVGPU_ACR_LEGACY),1)
 srcs += \
         common/acr/acr_blob_construct_v0.c \
         common/acr/acr_sw_gm20b.c \
         common/acr/acr_sw_gp10b.c
 endif

-ifeq ($(NVGPU_FEATURE_ENGINE_QUEUE),1)
+ifeq ($(CONFIG_NVGPU_ENGINE_QUEUE),1)
 srcs += common/engine_queues/engine_mem_queue.c \
         common/engine_queues/engine_dmem_queue.c \
         common/engine_queues/engine_emem_queue.c \
         common/engine_queues/engine_fb_queue.c
 endif

-ifeq ($(NVGPU_GRAPHICS),1)
+ifeq ($(CONFIG_NVGPU_GRAPHICS),1)
 srcs += common/gr/zbc.c \
         common/gr/zcull.c \
         hal/gr/zbc/zbc_gm20b.c \

@@ -298,7 +298,7 @@ srcs += common/gr/zbc.c \
         hal/gr/zcull/zcull_gv11b.c
 endif

-ifeq ($(NVGPU_DEBUGGER),1)
+ifeq ($(CONFIG_NVGPU_DEBUGGER),1)
 srcs += common/debugger.c \
         common/regops/regops.c \
         common/gr/hwpm_map.c \

@@ -319,23 +319,23 @@ srcs += common/debugger.c \
         hal/gr/gr/gr_tu104.c
 endif

-ifeq ($(NVGPU_FEATURE_CE),1)
+ifeq ($(CONFIG_NVGPU_CE),1)
 srcs += common/ce/ce.c
 endif

-ifeq ($(NVGPU_FECS_TRACE_SUPPORT),1)
+ifeq ($(CONFIG_NVGPU_FECS_TRACE),1)
 srcs += common/gr/fecs_trace.c \
         hal/gr/fecs_trace/fecs_trace_gm20b.c \
         hal/gr/fecs_trace/fecs_trace_gv11b.c
-ifeq ($(IGPU_VIRT_SUPPORT), 1)
+ifeq ($(CONFIG_NVGPU_IGPU_VIRT),1)
 srcs += common/vgpu/gr/fecs_trace_vgpu.c
 endif
 endif

-ifeq ($(NVGPU_CYCLESTATS_SUPPORT),1)
+ifeq ($(CONFIG_NVGPU_CYCLESTATS),1)
 srcs += common/perf/cyclestats_snapshot.c \
         common/cyclestats/cyclestats.c
-ifeq ($(IGPU_VIRT_SUPPORT), 1)
+ifeq ($(CONFIG_NVGPU_IGPU_VIRT),1)
 srcs += common/vgpu/perf/cyclestats_snapshot_vgpu.c
 endif
 endif

@@ -348,7 +348,7 @@ endif
 srcs += hal/gr/config/gr_config_gm20b.c \
         hal/gr/config/gr_config_gv100.c

-ifeq ($(NVGPU_FEATURE_LS_PMU),1)
+ifeq ($(CONFIG_NVGPU_LS_PMU),1)
 # Add LS PMU files which are required for normal build
 srcs += \
         common/pmu/boardobj/boardobj.c \

@@ -420,11 +420,11 @@ srcs += \
         hal/pmu/pmu_tu104.c
 endif

-ifeq ($(NVGPU_FEATURE_POWER_PG), 1)
+ifeq ($(CONFIG_NVGPU_POWER_PG),1)
 srcs += common/power_features/pg/pg.c
 endif

-ifeq ($(IGPU_VIRT_SUPPORT), 1)
+ifeq ($(CONFIG_NVGPU_IGPU_VIRT),1)
 srcs += common/vgpu/init/init_vgpu.c \
         common/vgpu/init/init_hal_vgpu.c \
         common/vgpu/ivc/comm_vgpu.c \

@@ -454,7 +454,7 @@ srcs += common/vgpu/init/init_vgpu.c \
         common/vgpu/gp10b/vgpu_hal_gp10b.c
 endif

-ifeq ($(NVGPU_NVLINK_SUPPORT), 1)
+ifeq ($(CONFIG_NVGPU_NVLINK),1)
 srcs += common/vbios/nvlink_bios.c \
         common/nvlink/probe.c \
         common/nvlink/init/device_reginit.c \

@@ -471,7 +471,7 @@ srcs += common/vbios/nvlink_bios.c \
         hal/nvlink/link_mode_transitions_tu104.c
 endif

-ifeq ($(NVGPU_DGPU_SUPPORT), 1)
+ifeq ($(CONFIG_NVGPU_DGPU),1)
 srcs += common/sec2/sec2.c \
         common/sec2/sec2_allocator.c \
         common/sec2/sec2_lsfm.c \
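
Note the pattern these hunks establish: the same CONFIG_NVGPU_* name gates a
feature twice, once in Make (whether a file is compiled at all) and once in C
(whether declarations and call sites in shared files survive preprocessing).
Both sides must use the same name or the feature half-builds. A sketch of the
paired pattern, mirroring one of the header hunks further below (comments are
ours):

/*
 * Make side (hunk above):
 *
 *   ifeq ($(CONFIG_NVGPU_DGPU),1)
 *   srcs += common/sec2/sec2.c ...
 *   endif
 *
 * C side, in a shared header: the prototype exists only when the file
 * that defines it is also being built.
 */
#ifdef CONFIG_NVGPU_DGPU
int nvgpu_acr_alloc_blob_space_vid(struct gk20a *g, size_t size,
                struct nvgpu_mem *mem);
#endif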

--- next file ---

@@ -27,12 +27,12 @@
 #include <nvgpu/acr.h>

 #include "acr_priv.h"
-#ifdef NVGPU_FEATURE_ACR_LEGACY
+#ifdef CONFIG_NVGPU_ACR_LEGACY
 #include "acr_sw_gm20b.h"
 #include "acr_sw_gp10b.h"
 #endif
 #include "acr_sw_gv11b.h"
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 #include "acr_sw_gv100.h"
 #include "acr_sw_tu104.h"
 #endif

@@ -133,7 +133,7 @@ int nvgpu_acr_init(struct gk20a *g, struct nvgpu_acr **acr)
         }

         switch (ver) {
-#ifdef NVGPU_FEATURE_ACR_LEGACY
+#ifdef CONFIG_NVGPU_ACR_LEGACY
         case GK20A_GPUID_GM20B:
         case GK20A_GPUID_GM20B_B:
                 nvgpu_gm20b_acr_sw_init(g, *acr);

@@ -145,7 +145,7 @@ int nvgpu_acr_init(struct gk20a *g, struct nvgpu_acr **acr)
         case NVGPU_GPUID_GV11B:
                 nvgpu_gv11b_acr_sw_init(g, *acr);
                 break;
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
         case NVGPU_GPUID_GV100:
                 nvgpu_gv100_acr_sw_init(g, *acr);
                 break;

--- next file ---

@@ -34,7 +34,7 @@ int nvgpu_acr_alloc_blob_space_sys(struct gk20a *g, size_t size,
         return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_PHYSICALLY_ADDRESSED,
                 size, mem);
 }

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 int nvgpu_acr_alloc_blob_space_vid(struct gk20a *g, size_t size,
         struct nvgpu_mem *mem)
 {

--- next file ---

@@ -28,7 +28,7 @@ struct nvgpu_mem;
 int nvgpu_acr_alloc_blob_space_sys(struct gk20a *g, size_t size,
         struct nvgpu_mem *mem);

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 int nvgpu_acr_alloc_blob_space_vid(struct gk20a *g, size_t size,
         struct nvgpu_mem *mem);
 #endif

--- next file ---

@@ -35,7 +35,7 @@
 #include "acr_wpr.h"
 #include "acr_priv.h"

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 int nvgpu_acr_lsf_pmu_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
 {
         struct lsf_ucode_desc *lsf_desc;

@@ -557,7 +557,7 @@ static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
         /* Update the argc/argv members*/
         ldr_cfg->argc = 1;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         nvgpu_pmu_fw_get_cmd_line_args_offset(g, &ldr_cfg->argv);
 #endif
         *p_bl_gen_desc_size = (u32)sizeof(struct loader_config);

--- next file ---

@@ -39,7 +39,7 @@ static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
         dma_addr->hi |= u64_hi32(value);
 }

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 int nvgpu_acr_lsf_pmu_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
 {
         struct lsf_ucode_desc_v1 *lsf_desc;

--- next file ---

@@ -370,7 +370,7 @@ err_release_acr_fw:
         acr_desc->acr_fw = NULL;
         return status;
 }

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 int nvgpu_acr_self_hs_load_bootstrap(struct gk20a *g, struct nvgpu_falcon *flcn,
         struct nvgpu_firmware *hs_fw, u32 timeout)
 {

--- next file ---

@@ -24,7 +24,7 @@
 #define ACR_H

 #include "acr_bootstrap.h"
-#ifdef NVGPU_FEATURE_ACR_LEGACY
+#ifdef CONFIG_NVGPU_ACR_LEGACY
 #include "acr_blob_construct_v0.h"
 #endif
 #include "acr_blob_construct_v1.h"

--- next file ---

@@ -116,7 +116,7 @@ static u32 gm20b_acr_lsf_pmu(struct gk20a *g,
         lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
         lsf->is_lazy_bootstrap = false;
         lsf->is_priv_load = false;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details_v0;
         lsf->get_cmd_line_args_offset = nvgpu_pmu_fw_get_cmd_line_args_offset;
 #endif

--- next file ---

@@ -94,7 +94,7 @@ static u32 gv100_acr_lsf_pmu(struct gk20a *g,
         lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
         lsf->is_lazy_bootstrap = false;
         lsf->is_priv_load = false;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details_v1;
         lsf->get_cmd_line_args_offset = nvgpu_pmu_fw_get_cmd_line_args_offset;
 #endif

--- next file ---

@@ -24,7 +24,7 @@
 #include <nvgpu/firmware.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/bug.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/fw.h>
 #endif

@@ -118,7 +118,7 @@ void gv11b_acr_fill_bl_dmem_desc(struct gk20a *g,
 }

 /* LSF static config functions */
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 static u32 gv11b_acr_lsf_pmu(struct gk20a *g,
         struct acr_lsf_config *lsf)
 {

@@ -180,7 +180,7 @@ static u32 gv11b_acr_lsf_conifg(struct gk20a *g,
         struct nvgpu_acr *acr)
 {
         u32 lsf_enable_mask = 0;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         lsf_enable_mask |= gv11b_acr_lsf_pmu(g, &acr->lsf[FALCON_ID_PMU]);
 #endif
         lsf_enable_mask |= gv11b_acr_lsf_fecs(g, &acr->lsf[FALCON_ID_FECS]);

--- next file ---

@@ -33,7 +33,7 @@ void nvgpu_acr_wpr_info_sys(struct gk20a *g, struct wpr_carveout_info *inf)
 {
         g->ops.fb.read_wpr_info(g, &inf->wpr_base, &inf->size);
 }

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 void nvgpu_acr_wpr_info_vid(struct gk20a *g, struct wpr_carveout_info *inf)
 {
         inf->wpr_base = g->mm.vidmem.bootstrap_base;

--- next file ---

@@ -33,7 +33,7 @@ struct wpr_carveout_info {
 };

 void nvgpu_acr_wpr_info_sys(struct gk20a *g, struct wpr_carveout_info *inf);
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 void nvgpu_acr_wpr_info_vid(struct gk20a *g, struct wpr_carveout_info *inf);
 #endif

--- next file ---

@@ -536,7 +536,7 @@ u32 nvgpu_ce_create_context(struct gk20a *g,
                 goto end;
         }

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
         ce_ctx->ch->wdt.enabled = false;
 #endif

@@ -576,7 +576,7 @@ u32 nvgpu_ce_create_context(struct gk20a *g,
         (void) memset(ce_ctx->cmd_buf_mem.cpu_va, 0x00,
                 ce_ctx->cmd_buf_mem.size);

-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
         /* -1 means default channel timeslice value */
         if (timeslice != -1) {
                 err = g->ops.tsg.set_timeslice(ce_ctx->tsg, timeslice);

--- next file ---

@@ -34,7 +34,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/worker.h>
 #include <nvgpu/gk20a.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/perf_pstate.h>
 #include <nvgpu/pmu/volt.h>
 #include <nvgpu/pmu/clk/clk.h>

@@ -107,7 +107,7 @@ void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm)
         nvgpu_clk_arb_queue_notification(g, &arb->notification_queue, alarm);
 }

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb)
 {
         struct gk20a *g = arb->g;

@@ -383,7 +383,7 @@ static void nvgpu_clk_arb_worker_poll_wakeup_process_item(
         clk_arb_dbg(g, " ");

         if (clk_arb_work_item->item_type == CLK_ARB_WORK_UPDATE_VF_TABLE) {
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
                 nvgpu_clk_arb_run_vf_table_cb(clk_arb_work_item->arb);
 #endif
         } else if (clk_arb_work_item->item_type == CLK_ARB_WORK_UPDATE_ARB) {

@@ -614,7 +614,7 @@ void nvgpu_clk_arb_release_session(struct gk20a *g,
                 nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item);
         }
 }

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 void nvgpu_clk_arb_schedule_vf_table_update(struct gk20a *g)
 {
         struct nvgpu_clk_arb *arb = g->clk_arb;

--- next file ---

@@ -411,7 +411,7 @@ int nvgpu_engine_mem_queue_init(struct nvgpu_engine_mem_queue **queue_p,
         case QUEUE_TYPE_DMEM:
                 engine_dmem_queue_init(queue);
                 break;
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
         case QUEUE_TYPE_EMEM:
                 engine_emem_queue_init(queue);
                 break;

--- next file ---

@@ -24,7 +24,7 @@
 #include <nvgpu/falcon.h>

 #include "falcon_sw_gk20a.h"
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 #include "falcon_sw_gv100.h"
 #include "falcon_sw_tu104.h"
 #endif

@@ -692,7 +692,7 @@ static int falcon_sw_init(struct gk20a *g, struct nvgpu_falcon *flcn)
         case NVGPU_GPUID_GV11B:
                 gk20a_falcon_sw_init(flcn);
                 break;
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
         case NVGPU_GPUID_GV100:
                 gv100_falcon_sw_init(flcn);
                 break;

--- next file ---

@@ -291,7 +291,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
         struct dbg_session_data *session_data, *tmp_s;
         struct dbg_session_channel_data *ch_data, *tmp;
         int err;
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
         bool deferred_reset_pending;
 #endif

@@ -379,7 +379,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
                         __func__, "references");
         }

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
         /* if engine reset was deferred, perform it now */
         nvgpu_mutex_acquire(&f->deferred_reset_mutex);
         deferred_reset_pending = g->fifo.deferred_reset_pending;

@@ -403,7 +403,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
         nvgpu_log_info(g, "freeing bound channel context, timeout=%ld",
                         timeout);

-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
         if (g->ops.gr.fecs_trace.unbind_channel && !ch->vpr)
                 g->ops.gr.fecs_trace.unbind_channel(g, &ch->inst_block);
 #endif

@@ -717,7 +717,7 @@ struct nvgpu_channel *gk20a_open_new_channel(struct gk20a *g,
         ch->ctxsw_timeout_debug_dump = true;
         ch->unserviceable = false;

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
         /* init kernel watchdog timeout */
         ch->wdt.enabled = true;
         ch->wdt.limit_ms = g->ch_wdt_init_limit_ms;

@@ -1211,7 +1211,7 @@ static int nvgpu_channel_setup_ramfc(struct nvgpu_channel *c,
         u64 pbdma_acquire_timeout = 0ULL;
         struct gk20a *g = c->g;

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
         if (c->wdt.enabled && nvgpu_is_timeouts_enabled(c->g)) {
                 pbdma_acquire_timeout = c->wdt.limit_ms;
         }

@@ -1386,7 +1386,7 @@ int nvgpu_channel_setup_bind(struct nvgpu_channel *c,
         struct gk20a *g = c->g;
         int err = 0;

-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
         if ((args->flags & NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR) != 0U) {
                 c->vpr = true;
         }

@@ -1568,7 +1568,7 @@ u32 nvgpu_channel_update_gpfifo_get_and_get_free_count(struct nvgpu_channel *ch)
         return nvgpu_channel_get_gpfifo_free_count(ch);
 }

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
 static void nvgpu_channel_wdt_init(struct nvgpu_channel *ch)
 {

@@ -1762,7 +1762,7 @@ static void nvgpu_channel_wdt_handler(struct nvgpu_channel *ch)
                 gk20a_gr_debug_dump(g);
         }

-#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
         if (g->ops.tsg.force_reset(ch,
                 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT,
                 ch->wdt.debug_dump) != 0) {

@@ -1826,7 +1826,7 @@ nvgpu_channel_worker_from_worker(struct nvgpu_worker *worker)
         ((uintptr_t)worker - offsetof(struct nvgpu_channel_worker, worker));
 };

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
 static void nvgpu_channel_worker_poll_init(struct nvgpu_worker *worker)
 {

@@ -1890,7 +1890,7 @@ static void nvgpu_channel_worker_poll_wakeup_process_item(
 }

 static const struct nvgpu_worker_ops channel_worker_ops = {
-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
         .pre_process = nvgpu_channel_worker_poll_init,
         .wakeup_post_process =
                 nvgpu_channel_worker_poll_wakeup_post_process_item,

@@ -2009,7 +2009,7 @@ int nvgpu_channel_add_job(struct nvgpu_channel *c,
         job->num_mapped_buffers = num_mapped_buffers;
         job->mapped_buffers = mapped_buffers;

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
         nvgpu_channel_wdt_start(c);
 #endif

@@ -2058,7 +2058,7 @@ void nvgpu_channel_clean_up_jobs(struct nvgpu_channel *c,
         struct nvgpu_channel_job *job;
         struct gk20a *g;
         bool job_finished = false;
-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
         bool watchdog_on = false;
 #endif

@@ -2075,7 +2075,7 @@ void nvgpu_channel_clean_up_jobs(struct nvgpu_channel *c,
         vm = c->vm;
         g = c->g;

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
         /*
          * If !clean_all, we're in a condition where watchdog isn't supported
          * anyway (this would be a no-op).

@@ -2112,7 +2112,7 @@ void nvgpu_channel_clean_up_jobs(struct nvgpu_channel *c,
                 completed = nvgpu_fence_is_expired(job->post_fence);
                 if (!completed) {
-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
                         /*
                          * The watchdog eventually sees an updated gp_get if
                          * something happened in this loop. A new job can have

@@ -2320,7 +2320,7 @@ static void nvgpu_channel_destroy(struct gk20a *g, struct nvgpu_channel *c)
         nvgpu_mutex_destroy(&c->joblist.cleanup_lock);
         nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock);
         nvgpu_mutex_destroy(&c->sync_lock);
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
         nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex);
         nvgpu_mutex_destroy(&c->cs_client_mutex);
 #endif

@@ -2377,7 +2377,7 @@ int nvgpu_channel_init_support(struct gk20a *g, u32 chid)
         nvgpu_spinlock_init(&c->ref_actions_lock);
 #endif
         nvgpu_spinlock_init(&c->joblist.dynamic.lock);
-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
         nvgpu_spinlock_init(&c->wdt.lock);
 #endif

@@ -2389,7 +2389,7 @@ int nvgpu_channel_init_support(struct gk20a *g, u32 chid)
         nvgpu_mutex_init(&c->joblist.cleanup_lock);
         nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock);
         nvgpu_mutex_init(&c->sync_lock);
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
         nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
         nvgpu_mutex_init(&c->cs_client_mutex);
 #endif

@@ -2566,7 +2566,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
                 nvgpu_warn(g, "failed to broadcast");
         }

-#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
         if (post_events) {
                 struct nvgpu_tsg *tsg =
                         nvgpu_tsg_from_ch(c);

@@ -2726,7 +2726,7 @@ void nvgpu_channel_debug_dump_all(struct gk20a *g,
         nvgpu_kfree(g, infos);
 }

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 int nvgpu_channel_deferred_reset_engines(struct gk20a *g,
                 struct nvgpu_channel *ch)
 {

--- next file ---

@@ -25,7 +25,7 @@
 #include <nvgpu/errno.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/bitops.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu.h>
 #include <nvgpu/pmu/mutex.h>
 #endif

@@ -247,7 +247,7 @@ u32 nvgpu_engine_get_all_ce_reset_mask(struct gk20a *g)
         return reset_mask;
 }

-#ifdef NVGPU_ENGINE
+#ifdef CONFIG_NVGPU_FIFO_ENGINE_ACTIVITY
 int nvgpu_engine_enable_activity(struct gk20a *g,
                         struct nvgpu_engine_info *eng_info)

@@ -284,7 +284,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
 {
         u32 pbdma_chid = NVGPU_INVALID_CHANNEL_ID;
         u32 engine_chid = NVGPU_INVALID_CHANNEL_ID;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = -EINVAL;
 #endif

@@ -301,7 +301,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
                 return -EBUSY;
         }

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         if (g->ops.pmu.is_pmu_supported(g)) {
                 mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
                                                 PMU_MUTEX_ID_FIFO, &token);

@@ -356,7 +356,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
         }

 clean_up:
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         if (mutex_ret == 0) {
                 if (nvgpu_pmu_lock_release(g, g->pmu,
                                 PMU_MUTEX_ID_FIFO, &token) != 0){

@@ -536,13 +536,13 @@ void nvgpu_engine_reset(struct gk20a *g, u32 engine_id)
         }

         if (engine_enum == NVGPU_ENGINE_GR) {
-#ifdef NVGPU_FEATURE_POWER_PG
+#ifdef CONFIG_NVGPU_POWER_PG
                 if (nvgpu_pg_elpg_disable(g) != 0 ) {
                         nvgpu_err(g, "failed to set disable elpg");
                 }
 #endif

-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
                 /*
                  * Resetting engine will alter read/write index. Need to flush
                  * circular buffer before re-enabling FECS.

@@ -563,7 +563,7 @@ void nvgpu_engine_reset(struct gk20a *g, u32 engine_id)
                         nvgpu_err(g, "failed to halt gr pipe");
                 }

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
                 /*
                  * resetting engine using mc_enable_r() is not
                  * enough, we do full init sequence

@@ -581,7 +581,7 @@ void nvgpu_engine_reset(struct gk20a *g, u32 engine_id)
                                 "gr cannot be reset without halting gr pipe");
                 }

-#ifdef NVGPU_FEATURE_POWER_PG
+#ifdef CONFIG_NVGPU_POWER_PG
                 if (nvgpu_pg_elpg_enable(g) != 0 ) {
                         nvgpu_err(g, "failed to set enable elpg");
                 }

@@ -924,7 +924,7 @@ u32 nvgpu_engine_get_runlist_busy_engines(struct gk20a *g, u32 runlist_id)
         return eng_bitmask;
 }

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 bool nvgpu_engine_should_defer_reset(struct gk20a *g, u32 engine_id,
                         u32 engine_subid, bool fake_fault)
 {

--- next file ---

@@ -48,7 +48,7 @@ void nvgpu_fifo_cleanup_sw_common(struct gk20a *g)
         nvgpu_engine_cleanup_sw(g);
         nvgpu_pbdma_cleanup_sw(g);

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
         f->deferred_reset_pending = false;
         nvgpu_mutex_destroy(&f->deferred_reset_mutex);
 #endif

@@ -80,7 +80,7 @@ int nvgpu_fifo_setup_sw_common(struct gk20a *g)
         nvgpu_mutex_init(&f->intr.isr.mutex);
         nvgpu_mutex_init(&f->engines_reset_mutex);

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
         nvgpu_mutex_init(&f->deferred_reset_mutex);
 #endif

--- next file ---

@@ -29,7 +29,7 @@
 #include <nvgpu/bug.h>
 #include <nvgpu/dma.h>
 #include <nvgpu/rc.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/mutex.h>
 #endif

@@ -447,14 +447,14 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
         return ret;
 }

-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 /* trigger host to expire current timeslice and reschedule runlist from front */
 int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
                 bool wait_preempt)
 {
         struct gk20a *g = ch->g;
         struct nvgpu_runlist_info *runlist;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = 0;
 #endif

@@ -464,7 +464,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
         if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) {
                 return -EBUSY;
         }
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         mutex_ret = nvgpu_pmu_lock_acquire(
                         g, g->pmu, PMU_MUTEX_ID_FIFO, &token);
 #endif

@@ -483,7 +483,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
                         nvgpu_err(g, "wait pending failed for runlist %u",
                                 ch->runlist_id);
         }
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         if (mutex_ret == 0) {
                 if (nvgpu_pmu_lock_release(g, g->pmu,
                                 PMU_MUTEX_ID_FIFO, &token) != 0) {

@@ -507,7 +507,7 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
 {
         struct nvgpu_runlist_info *runlist = NULL;
         struct nvgpu_fifo *f = &g->fifo;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = 0;
 #endif

@@ -518,13 +518,13 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
         runlist = f->runlist_info[runlist_id];
         nvgpu_mutex_acquire(&runlist->runlist_lock);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
                 PMU_MUTEX_ID_FIFO, &token);
 #endif

         ret = nvgpu_runlist_update_locked(g, runlist_id, ch, add,
                         wait_for_finish);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         if (mutex_ret == 0) {
                 if (nvgpu_pmu_lock_release(g, g->pmu,
                                 PMU_MUTEX_ID_FIFO, &token) != 0) {

@@ -610,19 +610,19 @@ const char *nvgpu_runlist_interleave_level_name(u32 interleave_level)
 void nvgpu_fifo_runlist_set_state(struct gk20a *g, u32 runlists_mask,
                 u32 runlist_state)
 {
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = 0;
 #endif

         nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x",
                         runlists_mask, runlist_state);

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
                         PMU_MUTEX_ID_FIFO, &token);
 #endif

         g->ops.runlist.write_state(g, runlists_mask, runlist_state);

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         if (mutex_ret == 0) {
                 if (nvgpu_pmu_lock_release(g, g->pmu,
                                 PMU_MUTEX_ID_FIFO, &token) != 0) {

--- next file ---

@@ -406,7 +406,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
                          && !c->deterministic) ||
                         !skip_buffer_refcounting);

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
         need_job_tracking = need_job_tracking || c->wdt.enabled;
 #endif

@@ -444,7 +444,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
                                 need_sync_framework ||
                                 !skip_buffer_refcounting;

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
                 need_deferred_cleanup = need_deferred_cleanup || c->wdt.enabled;
 #endif

--- next file ---

@@ -215,7 +215,7 @@ int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,
         g->ops.channel.disable(ch);
         nvgpu_rwsem_up_write(&tsg->ch_list_lock);

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
         if (ch->mmu_debug_mode_enabled) {
                 err = nvgpu_tsg_set_mmu_debug_mode(tsg, ch, false);
                 if (err != 0) {

@@ -300,7 +300,7 @@ static void nvgpu_tsg_destroy(struct gk20a *g, struct nvgpu_tsg *tsg)
         nvgpu_mutex_destroy(&tsg->event_id_list_lock);
 }

-#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
 /* force reset tsg that the channel is bound to */
 int nvgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
                 u32 err_code, bool verbose)

@@ -521,7 +521,7 @@ bool nvgpu_tsg_check_ctxsw_timeout(struct nvgpu_tsg *tsg,
         return recover;
 }

-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 int nvgpu_tsg_set_interleave(struct nvgpu_tsg *tsg, u32 level)
 {
         struct gk20a *g = tsg->g;

@@ -863,7 +863,7 @@ void nvgpu_tsg_reset_faulted_eng_pbdma(struct gk20a *g, struct nvgpu_tsg *tsg,
         nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 int nvgpu_tsg_set_mmu_debug_mode(struct nvgpu_tsg *tsg,
                 struct nvgpu_channel *ch, bool enable)
 {

--- next file ---

@@ -31,7 +31,7 @@
 #include <nvgpu/vm_area.h>
 #include <nvgpu/dma.h>

-#ifdef NVGPU_USERD
+#ifdef CONFIG_NVGPU_USERD
 int nvgpu_userd_init_slabs(struct gk20a *g)
 {
         struct nvgpu_fifo *f = &g->fifo;

@@ -76,7 +76,7 @@ void nvgpu_userd_free_slabs(struct gk20a *g)

 int nvgpu_userd_init_channel(struct gk20a *g, struct nvgpu_channel *c)
 {
-#ifdef NVGPU_USERD
+#ifdef CONFIG_NVGPU_USERD
         struct nvgpu_fifo *f = &g->fifo;
         struct nvgpu_mem *mem;
         u32 slab = c->chid / f->num_channels_per_slab;

@@ -124,7 +124,7 @@ done:

 int nvgpu_userd_setup_sw(struct gk20a *g)
 {
-#ifdef NVGPU_USERD
+#ifdef CONFIG_NVGPU_USERD
         struct nvgpu_fifo *f = &g->fifo;
         int err;
         u32 size, num_pages;

@@ -159,7 +159,7 @@ clean_up:

 void nvgpu_userd_cleanup_sw(struct gk20a *g)
 {
-#ifdef NVGPU_USERD
+#ifdef CONFIG_NVGPU_USERD
         struct nvgpu_fifo *f = &g->fifo;

         if (f->userd_gpu_va != 0ULL) {

--- next file ---

@@ -353,7 +353,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
         g_bfr_index = &gr_ctx->global_ctx_buffer_index[0];

         /* Circular Buffer */
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
         if (vpr && nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
                         NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR)) {
                 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,

@@ -366,7 +366,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
                                 NVGPU_GR_GLOBAL_CTX_CIRCULAR,
                                 vm, NVGPU_VM_MAP_CACHEABLE, true);
                 g_bfr_index[NVGPU_GR_CTX_CIRCULAR_VA] = NVGPU_GR_GLOBAL_CTX_CIRCULAR;
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
         }
 #endif
         if (gpu_va == 0ULL) {

@@ -376,7 +376,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
         g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;

         /* Attribute Buffer */
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
         if (vpr && nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
                         NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR)) {
                 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,

@@ -389,7 +389,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
                                 NVGPU_GR_GLOBAL_CTX_ATTRIBUTE,
                                 vm, NVGPU_VM_MAP_CACHEABLE, false);
                 g_bfr_index[NVGPU_GR_CTX_ATTRIBUTE_VA] = NVGPU_GR_GLOBAL_CTX_ATTRIBUTE;
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
         }
 #endif
         if (gpu_va == 0ULL) {

@@ -399,7 +399,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
         g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;

         /* Page Pool */
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
         if (vpr && nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
                         NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR)) {
                 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,

@@ -412,7 +412,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
                                 NVGPU_GR_GLOBAL_CTX_PAGEPOOL,
                                 vm, NVGPU_VM_MAP_CACHEABLE, true);
                 g_bfr_index[NVGPU_GR_CTX_PAGEPOOL_VA] = NVGPU_GR_GLOBAL_CTX_PAGEPOOL;
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
         }
 #endif
         if (gpu_va == 0ULL) {

@@ -432,7 +432,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
         g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA] = gpu_va;
         g_bfr_index[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA] = NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP;

-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
         /* FECS trace buffer */
         if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA)) {
                 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,

@@ -704,7 +704,7 @@ u32 nvgpu_gr_ctx_get_ctx_id(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx)
         return gr_ctx->ctx_id;
 }

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 void nvgpu_gr_ctx_set_zcull_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
         u32 mode, u64 gpu_va)
 {

@@ -969,7 +969,7 @@ u32 nvgpu_gr_ctx_read_ctx_id(struct nvgpu_gr_ctx *gr_ctx)
         return gr_ctx->ctx_id;
 }

-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 void nvgpu_gr_ctx_set_boosted_ctx(struct nvgpu_gr_ctx *gr_ctx, bool boost)
 {
         gr_ctx->boosted_ctx = boost;
gr_ctx->boosted_ctx = boost; gr_ctx->boosted_ctx = boost;

--- next file ---

@@ -119,7 +119,7 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config)
         g->ops.gr.init.pd_tpc_per_gpc(g, config);

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
         /* gr__setup_pd_mapping */
         g->ops.gr.init.rop_mapping(g, config);
 #endif

--- next file ---

@@ -103,7 +103,7 @@ static int nvgpu_gr_global_ctx_buffer_alloc_sys(struct gk20a *g,
         return err;
 }

-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
 static int nvgpu_gr_global_ctx_buffer_alloc_vpr(struct gk20a *g,
         struct nvgpu_gr_global_ctx_buffer_desc *desc,
         u32 index)

@@ -137,7 +137,7 @@ int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
         if (desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR].size == 0U ||
                 desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL].size == 0U ||
                 desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE].size == 0U ||
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
                 desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR].size == 0U ||
                 desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR].size == 0U ||
                 desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR].size == 0U ||

@@ -185,7 +185,7 @@ int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
                         goto clean_up;
                 }
         }
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
         err = nvgpu_gr_global_ctx_buffer_alloc_vpr(g, desc,
                         NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR);
         if (err != 0) {

View File

@@ -28,7 +28,7 @@
 #include <nvgpu/gr/gr.h>
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/gr_intr.h>
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 #include <nvgpu/gr/zbc.h>
 #include <nvgpu/gr/zcull.h>
 #endif
@@ -57,7 +57,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g)

     nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
         NVGPU_GR_GLOBAL_CTX_CIRCULAR, size);
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
     nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
         NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR, size);
 #endif
@@ -67,7 +67,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g)

     nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
         NVGPU_GR_GLOBAL_CTX_PAGEPOOL, size);
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
     nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
         NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR, size);
 #endif
@@ -78,7 +78,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g)

     nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
         NVGPU_GR_GLOBAL_CTX_ATTRIBUTE, size);
-#ifdef NVGPU_VPR
+#ifdef CONFIG_NVGPU_VPR
     nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
         NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR, size);
 #endif
@@ -88,7 +88,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g)
     nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
         NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP, size);

-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
     size = nvgpu_gr_fecs_trace_buffer_size(g);
     nvgpu_log_info(g, "fecs_trace_buffer_size : %d", size);

@@ -192,12 +192,12 @@ static int gr_init_setup_hw(struct gk20a *g)
     /* load gr floorsweeping registers */
     g->ops.gr.init.pes_vsc_stream(g);

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     err = nvgpu_gr_zcull_init_hw(g, gr->zcull, gr->config);
     if (err != 0) {
         goto out;
     }
-#endif /* NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_GRAPHICS */

     if (g->ops.priv_ring.set_ppriv_timeout_settings != NULL) {
         g->ops.priv_ring.set_ppriv_timeout_settings(g);
@@ -232,12 +232,12 @@ static int gr_init_setup_hw(struct gk20a *g)
     /* reset and enable exceptions */
     g->ops.gr.intr.enable_exceptions(g, gr->config, true);

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     err = nvgpu_gr_zbc_load_table(g, gr->zbc);
     if (err != 0) {
         goto out;
     }
-#endif /* NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_GRAPHICS */

     /*
      * Disable both surface and LG coalesce.
@@ -284,7 +284,7 @@ static void gr_remove_support(struct gk20a *g)

     nvgpu_netlist_deinit_ctx_vars(g);

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
     nvgpu_gr_hwpm_map_deinit(g, gr->hwpm_map);
 #endif

@@ -294,10 +294,10 @@ static void gr_remove_support(struct gk20a *g)
     nvgpu_gr_intr_remove_support(g, gr->intr);
     gr->intr = NULL;

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     nvgpu_gr_zbc_deinit(g, gr->zbc);
     nvgpu_gr_zcull_deinit(g, gr->zcull);
-#endif /* NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_GRAPHICS */

     nvgpu_gr_obj_ctx_deinit(g, gr->golden_image);
 }
@@ -404,7 +404,7 @@ static int gr_init_setup_sw(struct gk20a *g)
         goto clean_up;
     }

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
     err = nvgpu_gr_hwpm_map_init(g, &g->gr->hwpm_map,
             nvgpu_gr_falcon_get_pm_ctxsw_image_size(g->gr->falcon));
     if (err != 0) {
@@ -413,7 +413,7 @@ static int gr_init_setup_sw(struct gk20a *g)
     }
 #endif

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     err = nvgpu_gr_config_init_map_tiles(g, gr->config);
     if (err != 0) {
         goto clean_up;
@@ -425,7 +425,7 @@ static int gr_init_setup_sw(struct gk20a *g)
     if (err != 0) {
         goto clean_up;
     }
-#endif /* NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_GRAPHICS */

     gr->gr_ctx_desc = nvgpu_gr_ctx_desc_alloc(g);
     if (gr->gr_ctx_desc == NULL) {
@@ -450,12 +450,12 @@ static int gr_init_setup_sw(struct gk20a *g)
         goto clean_up;
     }

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     err = nvgpu_gr_zbc_init(g, &gr->zbc);
     if (err != 0) {
         goto clean_up;
     }
-#endif /* NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_GRAPHICS */

     gr->intr = nvgpu_gr_intr_init_support(g);
     if (gr->intr == NULL) {
@@ -733,7 +733,7 @@ int nvgpu_gr_disable_ctxsw(struct gk20a *g)

     gr->ctxsw_disable_count++;
     if (gr->ctxsw_disable_count == 1) {
-#ifdef NVGPU_FEATURE_POWER_PG
+#ifdef CONFIG_NVGPU_POWER_PG
         err = nvgpu_pg_elpg_disable(g);
         if (err != 0) {
             nvgpu_err(g,
@@ -780,7 +780,7 @@ int nvgpu_gr_enable_ctxsw(struct gk20a *g)
     if (err != 0) {
         nvgpu_err(g, "failed to start fecs ctxsw");
     }
-#ifdef NVGPU_FEATURE_POWER_PG
+#ifdef CONFIG_NVGPU_POWER_PG
     else {
         if (nvgpu_pg_elpg_enable(g) != 0) {
             nvgpu_err(g,


@@ -102,7 +102,7 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)
     temp2 = nvgpu_safe_mult_u64((size_t)config->max_gpc_count, sizeof(u32));
     config->gpc_tpc_count = nvgpu_kzalloc(g, gpc_size);
     config->gpc_tpc_mask = nvgpu_kzalloc(g, temp2);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     config->max_zcull_per_gpc_count = nvgpu_get_litter_value(g,
         GPU_LIT_NUM_ZCULL_BANKS);

@@ -117,7 +117,7 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)
     config->gpc_skip_mask = nvgpu_kzalloc(g, temp3);

     if ((config->gpc_tpc_count == NULL) || (config->gpc_tpc_mask == NULL) ||
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
         (config->gpc_zcb_count == NULL) ||
 #endif
         (config->gpc_ppc_count == NULL) ||
@@ -143,7 +143,7 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)

     config->ppc_count = 0;
     config->tpc_count = 0;
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     config->zcb_count = 0;
 #endif
     for (gpc_index = 0; gpc_index < config->gpc_count; gpc_index++) {
@@ -153,7 +153,7 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)

         config->tpc_count = nvgpu_safe_add_u32(config->tpc_count,
             config->gpc_tpc_count[gpc_index]);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
         config->gpc_zcb_count[gpc_index] =
             g->ops.gr.config.get_zcull_count_in_gpc(g, config,
                 gpc_index);
@@ -220,7 +220,7 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)

     nvgpu_log_info(g, "max_gpc_count: %d", config->max_gpc_count);
     nvgpu_log_info(g, "max_tpc_per_gpc_count: %d", config->max_tpc_per_gpc_count);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     nvgpu_log_info(g, "max_zcull_per_gpc_count: %d", config->max_zcull_per_gpc_count);
 #endif
     nvgpu_log_info(g, "max_tpc_count: %d", config->max_tpc_count);
@@ -233,7 +233,7 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)
         nvgpu_log_info(g, "gpc_tpc_count[%d] : %d",
             gpc_index, config->gpc_tpc_count[gpc_index]);
     }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     for (gpc_index = 0; gpc_index < config->gpc_count; gpc_index++) {
         nvgpu_log_info(g, "gpc_zcb_count[%d] : %d",
             gpc_index, config->gpc_zcb_count[gpc_index]);
@@ -274,7 +274,7 @@ clean_up:
     return NULL;
 }

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 static u32 prime_set[18] = {
     2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61 };

@@ -519,7 +519,7 @@ void nvgpu_gr_config_deinit(struct gk20a *g, struct nvgpu_gr_config *config)
     nvgpu_kfree(g, config->gpc_ppc_count);
     nvgpu_kfree(g, config->gpc_skip_mask);
     nvgpu_kfree(g, config->gpc_tpc_mask);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     nvgpu_kfree(g, config->gpc_zcb_count);
     nvgpu_kfree(g, config->map_tiles);
 #endif


@@ -58,7 +58,7 @@ struct nvgpu_gr_config {
     u32 *pes_tpc_mask[GK20A_GR_MAX_PES_PER_GPC];
     u32 *gpc_skip_mask;

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     u32 max_zcull_per_gpc_count;
     u32 zcb_count;
     u32 *gpc_zcb_count;


@@ -30,11 +30,11 @@
 #include <nvgpu/sizes.h>
 #include <nvgpu/mm.h>
 #include <nvgpu/acr.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/lsfm.h>
 #include <nvgpu/pmu/pmu_pg.h>
 #endif
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 #include <nvgpu/sec2/lsfm.h>
 #endif
 #include <nvgpu/dma.h>
@@ -74,7 +74,7 @@ void nvgpu_gr_falcon_remove_support(struct gk20a *g,

 int nvgpu_gr_falcon_bind_fecs_elpg(struct gk20a *g)
 {
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     struct nvgpu_pmu *pmu = g->pmu;
     struct mm_gk20a *mm = &g->mm;
     struct vm_gk20a *vm = mm->pmu.vm;
@@ -188,7 +188,7 @@ u32 nvgpu_gr_falcon_get_preempt_image_size(struct nvgpu_gr_falcon *falcon)
     return falcon->sizes.preempt_image_size;
 }

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 u32 nvgpu_gr_falcon_get_zcull_image_size(struct nvgpu_gr_falcon *falcon)
 {
     return falcon->sizes.zcull_image_size;
@@ -551,14 +551,14 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
         /* this must be recovery so bootstrap fecs and gpccs */
         if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
             nvgpu_gr_falcon_load_gpccs_with_bootloader(g, falcon);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
             err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g, g->pmu,
                     g->pmu->lsfm, BIT32(FALCON_ID_FECS));
 #endif
         } else {
             /* bind WPR VA inst block */
             nvgpu_gr_falcon_bind_instblk(g, falcon);
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
             if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
                 err = nvgpu_sec2_bootstrap_ls_falcons(g,
                         &g->sec2, FALCON_ID_FECS);
@@ -566,7 +566,7 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
                         &g->sec2, FALCON_ID_GPCCS);
             } else
 #endif
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
             if (g->support_ls_pmu) {
                 err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g,
                         g->pmu, g->pmu->lsfm,
@@ -604,7 +604,7 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
             falcon_id_mask |= BIT8(FALCON_ID_GPCCS);
         }

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
         if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
             err = nvgpu_sec2_bootstrap_ls_falcons(g,
                     &g->sec2, FALCON_ID_FECS);
@@ -612,7 +612,7 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
                     &g->sec2, FALCON_ID_GPCCS);
         } else
 #endif
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         if (g->support_ls_pmu) {
             err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g,
                     g->pmu, g->pmu->lsfm,


@@ -28,7 +28,7 @@
 #include <nvgpu/safe_ops.h>
 #include <nvgpu/error_notifier.h>
 #include <nvgpu/power_features/pg.h>
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 #include <nvgpu/cyclestats.h>
 #endif

@@ -132,7 +132,7 @@ static int gr_intr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
     return ret;
 }

-#if defined(NVGPU_FEATURE_CHANNEL_TSG_CONTROL) && defined(NVGPU_DEBUGGER)
+#if defined(CONFIG_NVGPU_CHANNEL_TSG_CONTROL) && defined(CONFIG_NVGPU_DEBUGGER)
 static void gr_intr_post_bpt_events(struct gk20a *g, struct nvgpu_tsg *tsg,
         u32 global_esr)
 {
@@ -351,7 +351,7 @@ int nvgpu_gr_intr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
             nvgpu_gr_tpc_offset(g, tpc));
     u32 global_esr, warp_esr, global_mask;
     u64 hww_warp_esr_pc = 0;
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
     bool sm_debugger_attached;
     bool do_warp_sync = false, early_exit = false, ignore_debugger = false;
     bool disable_sm_exceptions = true;
@@ -384,7 +384,7 @@ int nvgpu_gr_intr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
         nvgpu_safe_cast_u32_to_s32(
             g->ops.gr.intr.record_sm_error_state(g, gpc, tpc, sm, fault_ch)));
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
     sm_debugger_attached = g->ops.gr.sm_debugger_attached(g);

     if (!sm_debugger_attached) {
         nvgpu_err(g, "sm hww global 0x%08x warp 0x%08x",
@@ -495,7 +495,7 @@ int nvgpu_gr_intr_handle_fecs_error(struct gk20a *g, struct nvgpu_channel *ch,
     } else if (fecs_host_intr.ctxsw_intr0 != 0U) {
         mailbox_value = g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
                 mailbox_id);
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
         if (mailbox_value ==
             g->ops.gr.fecs_trace.get_buffer_full_mailbox_val()) {
             nvgpu_info(g, "ctxsw intr0 set by ucode, "
@@ -632,7 +632,7 @@ void nvgpu_gr_intr_handle_notify_pending(struct gk20a *g,
     nvgpu_log_fn(g, " ");

-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
     nvgpu_cyclestats_exec(g, ch, isr_data->data_lo);
 #endif

@@ -656,7 +656,7 @@ void nvgpu_gr_intr_handle_semaphore_pending(struct gk20a *g,
     if (tsg != NULL) {
         int err;

-#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
         g->ops.tsg.post_event_id(tsg,
             NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
 #endif
@@ -811,7 +811,7 @@ int nvgpu_gr_intr_stall_isr(struct gk20a *g)
             need_reset = true;
         }

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
         /* signal clients waiting on an event */
         if (g->ops.gr.sm_debugger_attached(g) &&
                 post_event && (fault_ch != NULL)) {
@@ -853,7 +853,7 @@ int nvgpu_gr_intr_stall_isr(struct gk20a *g)
     /* Enable fifo access */
     g->ops.gr.init.fifo_access(g, true);

-#if defined(NVGPU_FEATURE_CHANNEL_TSG_CONTROL) && defined(NVGPU_DEBUGGER)
+#if defined(CONFIG_NVGPU_CHANNEL_TSG_CONTROL) && defined(CONFIG_NVGPU_DEBUGGER)
     /* Posting of BPT events should be the last thing in this function */
     if ((global_esr != 0U) && (tsg != NULL) && (need_reset == false)) {
         gr_intr_post_bpt_events(g, tsg, global_esr);


@@ -30,7 +30,7 @@ struct nvgpu_gr_ctx_desc;
 struct nvgpu_gr_global_ctx_buffer_desc;
 struct nvgpu_gr_obj_ctx_golden_image;
 struct nvgpu_gr_config;
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 struct nvgpu_gr_zbc;
 struct nvgpu_gr_zcull;
 #endif
@@ -51,11 +51,11 @@ struct nvgpu_gr {

     struct nvgpu_gr_config *config;

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
     struct nvgpu_gr_hwpm_map *hwpm_map;
 #endif

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     struct nvgpu_gr_zcull *zcull;
     struct nvgpu_gr_zbc *zbc;


@@ -25,7 +25,7 @@
 #include <nvgpu/gr/ctx.h>
 #include <nvgpu/gr/subctx.h>
 #include <nvgpu/gr/obj_ctx.h>
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 #include <nvgpu/gr/zcull.h>
 #endif
 #include <nvgpu/gr/setup.h>
@@ -34,7 +34,7 @@

 #include "gr_priv.h"

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 static int nvgpu_gr_setup_zcull(struct gk20a *g, struct nvgpu_channel *c,
         struct nvgpu_gr_ctx *gr_ctx)
 {
@@ -166,7 +166,7 @@ int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
             c->subctx, nvgpu_gr_ctx_get_ctx_mem(gr_ctx)->gpu_va);
     }

-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
     if (g->ops.gr.fecs_trace.bind_channel && !c->vpr) {
         err = g->ops.gr.fecs_trace.bind_channel(g, &c->inst_block,
             c->subctx, gr_ctx, tsg->tgid, 0);
@@ -222,7 +222,7 @@ void nvgpu_gr_setup_free_subctx(struct nvgpu_channel *c)
     }
 }

-#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
 int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
         u32 graphics_preempt_mode,
         u32 compute_preempt_mode)


@@ -49,7 +49,7 @@ void nvgpu_gr_reset_golden_image_ptr(struct gk20a *g)
     g->gr->golden_image = NULL;
 }

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 struct nvgpu_gr_zcull *nvgpu_gr_get_zcull_ptr(struct gk20a *g)
 {
     return g->gr->zcull;
@@ -66,7 +66,7 @@ struct nvgpu_gr_config *nvgpu_gr_get_config_ptr(struct gk20a *g)
     return g->gr->config;
 }

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 struct nvgpu_gr_hwpm_map *nvgpu_gr_get_hwpm_map_ptr(struct gk20a *g)
 {
     return g->gr->hwpm_map;
@@ -78,7 +78,7 @@ struct nvgpu_gr_intr *nvgpu_gr_get_intr_ptr(struct gk20a *g)
     return g->gr->intr;
 }

-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 struct nvgpu_gr_global_ctx_buffer_desc *nvgpu_gr_get_global_ctx_buffer_ptr(
         struct gk20a *g)
 {


@@ -24,7 +24,7 @@
 #include <nvgpu/log.h>
 #include <nvgpu/io.h>
 #include <nvgpu/mm.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/pmu_pg.h>
 #endif
 #include <nvgpu/gr/ctx.h>
@@ -507,7 +507,7 @@ restore_fe_go_idle:
         goto clean_up;
     }

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     err = nvgpu_gr_ctx_init_zcull(g, gr_ctx);
     if (err != 0) {
         goto clean_up;
@@ -531,7 +531,7 @@ restore_fe_go_idle:
     }

     golden_image->ready = true;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     nvgpu_pmu_set_golden_image_initialized(g, true);
 #endif
     g->ops.gr.falcon.set_current_ctx_invalid(g);
@@ -726,7 +726,7 @@ void nvgpu_gr_obj_ctx_deinit(struct gk20a *g,
             golden_image->local_golden_image);
         golden_image->local_golden_image = NULL;
     }
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     nvgpu_pmu_set_golden_image_initialized(g, false);
 #endif
     golden_image->ready = false;


@@ -104,7 +104,7 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
     g->ops.gr.ctxsw_prog.set_pm_ptr(g, ctxheader,
         nvgpu_gr_ctx_get_pm_ctx_mem(gr_ctx)->gpu_va);

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     g->ops.gr.ctxsw_prog.set_zcull_ptr(g, ctxheader,
         nvgpu_gr_ctx_get_zcull_ctx_va(gr_ctx));
 #endif
@@ -114,7 +114,7 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
     g->ops.gr.ctxsw_prog.set_type_per_veid_header(g, ctxheader);
 }

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 void nvgpu_gr_subctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_subctx *subctx,
         struct nvgpu_gr_ctx *gr_ctx)
 {


@@ -25,7 +25,7 @@
 #include <nvgpu/bug.h>
 #include <nvgpu/string.h>
 #include <nvgpu/power_features/pg.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/pmu_pg.h>
 #endif

@@ -39,7 +39,7 @@ static int nvgpu_gr_zbc_add(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
     u32 i;
     int ret = -ENOSPC;
     bool added = false;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     u32 entries;
 #endif

@@ -134,7 +134,7 @@ static int nvgpu_gr_zbc_add(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
         goto err_mutex;
     }

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     if (!added && ret == 0) {
         /* update zbc for elpg only when new entry is added */
         entries = max(zbc->max_used_color_index,


@@ -48,7 +48,7 @@
 #include <nvgpu/gr/gr.h>

 #include <trace/events/gk20a.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/pmu_pstate.h>
 #endif

@@ -100,14 +100,14 @@ int gk20a_prepare_poweroff(struct gk20a *g)
         }
     }

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     /* disable elpg before gr or fifo suspend */
     if (g->support_ls_pmu) {
         ret = nvgpu_pmu_destroy(g, g->pmu);
     }
 #endif

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
     if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
         tmp_ret = nvgpu_sec2_destroy(g);
         if ((tmp_ret != 0) && (ret == 0)) {
@@ -134,11 +134,11 @@ int gk20a_prepare_poweroff(struct gk20a *g)
     nvgpu_falcon_sw_free(g, FALCON_ID_SEC2);
     nvgpu_falcon_sw_free(g, FALCON_ID_PMU);

-#ifdef NVGPU_FEATURE_CE
+#ifdef CONFIG_NVGPU_CE
     nvgpu_ce_suspend(g);
 #endif

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
     /* deinit the bios */
     nvgpu_bios_sw_deinit(g, g->bios);
 #endif
@@ -197,7 +197,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
         goto exit;
     }

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
     err = nvgpu_falcon_sw_init(g, FALCON_ID_SEC2);
     if (err != 0) {
         nvgpu_err(g, "failed to sw init FALCON_ID_SEC2");
@@ -208,7 +208,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
     err = nvgpu_falcon_sw_init(g, FALCON_ID_NVDEC);
     if (err != 0) {
         nvgpu_err(g, "failed to sw init FALCON_ID_NVDEC");
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
         goto done_sec2;
 #else
         goto done_pmu;
@@ -231,7 +231,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
         goto done;
     }

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
     if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
         err = nvgpu_init_sec2_setup_sw(g, &g->sec2);
         if (err != 0) {
@@ -249,7 +249,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
         }
     }

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
     err = nvgpu_bios_sw_init(g, &g->bios);
     if (err != 0) {
         nvgpu_err(g, "BIOS SW init failed %d", err);
@@ -350,7 +350,7 @@ int gk20a_finalize_poweron(struct gk20a *g)

     nvgpu_mutex_acquire(&g->tpc_pg_lock);

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
     if (g->can_tpc_powergate) {
         if (g->ops.gr.powergate_tpc != NULL) {
             g->ops.gr.powergate_tpc(g);
@@ -382,7 +382,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
         }
     }

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
     if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
         err = nvgpu_init_sec2_support(g);
         if (err != 0) {
@@ -393,7 +393,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
     }
 #endif

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     err = nvgpu_pmu_init(g, g->pmu);
     if (err != 0) {
         nvgpu_err(g, "failed to init gk20a pmu");
@@ -425,7 +425,7 @@ int gk20a_finalize_poweron(struct gk20a *g)

     nvgpu_mutex_release(&g->tpc_pg_lock);

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) {
         err = nvgpu_pmu_pstate_sw_setup(g);
         if (err != 0) {
@@ -475,7 +475,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
     /* Restore the debug setting */
     g->ops.fb.set_debug_mode(g, g->mmu_debug_ctrl);

-#ifdef NVGPU_FEATURE_CE
+#ifdef CONFIG_NVGPU_CE
     err = nvgpu_ce_init_support(g);
     if (err != 0) {
         nvgpu_err(g, "failed to init ce");
@@ -529,7 +529,7 @@ done_gsp:
     nvgpu_falcon_sw_free(g, FALCON_ID_GSPLITE);
 done_nvdec:
     nvgpu_falcon_sw_free(g, FALCON_ID_NVDEC);
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 done_sec2:
     nvgpu_falcon_sw_free(g, FALCON_ID_SEC2);
 #endif
@@ -630,7 +630,7 @@ void gk20a_init_gpu_characteristics(struct gk20a *g)

     g->ops.gr.init.detect_sm_arch(g);

-#ifdef CONFIG_GK20A_CYCLE_STATS
+#ifdef CONFIG_NVGPU_CYCLESTATS
     if (g->ops.gr.init_cyclestats != NULL) {
         g->ops.gr.init_cyclestats(g);
     }
@@ -652,7 +652,7 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount)

     nvgpu_log(g, gpu_dbg_shutdown, "Freeing GK20A struct!");

-#ifdef NVGPU_FEATURE_CE
+#ifdef CONFIG_NVGPU_CE
     nvgpu_ce_destroy(g);
 #endif



@@ -109,7 +109,7 @@ static int nvgpu_alloc_sysmem_flush(struct gk20a *g)
     return nvgpu_dma_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
 }

-#ifdef NVGPU_FEATURE_CE
+#ifdef CONFIG_NVGPU_CE
 static void nvgpu_remove_mm_ce_support(struct mm_gk20a *mm)
 {
     struct gk20a *g = gk20a_from_mm(mm);
@@ -292,7 +292,7 @@ static int nvgpu_init_mmu_debug(struct mm_gk20a *mm)
     return -ENOMEM;
 }

-#ifdef NVGPU_FEATURE_CE
+#ifdef CONFIG_NVGPU_CE
 void nvgpu_init_mm_ce_context(struct gk20a *g)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -509,7 +509,7 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
     }

     mm->remove_support = nvgpu_remove_mm_support;
-#ifdef NVGPU_FEATURE_CE
+#ifdef CONFIG_NVGPU_CE
     mm->remove_ce_support = nvgpu_remove_mm_ce_support;
 #endif



@@ -103,7 +103,7 @@ static int nvgpu_vidmem_do_clear_all(struct gk20a *g)

     vidmem_dbg(g, "Clearing all VIDMEM:");

-#ifdef NVGPU_FEATURE_CE
+#ifdef CONFIG_NVGPU_CE
     err = nvgpu_ce_execute_ops(g,
             mm->vidmem.ce_ctx_id,
             0,
@@ -462,7 +462,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
         nvgpu_fence_put(last_fence);
     }

-#ifdef NVGPU_FEATURE_CE
+#ifdef CONFIG_NVGPU_CE
     err = nvgpu_ce_execute_ops(g,
             g->mm.vidmem.ce_ctx_id,
             0,
@@ -479,7 +479,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 #endif

     if (err != 0) {
-#ifdef NVGPU_FEATURE_CE
+#ifdef CONFIG_NVGPU_CE
         nvgpu_err(g,
             "Failed nvgpu_ce_execute_ops[%d]", err);
 #endif


@@ -267,7 +267,7 @@ static int nvgpu_netlist_init_ctx_vars_fw(struct gk20a *g)
                 goto clean_up;
             }
             break;
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
         case NETLIST_REGIONID_CTXREG_ZCULL_GPC:
             nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ZCULL_GPC");
             err = nvgpu_netlist_alloc_load_aiv_list(g,
@@ -484,7 +484,7 @@ clean_up:
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.sys.l);
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.gpc.l);
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.tpc.l);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.zcull_gpc.l);
 #endif
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.ppc.l);
@@ -556,7 +556,7 @@ void nvgpu_netlist_deinit_ctx_vars(struct gk20a *g)
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.sys.l);
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.gpc.l);
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.tpc.l);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.zcull_gpc.l);
 #endif
     nvgpu_kfree(g, netlist_vars->ctxsw_regs.ppc.l);
@@ -708,7 +708,7 @@ struct netlist_aiv_list *nvgpu_netlist_get_tpc_ctxsw_regs(struct gk20a *g)
     return &g->netlist_vars->ctxsw_regs.tpc;
 }

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 struct netlist_aiv_list *nvgpu_netlist_get_zcull_gpc_ctxsw_regs(struct gk20a *g)
 {
     return &g->netlist_vars->ctxsw_regs.zcull_gpc;


@@ -109,7 +109,7 @@ struct nvgpu_netlist_vars {
     struct netlist_aiv_list sys;
     struct netlist_aiv_list gpc;
     struct netlist_aiv_list tpc;
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     struct netlist_aiv_list zcull_gpc;
 #endif
     struct netlist_aiv_list ppc;


@@ -23,7 +23,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/nvlink_device_reginit.h>

-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
 int nvgpu_nvlink_reg_init(struct gk20a *g)
 {
     int err;


@@ -25,7 +25,7 @@
 #include <nvgpu/gk20a.h>
 #include "device_reginit_gv100.h"

-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
 struct nvlink_reginit {
     u32 addr;
     u32 value;
@@ -177,4 +177,4 @@ int gv100_nvlink_reg_init(struct gk20a *g)
     }
     return 0;
 }
-#endif /* CONFIG_TEGRA_NVLINK */
+#endif /* CONFIG_NVGPU_NVLINK */


@@ -20,7 +20,7 @@
  * DEALINGS IN THE SOFTWARE.
  */

-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>

@@ -508,4 +508,4 @@ void gv100_nvlink_isr(struct gk20a *g)
     return;
 }
-#endif /* CONFIG_TEGRA_NVLINK */
+#endif /* CONFIG_NVGPU_NVLINK */


@@ -24,7 +24,7 @@
 #include <nvgpu/nvlink.h>
 #include <nvgpu/nvlink_link_mode_transitions.h>

-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
 /*
  * WAR: use this function to find detault link, as only one is supported
  * on the library for now


@@ -25,7 +25,7 @@

 #include <nvgpu/timers.h>
 #include <nvgpu/nvlink_minion.h>

-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
 /* Extract a WORD from the MINION ucode */
 u32 nvgpu_nvlink_minion_extract_word(struct nvgpu_firmware *fw, u32 idx)


@@ -25,7 +25,7 @@

 #include <nvgpu/nvlink_probe.h>
 #include <nvgpu/enabled.h>

-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
 int nvgpu_nvlink_speed_config(struct gk20a *g)
 {
@@ -82,7 +82,7 @@ int nvgpu_nvlink_dev_shutdown(struct gk20a *g)

 int nvgpu_nvlink_remove(struct gk20a *g)
 {
-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
     int err;

     if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_NVLINK)) {


@@ -20,7 +20,7 @@
  * DEALINGS IN THE SOFTWARE.
  */

-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
 #include <nvgpu/nvgpu_common.h>
 #include <nvgpu/nvlink_bios.h>

@@ -915,4 +915,4 @@ int gv100_nvlink_speed_config(struct gk20a *g)
     return 0;
 }
-#endif /* CONFIG_TEGRA_NVLINK */
+#endif /* CONFIG_NVGPU_NVLINK */


@@ -20,7 +20,7 @@
  * DEALINGS IN THE SOFTWARE.
  */

-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
 #include <nvgpu/nvgpu_common.h>
 #include <nvgpu/bitops.h>

@@ -122,4 +122,4 @@ int tu104_nvlink_speed_config(struct gk20a *g)
     return ret;
 }
-#endif /* CONFIG_TEGRA_NVLINK */
+#endif /* CONFIG_NVGPU_NVLINK */


@@ -26,7 +26,7 @@

 int nvgpu_nvlink_probe(struct gk20a *g)
 {
-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
     int err;

     err = nvgpu_nvlink_setup_ndev(g);


@@ -29,7 +29,7 @@
 #include "lsfm_sw_gm20b.h"
 #include "lsfm_sw_gp10b.h"

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 #include "lsfm_sw_gv100.h"
 #include "lsfm_sw_tu104.h"
 #endif
@@ -153,7 +153,7 @@ int nvgpu_pmu_lsfm_init(struct gk20a *g, struct nvgpu_pmu_lsfm **lsfm)
     case NVGPU_GPUID_GV11B:
         nvgpu_gp10b_lsfm_sw_init(g, *lsfm);
         break;
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
     case NVGPU_GPUID_GV100:
         nvgpu_gv100_lsfm_sw_init(g, *lsfm);
         break;


@@ -30,7 +30,7 @@
 #include <nvgpu/boardobj.h>
 #include <nvgpu/boardobjgrp.h>
 #include <nvgpu/pmu.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/pmu_pg.h>
 #include <nvgpu/pmu/mutex.h>
 #include <nvgpu/pmu/seq.h>
@@ -43,11 +43,11 @@
 #include <nvgpu/pmu/pmu_pstate.h>
 #endif

-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
 #include <nvgpu/sec2/lsfm.h>
 #endif

-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 /* PMU locks used to sync with PMU-RTOS */
 int nvgpu_pmu_lock_acquire(struct gk20a *g, struct nvgpu_pmu *pmu,
         u32 id, u32 *token)
@@ -227,7 +227,7 @@ int nvgpu_pmu_init(struct gk20a *g, struct nvgpu_pmu *pmu)
     }

     if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-#ifdef NVGPU_DGPU_SUPPORT
+#ifdef CONFIG_NVGPU_DGPU
         if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
             /* Reset PMU engine */
             err = nvgpu_falcon_reset(g->pmu->flcn);
@@ -323,7 +323,7 @@ int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu **pmu_p)
         nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, false);
         goto exit;
     }
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     nvgpu_mutex_init(&pmu->isr_mutex);

     /* Allocate memory for pmu_perfmon */
@@ -380,7 +380,7 @@ exit:
 void nvgpu_pmu_remove_support(struct gk20a *g, struct nvgpu_pmu *pmu)
 {
     if(pmu != NULL) {
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
         if (pmu->remove_support != NULL) {
             pmu->remove_support(g->pmu);
         }
@@ -440,7 +440,7 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)

     if (!enable) {
         if (!g->ops.pmu.is_engine_in_reset(g)) {
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
             g->ops.pmu.pmu_enable_irq(pmu, false);
 #endif
             pmu_enable_hw(pmu, false);


@@ -21,7 +21,7 @@
  */

 #include <nvgpu/gk20a.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu.h>
 #include <nvgpu/pmu/pmu_pg.h>
 #endif
@@ -42,7 +42,7 @@ bool nvgpu_pg_elpg_is_enabled(struct gk20a *g)
 int nvgpu_pg_elpg_enable(struct gk20a *g)
 {
     int err = 0;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     nvgpu_log_fn(g, " ");

     if (!g->can_elpg) {
@@ -63,7 +63,7 @@ int nvgpu_pg_elpg_enable(struct gk20a *g)
 int nvgpu_pg_elpg_disable(struct gk20a *g)
 {
     int err = 0;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     nvgpu_log_fn(g, " ");

     if (!g->can_elpg) {
@@ -109,7 +109,7 @@ int nvgpu_pg_elpg_set_elpg_enabled(struct gk20a *g, bool enable)
     if (!change_mode) {
         goto done;
     }
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
     err = nvgpu_pmu_pg_global_enable(g, enable);
 #endif
 done:


@@ -33,7 +33,7 @@ int nvgpu_cg_pg_disable(struct gk20a *g)

     g->ops.gr.init.wait_initialized(g);

-#ifdef NVGPU_FEATURE_POWER_PG
+#ifdef CONFIG_NVGPU_POWER_PG
     /* disable elpg before clock gating */
     err = nvgpu_pg_elpg_disable(g);
     if (err != 0) {
@@ -63,7 +63,7 @@ int nvgpu_cg_pg_enable(struct gk20a *g)

     nvgpu_cg_slcg_gr_perf_ltc_load_enable(g);

-#ifdef NVGPU_FEATURE_POWER_PG
+#ifdef CONFIG_NVGPU_POWER_PG
     err = nvgpu_pg_elpg_enable(g);
     if (err != 0) {
         nvgpu_err(g, "failed to set enable elpg");


@@ -63,7 +63,7 @@ void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
     nvgpu_tsg_set_error_notifier(g, tsg,
         NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);

-#ifdef NVGPU_CHANNEL_WDT
+#ifdef CONFIG_NVGPU_CHANNEL_WDT
     /*
      * Cancel all channels' wdt since ctxsw timeout might
      * trigger multiple watchdogs at a time
@@ -194,7 +194,7 @@ void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct nvgpu_tsg *tsg,
      * changing until engine status is checked to make sure tsg
      * being recovered is not loaded on the engines
      */
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
     err = g->ops.gr.disable_ctxsw(g);
 #endif

@@ -206,7 +206,7 @@ void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct nvgpu_tsg *tsg,
     eng_bitmask = g->ops.engine.get_mask_on_id(g,
             tsg->tsgid, true);

-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
     /*
      * it is ok to enable ctxsw before tsg is recovered. If engines
      * is 0, no engine recovery is needed and if it is non zero,


@@ -40,7 +40,7 @@ int nvgpu_init_sim_netlist_ctx_vars(struct gk20a *g)
     struct netlist_aiv_list *sys_ctxsw_regs;
     struct netlist_aiv_list *gpc_ctxsw_regs;
     struct netlist_aiv_list *tpc_ctxsw_regs;
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     struct netlist_aiv_list *zcull_gpc_ctxsw_regs;
 #endif
     struct netlist_aiv_list *pm_sys_ctxsw_regs;
@@ -73,7 +73,7 @@ int nvgpu_init_sim_netlist_ctx_vars(struct gk20a *g)
     sys_ctxsw_regs = nvgpu_netlist_get_sys_ctxsw_regs(g);
     gpc_ctxsw_regs = nvgpu_netlist_get_gpc_ctxsw_regs(g);
     tpc_ctxsw_regs = nvgpu_netlist_get_tpc_ctxsw_regs(g);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     zcull_gpc_ctxsw_regs = nvgpu_netlist_get_zcull_gpc_ctxsw_regs(g);
 #endif
     pm_sys_ctxsw_regs = nvgpu_netlist_get_pm_sys_ctxsw_regs(g);
@@ -121,7 +121,7 @@ int nvgpu_init_sim_netlist_ctx_vars(struct gk20a *g)
             &gpc_ctxsw_regs->count);
     g->sim->esc_readl(g, "GRCTX_REG_LIST_TPC_COUNT", 0,
             &tpc_ctxsw_regs->count);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     g->sim->esc_readl(g, "GRCTX_REG_LIST_ZCULL_GPC_COUNT", 0,
             &zcull_gpc_ctxsw_regs->count);
 #endif
@@ -177,7 +177,7 @@ int nvgpu_init_sim_netlist_ctx_vars(struct gk20a *g)
     if (nvgpu_netlist_alloc_aiv_list(g, tpc_ctxsw_regs) == NULL) {
         goto fail;
     }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     if (nvgpu_netlist_alloc_aiv_list(g, zcull_gpc_ctxsw_regs) == NULL) {
         goto fail;
     }
@@ -312,7 +312,7 @@ int nvgpu_init_sim_netlist_ctx_vars(struct gk20a *g)
                 i, &l[i].value);
     }

-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     for (i = 0; i < zcull_gpc_ctxsw_regs->count; i++) {
         struct netlist_aiv *l = zcull_gpc_ctxsw_regs->l;
         g->sim->esc_readl(g, "GRCTX_REG_LIST_ZCULL_GPC:ADDR",
@@ -392,7 +392,7 @@ fail:
     nvgpu_kfree(g, sys_ctxsw_regs->l);
     nvgpu_kfree(g, gpc_ctxsw_regs->l);
     nvgpu_kfree(g, tpc_ctxsw_regs->l);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
     nvgpu_kfree(g, zcull_gpc_ctxsw_regs->l);
 #endif
     nvgpu_kfree(g, ppc_ctxsw_regs->l);


@@ -31,7 +31,7 @@

 int vgpu_userd_setup_sw(struct gk20a *g)
 {
-#ifdef NVGPU_USERD
+#ifdef CONFIG_NVGPU_USERD
     struct nvgpu_fifo *f = &g->fifo;

     f->userd_entry_size = g->ops.userd.entry_size(g);
@@ -44,7 +44,7 @@ int vgpu_userd_setup_sw(struct gk20a *g)

 void vgpu_userd_cleanup_sw(struct gk20a *g)
 {
-#ifdef NVGPU_USERD
+#ifdef CONFIG_NVGPU_USERD
     nvgpu_userd_free_slabs(g);
 #endif
 }


@@ -103,10 +103,10 @@
static const struct gpu_ops vgpu_gp10b_ops = { static const struct gpu_ops vgpu_gp10b_ops = {
.ltc = { .ltc = {
.determine_L2_size_bytes = vgpu_determine_L2_size_bytes, .determine_L2_size_bytes = vgpu_determine_L2_size_bytes,
#ifdef NVGPU_GRAPHICS #ifdef CONFIG_NVGPU_GRAPHICS
.set_zbc_color_entry = NULL, .set_zbc_color_entry = NULL,
.set_zbc_depth_entry = NULL, .set_zbc_depth_entry = NULL,
#endif /* NVGPU_GRAPHICS */ #endif /* CONFIG_NVGPU_GRAPHICS */
.init_fs_state = vgpu_ltc_init_fs_state, .init_fs_state = vgpu_ltc_init_fs_state,
.flush = NULL, .flush = NULL,
.set_enabled = NULL, .set_enabled = NULL,
@@ -133,7 +133,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.get_num_pce = vgpu_ce_get_num_pce, .get_num_pce = vgpu_ce_get_num_pce,
}, },
.gr = { .gr = {
#ifdef NVGPU_DEBUGGER #ifdef CONFIG_NVGPU_DEBUGGER
.set_alpha_circular_buffer_size = NULL, .set_alpha_circular_buffer_size = NULL,
.set_circular_buffer_size = NULL, .set_circular_buffer_size = NULL,
.get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs, .get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
@@ -197,7 +197,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.get_patch_count = gm20b_ctxsw_prog_get_patch_count, .get_patch_count = gm20b_ctxsw_prog_get_patch_count,
.set_patch_count = gm20b_ctxsw_prog_set_patch_count, .set_patch_count = gm20b_ctxsw_prog_set_patch_count,
.set_patch_addr = gm20b_ctxsw_prog_set_patch_addr, .set_patch_addr = gm20b_ctxsw_prog_set_patch_addr,
#ifdef NVGPU_GRAPHICS #ifdef CONFIG_NVGPU_GRAPHICS
.set_zcull_ptr = gm20b_ctxsw_prog_set_zcull_ptr, .set_zcull_ptr = gm20b_ctxsw_prog_set_zcull_ptr,
.set_zcull = gm20b_ctxsw_prog_set_zcull, .set_zcull = gm20b_ctxsw_prog_set_zcull,
.set_zcull_mode_no_ctxsw = .set_zcull_mode_no_ctxsw =
@@ -237,7 +237,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.get_ppc_info = gm20b_ctxsw_prog_get_ppc_info, .get_ppc_info = gm20b_ctxsw_prog_get_ppc_info,
.get_local_priv_register_ctl_offset = .get_local_priv_register_ctl_offset =
gm20b_ctxsw_prog_get_local_priv_register_ctl_offset, gm20b_ctxsw_prog_get_local_priv_register_ctl_offset,
#ifdef CONFIG_GK20A_CTXSW_TRACE #ifdef CONFIG_NVGPU_FECS_TRACE
.hw_get_ts_tag_invalid_timestamp = .hw_get_ts_tag_invalid_timestamp =
gm20b_ctxsw_prog_hw_get_ts_tag_invalid_timestamp, gm20b_ctxsw_prog_hw_get_ts_tag_invalid_timestamp,
.hw_get_ts_tag = gm20b_ctxsw_prog_hw_get_ts_tag, .hw_get_ts_tag = gm20b_ctxsw_prog_hw_get_ts_tag,
@@ -261,14 +261,14 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.init_sm_id_table = vgpu_gr_init_sm_id_table, .init_sm_id_table = vgpu_gr_init_sm_id_table,
}, },
.setup = { .setup = {
#ifdef NVGPU_GRAPHICS #ifdef CONFIG_NVGPU_GRAPHICS
.bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull, .bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull,
#endif #endif
.alloc_obj_ctx = vgpu_gr_alloc_obj_ctx, .alloc_obj_ctx = vgpu_gr_alloc_obj_ctx,
.free_gr_ctx = vgpu_gr_free_gr_ctx, .free_gr_ctx = vgpu_gr_free_gr_ctx,
.set_preemption_mode = vgpu_gr_set_preemption_mode, .set_preemption_mode = vgpu_gr_set_preemption_mode,
}, },
#ifdef NVGPU_GRAPHICS #ifdef CONFIG_NVGPU_GRAPHICS
.zbc = { .zbc = {
.add_color = NULL, .add_color = NULL,
.add_depth = NULL, .add_depth = NULL,
@@ -282,12 +282,12 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.get_zcull_info = vgpu_gr_get_zcull_info, .get_zcull_info = vgpu_gr_get_zcull_info,
.program_zcull_mapping = NULL, .program_zcull_mapping = NULL,
}, },
#endif /* NVGPU_GRAPHICS */ #endif /* CONFIG_NVGPU_GRAPHICS */
.falcon = { .falcon = {
.init_ctx_state = vgpu_gr_init_ctx_state, .init_ctx_state = vgpu_gr_init_ctx_state,
.load_ctxsw_ucode = NULL, .load_ctxsw_ucode = NULL,
}, },
#ifdef CONFIG_GK20A_CTXSW_TRACE #ifdef CONFIG_NVGPU_FECS_TRACE
 .fecs_trace = {
 .alloc_user_buffer = vgpu_alloc_user_buffer,
 .free_user_buffer = vgpu_free_user_buffer,
@@ -308,7 +308,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 .get_buffer_full_mailbox_val =
 gm20b_fecs_trace_get_buffer_full_mailbox_val,
 },
-#endif /* CONFIG_GK20A_CTXSW_TRACE */
+#endif /* CONFIG_NVGPU_FECS_TRACE */
 .init = {
 .get_no_of_sm = nvgpu_gr_get_no_of_sm,
 .fs_state = vgpu_gr_init_fs_state,
@@ -527,7 +527,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 .userd = {
 .setup_sw = vgpu_userd_setup_sw,
 .cleanup_sw = vgpu_userd_cleanup_sw,
-#ifdef NVGPU_USERD
+#ifdef CONFIG_NVGPU_USERD
 .init_mem = gk20a_userd_init_mem,
 .gp_get = gk20a_userd_gp_get,
 .gp_put = gk20a_userd_gp_put,
@@ -611,7 +611,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 .init_blcg_mode = NULL,
 .elcg_init_idle_filters = NULL,
 },
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 .pmu = {
 .pmu_setup_elpg = NULL,
 .pmu_get_queue_head = NULL,
@@ -652,7 +652,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 .clk_arb_run_arbiter_cb = gp10b_clk_arb_run_arbiter_cb,
 .clk_arb_cleanup = gp10b_clk_arb_cleanup,
 },
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 .regops = {
 .exec_regops = vgpu_exec_regops,
 .get_global_whitelist_ranges =
@@ -695,7 +695,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 .debug = {
 .show_dump = NULL,
 },
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 .debugger = {
 .post_events = nvgpu_dbg_gpu_post_events,
 .dbg_set_powergate = vgpu_dbg_set_powergate,
@@ -727,7 +727,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 .read_ptimer = vgpu_read_ptimer,
 .get_timestamps_zipper = vgpu_get_timestamps_zipper,
 },
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 .css = {
 .enable_snapshot = vgpu_css_enable_snapshot_buffer,
 .disable_snapshot = vgpu_css_release_snapshot_buffer,
@@ -799,13 +799,13 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
 gops->mm = vgpu_gp10b_ops.mm;
 gops->pramin = vgpu_gp10b_ops.pramin;
 gops->therm = vgpu_gp10b_ops.therm;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 gops->pmu = vgpu_gp10b_ops.pmu;
 #endif
 gops->clk_arb = vgpu_gp10b_ops.clk_arb;
 gops->mc = vgpu_gp10b_ops.mc;
 gops->debug = vgpu_gp10b_ops.debug;
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 gops->debugger = vgpu_gp10b_ops.debugger;
 gops->regops = vgpu_gp10b_ops.regops;
 gops->perf = vgpu_gp10b_ops.perf;
@@ -813,7 +813,7 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
 #endif
 gops->bus = vgpu_gp10b_ops.bus;
 gops->ptimer = vgpu_gp10b_ops.ptimer;
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 gops->css = vgpu_gp10b_ops.css;
 #endif
 gops->falcon = vgpu_gp10b_ops.falcon;
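
All of the hunks above follow one mechanical pattern: an ops-table member and its initializer live under a single CONFIG_NVGPU_<feature> macro, so both drop out together when the feature is compiled out. A minimal, self-contained sketch of that shape (demo code only; every name here is invented, nothing is from the nvgpu tree):

/*
 * demo.c - build both ways to compare:
 *   gcc -DCONFIG_NVGPU_DEBUGGER demo.c && ./a.out
 *   gcc demo.c && ./a.out
 */
#include <stdio.h>

struct demo_ops {
	void (*debug_dump)(void);	/* always present */
#ifdef CONFIG_NVGPU_DEBUGGER
	void (*post_events)(void);	/* compiled out with the feature */
#endif
};

static void demo_dump(void)
{
	printf("debug dump\n");
}

#ifdef CONFIG_NVGPU_DEBUGGER
static void demo_post_events(void)
{
	printf("debugger events posted\n");
}
#endif

int main(void)
{
	/* Initializer entries are guarded exactly like the members. */
	struct demo_ops ops = {
		.debug_dump = demo_dump,
#ifdef CONFIG_NVGPU_DEBUGGER
		.post_events = demo_post_events,
#endif
	};

	ops.debug_dump();
#ifdef CONFIG_NVGPU_DEBUGGER
	ops.post_events();
#endif
	return 0;
}

Keeping every guard on one consistently spelled macro is what makes a tree-wide rename like this one a safe, purely textual substitution.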


@@ -293,7 +293,7 @@ int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA] = gpu_va;
 /* FECS trace Buffer */
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 gpu_va = nvgpu_vm_alloc_va(ch_vm,
 nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
 NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER),
@@ -311,7 +311,7 @@ int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 p->attr_va = g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA];
 p->page_pool_va = g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA];
 p->priv_access_map_va = g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA];
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 p->fecs_trace_va = g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA];
 #endif
 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));


@@ -163,7 +163,7 @@ int vgpu_free_user_buffer(struct gk20a *g)
 }
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 int vgpu_fecs_trace_max_entries(struct gk20a *g,
 struct nvgpu_gpu_ctxsw_trace_filter *filter)
 {
@@ -189,4 +189,4 @@ int vgpu_fecs_trace_set_filter(struct gk20a *g,
 return err;
 }
-#endif /* CONFIG_GK20A_CTXSW_TRACE */
+#endif /* CONFIG_NVGPU_FECS_TRACE */


@@ -39,7 +39,7 @@
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/gr_intr.h>
 #include <nvgpu/gr/gr_falcon.h>
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 #include <nvgpu/gr/zbc.h>
 #include <nvgpu/gr/zcull.h>
 #endif
@@ -60,7 +60,7 @@
 #include "common/gr/gr_falcon_priv.h"
 #include "common/gr/gr_intr_priv.h"
 #include "common/gr/ctx_priv.h"
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 #include "common/gr/zcull_priv.h"
 #include "common/gr/zbc_priv.h"
 #endif
@@ -155,7 +155,7 @@ int vgpu_gr_init_ctx_state(struct gk20a *g,
 return -ENXIO;
 }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 sizes->zcull_image_size = priv->constants.zcull_ctx_size;
 if (sizes->zcull_image_size == 0U) {
 return -ENXIO;
@@ -209,7 +209,7 @@ int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
 nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
 NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP, size);
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 size = nvgpu_gr_fecs_trace_buffer_size(g);
 nvgpu_log_info(g, "fecs_trace_buffer_size : %d", size);
@@ -314,7 +314,7 @@ int vgpu_gr_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num, u32 flags)
 nvgpu_err(g, "fail to commit gr ctx buffer");
 goto out;
 }
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 /* for fecs bind channel */
 err = nvgpu_pg_elpg_protected_call(g,
 vgpu_gr_load_golden_ctx_image(g, c->virt_ctx));
@@ -478,7 +478,7 @@ cleanup:
 return err;
 }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 static int vgpu_gr_init_gr_zcull(struct gk20a *g, struct nvgpu_gr *gr,
 u32 size)
 {
@@ -582,7 +582,7 @@ u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g)
 return priv->constants.max_lts_per_ltc;
 }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 int vgpu_gr_add_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
 struct nvgpu_gr_zbc_entry *zbc_val)
 {
@@ -671,7 +671,7 @@ static void vgpu_remove_gr_support(struct gk20a *g)
 nvgpu_gr_config_deinit(gr->g, gr->config);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 nvgpu_gr_zcull_deinit(gr->g, gr->zcull);
 #endif
 }
@@ -722,7 +722,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 goto clean_up;
 }
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 err = nvgpu_gr_hwpm_map_init(g, &g->gr->hwpm_map,
 nvgpu_gr_falcon_get_pm_ctxsw_image_size(g->gr->falcon));
 if (err != 0) {
@@ -731,7 +731,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 }
 #endif
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 err = vgpu_gr_init_gr_zcull(g, gr,
 nvgpu_gr_falcon_get_zcull_image_size(g->gr->falcon));
 if (err) {
@@ -822,7 +822,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
 g->ops.channel.set_error_notifier(ch,
 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY);
 break;
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 case TEGRA_VGPU_GR_INTR_SM_EXCEPTION:
 g->ops.debugger.post_events(ch);
 break;
@@ -1220,7 +1220,7 @@ int vgpu_gr_update_pc_sampling(struct nvgpu_channel *ch, bool enable)
 void vgpu_gr_init_cyclestats(struct gk20a *g)
 {
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 bool snapshots_supported = true;
 u32 max_css_buffer_size;


@@ -28,7 +28,7 @@
 struct gk20a;
 struct nvgpu_channel;
 struct gr_gk20a;
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 struct nvgpu_gr_zcull_info;
 struct nvgpu_gr_zcull;
 struct nvgpu_gr_zbc;
@@ -55,7 +55,7 @@ u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, struct nvgpu_gr_config *config,
 u32 vgpu_gr_get_max_fbps_count(struct gk20a *g);
 u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g);
 u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct nvgpu_channel *c,
 u64 zcull_va, u32 mode);
 int vgpu_gr_get_zcull_info(struct gk20a *g,


@@ -52,7 +52,7 @@
 #include "hal/therm/therm_gp10b.h"
 #include "hal/therm/therm_gv11b.h"
 #include "hal/gr/fecs_trace/fecs_trace_gv11b.h"
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 #include "hal/gr/zbc/zbc_gv11b.h"
 #endif
 #include "hal/gr/hwpm_map/hwpm_map_gv100.h"
@@ -117,7 +117,7 @@
 #include <nvgpu/vgpu/ce_vgpu.h>
 #include <nvgpu/vgpu/vm_vgpu.h>
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 #include <nvgpu/gr/zbc.h>
 #endif
@@ -130,7 +130,7 @@
 static const struct gpu_ops vgpu_gv11b_ops = {
 .ltc = {
 .determine_L2_size_bytes = vgpu_determine_L2_size_bytes,
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .set_zbc_s_entry = NULL,
 .set_zbc_color_entry = NULL,
 .set_zbc_depth_entry = NULL,
@@ -160,7 +160,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .get_num_pce = vgpu_ce_get_num_pce,
 },
 .gr = {
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 .set_alpha_circular_buffer_size = NULL,
 .set_circular_buffer_size = NULL,
 .get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
@@ -231,7 +231,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .get_patch_count = gm20b_ctxsw_prog_get_patch_count,
 .set_patch_count = gm20b_ctxsw_prog_set_patch_count,
 .set_patch_addr = gm20b_ctxsw_prog_set_patch_addr,
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .set_zcull_ptr = gv11b_ctxsw_prog_set_zcull_ptr,
 .set_zcull = gm20b_ctxsw_prog_set_zcull,
 .set_zcull_mode_no_ctxsw =
@@ -273,7 +273,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .get_ppc_info = gm20b_ctxsw_prog_get_ppc_info,
 .get_local_priv_register_ctl_offset =
 gm20b_ctxsw_prog_get_local_priv_register_ctl_offset,
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 .hw_get_ts_tag_invalid_timestamp =
 gm20b_ctxsw_prog_hw_get_ts_tag_invalid_timestamp,
 .hw_get_ts_tag = gm20b_ctxsw_prog_hw_get_ts_tag,
@@ -305,7 +305,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .init_sm_id_table = vgpu_gr_init_sm_id_table,
 },
 .setup = {
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull,
 #endif
 .alloc_obj_ctx = vgpu_gr_alloc_obj_ctx,
@@ -313,7 +313,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .free_subctx = vgpu_channel_free_ctx_header,
 .set_preemption_mode = vgpu_gr_set_preemption_mode,
 },
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .zbc = {
 .add_color = NULL,
 .add_depth = NULL,
@@ -327,8 +327,8 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .get_zcull_info = vgpu_gr_get_zcull_info,
 .program_zcull_mapping = NULL,
 },
-#endif /* NVGPU_GRAPHICS */
-#ifdef NVGPU_DEBUGGER
+#endif /* CONFIG_NVGPU_GRAPHICS */
+#ifdef CONFIG_NVGPU_DEBUGGER
 .hwpm_map = {
 .align_regs_perf_pma =
 gv100_gr_hwpm_map_align_regs_perf_pma,
@@ -338,7 +338,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .init_ctx_state = vgpu_gr_init_ctx_state,
 .load_ctxsw_ucode = NULL,
 },
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 .fecs_trace = {
 .alloc_user_buffer = vgpu_alloc_user_buffer,
 .free_user_buffer = vgpu_free_user_buffer,
@@ -359,7 +359,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .get_buffer_full_mailbox_val =
 gv11b_fecs_trace_get_buffer_full_mailbox_val,
 },
-#endif /* CONFIG_GK20A_CTXSW_TRACE */
+#endif /* CONFIG_NVGPU_FECS_TRACE */
 .init = {
 .get_no_of_sm = nvgpu_gr_get_no_of_sm,
 .get_nonpes_aware_tpc =
@@ -613,7 +613,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .userd = {
 .setup_sw = vgpu_userd_setup_sw,
 .cleanup_sw = vgpu_userd_cleanup_sw,
-#ifdef NVGPU_USERD
+#ifdef CONFIG_NVGPU_USERD
 .init_mem = gk20a_userd_init_mem,
 .gp_get = gv11b_userd_gp_get,
 .gp_put = gv11b_userd_gp_put,
@@ -704,7 +704,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .init_blcg_mode = NULL,
 .elcg_init_idle_filters = NULL,
 },
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 .pmu = {
 .pmu_setup_elpg = NULL,
 .pmu_get_queue_head = NULL,
@@ -747,7 +747,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .clk_arb_run_arbiter_cb = gp10b_clk_arb_run_arbiter_cb,
 .clk_arb_cleanup = gp10b_clk_arb_cleanup,
 },
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 .regops = {
 .exec_regops = vgpu_exec_regops,
 .get_global_whitelist_ranges =
@@ -791,7 +791,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .debug = {
 .show_dump = NULL,
 },
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 .debugger = {
 .post_events = nvgpu_dbg_gpu_post_events,
 .dbg_set_powergate = vgpu_dbg_set_powergate,
@@ -823,7 +823,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .read_ptimer = vgpu_read_ptimer,
 .get_timestamps_zipper = vgpu_get_timestamps_zipper,
 },
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 .css = {
 .enable_snapshot = vgpu_css_enable_snapshot_buffer,
 .disable_snapshot = vgpu_css_release_snapshot_buffer,
@@ -894,13 +894,13 @@ int vgpu_gv11b_init_hal(struct gk20a *g)
 gops->netlist = vgpu_gv11b_ops.netlist;
 gops->mm = vgpu_gv11b_ops.mm;
 gops->therm = vgpu_gv11b_ops.therm;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 gops->pmu = vgpu_gv11b_ops.pmu;
 #endif
 gops->clk_arb = vgpu_gv11b_ops.clk_arb;
 gops->mc = vgpu_gv11b_ops.mc;
 gops->debug = vgpu_gv11b_ops.debug;
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 gops->debugger = vgpu_gv11b_ops.debugger;
 gops->regops = vgpu_gv11b_ops.regops;
 gops->perf = vgpu_gv11b_ops.perf;
@@ -908,7 +908,7 @@ int vgpu_gv11b_init_hal(struct gk20a *g)
 #endif
 gops->bus = vgpu_gv11b_ops.bus;
 gops->ptimer = vgpu_gv11b_ops.ptimer;
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 gops->css = vgpu_gv11b_ops.css;
 #endif
 gops->falcon = vgpu_gv11b_ops.falcon;


@@ -64,7 +64,7 @@ void vgpu_remove_support_common(struct gk20a *g)
 struct tegra_vgpu_intr_msg msg;
 int err;
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 if (g->dbg_regops_tmp_buf) {
 nvgpu_kfree(g, g->dbg_regops_tmp_buf);
 }
@@ -82,7 +82,7 @@ void vgpu_remove_support_common(struct gk20a *g)
 g->mm.remove_support(&g->mm);
 }
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 nvgpu_free_cyclestats_snapshot_data(g);
 #endif


@@ -67,7 +67,7 @@ int vgpu_intr_thread(void *dev_id)
 vgpu_fifo_isr(g, &msg->info.fifo_intr);
 }
 break;
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 case TEGRA_VGPU_EVENT_FECS_TRACE:
 vgpu_fecs_trace_data_update(g);
 break;


@@ -187,7 +187,7 @@ int vgpu_intr_thread(void *dev_id)
 vgpu_fifo_isr(g, &msg->info.fifo_intr);
 }
 break;
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 case TEGRA_VGPU_EVENT_FECS_TRACE:
 vgpu_fecs_trace_data_update(g);
 break;
@@ -230,7 +230,7 @@ void vgpu_remove_support_common(struct gk20a *g)
 struct tegra_vgpu_intr_msg msg;
 int err;
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 if (g->dbg_regops_tmp_buf) {
 nvgpu_kfree(g, g->dbg_regops_tmp_buf);
 }


@@ -347,7 +347,7 @@ void gm20b_fb_set_debug_mode(struct gk20a *g, bool enable)
 fb_mmu_debug_ctrl_debug_m(), fb_debug_ctrl);
 gk20a_writel(g, fb_mmu_debug_ctrl_r(), reg_val);
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 g->ops.gr.set_debug_mode(g, enable);
 #endif
 }


@@ -53,7 +53,7 @@ static void gv11b_init_nvlink_soc_credits(struct gk20a *g)
 nvgpu_log(g, gpu_dbg_info, "nvlink soc credits init done by bpmp");
 } else {
 #ifndef __NVGPU_POSIX__
-#ifdef CONFIG_TEGRA_NVLINK
+#ifdef CONFIG_NVGPU_NVLINK
 nvgpu_mss_nvlink_init_credits(g);
 #endif
 #endif


@@ -258,7 +258,7 @@ bool gk20a_fifo_handle_mmu_fault_locked(
 fault_id = nvgpu_readl(g, fifo_intr_mmu_fault_id_r());
 fake_fault = false;
 }
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 nvgpu_mutex_acquire(&g->fifo.deferred_reset_mutex);
 g->fifo.deferred_reset_pending = false;
 nvgpu_mutex_release(&g->fifo.deferred_reset_mutex);
@@ -290,7 +290,7 @@ bool gk20a_fifo_handle_mmu_fault_locked(
 if (ctxsw) {
 g->ops.gr.falcon.dump_stats(g);
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 nvgpu_err(g, " gr_status_r: 0x%x",
 g->ops.gr.get_gr_status(g));
 #endif
@@ -340,7 +340,7 @@ bool gk20a_fifo_handle_mmu_fault_locked(
 /* check if engine reset should be deferred */
 if (engine_id != NVGPU_INVALID_ENG_ID) {
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 bool defer = nvgpu_engine_should_defer_reset(g,
 engine_id, mmfault_info.client_type,
 fake_fault);
@@ -360,12 +360,12 @@ bool gk20a_fifo_handle_mmu_fault_locked(
 } else {
 #endif
 nvgpu_engine_reset(g, engine_id);
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 }
 #endif
 }
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 if (tsg != NULL) {
 nvgpu_gr_fecs_trace_add_tsg_reset(g, tsg);
 }


@@ -32,7 +32,7 @@
 #include <nvgpu/channel.h>
 #include <nvgpu/tsg.h>
 #include <nvgpu/preempt.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/mutex.h>
 #endif
@@ -101,7 +101,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
 {
 int ret = 0;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 int mutex_ret = 0;
 #endif
@@ -109,12 +109,12 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
 /* we have no idea which runlist we are using. lock all */
 nvgpu_runlist_lock_active_runlists(g);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 PMU_MUTEX_ID_FIFO, &token);
 #endif
 ret = gk20a_fifo_preempt_locked(g, ch->chid, ID_TYPE_CHANNEL);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 if (mutex_ret == 0) {
 if (nvgpu_pmu_lock_release(g, g->pmu,
 PMU_MUTEX_ID_FIFO, &token) != 0) {
@@ -150,7 +150,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
 int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 {
 int ret = 0;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 int mutex_ret = 0;
 #endif
@@ -158,12 +158,12 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 /* we have no idea which runlist we are using. lock all */
 nvgpu_runlist_lock_active_runlists(g);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 PMU_MUTEX_ID_FIFO, &token);
 #endif
 ret = gk20a_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 if (mutex_ret == 0) {
 if (nvgpu_pmu_lock_release(g, g->pmu,
 PMU_MUTEX_ID_FIFO, &token) != 0) {
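
The preempt paths above share a second pattern worth noting: the PMU FIFO mutex is acquired and released only under CONFIG_NVGPU_LS_PMU, while the preempt itself is always compiled. A self-contained sketch of that control flow (the pmu_lock_* helpers below are hypothetical stand-ins, not the nvgpu API):

/*
 * sketch.c - build both ways:
 *   gcc -DCONFIG_NVGPU_LS_PMU sketch.c && ./a.out
 *   gcc sketch.c && ./a.out
 */
#include <stdio.h>

#ifdef CONFIG_NVGPU_LS_PMU
static int pmu_lock_acquire(unsigned int *token)
{
	*token = 42U;	/* stand-in for a real mutex token */
	printf("PMU FIFO mutex acquired\n");
	return 0;
}

static void pmu_lock_release(unsigned int token)
{
	printf("PMU FIFO mutex released (token %u)\n", token);
}
#endif

static int preempt_locked(void)
{
	printf("preempt issued\n");
	return 0;
}

int main(void)
{
	int ret;
#ifdef CONFIG_NVGPU_LS_PMU
	unsigned int token = 0U;
	int mutex_ret = pmu_lock_acquire(&token);
#endif
	ret = preempt_locked();	/* always runs, with or without the mutex */
#ifdef CONFIG_NVGPU_LS_PMU
	if (mutex_ret == 0)
		pmu_lock_release(token);
#endif
	return ret;
}

Releasing only when the acquire succeeded (mutex_ret == 0) mirrors the guarded release checks in the hunks above.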


@@ -37,7 +37,7 @@
 #include <nvgpu/engine_status.h>
 #include <nvgpu/preempt.h>
 #include <nvgpu/nvgpu_err.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/mutex.h>
 #endif
@@ -88,7 +88,7 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
 {
 struct nvgpu_fifo *f = &g->fifo;
 struct nvgpu_runlist_info *runlist;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 int mutex_ret = 0;
 #endif
@@ -96,7 +96,7 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
 /* runlist_lock are locked by teardown and sched are disabled too */
 nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 PMU_MUTEX_ID_FIFO, &token);
 #endif
@@ -116,7 +116,7 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
 runlist->reset_eng_bitmask = runlist->eng_bitmask;
 }
 }
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 if (mutex_ret == 0) {
 int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO,
 &token);
@@ -430,7 +430,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 {
 struct nvgpu_fifo *f = &g->fifo;
 int ret = 0;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 int mutex_ret = 0;
 #endif
@@ -448,12 +448,12 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 /* WAR for Bug 2065990 */
 nvgpu_tsg_disable_sched(g, tsg);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 PMU_MUTEX_ID_FIFO, &token);
 #endif
 ret = gv11b_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG);
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 if (mutex_ret == 0) {
 int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO,
 &token);


@@ -44,7 +44,7 @@ int gv11b_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
 nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
-#ifdef NVGPU_REPLAYABLE_FAULT
+#ifdef CONFIG_NVGPU_REPLAYABLE_FAULT
 if ((flags & NVGPU_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE) != 0U) {
 replayable = true;
 }


@@ -35,7 +35,7 @@
 #define FECS_MAILBOX_0_ACK_RESTORE 0x4U
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 int gk20a_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next)
 {
 return nvgpu_runlist_reschedule(ch, preempt_next, true);
@@ -138,7 +138,7 @@ void gk20a_runlist_write_state(struct gk20a *g, u32 runlists_mask,
 nvgpu_writel(g, fifo_sched_disable_r(), reg_val);
 }
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 /* trigger host preempt of GR pending load ctx if that ctx is not for ch */
 int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 bool wait_preempt)


@@ -29,7 +29,7 @@ struct nvgpu_channel;
 struct nvgpu_tsg;
 struct gk20a;
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 int gk20a_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next);
 int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 bool wait_preempt);


@@ -26,7 +26,7 @@
 #include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 int gv11b_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next)
 {
 /*


@@ -27,7 +27,7 @@
 struct nvgpu_channel;
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 int gv11b_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next);
 #endif
 u32 gv11b_runlist_count_max(void);


@@ -80,7 +80,7 @@ u32 gm20b_gr_config_get_tpc_count_in_gpc(struct gk20a *g,
 return gr_gpc0_fs_gpc_num_available_tpcs_v(tmp);
 }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 u32 gm20b_gr_config_get_zcull_count_in_gpc(struct gk20a *g,
 struct nvgpu_gr_config *config, u32 gpc_index)
 {


@@ -34,7 +34,7 @@ u32 gm20b_gr_config_get_gpc_tpc_mask(struct gk20a *g,
 struct nvgpu_gr_config *config, u32 gpc_index);
 u32 gm20b_gr_config_get_tpc_count_in_gpc(struct gk20a *g,
 struct nvgpu_gr_config *config, u32 gpc_index);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 u32 gm20b_gr_config_get_zcull_count_in_gpc(struct gk20a *g,
 struct nvgpu_gr_config *config, u32 gpc_index);
 #endif


@@ -80,7 +80,7 @@ void gm20b_ctxsw_prog_set_patch_addr(struct gk20a *g,
 ctxsw_prog_main_image_patch_adr_hi_o(), u64_hi32(addr));
 }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 void gm20b_ctxsw_prog_set_zcull_ptr(struct gk20a *g, struct nvgpu_mem *ctx_mem,
 u64 addr)
 {
@@ -275,7 +275,7 @@ u32 gm20b_ctxsw_prog_get_local_priv_register_ctl_offset(u32 *context)
 return ctxsw_prog_local_priv_register_ctl_offset_v(data);
 }
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 u32 gm20b_ctxsw_prog_hw_get_ts_tag_invalid_timestamp(void)
 {
 return ctxsw_prog_record_timestamp_timestamp_hi_tag_invalid_timestamp_v();


@@ -39,7 +39,7 @@ void gm20b_ctxsw_prog_set_patch_count(struct gk20a *g,
 struct nvgpu_mem *ctx_mem, u32 count);
 void gm20b_ctxsw_prog_set_patch_addr(struct gk20a *g,
 struct nvgpu_mem *ctx_mem, u64 addr);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 void gm20b_ctxsw_prog_set_zcull_ptr(struct gk20a *g, struct nvgpu_mem *ctx_mem,
 u64 addr);
 void gm20b_ctxsw_prog_set_zcull(struct gk20a *g, struct nvgpu_mem *ctx_mem,
@@ -78,7 +78,7 @@ void gm20b_ctxsw_prog_get_extended_buffer_size_offset(u32 *context,
 u32 *size, u32 *offset);
 void gm20b_ctxsw_prog_get_ppc_info(u32 *context, u32 *num_ppcs, u32 *ppc_mask);
 u32 gm20b_ctxsw_prog_get_local_priv_register_ctl_offset(u32 *context);
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 u32 gm20b_ctxsw_prog_hw_get_ts_tag_invalid_timestamp(void);
 u32 gm20b_ctxsw_prog_hw_get_ts_tag(u64 ts);
 u64 gm20b_ctxsw_prog_hw_record_ts_timestamp(u64 ts);


@@ -696,7 +696,7 @@ int gm20b_gr_falcon_init_ctx_state(struct gk20a *g,
 "query golden image size failed");
 return ret;
 }
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 ret = gm20b_gr_falcon_ctrl_ctxsw(g,
 NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_PM_IMAGE_SIZE,
 0, &sizes->pm_ctxsw_image_size);
@@ -707,7 +707,7 @@ int gm20b_gr_falcon_init_ctx_state(struct gk20a *g,
 }
 #endif
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 ret = gm20b_gr_falcon_ctrl_ctxsw(g,
 NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
 0, &sizes->zcull_image_size);
@@ -851,7 +851,7 @@ int gm20b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 fecs_method, data, ret_val);
 switch (fecs_method) {
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 case NVGPU_GR_FALCON_METHOD_CTXSW_STOP:
 op.method.addr =
 gr_fecs_method_push_adr_stop_ctxsw_v();
@@ -892,7 +892,7 @@ int gm20b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 gr_fecs_method_push_adr_discover_image_size_v();
 op.mailbox.ret = ret_val;
 break;
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 case NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE:
 op.method.addr =
 gr_fecs_method_push_adr_discover_zcull_image_size_v();
@@ -900,7 +900,7 @@ int gm20b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 break;
 #endif
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 case NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_PM_IMAGE_SIZE:
 op.method.addr =
 gr_fecs_method_push_adr_discover_pm_image_size_v();
@@ -908,8 +908,11 @@ int gm20b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 sleepduringwait = true;
 break;
 #endif
-/* Replace NVGPU_GRAPHICS switch here with relevant power feature switch */
-#ifdef NVGPU_GRAPHICS
+/*
+ * Replace CONFIG_NVGPU_GRAPHICS switch here with relevant
+ * power feature switch.
+ */
+#ifdef CONFIG_NVGPU_GRAPHICS
 case NVGPU_GR_FALCON_METHOD_REGLIST_DISCOVER_IMAGE_SIZE:
 op.method.addr =
 gr_fecs_method_push_adr_discover_reglist_image_size_v();
@@ -956,7 +959,7 @@ int gm20b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 op.cond.fail = GR_IS_UCODE_OP_AND;
 sleepduringwait = true;
 break;
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 case NVGPU_GR_FALCON_METHOD_FECS_TRACE_FLUSH:
 op.method.addr =
 gr_fecs_method_push_adr_write_timestamp_record_v();
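
The ctrl_ctxsw switch above also illustrates that individual case labels can carry their own feature guard while the method constants stay unconditional; a method whose case is compiled out simply falls through to the default error path. A compilable sketch of that pattern (enum and messages invented for the demo):

/*
 * sketch.c - build both ways:
 *   gcc -DCONFIG_NVGPU_GRAPHICS sketch.c && ./a.out
 *   gcc sketch.c && ./a.out
 */
#include <stdio.h>

enum demo_method {
	METHOD_IMAGE_SIZE,	/* constants exist in every build */
	METHOD_ZCULL_IMAGE_SIZE,
};

static int ctrl_ctxsw(enum demo_method m)
{
	switch (m) {
	case METHOD_IMAGE_SIZE:
		printf("discover image size\n");
		return 0;
#ifdef CONFIG_NVGPU_GRAPHICS
	case METHOD_ZCULL_IMAGE_SIZE:	/* only handled with graphics on */
		printf("discover zcull image size\n");
		return 0;
#endif
	default:
		printf("unknown method\n");
		return -1;
	}
}

int main(void)
{
	ctrl_ctxsw(METHOD_IMAGE_SIZE);
	return ctrl_ctxsw(METHOD_ZCULL_IMAGE_SIZE) == 0 ? 0 : 1;
}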


@@ -40,7 +40,7 @@ int gp10b_gr_falcon_init_ctx_state(struct gk20a *g,
 if (err != 0) {
 return err;
 }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 err = g->ops.gr.falcon.ctrl_ctxsw(g,
 NVGPU_GR_FALCON_METHOD_PREEMPT_IMAGE_SIZE, 0U,
 &sizes->preempt_image_size);
@@ -58,7 +58,7 @@ int gp10b_gr_falcon_init_ctx_state(struct gk20a *g,
 int gp10b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 u32 data, u32 *ret_val)
 {
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 struct nvgpu_fecs_method_op op = {
 .mailbox = { .id = 0U, .data = 0U, .ret = NULL,
 .clr = ~U32(0U), .ok = 0U, .fail = 0U},
@@ -73,7 +73,7 @@ int gp10b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 fecs_method, data, ret_val);
 switch (fecs_method) {
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 case NVGPU_GR_FALCON_METHOD_PREEMPT_IMAGE_SIZE:
 op.method.addr =
 gr_fecs_method_push_adr_discover_preemption_image_size_v();


@@ -438,7 +438,7 @@ int gr_gm20b_update_pc_sampling(struct nvgpu_channel *c,
 void gr_gm20b_init_cyclestats(struct gk20a *g)
 {
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS, true);
 nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT, true);
 #else


@@ -471,7 +471,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 nvgpu_gr_ctx_set_cilp_preempt_pending(gr_ctx, true);
 g->gr->cilp_preempt_pending_chid = fault_ch->chid;
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
 g->ops.tsg.post_event_id(tsg, NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED);
 #endif
@@ -487,7 +487,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 bool sm_debugger_attached, struct nvgpu_channel *fault_ch,
 bool *early_exit, bool *ignore_debugger)
 {
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 bool cilp_enabled = false;
 struct nvgpu_tsg *tsg;
@@ -757,7 +757,7 @@ clean_up:
 return err;
 }
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 int gr_gp10b_set_boosted_ctx(struct nvgpu_channel *ch,
 bool boost)
 {


@@ -55,7 +55,7 @@ u32 get_ecc_override_val(struct gk20a *g);
 int gr_gp10b_suspend_contexts(struct gk20a *g,
 struct dbg_session_gk20a *dbg_s,
 int *ctx_resident_ch_fd);
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 int gr_gp10b_set_boosted_ctx(struct nvgpu_channel *ch,
 bool boost);
 #endif


@@ -454,7 +454,7 @@ void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, fuse_val);
 }
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g,
 u32 gpc, u32 tpc, u32 sm,
 u32 warp_esr_error,
@@ -628,7 +628,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 bool sm_debugger_attached, struct nvgpu_channel *fault_ch,
 bool *early_exit, bool *ignore_debugger)
 {
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 int ret;
 bool cilp_enabled = false;
 u32 warp_esr_error = gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(warp_esr);


@@ -258,7 +258,7 @@ void gm20b_gr_init_tpc_mask(struct gk20a *g, u32 gpc_index, u32 pes_tpc_mask)
 nvgpu_writel(g, gr_fe_tpc_fs_r(), pes_tpc_mask);
 }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 void gm20b_gr_init_rop_mapping(struct gk20a *g,
 struct nvgpu_gr_config *gr_config)
 {


@@ -48,7 +48,7 @@ u32 gm20b_gr_init_get_sm_id_size(void);
 int gm20b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
 struct nvgpu_gr_config *gr_config);
 void gm20b_gr_init_tpc_mask(struct gk20a *g, u32 gpc_index, u32 pes_tpc_mask);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 void gm20b_gr_init_rop_mapping(struct gk20a *g,
 struct nvgpu_gr_config *gr_config);
 #endif


@@ -474,7 +474,7 @@ void gv11b_gr_init_tpc_mask(struct gk20a *g, u32 gpc_index, u32 pes_tpc_mask)
 nvgpu_writel(g, gr_fe_tpc_fs_r(gpc_index), pes_tpc_mask);
 }
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 void gv11b_gr_init_rop_mapping(struct gk20a *g,
 struct nvgpu_gr_config *gr_config)
 {


@@ -42,7 +42,7 @@ void gv11b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
 int gv11b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
 struct nvgpu_gr_config *gr_config);
 void gv11b_gr_init_tpc_mask(struct gk20a *g, u32 gpc_index, u32 pes_tpc_mask);
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 void gv11b_gr_init_rop_mapping(struct gk20a *g,
 struct nvgpu_gr_config *gr_config);
 #endif


@@ -64,7 +64,7 @@ int gm20b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 goto fail;
 }
-#if defined(NVGPU_DEBUGGER) && defined(NVGPU_GRAPHICS)
+#if defined(CONFIG_NVGPU_DEBUGGER) && defined(CONFIG_NVGPU_GRAPHICS)
 if (class_num == MAXWELL_B) {
 switch (offset << 2) {
 case NVB197_SET_SHADER_EXCEPTIONS:


@@ -111,7 +111,7 @@ int gp10b_gr_intr_handle_fecs_error(struct gk20a *g,
 struct nvgpu_channel *ch;
 u32 chid = NVGPU_INVALID_CHANNEL_ID;
 int ret = 0;
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
 struct nvgpu_tsg *tsg;
 #endif
 struct nvgpu_fecs_host_intr_status fecs_host_intr;
@@ -156,12 +156,12 @@ int gp10b_gr_intr_handle_fecs_error(struct gk20a *g,
 goto clean_up;
 }
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 /* Post events to UMD */
 g->ops.debugger.post_events(ch);
 #endif
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
 tsg = &g->fifo.tsg[ch->tsgid];
 g->ops.tsg.post_event_id(tsg,
 NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);
@@ -219,7 +219,7 @@ int gp10b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 goto fail;
 }
-#if defined(NVGPU_DEBUGGER) && defined(NVGPU_GRAPHICS)
+#if defined(CONFIG_NVGPU_DEBUGGER) && defined(CONFIG_NVGPU_GRAPHICS)
 if (class_num == PASCAL_A) {
 switch (offset << 2) {
 case NVC097_SET_SHADER_EXCEPTIONS:


@@ -199,7 +199,7 @@ int gv11b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 goto fail;
 }
-#if defined(NVGPU_DEBUGGER) && defined(NVGPU_GRAPHICS)
+#if defined(CONFIG_NVGPU_DEBUGGER) && defined(CONFIG_NVGPU_GRAPHICS)
 if (class_num == VOLTA_A) {
 switch (offset << 2) {
 case NVC397_SET_SHADER_EXCEPTIONS:


@@ -91,7 +91,7 @@ int tu104_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 goto fail;
 }
-#if defined(NVGPU_DEBUGGER) && defined(NVGPU_GRAPHICS)
+#if defined(CONFIG_NVGPU_DEBUGGER) && defined(CONFIG_NVGPU_GRAPHICS)
 if (class_num == TURING_A) {
 switch (offset << 2) {
 case NVC597_SET_SHADER_EXCEPTIONS:
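
All four handle_sw_method variants (gm20b, gp10b, gv11b, tu104) use the same compound guard: the shader-exception handling exists only when the debugger and graphics features are both enabled. A minimal compilable sketch of that shape (the offset constant below is hypothetical, not a real class method):

/*
 * sketch.c - build with both flags to enable the handler:
 *   gcc -DCONFIG_NVGPU_DEBUGGER -DCONFIG_NVGPU_GRAPHICS sketch.c && ./a.out
 */
#include <stdio.h>

static int handle_sw_method(unsigned int offset)
{
#if defined(CONFIG_NVGPU_DEBUGGER) && defined(CONFIG_NVGPU_GRAPHICS)
	switch (offset << 2) {
	case 0x1528:	/* hypothetical SET_SHADER_EXCEPTIONS offset */
		printf("shader exceptions set\n");
		return 0;
	default:
		break;
	}
#endif
	(void)offset;
	printf("method not handled\n");
	return -1;
}

int main(void)
{
	return handle_sw_method(0x1528 >> 2) == 0 ? 0 : 1;
}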


@@ -33,14 +33,14 @@
 #include <nvgpu/fifo/userd.h>
 #include <nvgpu/fuse.h>
 #include <nvgpu/regops.h>
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 #include <nvgpu/gr/zbc.h>
 #endif
 #include <nvgpu/gr/gr.h>
 #include <nvgpu/gr/gr_intr.h>
 #include <nvgpu/gr/gr_falcon.h>
 #include <nvgpu/gr/setup.h>
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/pmu_perfmon.h>
 #endif
 #include <nvgpu/gr/fecs_trace.h>
@@ -83,7 +83,7 @@
 #include "hal/fifo/mmu_fault_gk20a.h"
 #include "hal/fifo/mmu_fault_gm20b.h"
 #include "hal/rc/rc_gk20a.h"
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 #include "hal/gr/zbc/zbc_gm20b.h"
 #include "hal/gr/zcull/zcull_gm20b.h"
 #endif
@@ -107,7 +107,7 @@
 #include "hal/fifo/channel_gk20a.h"
 #include "hal/fifo/channel_gm20b.h"
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 #include "common/pmu/pg/pg_sw_gm20b.h"
 #endif
@@ -121,10 +121,10 @@
 static const struct gpu_ops gm20b_ops = {
 .ltc = {
 .determine_L2_size_bytes = gm20b_determine_L2_size_bytes,
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
 .set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
-#endif /*NVGPU_GRAPHICS */
+#endif /*CONFIG_NVGPU_GRAPHICS */
 .init_fs_state = gm20b_ltc_init_fs_state,
 .flush = gm20b_flush_ltc,
 .set_enabled = gm20b_ltc_set_enabled,
@@ -150,7 +150,7 @@ static const struct gpu_ops gm20b_ops = {
 .isr_nonstall = gk20a_ce2_nonstall_isr,
 },
 .gr = {
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 .get_gr_status = gr_gm20b_get_gr_status,
 .set_alpha_circular_buffer_size =
 gr_gm20b_set_alpha_circular_buffer_size,
@@ -196,7 +196,7 @@ static const struct gpu_ops gm20b_ops = {
 .esr_bpt_pending_events = gm20b_gr_esr_bpt_pending_events,
 .disable_ctxsw = nvgpu_gr_disable_ctxsw,
 .enable_ctxsw = nvgpu_gr_enable_ctxsw,
-#endif /* NVGPU_DEBUGGER */
+#endif /* CONFIG_NVGPU_DEBUGGER */
 .ctxsw_prog = {
 .hw_get_fecs_header_size =
 gm20b_ctxsw_prog_hw_get_fecs_header_size,
@@ -213,14 +213,14 @@ static const struct gpu_ops gm20b_ops = {
 .get_patch_count = gm20b_ctxsw_prog_get_patch_count,
 .set_patch_count = gm20b_ctxsw_prog_set_patch_count,
 .set_patch_addr = gm20b_ctxsw_prog_set_patch_addr,
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .set_zcull_ptr = gm20b_ctxsw_prog_set_zcull_ptr,
 .set_zcull = gm20b_ctxsw_prog_set_zcull,
 .set_zcull_mode_no_ctxsw =
 gm20b_ctxsw_prog_set_zcull_mode_no_ctxsw,
 .is_zcull_mode_separate_buffer =
 gm20b_ctxsw_prog_is_zcull_mode_separate_buffer,
-#endif /* NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_GRAPHICS */
 .set_pm_ptr = gm20b_ctxsw_prog_set_pm_ptr,
 .set_pm_mode = gm20b_ctxsw_prog_set_pm_mode,
 .set_pm_smpc_mode = gm20b_ctxsw_prog_set_pm_smpc_mode,
@@ -249,7 +249,7 @@ static const struct gpu_ops gm20b_ops = {
 .get_ppc_info = gm20b_ctxsw_prog_get_ppc_info,
 .get_local_priv_register_ctl_offset =
 gm20b_ctxsw_prog_get_local_priv_register_ctl_offset,
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 .hw_get_ts_tag_invalid_timestamp =
 gm20b_ctxsw_prog_hw_get_ts_tag_invalid_timestamp,
 .hw_get_ts_tag = gm20b_ctxsw_prog_hw_get_ts_tag,
@@ -268,16 +268,16 @@ static const struct gpu_ops gm20b_ops = {
 .get_gpc_tpc_mask = gm20b_gr_config_get_gpc_tpc_mask,
 .get_tpc_count_in_gpc =
 gm20b_gr_config_get_tpc_count_in_gpc,
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .get_zcull_count_in_gpc =
 gm20b_gr_config_get_zcull_count_in_gpc,
-#endif /* NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_GRAPHICS */
 .get_pes_tpc_mask = gm20b_gr_config_get_pes_tpc_mask,
 .get_pd_dist_skip_table_size =
 gm20b_gr_config_get_pd_dist_skip_table_size,
 .init_sm_id_table = gm20b_gr_config_init_sm_id_table,
 },
-#ifdef CONFIG_GK20A_CTXSW_TRACE
+#ifdef CONFIG_NVGPU_FECS_TRACE
 .fecs_trace = {
 .alloc_user_buffer = nvgpu_gr_fecs_trace_ring_alloc,
 .free_user_buffer = nvgpu_gr_fecs_trace_ring_free,
@@ -300,15 +300,15 @@ static const struct gpu_ops gm20b_ops = {
 .get_write_index = gm20b_fecs_trace_get_write_index,
 .set_read_index = gm20b_fecs_trace_set_read_index,
 },
-#endif /* CONFIG_GK20A_CTXSW_TRACE */
+#endif /* CONFIG_NVGPU_FECS_TRACE */
 .setup = {
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .bind_ctxsw_zcull = nvgpu_gr_setup_bind_ctxsw_zcull,
-#endif /* NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_GRAPHICS */
 .alloc_obj_ctx = nvgpu_gr_setup_alloc_obj_ctx,
 .free_gr_ctx = nvgpu_gr_setup_free_gr_ctx,
 },
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .zbc = {
 .add_color = gm20b_gr_zbc_add_color,
 .add_depth = gm20b_gr_zbc_add_depth,
@@ -323,7 +323,7 @@ static const struct gpu_ops gm20b_ops = {
 .get_zcull_info = gm20b_gr_get_zcull_info,
 .program_zcull_mapping = gm20b_gr_program_zcull_mapping,
 },
-#endif /* NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_GRAPHICS */
 .init = {
 .get_no_of_sm = nvgpu_gr_get_no_of_sm,
 .wait_initialized = nvgpu_gr_wait_initialized,
@@ -338,7 +338,7 @@ static const struct gpu_ops gm20b_ops = {
 .sm_id_config = gm20b_gr_init_sm_id_config,
 .sm_id_numbering = gm20b_gr_init_sm_id_numbering,
 .tpc_mask = gm20b_gr_init_tpc_mask,
-#ifdef NVGPU_GRAPHICS
+#ifdef CONFIG_NVGPU_GRAPHICS
 .rop_mapping = gm20b_gr_init_rop_mapping,
 #endif
 .fs_state = gm20b_gr_init_fs_state,
@@ -719,7 +719,7 @@ static const struct gpu_ops gm20b_ops = {
 .userd = {
 .setup_sw = nvgpu_userd_setup_sw,
 .cleanup_sw = nvgpu_userd_cleanup_sw,
-#ifdef NVGPU_USERD
+#ifdef CONFIG_NVGPU_USERD
 .init_mem = gk20a_userd_init_mem,
 .gp_get = gk20a_userd_gp_get,
 .gp_put = gk20a_userd_gp_put,
@@ -756,11 +756,11 @@ static const struct gpu_ops gm20b_ops = {
 nvgpu_tsg_unbind_channel_check_ctx_reload,
 .unbind_channel_check_eng_faulted = NULL,
 .check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
 .force_reset = nvgpu_tsg_force_reset_ch,
 .post_event_id = nvgpu_tsg_post_event_id,
 #endif
-#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 .set_timeslice = nvgpu_tsg_set_timeslice,
 #endif
 .default_timeslice_us = nvgpu_tsg_default_timeslice_us,
@@ -802,7 +802,7 @@ static const struct gpu_ops gm20b_ops = {
 .idle_slowdown_enable = gm20b_therm_idle_slowdown_enable,
 .idle_slowdown_disable = gm20b_therm_idle_slowdown_disable,
 },
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 .pmu = {
 .is_pmu_supported = gm20b_is_pmu_supported,
 .falcon_base_addr = gk20a_pmu_falcon_base_addr,
@@ -853,7 +853,7 @@ static const struct gpu_ops gm20b_ops = {
 .pll_reg_write = gm20b_clk_pll_reg_write,
 .get_pll_debug_data = gm20b_clk_get_pll_debug_data,
 },
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 .regops = {
 .exec_regops = exec_regops_gk20a,
 .get_global_whitelist_ranges =
@@ -897,7 +897,7 @@ static const struct gpu_ops gm20b_ops = {
 .debug = {
 .show_dump = gk20a_debug_show_dump,
 },
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 .debugger = {
 .post_events = nvgpu_dbg_gpu_post_events,
 .dbg_set_powergate = nvgpu_dbg_set_powergate,
@@ -935,7 +935,7 @@ static const struct gpu_ops gm20b_ops = {
 .read_ptimer = gk20a_read_ptimer,
 .get_timestamps_zipper = nvgpu_get_timestamps_zipper,
 },
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 .css = {
 .enable_snapshot = nvgpu_css_enable_snapshot,
 .disable_snapshot = nvgpu_css_disable_snapshot,
@@ -1038,7 +1038,7 @@ int gm20b_init_hal(struct gk20a *g)
 gops->netlist = gm20b_ops.netlist;
 gops->mm = gm20b_ops.mm;
 gops->therm = gm20b_ops.therm;
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 gops->pmu = gm20b_ops.pmu;
 #endif
 /*
@@ -1056,7 +1056,7 @@ int gm20b_init_hal(struct gk20a *g)
 gops->mc = gm20b_ops.mc;
 gops->debug = gm20b_ops.debug;
-#ifdef NVGPU_DEBUGGER
+#ifdef CONFIG_NVGPU_DEBUGGER
 gops->debugger = gm20b_ops.debugger;
 gops->regops = gm20b_ops.regops;
 gops->perf = gm20b_ops.perf;
@@ -1064,7 +1064,7 @@ int gm20b_init_hal(struct gk20a *g)
 #endif
 gops->bus = gm20b_ops.bus;
 gops->ptimer = gm20b_ops.ptimer;
-#if defined(CONFIG_GK20A_CYCLE_STATS)
+#if defined(CONFIG_NVGPU_CYCLESTATS)
 gops->css = gm20b_ops.css;
 #endif
 gops->falcon = gm20b_ops.falcon;
@@ -1098,7 +1098,7 @@ int gm20b_init_hal(struct gk20a *g)
 nvgpu_gr_falcon_load_secure_ctxsw_ucode;
 } else {
 /* Inherit from gk20a */
-#ifdef NVGPU_FEATURE_LS_PMU
+#ifdef CONFIG_NVGPU_LS_PMU
 gops->pmu.setup_apertures =
 gm20b_pmu_ns_setup_apertures;
 #endif
