gpu: nvgpu: compile out vidmem from safety build

Safety build does not support vidmem. This patch compiles out the
vidmem-related code: vidmem itself, vidmem-backed DMA allocation, the
CBC/ACR/PMU allocations that draw from vidmem, and the corresponding
tests (pramin, page allocator and gmmu_map_unmap_vidmem).
As vidmem is applicable only to dGPUs, the code is compiled out
under CONFIG_NVGPU_DGPU.

JIRA NVGPU-3524

Change-Id: Ic623801112484ffc071195e828ab9f290f945d4d
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2132773
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit a16cc2dde3 (parent c2eb26436a), authored by Sagar Kamble on
2019-06-07 19:58:11 +05:30, committed by mobile promotions.
56 changed files with 253 additions and 444 deletions
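The whole change follows one mechanical pattern: every declaration, HAL
hook, or branch that is reachable only when video memory exists is wrapped
in the dGPU config guard. A minimal sketch of the pattern in C, for
illustration only (use_sysmem(), use_vidmem() and handle_bad_aperture()
are placeholder names, not functions from this change; the real call
sites such as nvgpu_mem_rd32() appear in the hunks below):

    /* Header: the vidmem API is declared only in dGPU builds. */
    #ifdef CONFIG_NVGPU_DGPU
    int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size,
                            struct nvgpu_mem *mem);
    #endif

    /* Call site: only the else-if arm is compiled out, so the sysmem
     * path and the error path still build on the safety build. */
    if (mem->aperture == APERTURE_SYSMEM) {
            use_sysmem(mem);
    }
    #ifdef CONFIG_NVGPU_DGPU
    else if (mem->aperture == APERTURE_VIDMEM) {
            use_vidmem(mem);
    }
    #endif
    else {
            handle_bad_aperture();
    }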

---

@@ -127,15 +127,6 @@ config GK20A_TRACE_PRINTK
 	  Enable nvgpu debug facility to redirect debug spew to ftrace. This
 	  affects kernel memory use, so should not be enabled by default.
 
-config GK20A_VIDMEM
-	bool "Support separate video memory on nvgpu"
-	depends on GK20A
-	default n
-	help
-	  Enable support for using and allocating buffers in a distinct video
-	  memory aperture (in contrast to general system memory), available on
-	  GPUs that have their own banks. PCIe GPUs have this, for example.
-
 config NVGPU_SUPPORT_CDE
 	bool "Support extraction of comptags for CDE"
 	depends on GK20A
@@ -145,7 +136,7 @@ config NVGPU_SUPPORT_CDE
 config NVGPU_USE_TEGRA_ALLOC_FD
 	bool "Use tegra_alloc_fd() for allocating dma_buf fds for vidmem"
-	depends on GK20A && GK20A_VIDMEM
+	depends on GK20A
 	default y
 	help
 	  Use tegra_alloc_fd() for allocating dma_buf fds. This allocates

---

@@ -414,9 +414,7 @@ nvgpu-y += \
 	os/linux/ecc_sysfs.o \
 	os/linux/os_ops_tu104.o \
 	os/linux/bsearch.o \
-	os/linux/sdl/sdl_stub.o
-
-nvgpu-$(CONFIG_GK20A_VIDMEM) += \
+	os/linux/sdl/sdl_stub.o \
 	os/linux/dmabuf_vidmem.o
 
 nvgpu-$(CONFIG_DEBUG_FS) += \
@@ -533,6 +531,7 @@ nvgpu-y += \
 	common/mm/nvgpu_sgt.o \
 	common/mm/mm.o \
 	common/mm/dma.o \
+	common/mm/vidmem.o \
 	common/pramin.o \
 	common/vbios/bios.o \
 	common/vbios/nvlink_bios.o \
@@ -576,10 +575,7 @@ nvgpu-y += \
 	common/fence/fence.o \
 	common/ecc.o \
 	common/ce/ce.o \
-	common/debugger.o \
-
-nvgpu-$(CONFIG_GK20A_VIDMEM) += \
-	common/mm/vidmem.o
+	common/debugger.o
 
 nvgpu-y += \
 	hal/gr/config/gr_config_gm20b.o \

---

@@ -59,7 +59,6 @@ NVGPU_COMMON_CFLAGS += \
 	-DCONFIG_TEGRA_GK20A_PMU=1 \
 	-DCONFIG_TEGRA_ACR=1 \
 	-DCONFIG_TEGRA_GR_VIRTUALIZATION \
-	-DCONFIG_GK20A_VIDMEM=1 \
 	-DCONFIG_PCI_MSI
 
 # Enable debugger APIs for safety build until devctl whitelisting is done

---

@@ -38,8 +38,7 @@ srcs += os/posix/nvgpu.c \
 	os/posix/stubs.c \
 	os/posix/posix-nvhost.c \
 	os/posix/posix-vgpu.c \
-	os/posix/posix-dt.c \
-	os/posix/posix-vidmem.c
+	os/posix/posix-dt.c
 
 ifdef CONFIG_NVGPU_FECS_TRACE
 srcs += os/posix/fecs_trace_posix.c
@@ -60,6 +59,10 @@ endif
 ifeq ($(CONFIG_NVGPU_SIM),1)
 srcs += os/posix/posix-sim.c
 endif
+ifeq ($(CONFIG_NVGPU_DGPU),1)
+srcs += os/posix/posix-vidmem.c
+endif
 endif
 
 # POSIX sources shared between the POSIX and QNX builds.
@@ -82,7 +85,6 @@ srcs += common/utils/enabled.c \
 	common/mm/allocators/nvgpu_allocator.c \
 	common/mm/allocators/bitmap_allocator.c \
 	common/mm/allocators/buddy_allocator.c \
-	common/mm/allocators/page_allocator.c \
 	common/mm/allocators/lockless_allocator.c \
 	common/mm/gmmu/page_table.c \
 	common/mm/gmmu/pd_cache.c \
@@ -93,10 +95,8 @@ srcs += common/utils/enabled.c \
 	common/mm/nvgpu_sgt.c \
 	common/mm/mm.c \
 	common/mm/dma.c \
-	common/mm/vidmem.c \
 	common/xve/xve_gp106.c \
 	common/therm/therm.c \
-	common/pramin.c \
 	common/ltc/ltc.c \
 	common/fbp/fbp.c \
 	common/io/io.c \
@@ -269,8 +269,6 @@ srcs += common/utils/enabled.c \
 	hal/sync/syncpt_cmdbuf_gv11b.c \
 	hal/pmu/pmu_gp106.c \
 	hal/pmu/pmu_gv11b.c \
-	hal/pramin/pramin_gp10b.c \
-	hal/pramin/pramin_init.c \
 	hal/top/top_gm20b.c \
 	hal/top/top_gp10b.c \
 	hal/top/top_gp106.c \
@@ -503,6 +501,9 @@ srcs += common/sec2/sec2.c \
 	common/falcon/falcon_sw_tu104.c \
 	common/acr/acr_sw_gv100.c \
 	common/acr/acr_sw_tu104.c \
+	common/mm/allocators/page_allocator.c \
+	common/mm/vidmem.c \
+	common/pramin.c \
 	hal/mm/mm_gv100.c \
 	hal/mm/mm_tu104.c \
 	hal/mc/mc_gv100.c \
@@ -545,7 +546,9 @@ srcs += common/sec2/sec2.c \
 	hal/gsp/gsp_gv100.c \
 	hal/sec2/sec2_gp106.c \
 	hal/sec2/sec2_tu104.c \
+	hal/pramin/pramin_gp10b.c \
 	hal/pramin/pramin_gv100.c \
+	hal/pramin/pramin_init.c \
 	hal/pramin/pramin_tu104.c \
 	hal/bios/bios_tu104.c \
 	hal/top/top_gv100.c

---

@@ -54,6 +54,7 @@ bool nvgpu_acr_is_lsf_lazy_bootstrap(struct gk20a *g, struct nvgpu_acr *acr,
 	return acr->lsf[falcon_id].is_lazy_bootstrap;
 }
 
+#ifdef CONFIG_NVGPU_DGPU
 int nvgpu_acr_alloc_blob_prerequisite(struct gk20a *g, struct nvgpu_acr *acr,
 	size_t size)
 {
@@ -69,6 +70,7 @@ int nvgpu_acr_alloc_blob_prerequisite(struct gk20a *g, struct nvgpu_acr *acr,
 	return acr->alloc_blob_space(g, size, &acr->ucode_blob);
 }
+#endif
 
 /* ACR blob construct & bootstrap */
 int nvgpu_acr_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)

---

@@ -173,8 +173,10 @@ void nvgpu_gv100_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
 	nvgpu_gv100_acr_default_sw_init(g, &acr->acr);
 
 	acr->prepare_ucode_blob = nvgpu_acr_prepare_ucode_blob_v1;
+#ifdef CONFIG_NVGPU_DGPU
 	acr->get_wpr_info = nvgpu_acr_wpr_info_vid;
 	acr->alloc_blob_space = nvgpu_acr_alloc_blob_space_vid;
+#endif
 	acr->bootstrap_hs_acr = nvgpu_acr_bootstrap_hs_ucode;
 	acr->patch_wpr_info_to_ucode =
 		gv100_acr_patch_wpr_info_to_ucode;

---

@@ -85,6 +85,7 @@ int nvgpu_cbc_alloc(struct gk20a *g, size_t compbit_backing_size,
 		return 0;
 	}
 
+#ifdef CONFIG_NVGPU_DGPU
 	if (vidmem_alloc == true) {
 		/*
 		 * Backing store MUST be physically contiguous and allocated in
@@ -97,7 +98,9 @@ int nvgpu_cbc_alloc(struct gk20a *g, size_t compbit_backing_size,
 		return nvgpu_dma_alloc_vid(g,
 				compbit_backing_size,
 				&cbc->compbit_store.mem);
-	} else {
+	} else
+#endif
+	{
 		return nvgpu_dma_alloc_flags_sys(g,
 				NVGPU_DMA_PHYSICALLY_ADDRESSED,
 				compbit_backing_size,

---

@@ -36,6 +36,7 @@
 static inline u32 nvgpu_ce_get_valid_launch_flags(struct gk20a *g,
 		u32 launch_flags)
 {
+#ifdef CONFIG_NVGPU_DGPU
 	/*
 	 * there is no local memory available,
 	 * don't allow local memory related CE flags
@@ -44,6 +45,7 @@ static inline u32 nvgpu_ce_get_valid_launch_flags(struct gk20a *g,
 		launch_flags &= ~(NVGPU_CE_SRC_LOCATION_LOCAL_FB |
 			NVGPU_CE_DST_LOCATION_LOCAL_FB);
 	}
+#endif
 	return launch_flags;
 }

---

@@ -261,11 +261,12 @@ static void nvgpu_channel_usermode_deinit(struct nvgpu_channel *ch)
 
 static void nvgpu_channel_kernelmode_deinit(struct nvgpu_channel *ch)
 {
-	struct gk20a *g = ch->g;
 	struct vm_gk20a *ch_vm = ch->vm;
 
 	nvgpu_dma_unmap_free(ch_vm, &ch->gpfifo.mem);
-	nvgpu_big_free(g, ch->gpfifo.pipe);
+#ifdef CONFIG_NVGPU_DGPU
+	nvgpu_big_free(ch->g, ch->gpfifo.pipe);
+#endif
 	(void) memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
 
 	nvgpu_channel_free_priv_cmd_q(ch);
@@ -1307,6 +1308,7 @@ static int nvgpu_channel_setup_kernelmode(struct nvgpu_channel *c,
 		goto clean_up;
 	}
 
+#ifdef CONFIG_NVGPU_DGPU
 	if (c->gpfifo.mem.aperture == APERTURE_VIDMEM) {
 		c->gpfifo.pipe = nvgpu_big_malloc(g,
 				(size_t)gpfifo_size *
@@ -1316,6 +1318,7 @@ static int nvgpu_channel_setup_kernelmode(struct nvgpu_channel *c,
 			goto clean_up_unmap;
 		}
 	}
+#endif
 
 	gpfifo_gpu_va = c->gpfifo.mem.gpu_va;
 	c->gpfifo.entry_num = gpfifo_size;
@@ -1383,7 +1386,9 @@ clean_up_sync:
 		c->sync = NULL;
 	}
 clean_up_unmap:
+#ifdef CONFIG_NVGPU_DGPU
 	nvgpu_big_free(g, c->gpfifo.pipe);
+#endif
 	nvgpu_dma_unmap_free(c->vm, &c->gpfifo.mem);
 clean_up:
 	(void) memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));

---

@@ -286,10 +286,13 @@ static int nvgpu_submit_append_gpfifo(struct nvgpu_channel *c,
 		struct nvgpu_gpfifo_userdata userdata,
 		u32 num_entries)
 {
-	struct gk20a *g = c->g;
 	int err;
 
-	if ((kern_gpfifo == NULL) && (c->gpfifo.pipe == NULL)) {
+	if ((kern_gpfifo == NULL)
+#ifdef CONFIG_NVGPU_DGPU
+		&& (c->gpfifo.pipe == NULL)
+#endif
+	) {
 		/*
 		 * This path (from userspace to sysmem) is special in order to
 		 * avoid two copies unnecessarily (from user to pipe, then from
@@ -300,17 +303,21 @@ static int nvgpu_submit_append_gpfifo(struct nvgpu_channel *c,
 		if (err != 0) {
 			return err;
 		}
-	} else if (kern_gpfifo == NULL) {
+	}
+#ifdef CONFIG_NVGPU_DGPU
+	else if (kern_gpfifo == NULL) {
 		/* from userspace to vidmem, use the common path */
-		err = g->os_channel.copy_user_gpfifo(c->gpfifo.pipe, userdata,
-				0, num_entries);
+		err = c->g->os_channel.copy_user_gpfifo(c->gpfifo.pipe,
+				userdata, 0, num_entries);
 		if (err != 0) {
 			return err;
 		}
 
 		nvgpu_submit_append_gpfifo_common(c, c->gpfifo.pipe,
 				num_entries);
-	} else {
+	}
+#endif
+	else {
 		/* from kernel to either sysmem or vidmem, don't need
 		 * copy_user_gpfifo so use the common path */
 		nvgpu_submit_append_gpfifo_common(c, kern_gpfifo, num_entries);

---

@@ -190,10 +190,12 @@ int nvgpu_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		err = nvgpu_buddy_allocator_init(g, na, vm, name, base, length,
 				blk_size, max_order, flags);
 		break;
+#ifdef CONFIG_NVGPU_DGPU
 	case PAGE_ALLOCATOR:
 		err = nvgpu_page_allocator_init(g, na, name, base, length,
 				blk_size, flags);
 		break;
+#endif
 	case BITMAP_ALLOCATOR:
 		err = nvgpu_bitmap_allocator_init(g, na, name, base, length,
 				blk_size, flags);

---

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,7 @@ int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 		struct nvgpu_mem *mem)
 {
+#ifdef CONFIG_NVGPU_DGPU
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) {
 		/*
 		 * Force the no-kernel-mapping flag on because we don't support
@@ -59,6 +60,7 @@ int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 		 * vidmem is exhausted.
 		 */
 	}
+#endif
 
 	return nvgpu_dma_alloc_flags_sys(g, flags, size, mem);
 }
@@ -68,6 +70,7 @@ int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 	return nvgpu_dma_alloc_flags_sys(g, 0, size, mem);
 }
 
+#ifdef CONFIG_NVGPU_DGPU
 int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
 	return nvgpu_dma_alloc_flags_vid(g,
@@ -86,6 +89,7 @@ int nvgpu_dma_alloc_vid_at(struct gk20a *g,
 	return nvgpu_dma_alloc_flags_vid_at(g,
 			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem, at);
 }
+#endif
 
 int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
@@ -96,6 +100,7 @@ int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
+#ifdef CONFIG_NVGPU_DGPU
 	if (!nvgpu_is_enabled(gk20a_from_vm(vm), NVGPU_MM_UNIFIED_MEMORY)) {
 		/*
 		 * Force the no-kernel-mapping flag on because we don't support
@@ -116,6 +121,7 @@ int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		 * vidmem is exhausted.
 		 */
 	}
+#endif
 
 	return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
 }
@@ -150,6 +156,7 @@ fail_free:
 	return err;
 }
 
+#ifdef CONFIG_NVGPU_DGPU
 int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
@@ -180,6 +187,7 @@ fail_free:
 	nvgpu_dma_free(vm->mm->g, mem);
 	return err;
 }
+#endif
 
 void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
@@ -187,9 +195,11 @@ void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 	case APERTURE_SYSMEM:
 		nvgpu_dma_free_sys(g, mem);
 		break;
+#ifdef CONFIG_NVGPU_DGPU
 	case APERTURE_VIDMEM:
 		nvgpu_dma_free_vid(g, mem);
 		break;
+#endif
 	default:
 		/* like free() on "null" memory */
 		break;

---

@@ -41,7 +41,9 @@ int nvgpu_mm_suspend(struct gk20a *g)
 
 	nvgpu_log_info(g, "MM suspend running...");
 
+#ifdef CONFIG_NVGPU_DGPU
 	nvgpu_vidmem_thread_pause_sync(&g->mm);
+#endif
 
 #ifdef CONFIG_NVGPU_COMPRESSION
 	g->ops.mm.cache.cbc_clean(g);
@@ -114,6 +116,7 @@ static int nvgpu_alloc_sysmem_flush(struct gk20a *g)
 #ifdef CONFIG_NVGPU_CE
 static void nvgpu_remove_mm_ce_support(struct mm_gk20a *mm)
 {
+#ifdef CONFIG_NVGPU_DGPU
 	struct gk20a *g = gk20a_from_mm(mm);
 
 	if (mm->vidmem.ce_ctx_id != NVGPU_CE_INVAL_CTX_ID) {
@@ -122,6 +125,7 @@ static void nvgpu_remove_mm_ce_support(struct mm_gk20a *mm)
 	mm->vidmem.ce_ctx_id = NVGPU_CE_INVAL_CTX_ID;
 
 	nvgpu_vm_put(mm->ce.vm);
+#endif
 }
 #endif
@@ -162,7 +166,9 @@ static void nvgpu_remove_mm_support(struct mm_gk20a *mm)
 	}
 
 	nvgpu_semaphore_sea_destroy(g);
+#ifdef CONFIG_NVGPU_DGPU
 	nvgpu_vidmem_destroy(g);
+#endif
 	nvgpu_pd_cache_fini(g);
 
 	if (g->ops.ramin.deinit_pdb_cache_war != NULL) {
@@ -297,7 +303,7 @@ static int nvgpu_init_mmu_debug(struct mm_gk20a *mm)
 #ifdef CONFIG_NVGPU_CE
 void nvgpu_init_mm_ce_context(struct gk20a *g)
 {
-#if defined(CONFIG_GK20A_VIDMEM)
+#if defined(CONFIG_NVGPU_DGPU)
 	if (g->mm.vidmem.size > 0U &&
 		(g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)) {
 		g->mm.vidmem.ce_ctx_id =
@@ -421,10 +427,11 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 		   U32(mm->channel.user_size >> U64(20)),
 		   U32(mm->channel.kernel_size >> U64(20)));
 
-	nvgpu_init_pramin(mm);
+#ifdef CONFIG_NVGPU_DGPU
 	mm->vidmem.ce_ctx_id = NVGPU_CE_INVAL_CTX_ID;
 
+	nvgpu_init_pramin(mm);
+
 	err = nvgpu_vidmem_init(mm);
 	if (err != 0) {
 		return err;
@@ -441,6 +448,7 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 			return err;
 		}
 	}
+#endif
 
 	err = nvgpu_alloc_sysmem_flush(g);
 	if (err != 0) {

---

@@ -44,6 +44,7 @@ u32 nvgpu_aperture_mask_raw(struct gk20a *g, enum nvgpu_aperture aperture,
 		nvgpu_do_assert_print(g, "Bad aperture");
 		return 0;
 	}
+
 	/*
 	 * Some iGPUs treat sysmem (i.e SoC DRAM) as vidmem. In these cases the
 	 * "sysmem" aperture should really be translated to VIDMEM.
@@ -96,7 +97,9 @@ bool nvgpu_mem_is_sysmem(struct nvgpu_mem *mem)
 u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
 {
 	/* ensure it is not vidmem allocation */
+#ifdef CONFIG_NVGPU_DGPU
 	WARN_ON(nvgpu_addr_is_vidmem_page_alloc(phys));
+#endif
 
 	if (nvgpu_iommuable(g) && g->ops.mm.gmmu.get_iommu_bit != NULL) {
 		return phys | 1ULL << g->ops.mm.gmmu.get_iommu_bit(g);
@@ -114,10 +117,14 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 
 		WARN_ON(ptr == NULL);
 		data = ptr[w];
-	} else if (mem->aperture == APERTURE_VIDMEM) {
+	}
+#ifdef CONFIG_NVGPU_DGPU
+	else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_rd_n(g, mem, w * (u32)sizeof(u32),
 				(u32)sizeof(u32), &data);
-	} else {
+	}
+#endif
+	else {
 		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
@@ -149,9 +156,13 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 
 		WARN_ON(mem->cpu_va == NULL);
 		nvgpu_memcpy((u8 *)dest, src, size);
-	} else if (mem->aperture == APERTURE_VIDMEM) {
+	}
+#ifdef CONFIG_NVGPU_DGPU
+	else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_rd_n(g, mem, offset, size, dest);
-	} else {
+	}
+#endif
+	else {
 		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
 }
@@ -163,13 +174,17 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 
 		WARN_ON(ptr == NULL);
 		ptr[w] = data;
-	} else if (mem->aperture == APERTURE_VIDMEM) {
+	}
+#ifdef CONFIG_NVGPU_DGPU
+	else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_wr_n(g, mem, w * (u32)sizeof(u32),
 				(u32)sizeof(u32), &data);
 
 		if (!mem->skip_wmb) {
 			nvgpu_wmb();
 		}
-	} else {
+	}
+#endif
+	else {
 		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
 }
@@ -191,12 +206,16 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 
 		WARN_ON(mem->cpu_va == NULL);
 		nvgpu_memcpy(dest, (u8 *)src, size);
-	} else if (mem->aperture == APERTURE_VIDMEM) {
+	}
+#ifdef CONFIG_NVGPU_DGPU
+	else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_wr_n(g, mem, offset, size, src);
 
 		if (!mem->skip_wmb) {
 			nvgpu_wmb();
 		}
-	} else {
+	}
+#endif
+	else {
 		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
 }
@@ -215,14 +234,18 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 
 		WARN_ON(mem->cpu_va == NULL);
 		(void) memset(dest, (int)c, size);
-	} else if (mem->aperture == APERTURE_VIDMEM) {
+	}
+#ifdef CONFIG_NVGPU_DGPU
+	else if (mem->aperture == APERTURE_VIDMEM) {
 		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
 
 		nvgpu_pramin_memset(g, mem, offset, size, repeat_value);
 
 		if (!mem->skip_wmb) {
 			nvgpu_wmb();
 		}
-	} else {
+	}
+#endif
+	else {
 		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
 }

---

@@ -213,11 +213,16 @@ static int pmu_payload_allocate(struct gk20a *g, struct pmu_sequence *seq,
 			goto clean_up;
 		}
 
+#ifdef CONFIG_NVGPU_DGPU
 		err = nvgpu_pmu_vidmem_surface_alloc(g, alloc->fb_surface,
 				alloc->fb_size);
 		if (err != 0) {
 			goto clean_up;
 		}
+#else
+		err = -ENOMEM;
+		goto clean_up;
+#endif
 	}
 
 	if (nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {

---

@@ -72,6 +72,7 @@ void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
 		fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24U);
 }
 
+#ifdef CONFIG_NVGPU_DGPU
 int nvgpu_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 size)
 {
@@ -87,6 +88,7 @@ int nvgpu_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 
 	return 0;
 }
+#endif
 
 int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 size)

---

@@ -619,9 +619,11 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 			.get_mmu_levels = gp10b_mm_get_mmu_levels,
 		},
 	},
+#ifdef CONFIG_NVGPU_DGPU
 	.pramin = {
 		.data032_r = NULL,
 	},
+#endif
 	.therm = {
 		.init_therm_setup_hw = NULL,
 		.init_elcg_mode = NULL,
@@ -737,7 +739,9 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.isr = NULL,
 		.bar1_bind = NULL,
 		.bar2_bind = NULL,
+#ifdef CONFIG_NVGPU_DGPU
 		.set_bar0_window = NULL,
+#endif
 	},
 	.ptimer = {
 		.isr = NULL,
@@ -816,7 +820,9 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
 	gops->pbdma_status = vgpu_gp10b_ops.pbdma_status;
 	gops->netlist = vgpu_gp10b_ops.netlist;
 	gops->mm = vgpu_gp10b_ops.mm;
+#ifdef CONFIG_NVGPU_DGPU
 	gops->pramin = vgpu_gp10b_ops.pramin;
+#endif
 	gops->therm = vgpu_gp10b_ops.therm;
 #ifdef CONFIG_NVGPU_LS_PMU
 	gops->pmu = vgpu_gp10b_ops.pmu;

---

@@ -834,7 +834,9 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.isr = NULL,
 		.bar1_bind = NULL,
 		.bar2_bind = NULL,
+#ifdef CONFIG_NVGPU_DGPU
 		.set_bar0_window = NULL,
+#endif
 	},
 	.ptimer = {
 		.isr = NULL,

---

@@ -80,6 +80,7 @@ void gk20a_bus_isr(struct gk20a *g)
 	gk20a_writel(g, bus_intr_0_r(), val);
 }
 
+#ifdef CONFIG_NVGPU_DGPU
 u32 gk20a_bus_set_bar0_window(struct gk20a *g, struct nvgpu_mem *mem,
 		struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl, u32 w)
 {
@@ -110,3 +111,4 @@ u32 gk20a_bus_set_bar0_window(struct gk20a *g, struct nvgpu_mem *mem,
 
 	return lo;
 }
+#endif

---

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,9 +31,11 @@ struct nvgpu_sgl;
 
 void gk20a_bus_isr(struct gk20a *g);
 void gk20a_bus_init_hw(struct gk20a *g);
+#ifdef CONFIG_NVGPU_DGPU
 u32 gk20a_bus_set_bar0_window(struct gk20a *g, struct nvgpu_mem *mem,
 			      struct nvgpu_sgt *sgt,
 			      struct nvgpu_sgl *sgl,
 			      u32 w);
+#endif
 
 #endif /* BUS_GK20A_H */

---

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -53,6 +53,7 @@ void gp106_fb_init_fs_state(struct gk20a *g)
 	gk20a_writel(g, fb_mmu_priv_level_mask_r(), val);
 }
 
+#ifdef CONFIG_NVGPU_DGPU
 size_t gp106_fb_get_vidmem_size(struct gk20a *g)
 {
 	u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r());
@@ -67,3 +68,4 @@ size_t gp106_fb_get_vidmem_size(struct gk20a *g)
 
 	return bytes;
 }
+#endif

---

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 struct gpu_ops;
 
 void gp106_fb_init_fs_state(struct gk20a *g);
+#ifdef CONFIG_NVGPU_DGPU
 size_t gp106_fb_get_vidmem_size(struct gk20a *g);
+#endif
 
 #endif

---

@@ -166,6 +166,7 @@ int gv100_fb_enable_nvlink(struct gk20a *g)
 	return 0;
 }
 
+#ifdef CONFIG_NVGPU_DGPU
 size_t gv100_fb_get_vidmem_size(struct gk20a *g)
 {
 	u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r());
@@ -180,3 +181,4 @@ size_t gv100_fb_get_vidmem_size(struct gk20a *g)
 
 	return bytes;
 }
+#endif

---

@@ -31,6 +31,7 @@ void gv100_fb_reset(struct gk20a *g);
 int gv100_fb_memory_unlock(struct gk20a *g);
 int gv100_fb_init_nvlink(struct gk20a *g);
 int gv100_fb_enable_nvlink(struct gk20a *g);
+#ifdef CONFIG_NVGPU_DGPU
 size_t gv100_fb_get_vidmem_size(struct gk20a *g);
+#endif
 
 #endif /* NVGPU_FB_GV100_H */

---

@@ -262,6 +262,7 @@ int tu104_fb_apply_pdb_cache_war(struct gk20a *g)
 	return 0;
 }
 
+#ifdef CONFIG_NVGPU_DGPU
 size_t tu104_fb_get_vidmem_size(struct gk20a *g)
 {
 	u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r());
@@ -283,7 +284,7 @@ size_t tu104_fb_get_vidmem_size(struct gk20a *g)
 
 	return bytes;
 }
-
+#endif
 
 int tu104_fb_enable_nvlink(struct gk20a *g)
 {

---

@@ -34,7 +34,9 @@ struct nvgpu_cbc;
 void tu104_fb_cbc_configure(struct gk20a *g, struct nvgpu_cbc *cbc);
 #endif
 int tu104_fb_apply_pdb_cache_war(struct gk20a *g);
+#ifdef CONFIG_NVGPU_DGPU
 size_t tu104_fb_get_vidmem_size(struct gk20a *g);
+#endif
 int tu104_fb_enable_nvlink(struct gk20a *g);
 
 #endif /* NVGPU_FB_TU104_H */

---

@@ -939,7 +939,9 @@ static const struct gpu_ops gm20b_ops = {
 		.init_hw = gk20a_bus_init_hw,
 		.isr = gk20a_bus_isr,
 		.bar1_bind = gm20b_bus_bar1_bind,
+#ifdef CONFIG_NVGPU_DGPU
 		.set_bar0_window = gk20a_bus_set_bar0_window,
+#endif
 	},
 	.ptimer = {
 		.isr = gk20a_ptimer_isr,

---

@@ -1026,7 +1026,9 @@ static const struct gpu_ops gp10b_ops = {
 		.isr = gk20a_bus_isr,
 		.bar1_bind = gm20b_bus_bar1_bind,
 		.bar2_bind = gp10b_bus_bar2_bind,
+#ifdef CONFIG_NVGPU_DGPU
 		.set_bar0_window = gk20a_bus_set_bar0_window,
+#endif
 	},
 	.ptimer = {
 		.isr = gk20a_ptimer_isr,
@@ -1181,7 +1183,9 @@ int gp10b_init_hal(struct gk20a *g)
 	nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, false);
 #endif
 
+#ifdef CONFIG_NVGPU_DGPU
 	nvgpu_pramin_ops_init(g);
+#endif
 
 	/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
 	if (gops->fuse.check_priv_security(g) != 0) {

---

@@ -1198,7 +1198,9 @@ static const struct gpu_ops gv11b_ops = {
 		.isr = gk20a_bus_isr,
 		.bar1_bind = gm20b_bus_bar1_bind,
 		.bar2_bind = gp10b_bus_bar2_bind,
+#ifdef CONFIG_NVGPU_DGPU
 		.set_bar0_window = gk20a_bus_set_bar0_window,
+#endif
 	},
 	.ptimer = {
 		.isr = gk20a_ptimer_isr,

---

@@ -782,7 +782,9 @@ static const struct gpu_ops tu104_ops = {
 		.is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
 		.fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
 		.fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
+#ifdef CONFIG_NVGPU_DGPU
 		.get_vidmem_size = tu104_fb_get_vidmem_size,
+#endif
 		.apply_pdb_cache_war = tu104_fb_apply_pdb_cache_war,
 		.intr = {
 			.enable = tu104_fb_intr_enable,
@@ -1239,7 +1241,9 @@ static const struct gpu_ops tu104_ops = {
 		.isr = gk20a_bus_isr,
 		.bar1_bind = NULL,
 		.bar2_bind = bus_tu104_bar2_bind,
+#ifdef CONFIG_NVGPU_DGPU
 		.set_bar0_window = gk20a_bus_set_bar0_window,
+#endif
 		.read_sw_scratch = gv100_bus_read_sw_scratch,
 		.write_sw_scratch = gv100_bus_write_sw_scratch,
 	},
@@ -1528,8 +1532,9 @@ int tu104_init_hal(struct gk20a *g)
 #ifdef CONFIG_NVGPU_LS_PMU
 	gops->clk.perf_pmu_vfe_load = nvgpu_perf_pmu_vfe_load_ps35;
 #endif
+#ifdef CONFIG_NVGPU_DGPU
 	nvgpu_pramin_ops_init(g);
+#endif
 
 	/* dGpu VDK support */
 #ifdef CONFIG_NVGPU_SIM
 	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)){

---

@@ -29,8 +29,10 @@ struct nvgpu_firmware;
 struct nvgpu_acr;
 
 int nvgpu_acr_init(struct gk20a *g, struct nvgpu_acr **acr);
+#ifdef CONFIG_NVGPU_DGPU
 int nvgpu_acr_alloc_blob_prerequisite(struct gk20a *g, struct nvgpu_acr *acr,
 	size_t size);
+#endif
 int nvgpu_acr_construct_execute(struct gk20a *g, struct nvgpu_acr *acr);
 int nvgpu_acr_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr);
 bool nvgpu_acr_is_lsf_lazy_bootstrap(struct gk20a *g, struct nvgpu_acr *acr,

---

@@ -197,7 +197,9 @@ nvgpu_alloc_carveout_from_co_entry(struct nvgpu_list_node *node)
 enum nvgpu_allocator_type {
 	BUDDY_ALLOCATOR = 0,
+#ifdef CONFIG_NVGPU_DGPU
 	PAGE_ALLOCATOR,
+#endif
 	BITMAP_ALLOCATOR
 };
 
@@ -226,12 +228,14 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 				const char *name, u64 base, u64 length,
 				u64 blk_size, u64 flags);
 
+#ifdef CONFIG_NVGPU_DGPU
 /*
 * Page allocator initializers.
 */
 int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 			      const char *name, u64 base, u64 length,
 			      u64 blk_size, u64 flags);
+#endif
 
 /*
 * Common init function for any type of allocator.

---

@@ -90,9 +90,11 @@ struct gpfifo_desc {
 	u32 get;
 	u32 put;
 
+#ifdef CONFIG_NVGPU_DGPU
 	/* if gpfifo lives in vidmem or is forced to go via PRAMIN, first copy
 	 * from userspace to pipe and then from pipe to gpu buffer */
 	void *pipe;
+#endif
 };
 
 struct nvgpu_channel_hw_state {

---

@@ -145,6 +145,7 @@ int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
 int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
+#ifdef CONFIG_NVGPU_DGPU
 /**
 * nvgpu_dma_alloc_vid - Allocate DMA memory
 *
@@ -216,6 +217,7 @@ int nvgpu_dma_alloc_vid_at(struct gk20a *g,
 */
 int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem, u64 at);
+#endif
 
 /**
 * nvgpu_dma_free - Free a DMA allocation

---

@@ -973,7 +973,9 @@ struct gpu_ops {
 		void (*fault_buf_set_state_hw)(struct gk20a *g,
 				u32 index, u32 state);
 		void (*fault_buf_configure_hw)(struct gk20a *g, u32 index);
+#ifdef CONFIG_NVGPU_DGPU
 		size_t (*get_vidmem_size)(struct gk20a *g);
+#endif
 		int (*apply_pdb_cache_war)(struct gk20a *g);
 		struct {
 			void (*enable)(struct gk20a *g);
@@ -1361,9 +1363,11 @@ struct gpu_ops {
 			size_t size,
 			void (**fn)(struct gk20a *g, struct nvgpu_mem *mem));
 
+#ifdef CONFIG_NVGPU_DGPU
 	struct {
 		u32 (*data032_r)(u32 i);
 	} pramin;
+#endif
 	struct {
 		int (*init_therm_setup_hw)(struct gk20a *g);
 		void (*init_elcg_mode)(struct gk20a *g, u32 mode, u32 engine);
@@ -1604,9 +1608,11 @@ struct gpu_ops {
 		void (*isr)(struct gk20a *g);
 		int (*bar1_bind)(struct gk20a *g, struct nvgpu_mem *bar1_inst);
 		int (*bar2_bind)(struct gk20a *g, struct nvgpu_mem *bar1_inst);
+#ifdef CONFIG_NVGPU_DGPU
 		u32 (*set_bar0_window)(struct gk20a *g, struct nvgpu_mem *mem,
 			struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl,
 			u32 w);
+#endif
 		u32 (*read_sw_scratch)(struct gk20a *g, u32 index);
 		void (*write_sw_scratch)(struct gk20a *g, u32 index, u32 val);
 	} bus;

---

@@ -90,7 +90,9 @@ static inline void nvgpu_log_dbg_impl(struct gk20a *g, u64 log_mask,
 #define gpu_dbg_alloc	BIT(21) /* Allocator debugging. */
 #define gpu_dbg_dma	BIT(22) /* DMA allocation prints. */
 #define gpu_dbg_sgl	BIT(23) /* SGL related traces. */
+#ifdef CONFIG_NVGPU_DGPU
 #define gpu_dbg_vidmem	BIT(24) /* VIDMEM tracing. */
+#endif
 #define gpu_dbg_nvlink	BIT(25) /* nvlink Operation tracing. */
 #define gpu_dbg_clk_arb	BIT(26) /* Clk arbiter debugging. */
 #define gpu_dbg_event	BIT(27) /* Events to User debugging. */

---

@@ -269,6 +269,7 @@ struct mm_gk20a {
 
 	struct nvgpu_mem sysmem_flush;
 
+#ifdef CONFIG_NVGPU_DGPU
 	u32 pramin_window;
 	struct nvgpu_spinlock pramin_window_lock;
 
@@ -295,7 +296,7 @@ struct mm_gk20a {
 
 		nvgpu_atomic64_t bytes_pending;
 	} vidmem;
+#endif
 
 	struct nvgpu_mem mmu_wr_mem;
 	struct nvgpu_mem mmu_rd_mem;
 };

---

@@ -120,12 +120,14 @@ struct nvgpu_mem {
 	 */
 	void *cpu_va;
 
+#ifdef CONFIG_NVGPU_DGPU
 	/*
 	 * Fields only populated for vidmem allocations.
 	 */
 	struct nvgpu_page_alloc *vidmem_alloc;
 	struct nvgpu_allocator *allocator;
 	struct nvgpu_list_node clear_list_entry;
+#endif
 
 	/*
 	 * Fields for direct "physical" nvgpu_mem structs.
@@ -139,6 +141,7 @@ struct nvgpu_mem {
 	struct nvgpu_mem_priv priv;
 };
 
+#ifdef CONFIG_NVGPU_DGPU
 static inline struct nvgpu_mem *
 nvgpu_mem_from_clear_list_entry(struct nvgpu_list_node *node)
 {
@@ -146,6 +149,7 @@ nvgpu_mem_from_clear_list_entry(struct nvgpu_list_node *node)
 		((uintptr_t)node - offsetof(struct nvgpu_mem,
 				clear_list_entry));
 };
+#endif
 
 static inline const char *nvgpu_aperture_str(struct gk20a *g,
 				enum nvgpu_aperture aperture)

---

@@ -23,6 +23,8 @@
 #ifndef PAGE_ALLOCATOR_PRIV_H
 #define PAGE_ALLOCATOR_PRIV_H
 
+#ifdef CONFIG_NVGPU_DGPU
+
 #include <nvgpu/allocator.h>
 #include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/nvgpu_sgt.h>
@@ -184,3 +186,4 @@ static inline struct nvgpu_allocator *palloc_owner(
 }
 
 #endif
+#endif

---

@@ -40,8 +40,10 @@ void nvgpu_pmu_dmem_allocator_destroy(struct nvgpu_allocator *dmem);
 void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem);
 void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
 		struct flcn_mem_desc_v0 *fb);
+#ifdef CONFIG_NVGPU_DGPU
 int nvgpu_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 size);
+#endif
 int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 size);
 
 #endif /* NVGPU_PMU_ALLOCATOR_H */

---

@@ -23,23 +23,10 @@
 #ifndef NVGPU_POSIX_VIDMEM_H
 #define NVGPU_POSIX_VIDMEM_H
 
-#ifdef CONFIG_GK20A_VIDMEM
+#ifdef CONFIG_NVGPU_DGPU
 void nvgpu_vidmem_set_page_alloc(struct nvgpu_mem_sgl *sgl, u64 addr);
 struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(struct nvgpu_mem_sgl *sgl);
-#else
-
-static inline void nvgpu_vidmem_set_page_alloc(struct nvgpu_mem_sgl *sgl,
-					u64 addr)
-{
-}
-
-static inline struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(
-					struct nvgpu_mem_sgl *sgl)
-{
-	return NULL;
-}
 #endif
 
 #endif /* NVGPU_POSIX_VIDMEM_H */

---

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,8 @@
 #ifndef NVGPU_PRAMIN_H
 #define NVGPU_PRAMIN_H
 
+#ifdef CONFIG_NVGPU_DGPU
+
 #include <nvgpu/types.h>
 
 struct gk20a;
@@ -36,4 +38,6 @@ void nvgpu_pramin_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 start, u32
 
 void nvgpu_init_pramin(struct mm_gk20a *mm);
 
+#endif
+
 #endif /* NVGPU_PRAMIN_H */

---

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,8 @@
 #ifndef NVGPU_VIDMEM_H
 #define NVGPU_VIDMEM_H
 
+#if defined(CONFIG_NVGPU_DGPU)
+
 #include <nvgpu/types.h>
 #include <nvgpu/errno.h>
@@ -47,8 +49,6 @@ struct nvgpu_vidmem_buf {
 	void *priv;
 };
 
-#if defined(CONFIG_GK20A_VIDMEM)
-
 /**
 * nvgpu_vidmem_user_alloc - Allocates a vidmem buffer for userspace
 *
@@ -83,66 +83,11 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem);
 
 void nvgpu_vidmem_thread_pause_sync(struct mm_gk20a *mm);
 void nvgpu_vidmem_thread_unpause(struct mm_gk20a *mm);
 
-#else /* !defined(CONFIG_GK20A_VIDMEM) */
-
-/*
- * When VIDMEM support is not present this interface is used.
- */
-
-static inline bool nvgpu_addr_is_vidmem_page_alloc(u64 addr)
-{
-	return false;
-}
-
-static inline int nvgpu_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
-{
-	return -ENOSYS;
-}
-
-static inline void nvgpu_vidmem_buf_free(struct gk20a *g,
-					struct nvgpu_vidmem_buf *buf)
-{
-}
-
-static inline int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space)
-{
-	return -ENOSYS;
-}
-
-static inline void nvgpu_vidmem_destroy(struct gk20a *g)
-{
-}
-
-static inline int nvgpu_vidmem_init(struct mm_gk20a *mm)
-{
-	return 0;
-}
-
-static inline int nvgpu_vidmem_clear_all(struct gk20a *g)
-{
-	return -ENOSYS;
-}
-
-static inline int nvgpu_vidmem_clear(struct gk20a *g,
-					struct nvgpu_mem *mem)
-{
-	return -ENOSYS;
-}
-
-static inline void nvgpu_vidmem_thread_pause_sync(struct mm_gk20a *mm)
-{
-}
-
-static inline void nvgpu_vidmem_thread_unpause(struct mm_gk20a *mm)
-{
-}
-
-#endif /* !defined(CONFIG_GK20A_VIDMEM) */
-
 /*
 * Simple macro for VIDMEM debugging.
 */
 #define vidmem_dbg(g, fmt, args...)				\
 	nvgpu_log(g, gpu_dbg_vidmem, fmt, ##args);		\
 
+#endif /* CONFIG_NVGPU_DGPU */
+
 #endif /* NVGPU_VIDMEM_H */

---

@@ -10,7 +10,6 @@ gv11b_fb_write_mmu_fault_buffer_size
 find_first_bit
 find_first_zero_bit
 find_next_bit
-gk20a_bus_set_bar0_window
 gk20a_runlist_get_ch_entry
 gk20a_runlist_get_tsg_entry
 nvgpu_gmmu_map_locked
@@ -40,7 +39,6 @@ gv11b_mm_l2_flush
 gv11b_mm_mmu_fault_disable_hw
 gv11b_mm_mmu_fault_info_mem_destroy
 gv11b_mc_is_mmu_fault_pending
-nvgpu_addr_is_vidmem_page_alloc
 nvgpu_alloc
 nvgpu_alloc_base
 nvgpu_alloc_common_init
@@ -75,7 +73,6 @@ nvgpu_channel_setup_sw
 nvgpu_channel_sync_create
 nvgpu_dma_alloc
 nvgpu_dma_alloc_get_fault_injection
-nvgpu_dma_alloc_vid_at
 nvgpu_dma_free
 nvgpu_fifo_init_support
 nvgpu_free
@@ -91,7 +88,6 @@ nvgpu_hw_semaphore_init
 nvgpu_init_enabled_flags
 nvgpu_init_hal
 nvgpu_init_mm_support
-nvgpu_init_pramin
 nvgpu_inst_block_addr
 nvgpu_is_enabled
 nvgpu_kfree_impl
@@ -128,7 +124,6 @@ nvgpu_mutex_destroy
 nvgpu_mutex_init
 nvgpu_mutex_release
 nvgpu_mutex_tryacquire
-nvgpu_page_allocator_init
 nvgpu_pd_alloc
 nvgpu_pd_cache_fini
 nvgpu_pd_cache_init
@@ -155,9 +150,6 @@ nvgpu_posix_io_writel_reg_space
 nvgpu_posix_is_fault_injection_triggered
 nvgpu_posix_probe
 nvgpu_posix_register_io
-nvgpu_pramin_memset
-nvgpu_pramin_rd_n
-nvgpu_pramin_wr_n
 nvgpu_raw_spinlock_acquire
 nvgpu_raw_spinlock_init
 nvgpu_raw_spinlock_release

---

@@ -77,10 +77,13 @@ enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 	} else if (buf_owner != g) {
 		/* Someone else's vidmem */
 		return APERTURE_INVALID;
-	} else {
+	}
+#ifdef CONFIG_NVGPU_DGPU
+	else {
 		/* Yay, buf_owner == g */
 		return APERTURE_VIDMEM;
 	}
+#endif
 }
 
 struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf,

---

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -17,6 +17,8 @@
 #ifndef __NVGPU_LINUX_DMABUF_VIDMEM_H__
 #define __NVGPU_LINUX_DMABUF_VIDMEM_H__
 
+#ifdef CONFIG_NVGPU_DGPU
+
 #include <nvgpu/types.h>
 
 struct dma_buf;
@@ -24,8 +26,6 @@ struct dma_buf;
 struct gk20a;
 struct scatterlist;
 
-#ifdef CONFIG_GK20A_VIDMEM
-
 struct gk20a *nvgpu_vidmem_buf_owner(struct dma_buf *dmabuf);
 
 int nvgpu_vidmem_export_linux(struct gk20a *g, size_t bytes);
@@ -35,40 +35,6 @@ struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(struct scatterlist *sgl);
 int nvgpu_vidmem_buf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
 			void *buffer, u64 offset, u64 size, u32 cmd);
 
-#else /* !CONFIG_GK20A_VIDMEM */
-
-static inline struct gk20a *nvgpu_vidmem_buf_owner(struct dma_buf *dmabuf)
-{
-	return NULL;
-}
-
-static inline int nvgpu_vidmem_export_linux(struct gk20a *g, size_t bytes)
-{
-	return -ENOSYS;
-}
-
-static inline void nvgpu_vidmem_set_page_alloc(struct scatterlist *sgl,
-					u64 addr)
-{
-}
-
-static inline struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(
-					struct scatterlist *sgl)
-{
-	return NULL;
-}
-
-static inline int nvgpu_vidmem_buf_access_memory(struct gk20a *g,
-						struct dma_buf *dmabuf,
-						void *buffer, u64 offset,
-						u64 size, u32 cmd)
-{
-	return -ENOSYS;
-}
-
-#endif
-
 struct nvgpu_vidmem_linux {
 	struct dma_buf *dmabuf;
 	void *dmabuf_priv;
@@ -76,3 +42,4 @@ struct nvgpu_vidmem_linux {
 };
 
 #endif
+#endif

View File

@@ -136,7 +136,7 @@ static void nvgpu_dma_print_err(struct gk20a *g, size_t size,
 #define dma_dbg_free_done(g, size, type) \
 	__dma_dbg_done(g, size, type, "free")
-#if defined(CONFIG_GK20A_VIDMEM)
+#if defined(CONFIG_NVGPU_DGPU)
 static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, u64 at,
 			     size_t size)
 {
@@ -338,10 +338,10 @@ print_dma_err:
 	return err;
 }
+#if defined(CONFIG_NVGPU_DGPU)
 int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem, u64 at)
 {
-#if defined(CONFIG_GK20A_VIDMEM)
 	u64 addr;
 	int err;
 	struct nvgpu_allocator *vidmem_alloc = g->mm.vidmem.cleared ?
@@ -423,10 +423,8 @@ fail_physfree:
 print_dma_err:
 	nvgpu_dma_print_err(g, size, "vidmem", "alloc", flags);
 	return err;
-#else
-	return -ENOSYS;
-#endif
 }
+#endif
 void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
@@ -485,7 +483,7 @@ void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 {
-#if defined(CONFIG_GK20A_VIDMEM)
+#if defined(CONFIG_NVGPU_DGPU)
 	size_t mem_size = mem->size;
 	dma_dbg_free(g, mem->size, mem->priv.flags, "vidmem");
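Note: with both vidmem entry points in this file behind the same config, a dGPU-only call site pairs the allocation and free under one guard. A sketch under that assumption; the function name, size, and zero flags are illustrative:

static int vid_buf_roundtrip(struct gk20a *g)
{
#ifdef CONFIG_NVGPU_DGPU
	struct nvgpu_mem vid_buf = {};
	int err;

	/* at == 0 lets the vidmem allocator place the buffer anywhere. */
	err = nvgpu_dma_alloc_flags_vid_at(g, 0UL, SZ_64K, &vid_buf, 0ULL);
	if (err != 0)
		return err;

	/* ... use the buffer ... */

	nvgpu_dma_free_vid(g, &vid_buf);
	return 0;
#else
	return -ENOSYS;
#endif
}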

View File

@@ -125,12 +125,14 @@ u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem)
  */
 u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
 {
+#ifdef CONFIG_NVGPU_DGPU
 	/*
 	 * For a VIDMEM buf, this is identical to simply get_addr() so just fall
 	 * back to that.
 	 */
 	if (mem->aperture == APERTURE_VIDMEM)
 		return nvgpu_mem_get_addr(g, mem);
+#endif
 	return __nvgpu_sgl_phys(g, (struct nvgpu_sgl *)mem->priv.sgt->sgl);
 }
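Note: assembled from the hunk above, the function now reads as follows; only the two guard lines are new, the vidmem fall-through itself is unchanged:

u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
{
#ifdef CONFIG_NVGPU_DGPU
	/*
	 * For a VIDMEM buf, this is identical to simply get_addr() so just
	 * fall back to that.
	 */
	if (mem->aperture == APERTURE_VIDMEM)
		return nvgpu_mem_get_addr(g, mem);
#endif
	return __nvgpu_sgl_phys(g, (struct nvgpu_sgl *)mem->priv.sgt->sgl);
}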

View File

@@ -47,19 +47,6 @@ struct nvgpu_posix_fault_inj *nvgpu_dma_alloc_get_fault_injection(void)
 	return &dma_fi;
 }
-static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, u64 at,
-			     size_t size)
-{
-	u64 addr = 0;
-	if (at)
-		addr = nvgpu_alloc_fixed(allocator, at, size, 0);
-	else
-		addr = nvgpu_alloc(allocator, size);
-	return addr;
-}
 /*
  * In userspace vidmem vs sysmem is just a difference in what is placed in the
  * aperture field.
@@ -88,9 +75,10 @@ static int __nvgpu_do_dma_alloc(struct gk20a *g, unsigned long flags,
 	mem->aligned_size = PAGE_ALIGN(size);
 	mem->gpu_va = 0ULL;
 	mem->skip_wmb = true;
+#ifdef CONFIG_NVGPU_DGPU
 	mem->vidmem_alloc = NULL;
 	mem->allocator = NULL;
+#endif
 	return 0;
 }
@@ -108,6 +96,20 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	return __nvgpu_do_dma_alloc(g, flags, size, mem, APERTURE_SYSMEM);
 }
+#ifdef CONFIG_NVGPU_DGPU
+static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, u64 at,
+			     size_t size)
+{
+	u64 addr = 0;
+	if (at)
+		addr = nvgpu_alloc_fixed(allocator, at, size, 0);
+	else
+		addr = nvgpu_alloc(allocator, size);
+	return addr;
+}
 static size_t mock_fb_get_vidmem_size(struct gk20a *g)
 {
 	return SZ_4G;
@@ -212,15 +214,6 @@ dma_err:
 	return err;
 }
-void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
-{
-	if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY)) {
-		free(mem->cpu_va);
-	}
-	(void) memset(mem, 0, sizeof(*mem));
-}
 void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 {
@@ -238,3 +231,13 @@ void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	nvgpu_cond_destroy(&g->mm.vidmem.clearing_thread_cond);
 	nvgpu_thread_stop_graceful(&g->mm.vidmem.clearing_thread, NULL, NULL);
 }
+#endif
+void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
+{
+	if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY)) {
+		free(mem->cpu_va);
+	}
+	(void) memset(mem, 0, sizeof(*mem));
+}
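Note: nvgpu_dma_free_sys() moves below the guarded region because sysmem frees are needed in every build, while the relocated __nvgpu_dma_alloc() helper is vidmem-only. The helper encodes one decision: a non-zero at requests a fixed-address allocation, zero defers placement to the allocator. A within-file usage sketch; the allocator handle is an assumption based on the g->mm.vidmem fields used above:

#ifdef CONFIG_NVGPU_DGPU
static void dma_alloc_example(struct gk20a *g)
{
	/* Assumed handle, for illustration only. */
	struct nvgpu_allocator *a = &g->mm.vidmem.allocator;

	/* Fixed placement: carve 1 MB exactly at offset 64K. */
	u64 fixed_addr = __nvgpu_dma_alloc(a, SZ_64K, SZ_1M);

	/* Anywhere: let the allocator choose the address. */
	u64 any_addr = __nvgpu_dma_alloc(a, 0ULL, SZ_1M);

	(void)fixed_addr;
	(void)any_addr;
}
#endif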

View File

@@ -40,11 +40,9 @@ CONFIGS := \
 	-DNVCPU_IS_AARCH64=1 \
 	-DCONFIG_TEGRA_IOVMM=0 \
 	-DCONFIG_ARCH_TEGRA_18x_SOC=1 \
-	-DCONFIG_GK20A_VIDMEM=1 \
 	-DCONFIG_PCI_MSI \
 	-DCONFIG_SUPPORT_PMU_PSTATE \
 	-DNVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
 # Uncomment these to enable the config.
 # CONFIGS += -DCONFIG_NVGPU_TRACK_MEM_USAGE=y
-# CONFIGS += -DCONFIG_GK20A_VIDMEM

View File

@@ -589,11 +589,6 @@
         "test_level": 0,
         "unit": "nvgpu_mem"
     },
-    {
-        "test": "nvgpu_mem_vidmem",
-        "test_level": 0,
-        "unit": "nvgpu_mem"
-    },
     {
         "test": "nvgpu_mem_wr_rd",
         "test_level": 0,
@@ -804,206 +799,6 @@
         "test_level": 0,
         "unit": "nvgpu_tsg"
     },
-    {
-        "test": "add_empty_slab",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "add_full_slab",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "add_partial_slab",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "add_second_full_slab",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "add_second_partial_slab",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "alloc_fault_at_alloc_cache",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "alloc_fault_at_nvgpu_alloc",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "alloc_fault_at_sgl_alloc",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "alloc_fixed_no_scatter_gather",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "alloc_more_than_available",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "alloc_no_scatter_gather",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "contiguous_alloc_512K",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "destroy",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "destroy_slabs",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "fixed_alloc_128K",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "fixed_alloc_8K",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "fixed_alloc_fault_at_alloc_cache",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "fixed_alloc_fault_at_sgl_alloc",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "free_32K_alloc",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "free_alloc_512K",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "free_fixed_128K",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "free_fixed_no_scatter_gather",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "free_fixed_no_scatter_gather_again",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "free_no_scatter_gather",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "free_no_scatter_gather_again",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "free_slab",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "init",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "init_slabs",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "no_more_slabs",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "ops",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "revert_partial_slab",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "revert_second_partial_slab",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "sgt_ops",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "simple_32K_alloc",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "simple_alloc_512K",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "slabs_alloc_32K",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "slabs_alloc_8K",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "slabs_fault_at_alloc_cache",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "slabs_fault_at_page_cache",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
-    {
-        "test": "slabs_fault_at_sgl_alloc",
-        "test_level": 0,
-        "unit": "page_allocator"
-    },
     {
         "test": "gmmu_clean",
         "test_level": 0,
@@ -1094,11 +889,6 @@
         "test_level": 0,
         "unit": "page_table"
     },
-    {
-        "test": "gmmu_map_unmap_vidmem",
-        "test_level": 0,
-        "unit": "page_table"
-    },
     {
         "test": "gmmu_set_pte",
         "test_level": 0,
@@ -1517,31 +1307,6 @@
         "test_level": 0,
         "unit": "posix_mockio"
     },
-    {
-        "test": "nvgpu_pramin_dying",
-        "test_level": 0,
-        "unit": "pramin"
-    },
-    {
-        "test": "nvgpu_pramin_free_test_env",
-        "test_level": 0,
-        "unit": "pramin"
-    },
-    {
-        "test": "nvgpu_pramin_memset",
-        "test_level": 0,
-        "unit": "pramin"
-    },
-    {
-        "test": "nvgpu_pramin_rd_n_1_sgl",
-        "test_level": 0,
-        "unit": "pramin"
-    },
-    {
-        "test": "nvgpu_pramin_wr_n_3_sgl",
-        "test_level": 0,
-        "unit": "pramin"
-    },
     {
         "test": "batch",
         "test_level": 0,

View File

@@ -73,9 +73,11 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 		gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+#ifdef CONFIG_NVGPU_DGPU
 	/* Minimum HAL init for PRAMIN */
 	g->ops.bus.set_bar0_window = gk20a_bus_set_bar0_window;
 	g->ops.pramin.data032_r = pram_data032_r;
+#endif
 	/* vm should init with SYSMEM */
 	nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);

View File

@@ -33,6 +33,8 @@
 #include <nvgpu/page_allocator.h>
+#ifdef CONFIG_NVGPU_DGPU
 #define BA_DEFAULT_BASE SZ_4K
 #define BA_DEFAULT_LENGTH SZ_1M
 #define BA_DEFAULT_BLK_SIZE SZ_4K
@@ -589,8 +591,10 @@ static int test_nvgpu_page_allocator_init(struct unit_module *m,
 	return UNIT_SUCCESS;
 }
+#endif
 struct unit_module_test page_allocator_tests[] = {
+#ifdef CONFIG_NVGPU_DGPU
 	/* These tests create and evaluate page_allocator w/o 4K VIDMEM pages */
 	UNIT_TEST(init, test_nvgpu_page_allocator_init, NULL, 0),
 	UNIT_TEST(ops, test_nvgpu_page_allocator_ops, NULL, 0),
@@ -668,6 +672,7 @@ struct unit_module_test page_allocator_tests[] = {
 	UNIT_TEST(no_more_slabs, test_page_alloc, (void *) &failing_alloc_16K, 0),
 	UNIT_TEST(destroy_slabs, test_nvgpu_page_allocator_destroy, NULL, 0),
+#endif
 };
 UNIT_MODULE(page_allocator, page_allocator_tests, UNIT_PRIO_NVGPU_TEST);
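Note: the same registration pattern recurs in the gmmu, nvgpu_mem, and pramin test modules below. Reduced to a skeleton (the module and test names here are placeholders, not part of the patch):

struct unit_module_test example_tests[] = {
	/* Registered in every build. */
	UNIT_TEST(sysmem_case, test_sysmem_case, NULL, 0),
#ifdef CONFIG_NVGPU_DGPU
	/* Registered only when vidmem support is compiled in. */
	UNIT_TEST(vidmem_case, test_vidmem_case, NULL, 0),
#endif
};

UNIT_MODULE(example, example_tests, UNIT_PRIO_NVGPU_TEST);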

View File

@@ -212,6 +212,7 @@ static struct test_parameters test_iommu_sysmem_adv_small_sparse = {
 	.special_null_phys = true,
 };
+#ifdef CONFIG_NVGPU_DGPU
 static struct test_parameters test_no_iommu_vidmem = {
 	.aperture = APERTURE_VIDMEM,
 	.is_iommuable = false,
@@ -219,6 +220,7 @@ static struct test_parameters test_no_iommu_vidmem = {
 	.flags = NVGPU_VM_MAP_CACHEABLE,
 	.priv = false,
 };
+#endif
 static struct test_parameters test_no_iommu_sysmem_noncacheable = {
 	.aperture = APERTURE_SYSMEM,
@@ -1190,8 +1192,10 @@ struct unit_module_test nvgpu_gmmu_tests[] = {
 		(void *) &test_iommu_sysmem_ro_fixed, 0),
 	UNIT_TEST(gmmu_map_unmap_no_iommu_sysmem, test_nvgpu_gmmu_map_unmap,
 		(void *) &test_no_iommu_sysmem, 0),
+#ifdef CONFIG_NVGPU_DGPU
 	UNIT_TEST(gmmu_map_unmap_vidmem, test_nvgpu_gmmu_map_unmap,
 		(void *) &test_no_iommu_vidmem, 0),
+#endif
 	UNIT_TEST(gmmu_map_unmap_iommu_sysmem_coh, test_nvgpu_gmmu_map_unmap,
 		(void *) &test_iommu_sysmem_coh, 0),
 	UNIT_TEST(gmmu_set_pte, test_nvgpu_gmmu_set_pte,

View File

@@ -58,6 +58,7 @@
 static struct nvgpu_mem *test_mem;
+#ifdef CONFIG_NVGPU_DGPU
 /*
  * Pramin write callback (for all nvgpu_writel calls).
  * No-op as callbacks/functions are already tested in pramin module.
@@ -197,6 +198,7 @@ static int test_nvgpu_mem_vidmem(struct unit_module *m,
 	return UNIT_SUCCESS;
 }
+#endif
 /*
  * Test nvgpu_aperture_mask()
@@ -210,6 +212,7 @@ static int test_nvgpu_aperture_mask(struct unit_module *m,
 	u32 vidmem_mask = 4;
 	u32 ret_ap_mask;
+#ifdef CONFIG_NVGPU_DGPU
 	/* Case: APERTURE_VIDMEM */
 	test_mem->aperture = APERTURE_VIDMEM;
 	ret_ap_mask = nvgpu_aperture_mask(g, test_mem, sysmem_mask,
@@ -217,6 +220,7 @@ static int test_nvgpu_aperture_mask(struct unit_module *m,
 	if (ret_ap_mask != vidmem_mask) {
 		unit_return_fail(m, "Vidmem mask returned incorrect\n");
 	}
+#endif
 	/*
 	 * NVGPU_MM_HONORS_APERTURE enabled
@@ -262,6 +266,7 @@ static int test_nvgpu_aperture_mask(struct unit_module *m,
 	 */
 	nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, false);
+#ifdef CONFIG_NVGPU_DGPU
 	/* Case: APERTURE_SYSMEM */
 	test_mem->aperture = APERTURE_SYSMEM;
 	ret_ap_mask = nvgpu_aperture_mask(g, test_mem, sysmem_mask,
@@ -279,6 +284,7 @@ static int test_nvgpu_aperture_mask(struct unit_module *m,
 		unit_return_fail(m, "MM_HONORS disabled: "
 			"Incorrect mask returned for sysmem_coh\n");
 	}
+#endif
 	/* Case: APERTURE_INVALID */
 	test_mem->aperture = APERTURE_INVALID;
@@ -773,8 +779,9 @@ struct unit_module_test nvgpu_mem_tests[] = {
 	 * Tests covering VIDMEM branches
 	 */
 	UNIT_TEST(nvgpu_aperture_mask, test_nvgpu_aperture_mask, NULL, 0),
+#ifdef CONFIG_NVGPU_DGPU
 	UNIT_TEST(nvgpu_mem_vidmem, test_nvgpu_mem_vidmem, NULL, 0),
+#endif
 	/*
 	 * Free test should be executed at the end to free allocated memory.
 	 * As nvgpu_mem doesn't not have an explicit free function for sysmem,
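Note: the guarded cases exercise nvgpu_aperture_mask(), which maps mem->aperture to one of three caller-supplied masks; only dGPU builds have a vidmem case to check. A condensed sketch of the guarded check; the helper name and the 1/2/4 mask values are illustrative, and the five-argument signature is assumed from the truncated call above:

#ifdef CONFIG_NVGPU_DGPU
static int check_vidmem_mask(struct unit_module *m, struct gk20a *g,
			     struct nvgpu_mem *mem)
{
	u32 sysmem_mask = 1U, sysmem_coh_mask = 2U, vidmem_mask = 4U;

	mem->aperture = APERTURE_VIDMEM;
	if (nvgpu_aperture_mask(g, mem, sysmem_mask, sysmem_coh_mask,
				vidmem_mask) != vidmem_mask) {
		unit_return_fail(m, "Vidmem mask returned incorrect\n");
	}

	return UNIT_SUCCESS;
}
#endif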

View File

@@ -43,6 +43,8 @@
 #include <os/linux/driver_common.h>
+#ifdef CONFIG_NVGPU_DGPU
 static u32 *rand_test_data;
 static u32 *vidmem;
@@ -497,13 +499,16 @@ static int test_pramin_nvgpu_dying(struct unit_module *m, struct gk20a *g,
 	nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, false);
 	return UNIT_SUCCESS;
 }
+#endif
 struct unit_module_test pramin_tests[] = {
+#ifdef CONFIG_NVGPU_DGPU
 	UNIT_TEST(nvgpu_pramin_rd_n_1_sgl, test_pramin_rd_n_single, NULL, 0),
 	UNIT_TEST(nvgpu_pramin_wr_n_3_sgl, test_pramin_wr_n_multi, NULL, 0),
 	UNIT_TEST(nvgpu_pramin_memset, test_pramin_memset, NULL, 0),
 	UNIT_TEST(nvgpu_pramin_dying, test_pramin_nvgpu_dying, NULL, 0),
 	UNIT_TEST(nvgpu_pramin_free_test_env, free_test_env, NULL, 0),
+#endif
 };
 UNIT_MODULE(pramin, pramin_tests, UNIT_PRIO_NVGPU_TEST);