gpu: nvgpu: Move gv11b MMU fault handling to HAL

Move the gv11b MMU fault handling code into a new mm.mmu_fault HAL.
Also move the existing gmmu_mmu_fault HAL code into this new HAL, since
the two are essentially the same logical entity.
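
For reference, the shape of the resulting interface is sketched below. This is
a minimal, self-contained illustration: the wrapper type struct gops_mm and the
example_mm_suspend() helper are stand-ins invented for the sketch, while the
mmu_fault sub-struct, its four hooks, and the g->ops.mm.mmu_fault.* call style
mirror the change itself.

/*
 * Illustrative sketch only -- not the real nvgpu headers. The wrapper
 * type (struct gops_mm here) and example_mm_suspend() are stand-ins;
 * the mmu_fault sub-struct and its four hooks mirror the new HAL.
 */
struct gk20a;

struct gops_mm {
        /* ...other mm HAL pointers elided... */
        struct {
                int (*setup_sw)(struct gk20a *g);
                void (*setup_hw)(struct gk20a *g);
                void (*info_mem_destroy)(struct gk20a *g);
                void (*disable_hw)(struct gk20a *g);
        } mmu_fault;
};

struct gpu_ops {
        struct gops_mm mm;
};

struct gk20a {
        struct gpu_ops ops;
};

/*
 * Common code reaches the hooks as g->ops.mm.mmu_fault.* and still
 * NULL-checks them, as nvgpu_mm_suspend() does in the diff below.
 */
static void example_mm_suspend(struct gk20a *g)
{
        if (g->ops.mm.mmu_fault.disable_hw != NULL) {
                g->ops.mm.mmu_fault.disable_hw(g);
        }
}

Grouping the hooks this way lets the gv100, gv11b, tu104 and vgpu ops tables
wire up the same entry points in one place instead of carrying separate
top-level fault_info_mem_destroy and mmu_fault_disable_hw pointers.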

JIRA NVGPU-2042
JIRA NVGPU-1313

Change-Id: I41d3e180c762f191d4de3237e9052bdc456f9e4c
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2109693
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Alex Waterman
Date:      2019-05-01 13:26:30 -07:00
Committer: mobile promotions
Parent:    cfb4ff0bfb
Commit:    c053bc0226

17 changed files with 167 additions and 232 deletions


@@ -155,11 +155,11 @@ nvgpu-y += \
 hal/mm/mm_gv11b.o \
 hal/mm/cache/flush_gk20a.o \
 hal/mm/cache/flush_gv11b.o \
-hal/mm/gmmu/gmmu_mmu_fault_gv11b.o \
 hal/mm/gmmu/gmmu_gk20a.o \
 hal/mm/gmmu/gmmu_gm20b.o \
 hal/mm/gmmu/gmmu_gp10b.o \
 hal/mm/gmmu/gmmu_gv11b.o \
+hal/mm/mmu_fault/mmu_fault_gv11b.o \
 hal/mc/mc_gm20b.o \
 hal/mc/mc_gp10b.o \
 hal/mc/mc_gv11b.o \
@@ -601,6 +601,5 @@ nvgpu-$(CONFIG_GK20A_CYCLE_STATS) += \
 common/cyclestats/cyclestats.o
 nvgpu-y += \
-gv11b/mmu_fault_gv11b.o \
 gv100/mm_gv100.o \
 tu104/mm_tu104.o \


@@ -243,7 +243,6 @@ srcs += common/sim/sim.c \
 common/nvlink/nvlink_tu104.c \
 common/nvlink/nvlink.c \
 common/fence/fence.c \
-gv11b/mmu_fault_gv11b.c \
 gv100/mm_gv100.c \
 tu104/mm_tu104.c \
 hal/mm/mm_gk20a.c \
@@ -252,7 +251,7 @@ srcs += common/sim/sim.c \
 hal/mm/mm_gv11b.c \
 hal/mm/cache/flush_gk20a.c \
 hal/mm/cache/flush_gv11b.c \
-hal/mm/gmmu/gmmu_mmu_fault_gv11b.c \
+hal/mm/mmu_fault/mmu_fault_gv11b.c \
 hal/mm/gmmu/gmmu_gk20a.c \
 hal/mm/gmmu/gmmu_gm20b.c \
 hal/mm/gmmu/gmmu_gp10b.c \


@@ -135,8 +135,8 @@ int nvgpu_mm_suspend(struct gk20a *g)
 g->ops.fb.intr.disable(g);
 }
-if (g->ops.mm.mmu_fault_disable_hw != NULL) {
-g->ops.mm.mmu_fault_disable_hw(g);
+if (g->ops.mm.mmu_fault.disable_hw != NULL) {
+g->ops.mm.mmu_fault.disable_hw(g);
 }
 nvgpu_log_info(g, "MM suspend done!");
@@ -210,8 +210,8 @@ static void nvgpu_remove_mm_support(struct mm_gk20a *mm)
 nvgpu_dma_free(g, &mm->mmu_wr_mem);
 nvgpu_dma_free(g, &mm->mmu_rd_mem);
-if (g->ops.mm.fault_info_mem_destroy != NULL) {
-g->ops.mm.fault_info_mem_destroy(g);
+if (g->ops.mm.mmu_fault.info_mem_destroy != NULL) {
+g->ops.mm.mmu_fault.info_mem_destroy(g);
 }
 if (g->ops.mm.remove_bar2_vm != NULL) {


@@ -28,6 +28,7 @@
 #include "hal/mm/gmmu/gmmu_gk20a.h"
 #include "hal/mm/gmmu/gmmu_gm20b.h"
 #include "hal/mm/gmmu/gmmu_gp10b.h"
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 #include "hal/regops/regops_gv11b.h"
 #include "hal/class/class_gv11b.h"
 #include "hal/fifo/fifo_gv11b.h"
@@ -670,10 +671,12 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .init_inst_block = gv11b_mm_init_inst_block,
 .init_bar2_vm = gp10b_mm_init_bar2_vm,
 .remove_bar2_vm = gp10b_mm_remove_bar2_vm,
-.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
 .bar1_map_userd = vgpu_mm_bar1_map_userd,
 .vm_as_alloc_share = vgpu_vm_as_alloc_share,
 .vm_as_free_share = vgpu_vm_as_free_share,
+.mmu_fault = {
+.info_mem_destroy = gv11b_mm_mmu_fault_info_mem_destroy,
+},
 .cache = {
 .fb_flush = vgpu_mm_fb_flush,
 .l2_invalidate = vgpu_mm_l2_invalidate,


@@ -1,35 +0,0 @@
/*
* GV11B MM
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef MM_GV11B_H
#define MM_GV11B_H
struct gk20a;
void gv11b_mm_fault_info_mem_destroy(struct gk20a *g);
void gv11b_mm_mmu_fault_disable_hw(struct gk20a *g);
void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g);
int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g);
#endif


@@ -1,156 +0,0 @@
/*
* GV11B MMU
*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include <nvgpu/mm.h>
#include <nvgpu/enabled.h>
#include <nvgpu/gk20a.h>
#include "mm_gv11b.h"
#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
void gv11b_mm_mmu_fault_disable_hw(struct gk20a *g)
{
nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
if ((g->ops.fb.is_fault_buf_enabled(g,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX))) {
g->ops.fb.fault_buf_set_state_hw(g,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX,
NVGPU_MMU_FAULT_BUF_DISABLED);
}
if ((g->ops.fb.is_fault_buf_enabled(g,
NVGPU_MMU_FAULT_REPLAY_REG_INDX))) {
g->ops.fb.fault_buf_set_state_hw(g,
NVGPU_MMU_FAULT_REPLAY_REG_INDX,
NVGPU_MMU_FAULT_BUF_DISABLED);
}
nvgpu_mutex_release(&g->mm.hub_isr_mutex);
}
void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
{
struct vm_gk20a *vm = g->mm.bar2.vm;
nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
if (nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX])) {
nvgpu_dma_unmap_free(vm,
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX]);
}
if (nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX])) {
nvgpu_dma_unmap_free(vm,
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX]);
}
nvgpu_mutex_release(&g->mm.hub_isr_mutex);
nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
}
static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g)
{
return 0;
}
static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
{
struct vm_gk20a *vm = g->mm.bar2.vm;
int err = 0;
size_t fb_size;
/* Max entries take care of 1 entry used for full detection */
fb_size = ((size_t)g->ops.channel.count(g) + (size_t)1) *
(size_t)gmmu_fault_buf_size_v();
if (!nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX])) {
err = nvgpu_dma_alloc_map_sys(vm, fb_size,
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX]);
if (err != 0) {
nvgpu_err(g,
"Error in hw mmu fault buf [0] alloc in bar2 vm ");
/* Fault will be snapped in pri reg but not in buffer */
return;
}
}
if (!nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX])) {
err = nvgpu_dma_alloc_map_sys(vm, fb_size,
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX]);
if (err != 0) {
nvgpu_err(g,
"Error in hw mmu fault buf [1] alloc in bar2 vm ");
/* Fault will be snapped in pri reg but not in buffer */
return;
}
}
}
void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
{
if (nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX])) {
g->ops.fb.fault_buf_configure_hw(g,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX);
}
if (nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX])) {
g->ops.fb.fault_buf_configure_hw(g,
NVGPU_MMU_FAULT_REPLAY_REG_INDX);
}
}
int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
{
int err = 0;
nvgpu_log_fn(g, " ");
err = nvgpu_mutex_init(&g->mm.hub_isr_mutex);
if (err != 0) {
nvgpu_err(g, "Error in hub_isr_mutex initialization");
return err;
}
err = gv11b_mm_mmu_fault_info_buf_init(g);
if (err == 0) {
gv11b_mm_mmu_hw_fault_buf_init(g);
}
return err;
}


@@ -41,7 +41,7 @@
 #include <nvgpu/rc.h>
 #include "hal/fb/fb_mmu_fault_gv11b.h"
-#include "hal/mm/gmmu/gmmu_mmu_fault_gv11b.h"
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 #include "fb_gm20b.h"
 #include "fb_gp10b.h"


@@ -33,7 +33,7 @@
 #include "hal/mc/mc_tu104.h"
 #include "hal/fb/fb_mmu_fault_gv11b.h"
 #include "hal/fb/fb_mmu_fault_tu104.h"
-#include "hal/mm/gmmu/gmmu_mmu_fault_gv11b.h"
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 #include "hal/func/func_tu104.h"
 #include "nvgpu/hw/tu104/hw_fb_tu104.h"


@@ -29,6 +29,7 @@
 #include "hal/mm/gmmu/gmmu_gm20b.h"
 #include "hal/mm/gmmu/gmmu_gp10b.h"
 #include "hal/mm/gmmu/gmmu_gv11b.h"
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 #include "hal/mc/mc_gm20b.h"
 #include "hal/mc/mc_gp10b.h"
 #include "hal/mc/mc_gv11b.h"
@@ -153,8 +154,6 @@
 #include "common/vbios/bios_sw_gp106.h"
 #include "common/vbios/bios_sw_gv100.h"
-#include "gv11b/mm_gv11b.h"
 #include "hal_gv100.h"
 #include "hal_gv100_litter.h"
@@ -978,13 +977,13 @@ static const struct gpu_ops gv100_ops = {
 .init_inst_block = gv11b_mm_init_inst_block,
 .init_bar2_vm = gp10b_mm_init_bar2_vm,
 .remove_bar2_vm = gp10b_mm_remove_bar2_vm,
-.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
-.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw,
 .get_flush_retries = gv100_mm_get_flush_retries,
 .bar1_map_userd = NULL,
 .mmu_fault = {
 .setup_sw = gv11b_mm_mmu_fault_setup_sw,
 .setup_hw = gv11b_mm_mmu_fault_setup_hw,
+.info_mem_destroy = gv11b_mm_mmu_fault_info_mem_destroy,
+.disable_hw = gv11b_mm_mmu_fault_disable_hw,
 },
 .cache = {
 .fb_flush = gk20a_mm_fb_flush,


@@ -37,6 +37,7 @@
 #include "hal/mm/gmmu/gmmu_gm20b.h"
 #include "hal/mm/gmmu/gmmu_gp10b.h"
 #include "hal/mm/gmmu/gmmu_gv11b.h"
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 #include "hal/mc/mc_gm20b.h"
 #include "hal/mc/mc_gp10b.h"
 #include "hal/mc/mc_gv11b.h"
@@ -145,7 +146,6 @@
 #include "hal_gv11b.h"
 #include "hal_gv11b_litter.h"
-#include "gv11b/mm_gv11b.h"
 #include <nvgpu/ptimer.h>
 #include <nvgpu/error_notifier.h>
@@ -949,12 +949,12 @@ static const struct gpu_ops gv11b_ops = {
 .init_inst_block = gv11b_mm_init_inst_block,
 .init_bar2_vm = gp10b_mm_init_bar2_vm,
 .remove_bar2_vm = gp10b_mm_remove_bar2_vm,
-.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
-.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw,
 .bar1_map_userd = NULL,
 .mmu_fault = {
 .setup_sw = gv11b_mm_mmu_fault_setup_sw,
 .setup_hw = gv11b_mm_mmu_fault_setup_hw,
+.info_mem_destroy = gv11b_mm_mmu_fault_info_mem_destroy,
+.disable_hw = gv11b_mm_mmu_fault_disable_hw,
 },
 .cache = {
 .fb_flush = gk20a_mm_fb_flush,


@@ -30,6 +30,7 @@
 #include "hal/mm/gmmu/gmmu_gm20b.h"
 #include "hal/mm/gmmu/gmmu_gp10b.h"
 #include "hal/mm/gmmu/gmmu_gv11b.h"
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 #include "hal/mc/mc_gm20b.h"
 #include "hal/mc/mc_gp10b.h"
 #include "hal/mc/mc_gv11b.h"
@@ -175,8 +176,6 @@
 #include "common/vbios/bios_sw_gp106.h"
 #include "common/vbios/bios_sw_tu104.h"
-#include "gv11b/mm_gv11b.h"
 #include "tu104/mm_tu104.h"
 #include "hal/fbpa/fbpa_tu104.h"
 #include "hal_tu104.h"
@@ -1018,13 +1017,13 @@ static const struct gpu_ops tu104_ops = {
 .init_inst_block = gv11b_mm_init_inst_block,
 .init_bar2_vm = gp10b_mm_init_bar2_vm,
 .remove_bar2_vm = gp10b_mm_remove_bar2_vm,
-.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
-.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw,
 .get_flush_retries = tu104_mm_get_flush_retries,
 .bar1_map_userd = NULL,
 .mmu_fault = {
 .setup_sw = gv11b_mm_mmu_fault_setup_sw,
 .setup_hw = gv11b_mm_mmu_fault_setup_hw,
+.info_mem_destroy = gv11b_mm_mmu_fault_info_mem_destroy,
+.disable_hw = gv11b_mm_mmu_fault_disable_hw,
 },
 .cache = {
 .fb_flush = gk20a_mm_fb_flush,


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -41,11 +41,11 @@
 #include <nvgpu/rc.h>
 #include <nvgpu/mmu_fault.h>
-#include <hal/fb/fb_mmu_fault_gv11b.h>
-#include <hal/mm/gmmu/gmmu_mmu_fault_gv11b.h>
 #include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
+#include "hal/fb/fb_mmu_fault_gv11b.h"
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 static int gv11b_fb_fix_page_fault(struct gk20a *g,
 struct mmu_fault_info *mmufault);
@@ -594,3 +594,123 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g,
pte[1], pte[0]);
return err;
}
void gv11b_mm_mmu_fault_disable_hw(struct gk20a *g)
{
nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
if ((g->ops.fb.is_fault_buf_enabled(g,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX))) {
g->ops.fb.fault_buf_set_state_hw(g,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX,
NVGPU_MMU_FAULT_BUF_DISABLED);
}
if ((g->ops.fb.is_fault_buf_enabled(g,
NVGPU_MMU_FAULT_REPLAY_REG_INDX))) {
g->ops.fb.fault_buf_set_state_hw(g,
NVGPU_MMU_FAULT_REPLAY_REG_INDX,
NVGPU_MMU_FAULT_BUF_DISABLED);
}
nvgpu_mutex_release(&g->mm.hub_isr_mutex);
}
void gv11b_mm_mmu_fault_info_mem_destroy(struct gk20a *g)
{
struct vm_gk20a *vm = g->mm.bar2.vm;
nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
if (nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX])) {
nvgpu_dma_unmap_free(vm,
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX]);
}
if (nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX])) {
nvgpu_dma_unmap_free(vm,
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX]);
}
nvgpu_mutex_release(&g->mm.hub_isr_mutex);
nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
}
static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g)
{
return 0;
}
static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
{
struct vm_gk20a *vm = g->mm.bar2.vm;
int err = 0;
size_t fb_size;
/* Max entries take care of 1 entry used for full detection */
fb_size = ((size_t)g->ops.channel.count(g) + (size_t)1) *
(size_t)gmmu_fault_buf_size_v();
if (!nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX])) {
err = nvgpu_dma_alloc_map_sys(vm, fb_size,
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX]);
if (err != 0) {
nvgpu_err(g,
"Error in hw mmu fault buf [0] alloc in bar2 vm ");
/* Fault will be snapped in pri reg but not in buffer */
return;
}
}
if (!nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX])) {
err = nvgpu_dma_alloc_map_sys(vm, fb_size,
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX]);
if (err != 0) {
nvgpu_err(g,
"Error in hw mmu fault buf [1] alloc in bar2 vm ");
/* Fault will be snapped in pri reg but not in buffer */
return;
}
}
}
void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
{
if (nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX])) {
g->ops.fb.fault_buf_configure_hw(g,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX);
}
if (nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX])) {
g->ops.fb.fault_buf_configure_hw(g,
NVGPU_MMU_FAULT_REPLAY_REG_INDX);
}
}
int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
{
int err = 0;
nvgpu_log_fn(g, " ");
err = nvgpu_mutex_init(&g->mm.hub_isr_mutex);
if (err != 0) {
nvgpu_err(g, "Error in hub_isr_mutex initialization");
return err;
}
err = gv11b_mm_mmu_fault_info_buf_init(g);
if (err == 0) {
gv11b_mm_mmu_hw_fault_buf_init(g);
}
return err;
}


@@ -1,4 +1,5 @@
 /*
+ * GV11B MM
 * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -20,12 +21,18 @@
 * DEALINGS IN THE SOFTWARE.
 */
-#ifndef NVGPU_MM_GMMU_MMU_FAULT_GV11B_H
-#define NVGPU_MM_GMMU_MMU_FAULT_GV11B_H
+#ifndef HAL_MM_MMU_FAULT_MMU_FAULT_GV11B_H
+#define HAL_MM_MMU_FAULT_MMU_FAULT_GV11B_H
 struct gk20a;
 struct mmu_fault_info;
+void gv11b_mm_mmu_fault_info_mem_destroy(struct gk20a *g);
+void gv11b_mm_mmu_fault_disable_hw(struct gk20a *g);
+void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g);
+int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g);
 void gv11b_gmmu_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 u32 fault_status, u32 index);
 void gv11b_gmmu_handle_mmu_fault_common(struct gk20a *g,
@@ -34,4 +41,4 @@ void gv11b_gmmu_handle_other_fault_notify(struct gk20a *g, u32 fault_status);
 void gv11b_gmmu_parse_mmu_fault_info(struct mmu_fault_info *mmufault);
-#endif /* NVGPU_MM_GMMU_MMU_FAULT_GV11B_H */
+#endif


@@ -1269,8 +1269,6 @@ struct gpu_ops {
 void (*remove_bar2_vm)(struct gk20a *g);
 void (*init_inst_block)(struct nvgpu_mem *inst_block,
 struct vm_gk20a *vm, u32 big_page_size);
-void (*fault_info_mem_destroy)(struct gk20a *g);
-void (*mmu_fault_disable_hw)(struct gk20a *g);
 u32 (*get_flush_retries)(struct gk20a *g,
 enum nvgpu_flush_op op);
 u64 (*bar1_map_userd)(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
@@ -1279,6 +1277,8 @@ struct gpu_ops {
 struct {
 int (*setup_sw)(struct gk20a *g);
 void (*setup_hw)(struct gk20a *g);
+void (*info_mem_destroy)(struct gk20a *g);
+void (*disable_hw)(struct gk20a *g);
 } mmu_fault;
 struct {
 int (*fb_flush)(struct gk20a *g);


@@ -41,11 +41,11 @@ gv11b_fb_init_hw
 gv11b_fb_is_fault_buf_enabled
 gv11b_fb_intr_is_mmu_fault_pending
 gv11b_gpu_phys_addr
-gv11b_mm_fault_info_mem_destroy
 gv11b_mm_is_bar1_supported
 gv11b_mm_init_inst_block
 gv11b_mm_l2_flush
 gv11b_mm_mmu_fault_disable_hw
+gv11b_mm_mmu_fault_info_mem_destroy
 gv11b_mc_is_mmu_fault_pending
 nvgpu_addr_is_vidmem_page_alloc
 nvgpu_alloc


@@ -35,8 +35,7 @@
 #include <os/posix/os_posix.h>
 #include <nvgpu/posix/posix-fault-injection.h>
-#include <gv11b/mm_gv11b.h>
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 #include <hal/mm/mm_gv11b.h>
 #include <hal/mm/cache/flush_gk20a.h>
 #include <hal/mm/cache/flush_gv11b.h>


@@ -35,9 +35,9 @@
 #include <nvgpu/nvgpu_sgt.h>
 #include <nvgpu/fifo.h>
 #include "os/posix/os_posix.h"
-#include "gv11b/mm_gv11b.h"
 #include "common/fifo/channel_gv11b.h"
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 #include "hal/mm/mm_gv11b.h"
 #include "hal/mm/cache/flush_gk20a.h"
 #include "hal/mm/cache/flush_gv11b.h"
@@ -133,8 +133,9 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 /* New HALs for fault testing */
 g->ops.mc.is_mmu_fault_pending = gv11b_mc_is_mmu_fault_pending;
-g->ops.mm.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy;
-g->ops.mm.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw;
+g->ops.mm.mmu_fault.info_mem_destroy =
+gv11b_mm_mmu_fault_info_mem_destroy;
+g->ops.mm.mmu_fault.disable_hw = gv11b_mm_mmu_fault_disable_hw;
 g->ops.mm.setup_hw = nvgpu_mm_setup_hw;
 g->ops.mm.cache.l2_flush = gv11b_mm_l2_flush;
 g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
@@ -293,7 +294,7 @@ static int test_page_faults_pending(struct unit_module *m, struct gk20a *g,
 static int test_page_faults_disable_hw(struct unit_module *m, struct gk20a *g,
 void *args)
 {
-g->ops.mm.mmu_fault_disable_hw(g);
+g->ops.mm.mmu_fault.disable_hw(g);
 if (g->ops.fb.is_fault_buf_enabled(g,
 NVGPU_MMU_FAULT_NONREPLAY_REG_INDX)) {
 unit_return_fail(m, "Non-replay buf still enabled\n");
@@ -304,7 +305,7 @@ static int test_page_faults_disable_hw(struct unit_module *m, struct gk20a *g,
 }
 /* Call disable again to test some branches */
-g->ops.mm.mmu_fault_disable_hw(g);
+g->ops.mm.mmu_fault.disable_hw(g);
 return UNIT_SUCCESS;
 }
@@ -351,12 +352,12 @@ static int test_page_faults_clean(struct unit_module *m, struct gk20a *g,
 void *args)
 {
 g->log_mask = 0;
-g->ops.mm.fault_info_mem_destroy(g);
+g->ops.mm.mmu_fault.info_mem_destroy(g);
 nvgpu_vm_put(g->mm.pmu.vm);
 nvgpu_vm_put(g->mm.bar2.vm);
 /* Call again to test some branches */
-g->ops.mm.fault_info_mem_destroy(g);
+g->ops.mm.mmu_fault.info_mem_destroy(g);
 return UNIT_SUCCESS;
 }