gpu: nvgpu: add fb_mmu_fault_tu104.[ch]

Move MMU fault related functions from fb_tu104.c to
hal/fb/fb_mmu_fault_tu104.c.

Rename mmfault to mmufault, and rename the following functions:
fb_tu104_write_mmu_fault_buffer_lo_hi -> tu104_fb_write_mmu_fault_buffer_lo_hi
fb_tu104_read_mmu_fault_buffer_get -> tu104_fb_read_mmu_fault_buffer_get
fb_tu104_write_mmu_fault_buffer_get -> tu104_fb_write_mmu_fault_buffer_get
fb_tu104_read_mmu_fault_buffer_put -> tu104_fb_read_mmu_fault_buffer_put
fb_tu104_read_mmu_fault_buffer_size -> tu104_fb_read_mmu_fault_buffer_size
fb_tu104_write_mmu_fault_buffer_size -> tu104_fb_write_mmu_fault_buffer_size
fb_tu104_read_mmu_fault_addr_lo_hi -> tu104_fb_read_mmu_fault_addr_lo_hi
fb_tu104_read_mmu_fault_inst_lo_hi -> tu104_fb_read_mmu_fault_inst_lo_hi
fb_tu104_read_mmu_fault_info -> tu104_fb_read_mmu_fault_info
fb_tu104_read_mmu_fault_status -> tu104_fb_read_mmu_fault_status
fb_tu104_write_mmu_fault_status -> tu104_fb_write_mmu_fault_status
fb_tu104_mmu_invalidate_replay -> tu104_fb_mmu_invalidate_replay
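
All call sites are updated to the new names; for example, the tu104 HAL
initialization (see the hal_tu104.c hunk below) now registers:

.read_mmu_fault_status = tu104_fb_read_mmu_fault_status,
.mmu_invalidate_replay = tu104_fb_mmu_invalidate_replay,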

JIRA NVGPU-1313

Change-Id: I01a8d3dfb9d2c7a92987076b7beabea8f3e9f0a5
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2107773
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

Makefile

@@ -251,6 +251,7 @@ nvgpu-y += \
hal/fb/fb_gv100.o \
hal/fb/fb_tu104.o \
hal/fb/fb_mmu_fault_gv11b.o \
hal/fb/fb_mmu_fault_tu104.o \
hal/fb/intr/fb_intr_gv100.o \
hal/fb/intr/fb_intr_gv11b.o \
hal/fb/intr/fb_intr_tu104.o \

Makefile.sources

@@ -353,6 +353,7 @@ srcs += common/sim/sim.c \
hal/fb/fb_gv11b.c \
hal/fb/fb_tu104.c \
hal/fb/fb_mmu_fault_gv11b.c \
hal/fb/fb_mmu_fault_tu104.c \
hal/fb/intr/fb_intr_gv100.c \
hal/fb/intr/fb_intr_gv11b.c \
hal/fb/intr/fb_intr_tu104.c \

hal/fb/fb_mmu_fault_tu104.c (new file)

@@ -0,0 +1,241 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/log.h>
#include <nvgpu/types.h>
#include <nvgpu/timers.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/io.h>
#include <nvgpu/utils.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>
#include <nvgpu/mmu_fault.h>
#include "hal/mc/mc_tu104.h"
#include "hal/fb/fb_mmu_fault_gv11b.h"
#include "hal/fb/fb_mmu_fault_tu104.h"
#include "hal/mm/gmmu/gmmu_mmu_fault_gv11b.h"
#include "hal/func/func_tu104.h"
#include "nvgpu/hw/tu104/hw_fb_tu104.h"
#include "nvgpu/hw/tu104/hw_func_tu104.h"
void tu104_fb_handle_mmu_fault(struct gk20a *g)
{
u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
u32 nonreplay_fault = nvgpu_readl(g,
fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_NONREPLAY_REG_INDX));
u32 replay_fault = nvgpu_readl(g,
fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_REPLAY_REG_INDX));
u32 fault_status = g->ops.fb.read_mmu_fault_status(g);
nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status);
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_info_fault_vector_v(info_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_info_fault_vector_v(info_fault));
gv11b_fb_handle_dropped_mmu_fault(g, fault_status);
gv11b_gmmu_handle_other_fault_notify(g, fault_status);
}
if (gv11b_fb_is_fault_buf_enabled(g,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX)) {
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_fault_notify_v(
nonreplay_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_fault_notify_v(
nonreplay_fault));
gv11b_gmmu_handle_mmu_nonreplay_replay_fault(g,
fault_status,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX);
/*
* When all the faults are processed,
* GET and PUT will have same value and mmu fault status
* bit will be reset by HW
*/
}
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_fault_error_v(nonreplay_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_fault_error_v(nonreplay_fault));
gv11b_fb_handle_nonreplay_fault_overflow(g,
fault_status);
}
}
if (gv11b_fb_is_fault_buf_enabled(g,
NVGPU_MMU_FAULT_REPLAY_REG_INDX)) {
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_fault_notify_v(replay_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_fault_notify_v(replay_fault));
gv11b_gmmu_handle_mmu_nonreplay_replay_fault(g,
fault_status,
NVGPU_MMU_FAULT_REPLAY_REG_INDX);
}
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_fault_error_v(replay_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_fault_error_v(replay_fault));
gv11b_fb_handle_replay_fault_overflow(g,
fault_status);
}
}
nvgpu_log(g, gpu_dbg_intr, "clear mmu fault status");
g->ops.fb.write_mmu_fault_status(g,
fb_mmu_fault_status_valid_clear_f());
}
void tu104_fb_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
u32 addr_lo, u32 addr_hi)
{
nvgpu_func_writel(g,
func_priv_mmu_fault_buffer_lo_r(index), addr_lo);
nvgpu_func_writel(g,
func_priv_mmu_fault_buffer_hi_r(index), addr_hi);
}
u32 tu104_fb_read_mmu_fault_buffer_get(struct gk20a *g, u32 index)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_buffer_get_r(index));
}
void tu104_fb_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
u32 reg_val)
{
nvgpu_func_writel(g,
func_priv_mmu_fault_buffer_get_r(index),
reg_val);
}
u32 tu104_fb_read_mmu_fault_buffer_put(struct gk20a *g, u32 index)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_buffer_put_r(index));
}
u32 tu104_fb_read_mmu_fault_buffer_size(struct gk20a *g, u32 index)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_buffer_size_r(index));
}
void tu104_fb_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
u32 reg_val)
{
nvgpu_func_writel(g,
func_priv_mmu_fault_buffer_size_r(index),
reg_val);
}
void tu104_fb_read_mmu_fault_addr_lo_hi(struct gk20a *g,
u32 *addr_lo, u32 *addr_hi)
{
*addr_lo = nvgpu_func_readl(g,
func_priv_mmu_fault_addr_lo_r());
*addr_hi = nvgpu_func_readl(g,
func_priv_mmu_fault_addr_hi_r());
}
void tu104_fb_read_mmu_fault_inst_lo_hi(struct gk20a *g,
u32 *inst_lo, u32 *inst_hi)
{
*inst_lo = nvgpu_func_readl(g,
func_priv_mmu_fault_inst_lo_r());
*inst_hi = nvgpu_func_readl(g,
func_priv_mmu_fault_inst_hi_r());
}
u32 tu104_fb_read_mmu_fault_info(struct gk20a *g)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_info_r());
}
u32 tu104_fb_read_mmu_fault_status(struct gk20a *g)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_status_r());
}
void tu104_fb_write_mmu_fault_status(struct gk20a *g, u32 reg_val)
{
nvgpu_func_writel(g, func_priv_mmu_fault_status_r(),
reg_val);
}
int tu104_fb_mmu_invalidate_replay(struct gk20a *g,
u32 invalidate_replay_val)
{
int err = -ETIMEDOUT;
u32 reg_val;
struct nvgpu_timeout timeout;
nvgpu_log_fn(g, " ");
/* retry 200 times */
err = nvgpu_timeout_init(g, &timeout, 200U, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
return err;
}
nvgpu_mutex_acquire(&g->mm.tlb_lock);
reg_val = nvgpu_func_readl(g, func_priv_mmu_invalidate_r());
reg_val |= fb_mmu_invalidate_all_va_true_f() |
fb_mmu_invalidate_all_pdb_true_f() |
invalidate_replay_val |
fb_mmu_invalidate_trigger_true_f();
nvgpu_func_writel(g, func_priv_mmu_invalidate_r(), reg_val);
do {
reg_val = nvgpu_func_readl(g,
func_priv_mmu_invalidate_r());
if (fb_mmu_invalidate_trigger_v(reg_val) !=
fb_mmu_invalidate_trigger_true_v()) {
err = 0;
break;
}
nvgpu_udelay(5);
} while (nvgpu_timeout_expired_msg(&timeout,
"invalidate replay failed on 0x%llx") == 0);
if (err != 0) {
nvgpu_err(g, "invalidate replay timedout");
}
nvgpu_mutex_release(&g->mm.tlb_lock);
return err;
}
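
For context, a minimal sketch of how the replay invalidate could be
driven through the HAL once these ops are registered. The caller and
the choice of fb_mmu_invalidate_replay_cancel_global_f() as the mode
are illustrative assumptions, not part of this change:

/*
 * Hypothetical caller: globally cancel outstanding replayable faults
 * via the renamed HAL op. Assumes the cancel-global field helper from
 * hw_fb_tu104.h.
 */
static int example_cancel_replayable_faults(struct gk20a *g)
{
	return g->ops.fb.mmu_invalidate_replay(g,
			fb_mmu_invalidate_replay_cancel_global_f());
}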

hal/fb/fb_mmu_fault_tu104.h (new file)

@@ -0,0 +1,53 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FB_MMU_FAULT_TU104_H
#define NVGPU_FB_MMU_FAULT_TU104_H
#include <nvgpu/types.h>
struct gk20a;
void tu104_fb_handle_mmu_fault(struct gk20a *g);
void tu104_fb_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
u32 addr_lo, u32 addr_hi);
u32 tu104_fb_read_mmu_fault_buffer_get(struct gk20a *g, u32 index);
void tu104_fb_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
u32 reg_val);
u32 tu104_fb_read_mmu_fault_buffer_put(struct gk20a *g, u32 index);
u32 tu104_fb_read_mmu_fault_buffer_size(struct gk20a *g, u32 index);
void tu104_fb_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
u32 reg_val);
void tu104_fb_read_mmu_fault_addr_lo_hi(struct gk20a *g,
u32 *addr_lo, u32 *addr_hi);
void tu104_fb_read_mmu_fault_inst_lo_hi(struct gk20a *g,
u32 *inst_lo, u32 *inst_hi);
u32 tu104_fb_read_mmu_fault_info(struct gk20a *g);
u32 tu104_fb_read_mmu_fault_status(struct gk20a *g);
void tu104_fb_write_mmu_fault_status(struct gk20a *g, u32 reg_val);
int tu104_fb_mmu_invalidate_replay(struct gk20a *g,
u32 invalidate_replay_val);
#endif /* NVGPU_FB_MMU_FAULT_TU104_H */

hal/fb/fb_tu104.c

@@ -32,8 +32,6 @@
#include <nvgpu/bug.h>
#include "hal/fb/fb_gv11b.h"
#include "hal/fb/fb_mmu_fault_gv11b.h"
#include "hal/mm/gmmu/gmmu_mmu_fault_gv11b.h"
#include "hal/fb/fb_gv100.h"
#include "hal/mc/mc_tu104.h"
@@ -44,160 +42,6 @@
#include "nvgpu/hw/tu104/hw_fb_tu104.h"
#include "nvgpu/hw/tu104/hw_func_tu104.h"
void tu104_fb_handle_mmu_fault(struct gk20a *g)
{
u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
u32 nonreplay_fault = nvgpu_readl(g,
fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_NONREPLAY_REG_INDX));
u32 replay_fault = nvgpu_readl(g,
fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_REPLAY_REG_INDX));
u32 fault_status = g->ops.fb.read_mmu_fault_status(g);
nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status);
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_info_fault_vector_v(info_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_info_fault_vector_v(info_fault));
gv11b_fb_handle_dropped_mmu_fault(g, fault_status);
gv11b_gmmu_handle_other_fault_notify(g, fault_status);
}
if (gv11b_fb_is_fault_buf_enabled(g,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX)) {
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_fault_notify_v(nonreplay_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_fault_notify_v(nonreplay_fault));
gv11b_gmmu_handle_mmu_nonreplay_replay_fault(g,
fault_status,
NVGPU_MMU_FAULT_NONREPLAY_REG_INDX);
/*
* When all the faults are processed,
* GET and PUT will have same value and mmu fault status
* bit will be reset by HW
*/
}
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_fault_error_v(nonreplay_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_fault_error_v(nonreplay_fault));
gv11b_fb_handle_nonreplay_fault_overflow(g,
fault_status);
}
}
if (gv11b_fb_is_fault_buf_enabled(g,
NVGPU_MMU_FAULT_REPLAY_REG_INDX)) {
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_fault_notify_v(replay_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_fault_notify_v(replay_fault));
gv11b_gmmu_handle_mmu_nonreplay_replay_fault(g,
fault_status,
NVGPU_MMU_FAULT_REPLAY_REG_INDX);
}
if (intr_tu104_vector_intr_pending(g,
fb_mmu_int_vector_fault_error_v(replay_fault))) {
intr_tu104_intr_clear_leaf_vector(g,
fb_mmu_int_vector_fault_error_v(replay_fault));
gv11b_fb_handle_replay_fault_overflow(g,
fault_status);
}
}
nvgpu_log(g, gpu_dbg_intr, "clear mmu fault status");
g->ops.fb.write_mmu_fault_status(g,
fb_mmu_fault_status_valid_clear_f());
}
void fb_tu104_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
u32 addr_lo, u32 addr_hi)
{
nvgpu_func_writel(g,
func_priv_mmu_fault_buffer_lo_r(index), addr_lo);
nvgpu_func_writel(g,
func_priv_mmu_fault_buffer_hi_r(index), addr_hi);
}
u32 fb_tu104_read_mmu_fault_buffer_get(struct gk20a *g, u32 index)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_buffer_get_r(index));
}
void fb_tu104_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
u32 reg_val)
{
nvgpu_func_writel(g,
func_priv_mmu_fault_buffer_get_r(index),
reg_val);
}
u32 fb_tu104_read_mmu_fault_buffer_put(struct gk20a *g, u32 index)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_buffer_put_r(index));
}
u32 fb_tu104_read_mmu_fault_buffer_size(struct gk20a *g, u32 index)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_buffer_size_r(index));
}
void fb_tu104_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
u32 reg_val)
{
nvgpu_func_writel(g,
func_priv_mmu_fault_buffer_size_r(index),
reg_val);
}
void fb_tu104_read_mmu_fault_addr_lo_hi(struct gk20a *g,
u32 *addr_lo, u32 *addr_hi)
{
*addr_lo = nvgpu_func_readl(g,
func_priv_mmu_fault_addr_lo_r());
*addr_hi = nvgpu_func_readl(g,
func_priv_mmu_fault_addr_hi_r());
}
void fb_tu104_read_mmu_fault_inst_lo_hi(struct gk20a *g,
u32 *inst_lo, u32 *inst_hi)
{
*inst_lo = nvgpu_func_readl(g,
func_priv_mmu_fault_inst_lo_r());
*inst_hi = nvgpu_func_readl(g,
func_priv_mmu_fault_inst_hi_r());
}
u32 fb_tu104_read_mmu_fault_info(struct gk20a *g)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_info_r());
}
u32 fb_tu104_read_mmu_fault_status(struct gk20a *g)
{
return nvgpu_func_readl(g,
func_priv_mmu_fault_status_r());
}
void fb_tu104_write_mmu_fault_status(struct gk20a *g, u32 reg_val)
{
nvgpu_func_writel(g, func_priv_mmu_fault_status_r(),
reg_val);
}
int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
{
struct nvgpu_timeout timeout;
@@ -257,51 +101,6 @@ int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
return err;
}
int fb_tu104_mmu_invalidate_replay(struct gk20a *g,
u32 invalidate_replay_val)
{
int err = -ETIMEDOUT;
u32 reg_val;
struct nvgpu_timeout timeout;
nvgpu_log_fn(g, " ");
/* retry 200 times */
err = nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
return err;
}
nvgpu_mutex_acquire(&g->mm.tlb_lock);
reg_val = nvgpu_func_readl(g, func_priv_mmu_invalidate_r());
reg_val |= fb_mmu_invalidate_all_va_true_f() |
fb_mmu_invalidate_all_pdb_true_f() |
invalidate_replay_val |
fb_mmu_invalidate_trigger_true_f();
nvgpu_func_writel(g, func_priv_mmu_invalidate_r(), reg_val);
do {
reg_val = nvgpu_func_readl(g,
func_priv_mmu_invalidate_r());
if (fb_mmu_invalidate_trigger_v(reg_val) !=
fb_mmu_invalidate_trigger_true_v()) {
err = 0;
break;
}
nvgpu_udelay(5);
} while (nvgpu_timeout_expired_msg(&timeout,
"invalidate replay failed on 0x%llx") == 0);
if (err != 0) {
nvgpu_err(g, "invalidate replay timedout");
}
nvgpu_mutex_release(&g->mm.tlb_lock);
return err;
}
void tu104_fb_cbc_configure(struct gk20a *g, struct nvgpu_cbc *cbc)
{
u64 base_divisor;

hal/fb/fb_tu104.h

@@ -26,37 +26,13 @@
#include <nvgpu/types.h>
struct gk20a;
struct gr_gk20a;
struct nvgpu_mem;
struct nvgpu_cbc;
void tu104_fb_handle_mmu_fault(struct gk20a *g);
void fb_tu104_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
u32 addr_lo, u32 addr_hi);
u32 fb_tu104_read_mmu_fault_buffer_get(struct gk20a *g, u32 index);
void fb_tu104_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
u32 reg_val);
u32 fb_tu104_read_mmu_fault_buffer_put(struct gk20a *g, u32 index);
u32 fb_tu104_read_mmu_fault_buffer_size(struct gk20a *g, u32 index);
void fb_tu104_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
u32 reg_val);
void fb_tu104_read_mmu_fault_addr_lo_hi(struct gk20a *g,
u32 *addr_lo, u32 *addr_hi);
void fb_tu104_read_mmu_fault_inst_lo_hi(struct gk20a *g,
u32 *inst_lo, u32 *inst_hi);
u32 fb_tu104_read_mmu_fault_info(struct gk20a *g);
u32 fb_tu104_read_mmu_fault_status(struct gk20a *g);
void fb_tu104_write_mmu_fault_status(struct gk20a *g, u32 reg_val);
int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb);
int fb_tu104_mmu_invalidate_replay(struct gk20a *g,
u32 invalidate_replay_val);
int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb);
void tu104_fb_cbc_configure(struct gk20a *g, struct nvgpu_cbc *cbc);
int tu104_fb_apply_pdb_cache_war(struct gk20a *g);
int tu104_fb_apply_pdb_cache_war(struct gk20a *g);
size_t tu104_fb_get_vidmem_size(struct gk20a *g);
int tu104_fb_enable_nvlink(struct gk20a *g);
int tu104_fb_enable_nvlink(struct gk20a *g);
#endif /* NVGPU_FB_TU104_H */

hal/fb/intr/fb_intr_tu104.c

@@ -25,6 +25,7 @@
#include <nvgpu/gk20a.h>
#include "hal/fb/fb_tu104.h"
#include "hal/fb/fb_mmu_fault_tu104.h"
#include "hal/mc/mc_tu104.h"
#include "fb_intr_ecc_gv11b.h"

hal_tu104.c

@@ -58,6 +58,7 @@
#include "hal/fb/fb_gv100.h"
#include "hal/fb/fb_tu104.h"
#include "hal/fb/fb_mmu_fault_gv11b.h"
#include "hal/fb/fb_mmu_fault_tu104.h"
#include "hal/fb/intr/fb_intr_tu104.h"
#include "hal/ptimer/ptimer_gk20a.h"
#include "hal/regops/regops_tu104.h"
@@ -872,23 +873,23 @@ static const struct gpu_ops tu104_ops = {
.init_fbpa = tu104_fbpa_init,
.handle_fbpa_intr = tu104_fbpa_handle_intr,
.write_mmu_fault_buffer_lo_hi =
fb_tu104_write_mmu_fault_buffer_lo_hi,
tu104_fb_write_mmu_fault_buffer_lo_hi,
.write_mmu_fault_buffer_get =
fb_tu104_write_mmu_fault_buffer_get,
tu104_fb_write_mmu_fault_buffer_get,
.write_mmu_fault_buffer_size =
fb_tu104_write_mmu_fault_buffer_size,
.write_mmu_fault_status = fb_tu104_write_mmu_fault_status,
tu104_fb_write_mmu_fault_buffer_size,
.write_mmu_fault_status = tu104_fb_write_mmu_fault_status,
.read_mmu_fault_buffer_get =
fb_tu104_read_mmu_fault_buffer_get,
tu104_fb_read_mmu_fault_buffer_get,
.read_mmu_fault_buffer_put =
fb_tu104_read_mmu_fault_buffer_put,
tu104_fb_read_mmu_fault_buffer_put,
.read_mmu_fault_buffer_size =
fb_tu104_read_mmu_fault_buffer_size,
.read_mmu_fault_addr_lo_hi = fb_tu104_read_mmu_fault_addr_lo_hi,
.read_mmu_fault_inst_lo_hi = fb_tu104_read_mmu_fault_inst_lo_hi,
.read_mmu_fault_info = fb_tu104_read_mmu_fault_info,
.read_mmu_fault_status = fb_tu104_read_mmu_fault_status,
.mmu_invalidate_replay = fb_tu104_mmu_invalidate_replay,
tu104_fb_read_mmu_fault_buffer_size,
.read_mmu_fault_addr_lo_hi = tu104_fb_read_mmu_fault_addr_lo_hi,
.read_mmu_fault_inst_lo_hi = tu104_fb_read_mmu_fault_inst_lo_hi,
.read_mmu_fault_info = tu104_fb_read_mmu_fault_info,
.read_mmu_fault_status = tu104_fb_read_mmu_fault_status,
.mmu_invalidate_replay = tu104_fb_mmu_invalidate_replay,
.is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
.fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
.fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
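
Finally, a minimal sketch of how a consumer might combine the renamed
lo/hi accessors into a 64-bit fault address. The helper name and the
plain shift-and-or combination are assumptions for illustration, not
part of this change:

static u64 example_read_mmu_fault_addr(struct gk20a *g)
{
	u32 addr_lo = 0U;
	u32 addr_hi = 0U;

	g->ops.fb.read_mmu_fault_addr_lo_hi(g, &addr_lo, &addr_hi);

	/* Assumes the hi word carries the upper 32 bits of the address. */
	return ((u64)addr_hi << 32) | (u64)addr_lo;
}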