gpu: nvgpu: move trigger_mmu_fault to hal/fifo

The trigger_mmu_fault function is moved to hal/fifo/mmu_fault_gm20b.c, and its HAL entry is regrouped under the mmu fault HALs in gpu_ops.

JIRA NVGPU-1313

Change-Id: Ie31d53935d5b18e5788ffbac444ca90d0594258b
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2083090
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Seema Khowala on 2019-03-27 11:45:19 -07:00; committed by mobile promotions
commit 823ce5df82 (parent e3e8138404)
9 changed files with 141 additions and 71 deletions
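The relocated unit still has to be hooked into each chip's HAL table; this change only adds the new header include to the gm20b and gp10b HAL init files shown below. A minimal sketch of what the assignment presumably looks like in those tables (the surrounding initializer and field grouping are assumptions, not part of this diff):

/* Sketch only: per-chip HAL init, e.g. the gm20b gpu_ops table. */
#include "hal/fifo/mmu_fault_gm20b.h"

static const struct gpu_ops gm20b_ops = {
	.fifo = {
		/* ... other fifo HAL ops ... */
		.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
	},
	/* ... remaining units ... */
};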


@@ -244,6 +244,7 @@ nvgpu-y += \
	hal/fifo/userd_gv11b.o \
	hal/fifo/fifo_intr_gk20a.o \
	hal/fifo/fifo_intr_gv11b.o \
	hal/fifo/mmu_fault_gm20b.o \
	hal/fifo/ctxsw_timeout_gk20a.o \
	hal/fifo/ctxsw_timeout_gv11b.o \
	hal/falcon/falcon_gk20a.o \


@@ -376,6 +376,7 @@ srcs += common/sim.c \
	hal/fifo/userd_gv11b.c \
	hal/fifo/fifo_intr_gk20a.c \
	hal/fifo/fifo_intr_gv11b.c \
	hal/fifo/mmu_fault_gm20b.c \
	hal/fifo/ctxsw_timeout_gk20a.c \
	hal/fifo/ctxsw_timeout_gv11b.c \
	hal/falcon/falcon_gk20a.c \


@@ -42,72 +42,6 @@
#include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
#include <nvgpu/hw/gm20b/hw_pbdma_gm20b.h>

static inline u32 gm20b_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
{
	u32 fault_id = FIFO_INVAL_MMU_ID;
	struct fifo_engine_info_gk20a *engine_info;

	engine_info = nvgpu_engine_get_active_eng_info(g, engine_id);

	if (engine_info != NULL) {
		fault_id = engine_info->fault_id;
	} else {
		nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
	}
	return fault_id;
}

void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
		unsigned long engine_ids)
{
	unsigned long delay = POLL_DELAY_MIN_US;
	unsigned long engine_id;
	int ret;
	struct nvgpu_timeout timeout;

	/* trigger faults for all bad engines */
	for_each_set_bit(engine_id, &engine_ids, 32UL) {
		if (!nvgpu_engine_check_valid_id(g, (u32)engine_id)) {
			nvgpu_err(g, "faulting unknown engine %ld", engine_id);
		} else {
			u32 mmu_id = gm20b_engine_id_to_mmu_id(g,
					engine_id);
			if (mmu_id != FIFO_INVAL_MMU_ID) {
				gk20a_writel(g, fifo_trigger_mmu_fault_r(mmu_id),
						fifo_trigger_mmu_fault_enable_f(1U));
			}
		}
	}

	ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
			NVGPU_TIMER_CPU_TIMER);
	if (ret != 0) {
		nvgpu_err(g, "timeout init failed err=%d", ret);
	}

	/* Wait for MMU fault to trigger */
	ret = -EBUSY;
	do {
		if ((gk20a_readl(g, fifo_intr_0_r()) &
				fifo_intr_0_mmu_fault_pending_f()) != 0U) {
			ret = 0;
			break;
		}

		nvgpu_usleep_range(delay, delay * 2UL);
		delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
	} while (nvgpu_timeout_expired(&timeout) == 0);

	if (ret != 0) {
		nvgpu_err(g, "mmu fault timeout");
	}

	/* release mmu fault trigger */
	for_each_set_bit(engine_id, &engine_ids, 32UL) {
		gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
	}
}

void gm20b_fifo_tsg_verify_status_ctx_reload(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;


@@ -1,7 +1,7 @@
/*
* GM20B Fifo
*
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,8 +29,6 @@ struct gk20a;
struct fifo_gk20a;
struct mmu_fault_info;
void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
		unsigned long engine_ids);
void gm20b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f);
void gm20b_fifo_tsg_verify_status_ctx_reload(struct channel_gk20a *ch);
void gm20b_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault);


@@ -64,6 +64,7 @@
#include "hal/fifo/userd_gk20a.h"
#include "hal/fifo/fifo_intr_gk20a.h"
#include "hal/fifo/ctxsw_timeout_gk20a.h"
#include "hal/fifo/mmu_fault_gm20b.h"
#include "hal/gr/zbc/zbc_gm20b.h"
#include "hal/gr/zcull/zcull_gm20b.h"
#include "hal/gr/falcon/gr_falcon_gm20b.h"


@@ -75,6 +75,7 @@
#include "hal/fifo/tsg_gk20a.h"
#include "hal/fifo/userd_gk20a.h"
#include "hal/fifo/fifo_intr_gk20a.h"
#include "hal/fifo/mmu_fault_gm20b.h"
#include "hal/fifo/ctxsw_timeout_gk20a.h"
#include "hal/gr/fecs_trace/fecs_trace_gm20b.h"
#include "hal/gr/fecs_trace/fecs_trace_gp10b.h"


@@ -0,0 +1,102 @@
/*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/timers.h>
#include <nvgpu/log.h>
#include <nvgpu/io.h>
#include <nvgpu/fifo.h>
#include <nvgpu/engines.h>
#include <hal/fifo/mmu_fault_gm20b.h>
#include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>

static inline u32 gm20b_engine_id_to_fault_id(struct gk20a *g,
		u32 engine_id)
{
	u32 fault_id = INVAL_ID;
	struct fifo_engine_info_gk20a *engine_info;

	engine_info = nvgpu_engine_get_active_eng_info(g, engine_id);

	if (engine_info != NULL) {
		fault_id = engine_info->fault_id;
	} else {
		nvgpu_err(g, "engine_id is not in active list/invalid %d",
				engine_id);
	}
	return fault_id;
}

void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
		unsigned long engine_ids_bitmask)
{
	unsigned long poll_delay = POLL_DELAY_MIN_US;
	unsigned long engine_id;
	int ret;
	struct nvgpu_timeout timeout;
	u32 fault_id;

	/* set trigger mmu fault */
	for_each_set_bit(engine_id, &engine_ids_bitmask, 32UL) {
		if (!nvgpu_engine_check_valid_id(g, (u32)engine_id)) {
			nvgpu_err(g, "faulting unknown engine %ld", engine_id);
			continue;
		}
		fault_id = gm20b_engine_id_to_fault_id(g, (u32)engine_id);
		if (fault_id == INVAL_ID) {
			continue;
		}
		nvgpu_writel(g, fifo_trigger_mmu_fault_r(fault_id),
				fifo_trigger_mmu_fault_enable_f(1U));
	}

	ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
			NVGPU_TIMER_CPU_TIMER);
	if (ret != 0) {
		nvgpu_err(g, "timeout init failed err=%d", ret);
	}

	/* Wait for MMU fault to trigger */
	ret = -EBUSY;
	do {
		if ((nvgpu_readl(g, fifo_intr_0_r()) &
				fifo_intr_0_mmu_fault_pending_f()) != 0U) {
			ret = 0;
			break;
		}

		nvgpu_usleep_range(poll_delay, poll_delay * 2UL);
		poll_delay = min_t(u32, poll_delay << 1, POLL_DELAY_MAX_US);
	} while (nvgpu_timeout_expired(&timeout) == 0);

	if (ret != 0) {
		nvgpu_err(g, "timeout: failed to trigger mmu fault");
	}

	/* release trigger mmu fault */
	for_each_set_bit(engine_id, &engine_ids_bitmask, 32UL) {
		nvgpu_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
	}
}
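A hedged usage sketch of the relocated function: recovery code that wants to fault a single engine passes a bitmask keyed by engine id. The wrapper below is hypothetical; only gm20b_fifo_trigger_mmu_fault itself comes from this change.

#include <nvgpu/gk20a.h>
#include "hal/fifo/mmu_fault_gm20b.h"

/* Hypothetical helper: trigger an MMU fault on one engine and wait for it. */
static void example_trigger_fault_on_engine(struct gk20a *g, u32 engine_id)
{
	/* each set bit in the mask selects an engine to fault */
	unsigned long engine_ids_bitmask = 1UL << engine_id;

	gm20b_fifo_trigger_mmu_fault(g, engine_ids_bitmask);
}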


@@ -0,0 +1,31 @@
/*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/

#ifndef NVGPU_FIFO_MMU_FAULT_GM20B_H
#define NVGPU_FIFO_MMU_FAULT_GM20B_H

struct gk20a;

void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
		unsigned long engine_ids_bitmask);

#endif /* NVGPU_FIFO_MMU_FAULT_GM20B_H */


@@ -934,8 +934,6 @@ struct gpu_ops {
		int (*tsg_verify_channel_status)(struct channel_gk20a *ch);
		void (*tsg_verify_status_ctx_reload)(struct channel_gk20a *ch);
		void (*tsg_verify_status_faulted)(struct channel_gk20a *ch);
		void (*trigger_mmu_fault)(struct gk20a *g,
				unsigned long engine_ids);
		void (*get_mmu_fault_info)(struct gk20a *g, u32 mmu_fault_id,
				struct mmu_fault_info *mmfault);
		void (*get_mmu_fault_desc)(struct mmu_fault_info *mmfault);
@@ -1002,6 +1000,9 @@ struct gpu_ops {
		bool (*handle_sched_error)(struct gk20a *g);
		void (*ctxsw_timeout_enable)(struct gk20a *g, bool enable);
		bool (*handle_ctxsw_timeout)(struct gk20a *g);
		/* mmu fault hals */
		void (*trigger_mmu_fault)(struct gk20a *g,
				unsigned long engine_ids_bitmask);
	} fifo;
	struct {
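Chip-independent code is expected to reach the op through gpu_ops rather than by calling the gm20b function directly; since not every chip populates this pointer, a guarded call along these lines is the assumed pattern (sketch only, not taken from this change):

/* Sketch: common recovery code invoking the HAL op only when the chip provides it. */
if (g->ops.fifo.trigger_mmu_fault != NULL) {
	g->ops.fifo.trigger_mmu_fault(g, engine_ids_bitmask);
}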