mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 09:12:24 +03:00)
gpu: nvgpu: Enable falcon debug flag for safety debug
The falcon debug flag was previously disabled for the safety_debug profile. This patch enables the flag for safety_debug as well.

The copy_from_dmem function is required to copy debug info from the DMEM debug buffer whenever an error is generated. Hence, copy_from_dmem is moved from the non-FuSa file to the FuSa file and guarded so that it is compiled only when the non-FuSa or the falcon debug flag is set.

Also fixes type-conversion errors in falcon_debug.c seen during compilation.

Bug 3482988

Change-Id: Ic0ea32b3227b84d4ba0835e6e1aeb40f58ec7327
Signed-off-by: mpoojary <mpoojary@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2673900
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 2b2beb7fb6
commit e7c082aa66
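The core of the change is the preprocessor gate visible in the diff below: nvgpu_falcon_copy_from_dmem and its gk20a HAL implementation are now compiled when either CONFIG_NVGPU_FALCON_DEBUG or CONFIG_NVGPU_FALCON_NON_FUSA is defined, instead of only for non-FuSa builds. Before the patch the same code sat under #ifdef CONFIG_NVGPU_FALCON_NON_FUSA only, so a safety_debug build could not read the DMEM debug buffer on error. The following is a minimal, self-contained sketch of that gating pattern; the stub struct, the *_stub helper, and the demo main() are illustrative assumptions, not part of the nvgpu sources.

/* Minimal sketch of the config gating applied by this patch.
 * Assumption: simplified stand-ins for the real nvgpu types and HAL call. */
#include <stdio.h>
#include <string.h>

/* Defined here to mimic the safety_debug profile after this patch. */
#define CONFIG_NVGPU_FALCON_DEBUG 1

struct nvgpu_falcon { unsigned int flcn_base; };   /* stub, not the real struct */

#if defined(CONFIG_NVGPU_FALCON_DEBUG) || defined(CONFIG_NVGPU_FALCON_NON_FUSA)
/* Built for debug or non-FuSa configs; the real function takes dmem_lock and
 * dispatches to g->ops.falcon.copy_from_dmem(). */
static int falcon_copy_from_dmem_stub(struct nvgpu_falcon *flcn,
                unsigned int src, unsigned char *dst, unsigned int size)
{
        (void)flcn;
        (void)src;
        memset(dst, 0, size);   /* placeholder for the actual DMEM read */
        return 0;
}
#endif

int main(void)
{
        struct nvgpu_falcon flcn = { 0 };
        unsigned char buf[16];

#if defined(CONFIG_NVGPU_FALCON_DEBUG) || defined(CONFIG_NVGPU_FALCON_NON_FUSA)
        printf("copy_from_dmem available, rc = %d\n",
                falcon_copy_from_dmem_stub(&flcn, 0U, buf,
                        (unsigned int)sizeof(buf)));
#else
        printf("copy_from_dmem compiled out for this profile\n");
#endif
        return 0;
}

With the debug flag defined (as the Makefile hunk below does for non-safety_release profiles), the helper is part of the build; with both flags undefined it is compiled out, which was the pre-patch behaviour for FuSa builds.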
@@ -149,13 +149,14 @@ ifneq ($(profile),safety_release)
 CONFIG_NVGPU_TRACE := 1
 NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_TRACE
 
+CONFIG_NVGPU_FALCON_DEBUG := 1
+NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_FALCON_DEBUG
+
 #
 # Flags enabled only for regular build profile.
 #
 ifneq ($(profile),safety_debug)
 
-NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_FALCON_DEBUG
-
 CONFIG_NVGPU_SYSFS := 1
 NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_SYSFS
 
@@ -639,6 +639,33 @@ void nvgpu_falcon_dump_stats(struct nvgpu_falcon *flcn)
 }
 #endif
 
+#if defined(CONFIG_NVGPU_FALCON_DEBUG) || defined(CONFIG_NVGPU_FALCON_NON_FUSA)
+int nvgpu_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
+                u32 src, u8 *dst, u32 size, u8 port)
+{
+        int status = -EINVAL;
+        struct gk20a *g;
+
+        if (!is_falcon_valid(flcn)) {
+                return -EINVAL;
+        }
+
+        g = flcn->g;
+
+        if (falcon_memcpy_params_check(flcn, src, size, MEM_DMEM, port) != 0) {
+                nvgpu_err(g, "incorrect parameters");
+                goto exit;
+        }
+
+        nvgpu_mutex_acquire(&flcn->dmem_lock);
+        status = g->ops.falcon.copy_from_dmem(flcn, src, dst, size, port);
+        nvgpu_mutex_release(&flcn->dmem_lock);
+
+exit:
+        return status;
+}
+#endif
+
 #ifdef CONFIG_NVGPU_FALCON_NON_FUSA
 int nvgpu_falcon_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector)
 {
@@ -681,31 +708,6 @@ int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
         return status;
 }
 
-int nvgpu_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
-                u32 src, u8 *dst, u32 size, u8 port)
-{
-        int status = -EINVAL;
-        struct gk20a *g;
-
-        if (!is_falcon_valid(flcn)) {
-                return -EINVAL;
-        }
-
-        g = flcn->g;
-
-        if (falcon_memcpy_params_check(flcn, src, size, MEM_DMEM, port) != 0) {
-                nvgpu_err(g, "incorrect parameters");
-                goto exit;
-        }
-
-        nvgpu_mutex_acquire(&flcn->dmem_lock);
-        status = g->ops.falcon.copy_from_dmem(flcn, src, dst, size, port);
-        nvgpu_mutex_release(&flcn->dmem_lock);
-
-exit:
-        return status;
-}
-
 int nvgpu_falcon_copy_from_imem(struct nvgpu_falcon *flcn,
                 u32 src, u8 *dst, u32 size, u8 port)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,9 +31,9 @@
 
 #define NV_NVRISCV_DEBUG_BUFFER_MAGIC 0xf007ba11
 
-#define FLCN_DMEM_ACCESS_ALIGNMENT (4)
+#define FLCN_DMEM_ACCESS_ALIGNMENT (4U)
 
-#define NV_ALIGN_DOWN(v, g) ((v) & ~((g) - 1))
+#define NV_ALIGN_DOWN(v, g) ((v) & ~((g) - 1U))
 
 #define NV_IS_ALIGNED(addr, align) ((addr & (align - 1U)) == 0U)
 
@@ -98,7 +98,7 @@ int nvgpu_falcon_dbg_buf_init(struct nvgpu_falcon *flcn,
          * at the end of the buffer.
          */
         debug_buffer->dmem_offset = g->ops.falcon.get_mem_size(flcn, MEM_DMEM) -
-                        sizeof(struct nvgpu_falcon_dbg_buf_metadata);
+                        (u32)(sizeof(struct nvgpu_falcon_dbg_buf_metadata));
 
         /* The DMEM offset must be 4-byte aligned */
         if (!NV_IS_ALIGNED(debug_buffer->dmem_offset, FLCN_DMEM_ACCESS_ALIGNMENT)) {
@@ -320,9 +320,9 @@ int nvgpu_falcon_dbg_buf_display(struct nvgpu_falcon *flcn)
 
         if (is_line_split) {
                 /* Logic to concat the split line into a temp buffer */
-                u32 first_chunk_len =
+                u32 first_chunk_len = (u32)
                         strlen((char *)&buffer_data[debug_buffer->read_offset]);
-                u32 second_chunk_len = strlen((char *)&buffer_data[0]);
+                u32 second_chunk_len = (u32)strlen((char *)&buffer_data[0]);
 
                 buf_size = first_chunk_len + second_chunk_len + 1;
                 tmp_buf = nvgpu_kzalloc(g, buf_size);
@@ -345,7 +345,7 @@ int nvgpu_falcon_dbg_buf_display(struct nvgpu_falcon *flcn)
                 /* Reset line-split flag */
                 is_line_split = false;
         } else {
-                buf_size =
+                buf_size = (u32)
                         strlen((char *)&buffer_data[debug_buffer->read_offset]) + 1;
 
                 /* Set the byte array that gets printed as a string */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -49,40 +49,6 @@ bool gk20a_falcon_clear_halt_interrupt_status(struct nvgpu_falcon *flcn)
         return status;
 }
 
-int gk20a_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
-                u32 src, u8 *dst, u32 size, u8 port)
-{
-        struct gk20a *g = flcn->g;
-        u32 base_addr = flcn->flcn_base;
-        u32 i, words, bytes;
-        u32 data, addr_mask;
-        u32 *dst_u32 = (u32 *)dst;
-
-        nvgpu_log_fn(g, " src dmem offset - %x, size - %x", src, size);
-
-        words = size >> 2U;
-        bytes = size & 0x3U;
-
-        addr_mask = falcon_falcon_dmemc_offs_m() |
-                g->ops.falcon.dmemc_blk_mask();
-
-        src &= addr_mask;
-
-        nvgpu_writel(g, base_addr + falcon_falcon_dmemc_r(port),
-                src | falcon_falcon_dmemc_aincr_f(1));
-
-        for (i = 0; i < words; i++) {
-                dst_u32[i] = nvgpu_readl(g,
-                        base_addr + falcon_falcon_dmemd_r(port));
-        }
-
-        if (bytes > 0U) {
-                data = nvgpu_readl(g, base_addr + falcon_falcon_dmemd_r(port));
-                nvgpu_memcpy(&dst[words << 2U], (u8 *)&data, bytes);
-        }
-
-        return 0;
-}
-
 int gk20a_falcon_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
         u8 *dst, u32 size, u8 port)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -88,10 +88,13 @@ void gk20a_falcon_set_irq(struct nvgpu_falcon *flcn, bool enable,
 void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn);
 #endif
 
-#ifdef CONFIG_NVGPU_FALCON_NON_FUSA
-bool gk20a_falcon_clear_halt_interrupt_status(struct nvgpu_falcon *flcn);
+#if defined(CONFIG_NVGPU_FALCON_DEBUG) || defined(CONFIG_NVGPU_FALCON_NON_FUSA)
 int gk20a_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
                 u32 src, u8 *dst, u32 size, u8 port);
+#endif
+
+#ifdef CONFIG_NVGPU_FALCON_NON_FUSA
+bool gk20a_falcon_clear_halt_interrupt_status(struct nvgpu_falcon *flcn);
 int gk20a_falcon_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
                 u8 *dst, u32 size, u8 port);
 void gk20a_falcon_get_ctls(struct nvgpu_falcon *flcn, u32 *sctl,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,6 +20,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include <nvgpu/gk20a.h>
+#include <nvgpu/io.h>
 #include <nvgpu/falcon.h>
 #include <nvgpu/string.h>
 #include <nvgpu/static_analysis.h>
@@ -391,6 +392,43 @@ void gk20a_falcon_set_irq(struct nvgpu_falcon *flcn, bool enable,
         }
 }
 
+#if defined(CONFIG_NVGPU_FALCON_DEBUG) || defined(CONFIG_NVGPU_FALCON_NON_FUSA)
+int gk20a_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
+                u32 src, u8 *dst, u32 size, u8 port)
+{
+        struct gk20a *g = flcn->g;
+        u32 base_addr = flcn->flcn_base;
+        u32 i, words, bytes;
+        u32 data, addr_mask;
+        u32 *dst_u32 = (u32 *)dst;
+
+        nvgpu_log_fn(g, " src dmem offset - %x, size - %x", src, size);
+
+        words = size >> 2U;
+        bytes = size & 0x3U;
+
+        addr_mask = falcon_falcon_dmemc_offs_m() |
+                g->ops.falcon.dmemc_blk_mask();
+
+        src &= addr_mask;
+
+        nvgpu_writel(g, base_addr + falcon_falcon_dmemc_r(port),
+                src | falcon_falcon_dmemc_aincr_f(1));
+
+        for (i = 0; i < words; i++) {
+                dst_u32[i] = nvgpu_readl(g,
+                        base_addr + falcon_falcon_dmemd_r(port));
+        }
+
+        if (bytes > 0U) {
+                data = nvgpu_readl(g, base_addr + falcon_falcon_dmemd_r(port));
+                nvgpu_memcpy(&dst[words << 2U], (u8 *)&data, bytes);
+        }
+
+        return 0;
+}
+#endif
+
 #ifdef CONFIG_NVGPU_FALCON_DEBUG
 static void gk20a_falcon_dump_imblk(struct nvgpu_falcon *flcn)
 {
@@ -1611,9 +1611,11 @@ static const struct gops_falcon ga10b_ops_falcon = {
 #ifdef CONFIG_NVGPU_FALCON_DEBUG
         .dump_falcon_stats = ga10b_falcon_dump_stats,
 #endif
+#if defined(CONFIG_NVGPU_FALCON_DEBUG) || defined(CONFIG_NVGPU_FALCON_NON_FUSA)
+        .copy_from_dmem = gk20a_falcon_copy_from_dmem,
+#endif
 #ifdef CONFIG_NVGPU_FALCON_NON_FUSA
         .clear_halt_interrupt_status = gk20a_falcon_clear_halt_interrupt_status,
-        .copy_from_dmem = gk20a_falcon_copy_from_dmem,
         .copy_from_imem = gk20a_falcon_copy_from_imem,
         .get_falcon_ctls = gk20a_falcon_get_ctls,
 #endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -722,6 +722,11 @@ int nvgpu_falcon_copy_to_emem(struct nvgpu_falcon *flcn,
 void nvgpu_falcon_dump_stats(struct nvgpu_falcon *flcn);
 #endif
 
+#if defined(CONFIG_NVGPU_FALCON_DEBUG) || defined(CONFIG_NVGPU_FALCON_NON_FUSA)
+int nvgpu_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
+                u32 src, u8 *dst, u32 size, u8 port);
+#endif
+
 #ifdef CONFIG_NVGPU_FALCON_NON_FUSA
 /**
  * @brief Bootstrap the falcon.
@@ -744,8 +749,6 @@ int nvgpu_falcon_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector);
 
 int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
                 unsigned int timeout);
-int nvgpu_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
-                u32 src, u8 *dst, u32 size, u8 port);
 int nvgpu_falcon_copy_from_imem(struct nvgpu_falcon *flcn,
                 u32 src, u8 *dst, u32 size, u8 port);
 void nvgpu_falcon_print_dmem(struct nvgpu_falcon *flcn, u32 src, u32 size);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -71,10 +71,12 @@ struct gops_falcon {
 #ifdef CONFIG_NVGPU_FALCON_DEBUG
         void (*dump_falcon_stats)(struct nvgpu_falcon *flcn);
 #endif
-#ifdef CONFIG_NVGPU_FALCON_NON_FUSA
-        bool (*clear_halt_interrupt_status)(struct nvgpu_falcon *flcn);
+#if defined(CONFIG_NVGPU_FALCON_DEBUG) || defined(CONFIG_NVGPU_FALCON_NON_FUSA)
         int (*copy_from_dmem)(struct nvgpu_falcon *flcn,
                 u32 src, u8 *dst, u32 size, u8 port);
+#endif
+#ifdef CONFIG_NVGPU_FALCON_NON_FUSA
+        bool (*clear_halt_interrupt_status)(struct nvgpu_falcon *flcn);
         int (*copy_from_imem)(struct nvgpu_falcon *flcn,
                 u32 src, u8 *dst, u32 size, u8 port);
         void (*get_falcon_ctls)(struct nvgpu_falcon *flcn,