gpu: nvgpu: Add a new device manager unit

This adds a new device management unit to the common code, responsible
for parsing the GPU top device list and providing that information to
other units in nvgpu.

The basic idea is to read this list once from HW and store it in a set
of lists, one per device type (graphics, LCE, etc). Many of the HALs in
top can then be deleted and their functionality implemented in common
code that parses the SW representation.
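
As a rough sketch, the parse-and-store loop in the new common code looks
like the following (condensed from common/device.c in this change;
logging and error handling trimmed):

  static int nvgpu_device_parse_hw_table(struct gk20a *g)
  {
          u32 token = NVGPU_DEVICE_TOKEN_INIT;
          struct nvgpu_device *dev;

          while (true) {
                  /* The HAL reads the next device entry from the HW table. */
                  dev = g->ops.top.parse_next_device(g, &token);
                  if (dev == NULL) {
                          break;
                  }

                  /* File the device under the list for its device type. */
                  nvgpu_list_add_tail(&dev->dev_list_node,
                                      &g->devs->devlist_heads[dev->type]);
                  g->devs->dev_counts[dev->type] += 1;
          }

          return 0;
  }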

Every time the driver queries the device list it does so using a
device type and instance ID. This query path is common code; the HAL is
responsible for populating the device list in such a way that the
driver can query it in a chip-agnostic manner.
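
For example, a caller walking all logical copy engines with the new
common API would look roughly like this; the function name and the log
message are illustrative only, not part of this change:

  static void list_copy_engines(struct gk20a *g)
  {
          struct nvgpu_device dev;
          u32 num_lces = nvgpu_device_count(g, NVGPU_DEVTYPE_LCE);
          u32 i;

          for (i = 0U; i < num_lces; i++) {
                  /* Copy the i-th LCE's info out of the SW device table. */
                  if (nvgpu_device_get(g, &dev, NVGPU_DEVTYPE_LCE, i) != 0) {
                          continue;
                  }
                  nvgpu_log_info(g, "LCE%u: runlist %u reset_id %u",
                                 dev.inst_id, dev.runlist_id, dev.reset_id);
          }
  }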

Also delete some of the unit tests for functions that no longer exist.
This code will need new unit tests in time; those should be quite
simple to write once they are required.

JIRA NVGPU-5421

Change-Id: Ie41cd255404b90ae0376098a2d6e9f9abdd3f5ea
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2319649
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Alex Waterman
Date: 2020-03-27 14:36:24 -06:00
parent f6298157bc
commit 319520ff57
33 changed files with 631 additions and 898 deletions


@@ -162,6 +162,12 @@ pramin:
include/nvgpu/pramin.h ] include/nvgpu/pramin.h ]
deps: deps:
device:
safe: yes
owner: Alex W
sources: [ common/device.c,
include/nvgpu/device.h ]
ptimer: ptimer:
safe: yes safe: yes
owner: Terje B owner: Terje B


@@ -778,11 +778,9 @@ func:
top_fusa: top_fusa:
safe: yes safe: yes
owner: Tejal K owner: Tejal K
sources: [ include/nvgpu/device.h, sources: [ include/nvgpu/gops_top.h,
include/nvgpu/gops_top.h,
hal/top/top_gm20b_fusa.c, hal/top/top_gm20b_fusa.c,
hal/top/top_gm20b.h, hal/top/top_gm20b.h,
hal/top/top_gp10b_fusa.c,
hal/top/top_gp10b.h, hal/top/top_gp10b.h,
hal/top/top_gv11b_fusa.c, hal/top/top_gv11b_fusa.c,
hal/top/top_gv11b.h ] hal/top/top_gv11b.h ]


@@ -177,6 +177,7 @@ nvgpu-$(CONFIG_GK20A_PCI) += \
os/linux/pci_power.o os/linux/pci_power.o
nvgpu-y += \ nvgpu-y += \
common/device.o \
common/utils/enabled.o \ common/utils/enabled.o \
common/utils/rbtree.o \ common/utils/rbtree.o \
common/utils/string.o \ common/utils/string.o \
@@ -678,7 +679,6 @@ nvgpu-y += \
hal/ptimer/ptimer_gp10b.o \ hal/ptimer/ptimer_gp10b.o \
hal/therm/therm_gv11b_fusa.o \ hal/therm/therm_gv11b_fusa.o \
hal/top/top_gm20b_fusa.o \ hal/top/top_gm20b_fusa.o \
hal/top/top_gp10b_fusa.o \
hal/top/top_gv11b_fusa.o hal/top/top_gv11b_fusa.o
nvgpu-$(CONFIG_NVGPU_HAL_NON_FUSA) += \ nvgpu-$(CONFIG_NVGPU_HAL_NON_FUSA) += \


@@ -90,7 +90,8 @@ srcs += os/posix/bug.c \
os/posix/file_ops.c \ os/posix/file_ops.c \
os/posix/queue.c os/posix/queue.c
srcs += common/utils/enabled.c \ srcs += common/device.c \
common/utils/enabled.c \
common/utils/rbtree.c \ common/utils/rbtree.c \
common/utils/string.c \ common/utils/string.c \
common/utils/worker.c \ common/utils/worker.c \
@@ -245,7 +246,6 @@ srcs += hal/mm/mm_gv11b_fusa.c \
hal/sync/syncpt_cmdbuf_gv11b_fusa.c \ hal/sync/syncpt_cmdbuf_gv11b_fusa.c \
hal/therm/therm_gv11b_fusa.c \ hal/therm/therm_gv11b_fusa.c \
hal/top/top_gm20b_fusa.c \ hal/top/top_gm20b_fusa.c \
hal/top/top_gp10b_fusa.c \
hal/top/top_gv11b_fusa.c hal/top/top_gv11b_fusa.c
# Source files below are not guaranteed to be functionaly safe (FuSa) and are # Source files below are not guaranteed to be functionaly safe (FuSa) and are


@@ -0,0 +1,231 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/device.h>
#include <nvgpu/list.h>
#include <nvgpu/kmem.h>
#include <nvgpu/string.h>
#include <nvgpu/log.h>
static inline struct nvgpu_device *
nvgpu_device_from_dev_list_node(struct nvgpu_list_node *node)
{
return (struct nvgpu_device *)
((uintptr_t)node - offsetof(struct nvgpu_device,
dev_list_node));
};
struct nvgpu_device_list {
/**
* Array of lists of devices; each list corresponds to one type of
* device. By having this as an array it's trivial to go from device
* enum type in the HW to the relevant devlist.
*/
struct nvgpu_list_node devlist_heads[NVGPU_MAX_DEVTYPE];
/**
* Keep track of how many devices of each type exist.
*/
u32 dev_counts[NVGPU_MAX_DEVTYPE];
};
/*
* Facilitate the parsing of the TOP array describing the devices present in the
* GPU.
*/
static int nvgpu_device_parse_hw_table(struct gk20a *g)
{
int ret = 0;
u32 token = NVGPU_DEVICE_TOKEN_INIT;
struct nvgpu_device *dev;
struct nvgpu_list_node *devlist;
while (true) {
dev = g->ops.top.parse_next_device(g, &token);
if (dev == NULL) {
break;
}
nvgpu_log(g, gpu_dbg_info, "Parsed one device: %u", dev->type);
/*
* Otherwise we have a device - let's add it to the right device
* list.
*/
devlist = &g->devs->devlist_heads[dev->type];
nvgpu_list_add_tail(&dev->dev_list_node, devlist);
g->devs->dev_counts[dev->type] += 1;
}
return ret;
}
/*
* Facilitate reading the HW register table into a software abstraction. This is
* done only on the first boot as the table will never change dynamically.
*/
int nvgpu_device_init(struct gk20a *g)
{
u32 i;
/*
* Ground work - make sure we aren't doing this again and that we have
* all the necessary data structures.
*/
if (g->devs != NULL) {
return 0;
}
nvgpu_log(g, gpu_dbg_info, "Initialization GPU device list");
g->devs = nvgpu_kzalloc(g, sizeof(*g->devs));
if (g->devs == NULL) {
return -ENOMEM;
}
for (i = 0; i < NVGPU_MAX_DEVTYPE; i++) {
nvgpu_init_list_node(&g->devs->devlist_heads[i]);
}
return nvgpu_device_parse_hw_table(g);
}
static void nvgpu_device_cleanup_devtype(struct gk20a *g,
struct nvgpu_list_node *list)
{
struct nvgpu_device *dev;
while (!nvgpu_list_empty(list)) {
dev = nvgpu_list_first_entry(list,
nvgpu_device,
dev_list_node);
nvgpu_list_del(&dev->dev_list_node);
nvgpu_kfree(g, dev);
}
}
void nvgpu_device_cleanup(struct gk20a *g)
{
u32 i;
struct nvgpu_list_node *devlist;
for (i = 0; i < NVGPU_MAX_DEVTYPE; i++) {
devlist = &g->devs->devlist_heads[i];
if (devlist == NULL) {
continue;
}
nvgpu_device_cleanup_devtype(g, devlist);
}
nvgpu_kfree(g, g->devs);
g->devs = NULL;
}
/*
* Find the instance passed. Do this by simply traversing the linked list; it's
* not particularly efficient, but we aren't expecting there to ever be _that_
* many devices.
*
* Return a pointer to the device or NULL if the inst ID is out of range.
*/
static struct nvgpu_device *dev_instance_from_devlist(
struct nvgpu_list_node *devlist, u32 inst_id)
{
u32 i = 0U;
struct nvgpu_device *dev;
nvgpu_list_for_each_entry(dev, devlist, nvgpu_device, dev_list_node) {
if (inst_id == i) {
return dev;
}
i++;
}
return NULL;
}
int nvgpu_device_get(struct gk20a *g,
struct nvgpu_device *dev,
u32 type, u32 inst_id)
{
struct nvgpu_device *target;
struct nvgpu_list_node *device_list;
if (type >= NVGPU_MAX_DEVTYPE) {
return -EINVAL;
}
device_list = &g->devs->devlist_heads[type];
target = dev_instance_from_devlist(device_list, inst_id);
if (target == NULL) {
return -ENODEV;
}
nvgpu_memcpy((u8 *)dev, (const u8 *)target, sizeof(*dev));
/*
* Don't let the calling code get access to the underlying device table!
*/
nvgpu_init_list_node(&dev->dev_list_node);
return 0;
}
u32 nvgpu_device_count(struct gk20a *g, u32 type)
{
if (type >= NVGPU_MAX_DEVTYPE) {
return 0U;
}
return g->devs->dev_counts[type];
}
/*
* Note: this kind of bleeds HW details into the core code. Eventually this
* should be handled by a translation table. However, for now, HW has kept the
* device type values consistent across chips and nvgpu already has this present
* in core code.
*
* Once a per-chip translation table exists we can translate and then do a
* comparison.
*/
bool nvgpu_device_is_ce(struct gk20a *g, struct nvgpu_device *dev)
{
if (dev->type == NVGPU_DEVTYPE_COPY0 ||
dev->type == NVGPU_DEVTYPE_COPY1 ||
dev->type == NVGPU_DEVTYPE_COPY2 ||
dev->type == NVGPU_DEVTYPE_LCE) {
return true;
}
return false;
}
bool nvgpu_device_is_graphics(struct gk20a *g, struct nvgpu_device *dev)
{
return dev->type == NVGPU_DEVTYPE_GRAPHICS;
}


@@ -46,26 +46,21 @@
#define FECS_METHOD_WFI_RESTORE 0x80000U #define FECS_METHOD_WFI_RESTORE 0x80000U
enum nvgpu_fifo_engine nvgpu_engine_enum_from_type(struct gk20a *g, enum nvgpu_fifo_engine nvgpu_engine_enum_from_dev(struct gk20a *g,
u32 engine_type) struct nvgpu_device *dev)
{ {
enum nvgpu_fifo_engine ret = NVGPU_ENGINE_INVAL; enum nvgpu_fifo_engine ret = NVGPU_ENGINE_INVAL;
if ((g->ops.top.is_engine_gr != NULL) && if (nvgpu_device_is_graphics(g, dev)) {
(g->ops.top.is_engine_ce != NULL)) { ret = NVGPU_ENGINE_GR;
if (g->ops.top.is_engine_gr(g, engine_type)) { } else if (nvgpu_device_is_ce(g, dev)) {
ret = NVGPU_ENGINE_GR; /* For now, all CE engines have separate runlists. We can
} else if (g->ops.top.is_engine_ce(g, engine_type)) { * identify the NVGPU_ENGINE_GRCE type CE using runlist_id
/* Lets consider all the CE engine have separate * comparison logic with GR runlist_id in init_info()
* runlist at this point. We can identify the */
* NVGPU_ENGINE_GRCE type CE using runlist_id ret = NVGPU_ENGINE_ASYNC_CE;
* comparsion logic with GR runlist_id in } else {
* init_info() ret = NVGPU_ENGINE_INVAL;
*/
ret = NVGPU_ENGINE_ASYNC_CE;
} else {
ret = NVGPU_ENGINE_INVAL;
}
} }
return ret; return ret;
@@ -807,21 +802,16 @@ int nvgpu_engine_init_info(struct nvgpu_fifo *f)
enum nvgpu_fifo_engine engine_enum; enum nvgpu_fifo_engine engine_enum;
u32 pbdma_id = U32_MAX; u32 pbdma_id = U32_MAX;
bool found_pbdma_for_runlist = false; bool found_pbdma_for_runlist = false;
struct nvgpu_device_info dev_info; struct nvgpu_device dev_info;
struct nvgpu_engine_info *info; struct nvgpu_engine_info *info;
f->num_engines = 0; f->num_engines = 0;
if (g->ops.top.get_device_info == NULL) {
nvgpu_err(g, "unable to parse dev_info table");
return -EINVAL;
}
ret = g->ops.top.get_device_info(g, &dev_info, ret = nvgpu_device_get(g, &dev_info, NVGPU_DEVTYPE_GRAPHICS, 0);
NVGPU_ENGINE_GRAPHICS, 0);
if (ret != 0) { if (ret != 0) {
nvgpu_err(g, nvgpu_err(g,
"Failed to parse dev_info table for engine %d", "Failed to parse dev_info table for engine %d",
NVGPU_ENGINE_GRAPHICS); NVGPU_DEVTYPE_GRAPHICS);
return -EINVAL; return -EINVAL;
} }
@@ -833,7 +823,7 @@ int nvgpu_engine_init_info(struct nvgpu_fifo *f)
return -EINVAL; return -EINVAL;
} }
engine_enum = nvgpu_engine_enum_from_type(g, dev_info.engine_type); engine_enum = nvgpu_engine_enum_from_dev(g, &dev_info);
info = &g->fifo.engine_info[dev_info.engine_id]; info = &g->fifo.engine_info[dev_info.engine_id];
@@ -856,7 +846,7 @@ int nvgpu_engine_init_info(struct nvgpu_fifo *f)
dev_info.runlist_id, dev_info.runlist_id,
dev_info.intr_id, dev_info.intr_id,
dev_info.reset_id, dev_info.reset_id,
dev_info.engine_type, dev_info.type,
engine_enum, engine_enum,
dev_info.inst_id); dev_info.inst_id);


@@ -735,8 +735,13 @@ static int nvgpu_init_active_runlist_mapping(struct gk20a *g)
for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) { for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
if (!nvgpu_engine_is_valid_runlist_id(g, runlist_id)) { if (!nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
/* skip inactive runlist */ /* skip inactive runlist */
nvgpu_log(g, gpu_dbg_info, "Skipping invalid runlist: %d", runlist_id);
continue; continue;
} }
nvgpu_log(g, gpu_dbg_info, "Configuring runlist: %d", runlist_id);
runlist = &f->active_runlist_info[i]; runlist = &f->active_runlist_info[i];
runlist->runlist_id = runlist_id; runlist->runlist_id = runlist_id;
f->runlist_info[runlist_id] = runlist; f->runlist_info[runlist_id] = runlist;
@@ -760,9 +765,8 @@ static int nvgpu_init_active_runlist_mapping(struct gk20a *g)
runlist_size = (size_t)f->runlist_entry_size * runlist_size = (size_t)f->runlist_entry_size *
(size_t)f->num_runlist_entries; (size_t)f->num_runlist_entries;
nvgpu_log(g, gpu_dbg_info, nvgpu_log(g, gpu_dbg_info, " RL entries: %d", f->num_runlist_entries);
"runlist_entries %d runlist size %zu", nvgpu_log(g, gpu_dbg_info, " RL size %zu", runlist_size);
f->num_runlist_entries, runlist_size);
for (j = 0; j < MAX_RUNLIST_BUFFERS; j++) { for (j = 0; j < MAX_RUNLIST_BUFFERS; j++) {
err = nvgpu_dma_alloc_flags_sys(g, err = nvgpu_dma_alloc_flags_sys(g,
@@ -816,6 +820,7 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) { for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
if (nvgpu_engine_is_valid_runlist_id(g, runlist_id)) { if (nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
num_runlists = nvgpu_safe_add_u32(num_runlists, 1U); num_runlists = nvgpu_safe_add_u32(num_runlists, 1U);
nvgpu_log(g, gpu_dbg_info, "Valid runlist: %d", runlist_id);
} }
} }
f->num_runlists = num_runlists; f->num_runlists = num_runlists;
@@ -826,7 +831,7 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
err = -ENOMEM; err = -ENOMEM;
goto clean_up_runlist; goto clean_up_runlist;
} }
nvgpu_log_info(g, "num_runlists=%u", num_runlists); nvgpu_log(g, gpu_dbg_info, "num_runlists: %u", num_runlists);
err = nvgpu_init_active_runlist_mapping(g); err = nvgpu_init_active_runlist_mapping(g);
if (err != 0) { if (err != 0) {


@@ -39,6 +39,7 @@
#include <nvgpu/trace.h> #include <nvgpu/trace.h>
#include <nvgpu/nvhost.h> #include <nvgpu/nvhost.h>
#include <nvgpu/fb.h> #include <nvgpu/fb.h>
#include <nvgpu/device.h>
#ifdef CONFIG_NVGPU_LS_PMU #ifdef CONFIG_NVGPU_LS_PMU
#include <nvgpu/pmu/pmu_pstate.h> #include <nvgpu/pmu/pmu_pstate.h>
@@ -325,6 +326,8 @@ int nvgpu_prepare_poweroff(struct gk20a *g)
} }
#endif #endif
nvgpu_device_cleanup(g);
/* Disable GPCPLL */ /* Disable GPCPLL */
if (g->ops.clk.suspend_clk_support != NULL) { if (g->ops.clk.suspend_clk_support != NULL) {
g->ops.clk.suspend_clk_support(g); g->ops.clk.suspend_clk_support(g);
@@ -569,19 +572,26 @@ int nvgpu_finalize_poweron(struct gk20a *g)
*/ */
const struct nvgpu_init_table_t nvgpu_init_table[] = { const struct nvgpu_init_table_t nvgpu_init_table[] = {
/* /*
* Do this early so any early VMs that get made are capable of
* mapping buffers.
*/
/**
* ECC support initialization is split into generic init * ECC support initialization is split into generic init
* followed by per unit initialization and ends with sysfs * followed by per unit initialization and ends with sysfs
* support init. This is done to setup ECC data structures * support init. This is done to setup ECC data structures
* prior to enabling interrupts for corresponding units. * prior to enabling interrupts for corresponding units.
*/ */
NVGPU_INIT_TABLE_ENTRY(g->ops.ecc.ecc_init_support, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.ecc.ecc_init_support, NO_FLAG),
/*
* Do this early so any early VMs that get made are capable of
* mapping buffers.
*/
NVGPU_INIT_TABLE_ENTRY(g->ops.mm.pd_cache_init, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.mm.pd_cache_init, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(&nvgpu_falcons_sw_init, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(&nvgpu_falcons_sw_init, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(g->ops.pmu.pmu_early_init, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.pmu.pmu_early_init, NO_FLAG),
/*
* Initialize the GPU's device list. Needed before NVLINK
* init since the NVLINK IOCTRL block is enumerated in the
* device list.
*/
NVGPU_INIT_TABLE_ENTRY(&nvgpu_device_init, NO_FLAG),
#ifdef CONFIG_NVGPU_DGPU #ifdef CONFIG_NVGPU_DGPU
NVGPU_INIT_TABLE_ENTRY(g->ops.sec2.init_sec2_setup_sw, NVGPU_INIT_TABLE_ENTRY(g->ops.sec2.init_sec2_setup_sw,
NVGPU_SUPPORT_SEC2_RTOS), NVGPU_SUPPORT_SEC2_RTOS),


@@ -160,11 +160,8 @@ static int nvgpu_nvlink_discover_ioctrl(struct gk20a *g)
struct nvgpu_nvlink_ioctrl_list *ioctrl_table; struct nvgpu_nvlink_ioctrl_list *ioctrl_table;
u32 ioctrl_num_entries = 0U; u32 ioctrl_num_entries = 0U;
if (g->ops.top.get_num_engine_type_entries != NULL) { ioctrl_num_entries = nvgpu_device_count(g, NVGPU_DEVTYPE_IOCTRL);
ioctrl_num_entries = g->ops.top.get_num_engine_type_entries(g, nvgpu_log_info(g, "ioctrl_num_entries: %d", ioctrl_num_entries);
NVGPU_ENGINE_IOCTRL);
nvgpu_log_info(g, "ioctrl_num_entries: %d", ioctrl_num_entries);
}
if (ioctrl_num_entries == 0U) { if (ioctrl_num_entries == 0U) {
nvgpu_err(g, "No NVLINK IOCTRL entry found in dev_info table"); nvgpu_err(g, "No NVLINK IOCTRL entry found in dev_info table");
@@ -179,14 +176,13 @@ static int nvgpu_nvlink_discover_ioctrl(struct gk20a *g)
} }
for (i = 0U; i < ioctrl_num_entries; i++) { for (i = 0U; i < ioctrl_num_entries; i++) {
struct nvgpu_device_info dev_info; struct nvgpu_device dev_info;
ret = g->ops.top.get_device_info(g, &dev_info, ret = nvgpu_device_get(g, &dev_info, NVGPU_DEVTYPE_IOCTRL, i);
NVGPU_ENGINE_IOCTRL, i);
if (ret != 0) { if (ret != 0) {
nvgpu_err(g, "Failed to parse dev_info table" nvgpu_err(g, "Failed to parse dev_info table"
"for engine %d", "for engine %d",
NVGPU_ENGINE_IOCTRL); NVGPU_DEVTYPE_IOCTRL);
nvgpu_kfree(g, ioctrl_table); nvgpu_kfree(g, ioctrl_table);
return -EINVAL; return -EINVAL;
} }


@@ -48,80 +48,70 @@ int gm20b_engine_init_ce_info(struct nvgpu_fifo *f)
gr_runlist_id = nvgpu_engine_get_gr_runlist_id(g); gr_runlist_id = nvgpu_engine_get_gr_runlist_id(g);
nvgpu_log_info(g, "gr_runlist_id: %d", gr_runlist_id); nvgpu_log_info(g, "gr_runlist_id: %d", gr_runlist_id);
if (g->ops.top.get_device_info != NULL) { for (i = NVGPU_DEVTYPE_COPY0; i <= NVGPU_DEVTYPE_COPY2; i++) {
for (i = NVGPU_ENGINE_COPY0; i <= NVGPU_ENGINE_COPY2; i++) { struct nvgpu_device dev_info;
struct nvgpu_device_info dev_info; struct nvgpu_engine_info *info;
struct nvgpu_engine_info *info;
ret = g->ops.top.get_device_info(g, &dev_info, i, 0); ret = nvgpu_device_get(g, &dev_info, i, 0);
if (ret != 0) { if (ret != 0) {
nvgpu_err(g, /*
"Failed to parse dev_info table for" * Not an error condition; gm20b has only 1 CE.
" engine %d", i); */
return ret; continue;
} }
if (dev_info.engine_type != i) {
nvgpu_log_info(g, "No entry found in dev_info "
"table for engine_type %d", i);
continue;
}
found_pbdma_for_runlist = found_pbdma_for_runlist = g->ops.pbdma.find_for_runlist(g,
g->ops.pbdma.find_for_runlist(g,
dev_info.runlist_id, dev_info.runlist_id,
&pbdma_id); &pbdma_id);
if (!found_pbdma_for_runlist) { if (!found_pbdma_for_runlist) {
nvgpu_err(g, "busted pbdma map"); nvgpu_err(g, "busted pbdma map");
return -EINVAL; return -EINVAL;
}
info = &g->fifo.engine_info[dev_info.engine_id];
engine_enum = nvgpu_engine_enum_from_type(g,
dev_info.engine_type);
/* GR and GR_COPY shares same runlist_id */
if ((engine_enum == NVGPU_ENGINE_ASYNC_CE) &&
(gr_runlist_id ==
dev_info.runlist_id)) {
engine_enum = NVGPU_ENGINE_GRCE;
}
info->engine_enum = engine_enum;
if (g->ops.top.get_ce_inst_id != NULL) {
dev_info.inst_id = g->ops.top.get_ce_inst_id(g,
dev_info.engine_type);
}
if ((dev_info.fault_id == 0U) &&
(engine_enum ==
NVGPU_ENGINE_GRCE)) {
dev_info.fault_id = 0x1b;
}
info->fault_id = dev_info.fault_id;
info->intr_mask |= BIT32(dev_info.intr_id);
info->reset_mask |= BIT32(dev_info.reset_id);
info->runlist_id = dev_info.runlist_id;
info->pbdma_id = pbdma_id;
info->inst_id = dev_info.inst_id;
info->pri_base = dev_info.pri_base;
/* engine_id starts from 0 to NV_HOST_NUM_ENGINES */
f->active_engines_list[f->num_engines] =
dev_info.engine_id;
++f->num_engines;
nvgpu_log_info(g, "gr info: engine_id %d runlist_id %d "
"intr_id %d reset_id %d engine_type %d "
"engine_enum %d inst_id %d",
dev_info.engine_id,
dev_info.runlist_id,
dev_info.intr_id,
dev_info.reset_id,
dev_info.engine_type,
engine_enum,
dev_info.inst_id);
} }
info = &g->fifo.engine_info[dev_info.engine_id];
engine_enum = nvgpu_engine_enum_from_dev(g, &dev_info);
/* GR and GR_COPY shares same runlist_id */
if ((engine_enum == NVGPU_ENGINE_ASYNC_CE) &&
(gr_runlist_id == dev_info.runlist_id)) {
engine_enum = NVGPU_ENGINE_GRCE;
}
info->engine_enum = engine_enum;
if (g->ops.top.get_ce_inst_id != NULL) {
dev_info.inst_id = g->ops.top.get_ce_inst_id(g,
dev_info.type);
}
if ((dev_info.fault_id == 0U) &&
(engine_enum == NVGPU_ENGINE_GRCE)) {
dev_info.fault_id = 0x1b;
}
info->fault_id = dev_info.fault_id;
info->intr_mask |= BIT32(dev_info.intr_id);
info->reset_mask |= BIT32(dev_info.reset_id);
info->runlist_id = dev_info.runlist_id;
info->pbdma_id = pbdma_id;
info->inst_id = dev_info.inst_id;
info->pri_base = dev_info.pri_base;
/* engine_id starts from 0 to NV_HOST_NUM_ENGINES */
f->active_engines_list[f->num_engines] =
dev_info.engine_id;
++f->num_engines;
nvgpu_log_info(g, "gr info: engine_id %d runlist_id %d "
"intr_id %d reset_id %d type %d "
"engine_enum %d inst_id %d",
dev_info.engine_id,
dev_info.runlist_id,
dev_info.intr_id,
dev_info.reset_id,
dev_info.type,
engine_enum,
dev_info.inst_id);
} }
return 0; return 0;
} }


@@ -45,22 +45,18 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
gr_runlist_id = nvgpu_engine_get_gr_runlist_id(g); gr_runlist_id = nvgpu_engine_get_gr_runlist_id(g);
nvgpu_log_info(g, "gr_runlist_id: %d", gr_runlist_id); nvgpu_log_info(g, "gr_runlist_id: %d", gr_runlist_id);
if (g->ops.top.get_num_engine_type_entries != NULL) { lce_num_entries = nvgpu_device_count(g, NVGPU_DEVTYPE_LCE);
lce_num_entries = g->ops.top.get_num_engine_type_entries(g, nvgpu_log_info(g, "lce_num_entries: %d", lce_num_entries);
NVGPU_ENGINE_LCE);
nvgpu_log_info(g, "lce_num_entries: %d", lce_num_entries);
}
for (i = 0; i < lce_num_entries; i++) { for (i = 0; i < lce_num_entries; i++) {
struct nvgpu_device_info dev_info; struct nvgpu_device dev_info;
struct nvgpu_engine_info *info; struct nvgpu_engine_info *info;
ret = g->ops.top.get_device_info(g, &dev_info, ret = nvgpu_device_get(g, &dev_info, NVGPU_DEVTYPE_LCE, i);
NVGPU_ENGINE_LCE, i);
if (ret != 0) { if (ret != 0) {
nvgpu_err(g, nvgpu_err(g,
"Failed to parse dev_info for engine%d", "Failed to parse dev_info for engine%d",
NVGPU_ENGINE_LCE); NVGPU_DEVTYPE_LCE);
return -EINVAL; return -EINVAL;
} }
@@ -75,9 +71,7 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
info = &g->fifo.engine_info[dev_info.engine_id]; info = &g->fifo.engine_info[dev_info.engine_id];
engine_enum = nvgpu_engine_enum_from_type( engine_enum = nvgpu_engine_enum_from_dev(g, &dev_info);
g,
dev_info.engine_type);
/* GR and GR_COPY shares same runlist_id */ /* GR and GR_COPY shares same runlist_id */
if ((engine_enum == NVGPU_ENGINE_ASYNC_CE) && if ((engine_enum == NVGPU_ENGINE_ASYNC_CE) &&
(gr_runlist_id == (gr_runlist_id ==
@@ -88,7 +82,7 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
if (g->ops.top.get_ce_inst_id != NULL) { if (g->ops.top.get_ce_inst_id != NULL) {
dev_info.inst_id = g->ops.top.get_ce_inst_id(g, dev_info.inst_id = g->ops.top.get_ce_inst_id(g,
dev_info.engine_type); dev_info.type);
} }
if ((dev_info.fault_id == 0U) && if ((dev_info.fault_id == 0U) &&
@@ -117,7 +111,7 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
dev_info.runlist_id, dev_info.runlist_id,
dev_info.intr_id, dev_info.intr_id,
dev_info.reset_id, dev_info.reset_id,
dev_info.engine_type, dev_info.type,
engine_enum, engine_enum,
dev_info.inst_id); dev_info.inst_id);
} }


@@ -1107,11 +1107,9 @@ static const struct gpu_ops gm20b_ops = {
nvgpu_tegra_fuse_read_gcplex_config_fuse, nvgpu_tegra_fuse_read_gcplex_config_fuse,
}, },
.top = { .top = {
.parse_next_device = gm20b_top_parse_next_dev,
.device_info_parse_enum = gm20b_device_info_parse_enum, .device_info_parse_enum = gm20b_device_info_parse_enum,
.device_info_parse_data = gm20b_device_info_parse_data, .device_info_parse_data = gm20b_device_info_parse_data,
.get_device_info = gm20b_get_device_info,
.is_engine_gr = gm20b_is_engine_gr,
.is_engine_ce = gm20b_is_engine_ce,
.get_ce_inst_id = gm20b_get_ce_inst_id, .get_ce_inst_id = gm20b_get_ce_inst_id,
.get_max_gpc_count = gm20b_top_get_max_gpc_count, .get_max_gpc_count = gm20b_top_get_max_gpc_count,
.get_max_tpc_per_gpc_count = .get_max_tpc_per_gpc_count =


@@ -1221,13 +1221,9 @@ static const struct gpu_ops gp10b_ops = {
nvgpu_tegra_fuse_read_per_device_identifier, nvgpu_tegra_fuse_read_per_device_identifier,
}, },
.top = { .top = {
.parse_next_device = gm20b_top_parse_next_dev,
.device_info_parse_enum = gm20b_device_info_parse_enum, .device_info_parse_enum = gm20b_device_info_parse_enum,
.device_info_parse_data = gp10b_device_info_parse_data, .device_info_parse_data = gp10b_device_info_parse_data,
.get_num_engine_type_entries =
gp10b_get_num_engine_type_entries,
.get_device_info = gp10b_get_device_info,
.is_engine_gr = gm20b_is_engine_gr,
.is_engine_ce = gp10b_is_engine_ce,
.get_ce_inst_id = NULL, .get_ce_inst_id = NULL,
.get_max_gpc_count = gm20b_top_get_max_gpc_count, .get_max_gpc_count = gm20b_top_get_max_gpc_count,
.get_max_tpc_per_gpc_count = .get_max_tpc_per_gpc_count =


@@ -1484,13 +1484,9 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
nvgpu_tegra_fuse_read_per_device_identifier, nvgpu_tegra_fuse_read_per_device_identifier,
}, },
.top = { .top = {
.parse_next_device = gm20b_top_parse_next_dev,
.device_info_parse_enum = gm20b_device_info_parse_enum, .device_info_parse_enum = gm20b_device_info_parse_enum,
.device_info_parse_data = gv11b_device_info_parse_data, .device_info_parse_data = gv11b_device_info_parse_data,
.get_num_engine_type_entries =
gp10b_get_num_engine_type_entries,
.get_device_info = gp10b_get_device_info,
.is_engine_gr = gm20b_is_engine_gr,
.is_engine_ce = gp10b_is_engine_ce,
.get_ce_inst_id = NULL, .get_ce_inst_id = NULL,
.get_max_gpc_count = gm20b_top_get_max_gpc_count, .get_max_gpc_count = gm20b_top_get_max_gpc_count,
.get_max_tpc_per_gpc_count = .get_max_tpc_per_gpc_count =


@@ -1612,13 +1612,9 @@ static const struct gpu_ops tu104_ops = {
.gsp_reset = tu104_gsp_reset, .gsp_reset = tu104_gsp_reset,
}, },
.top = { .top = {
.parse_next_device = gm20b_top_parse_next_dev,
.device_info_parse_enum = gm20b_device_info_parse_enum, .device_info_parse_enum = gm20b_device_info_parse_enum,
.device_info_parse_data = gv11b_device_info_parse_data, .device_info_parse_data = gv11b_device_info_parse_data,
.get_num_engine_type_entries =
gp10b_get_num_engine_type_entries,
.get_device_info = gp10b_get_device_info,
.is_engine_gr = gm20b_is_engine_gr,
.is_engine_ce = gp10b_is_engine_ce,
.get_ce_inst_id = NULL, .get_ce_inst_id = NULL,
.get_max_gpc_count = gm20b_top_get_max_gpc_count, .get_max_gpc_count = gm20b_top_get_max_gpc_count,
.get_max_tpc_per_gpc_count = .get_max_tpc_per_gpc_count =


@@ -26,7 +26,6 @@
#include <nvgpu/utils.h> #include <nvgpu/utils.h>
#include <nvgpu/timers.h> #include <nvgpu/timers.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/device.h>
#include <nvgpu/nvlink_minion.h> #include <nvgpu/nvlink_minion.h>
#include <nvgpu/gops_mc.h> #include <nvgpu/gops_mc.h>
#include <nvgpu/mc.h> #include <nvgpu/mc.h>


@@ -68,80 +68,8 @@ int gm20b_device_info_parse_data(struct gk20a *g, u32 table_entry, u32 *inst_id,
return 0; return 0;
} }
int gm20b_get_device_info(struct gk20a *g, struct nvgpu_device_info *dev_info,
u32 engine_type, u32 inst_id)
{
int ret = 0;
u32 i = 0;
u32 table_entry;
u32 entry;
u32 entry_engine = 0;
u32 entry_enum = 0;
u32 entry_data = 0;
u32 max_info_entries = top_device_info__size_1_v();
if (dev_info == NULL) {
nvgpu_err(g, "Null device_info pointer passed.");
return -EINVAL;
}
for (i = 0; i < max_info_entries; i++) {
table_entry = nvgpu_readl(g, top_device_info_r(i));
entry = top_device_info_entry_v(table_entry);
if (entry == top_device_info_entry_not_valid_v()) {
continue;
} else if (entry == top_device_info_entry_enum_v()) {
entry_enum = table_entry;
} else if (entry == top_device_info_entry_data_v()) {
entry_data = table_entry;
} else if (entry == top_device_info_entry_engine_type_v()) {
entry_engine = table_entry;
} else {
nvgpu_err(g, "Invalid entry type in device_info table");
return -EINVAL;
}
if (top_device_info_chain_v(table_entry) ==
top_device_info_chain_enable_v()) {
continue;
}
if (top_device_info_type_enum_v(entry_engine) == engine_type) {
dev_info->engine_type = engine_type;
if (g->ops.top.device_info_parse_enum != NULL) {
g->ops.top.device_info_parse_enum(g,
entry_enum,
&dev_info->engine_id,
&dev_info->runlist_id,
&dev_info->intr_id,
&dev_info->reset_id);
}
if (g->ops.top.device_info_parse_data != NULL) {
ret = g->ops.top.device_info_parse_data(g,
entry_data,
&dev_info->inst_id,
&dev_info->pri_base,
&dev_info->fault_id);
if (ret != 0) {
nvgpu_err(g,
"Error parsing Data Entry 0x%x",
entry_data);
return ret;
}
}
}
}
return ret;
}
bool gm20b_is_engine_ce(struct gk20a *g, u32 engine_type)
{
return ((engine_type >= top_device_info_type_enum_copy0_v()) &&
(engine_type <= top_device_info_type_enum_copy2_v()));
}
u32 gm20b_get_ce_inst_id(struct gk20a *g, u32 engine_type) u32 gm20b_get_ce_inst_id(struct gk20a *g, u32 engine_type)
{ {
/* inst_id starts from CE0 to CE2 */ /* inst_id starts from CE0 to CE2 */
return (engine_type - NVGPU_ENGINE_COPY0); return (engine_type - NVGPU_DEVTYPE_COPY0);
} }


@@ -28,7 +28,7 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct nvgpu_device_info; struct nvgpu_device;
void gm20b_device_info_parse_enum(struct gk20a *g, u32 table_entry, void gm20b_device_info_parse_enum(struct gk20a *g, u32 table_entry,
u32 *engine_id, u32 *runlist_id, u32 *engine_id, u32 *runlist_id,
@@ -36,13 +36,11 @@ void gm20b_device_info_parse_enum(struct gk20a *g, u32 table_entry,
#ifdef CONFIG_NVGPU_HAL_NON_FUSA #ifdef CONFIG_NVGPU_HAL_NON_FUSA
int gm20b_device_info_parse_data(struct gk20a *g, u32 table_entry, u32 *inst_id, int gm20b_device_info_parse_data(struct gk20a *g, u32 table_entry, u32 *inst_id,
u32 *pri_base, u32 *fault_id); u32 *pri_base, u32 *fault_id);
int gm20b_get_device_info(struct gk20a *g, struct nvgpu_device_info *dev_info,
u32 engine_type, u32 inst_id);
bool gm20b_is_engine_ce(struct gk20a *g, u32 engine_type);
u32 gm20b_get_ce_inst_id(struct gk20a *g, u32 engine_type); u32 gm20b_get_ce_inst_id(struct gk20a *g, u32 engine_type);
#endif #endif
bool gm20b_is_engine_gr(struct gk20a *g, u32 engine_type); struct nvgpu_device *gm20b_top_parse_next_dev(struct gk20a *g, u32 *i);
u32 gm20b_top_get_max_gpc_count(struct gk20a *g); u32 gm20b_top_get_max_gpc_count(struct gk20a *g);
u32 gm20b_top_get_max_tpc_per_gpc_count(struct gk20a *g); u32 gm20b_top_get_max_tpc_per_gpc_count(struct gk20a *g);


@@ -24,6 +24,8 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
#include <nvgpu/io.h> #include <nvgpu/io.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/kmem.h>
#include "top_gm20b.h" #include "top_gm20b.h"
#include <nvgpu/hw/gm20b/hw_top_gm20b.h> #include <nvgpu/hw/gm20b/hw_top_gm20b.h>
@@ -68,9 +70,102 @@ void gm20b_device_info_parse_enum(struct gk20a *g, u32 table_entry,
} }
bool gm20b_is_engine_gr(struct gk20a *g, u32 engine_type) /*
* Parse the device starting at *token. This will return a valid device struct
* pointer if a device was detected and parsed, NULL otherwise.
*/
struct nvgpu_device *gm20b_top_parse_next_dev(struct gk20a *g, u32 *token)
{ {
return (engine_type == top_device_info_type_enum_graphics_v()); int ret;
u32 table_entry;
u32 entry;
u32 entry_enum = 0;
u32 entry_engine = 0;
u32 entry_data = 0;
struct nvgpu_device *dev;
while (true) {
/*
* The core code relies on us to manage the index - a.k.a the
* token. If token crosses the device table size, then break and
* return NULL to signify we've hit the end of the dev list.
*/
if (*token >= top_device_info__size_1_v()) {
return NULL;
}
/*
* Once we have read a register we'll never have to read it
* again so always increment before doing anything further.
*/
table_entry = nvgpu_readl(g, top_device_info_r(*token));
(*token)++;
entry = top_device_info_entry_v(table_entry);
if (entry == top_device_info_entry_not_valid_v()) {
/*
* Empty section of the table. We'll skip these
* internally so that the common device manager is
* unaware of the holes in the device register array.
*/
continue;
} else if (entry == top_device_info_entry_enum_v()) {
entry_enum = table_entry;
} else if (entry == top_device_info_entry_data_v()) {
entry_data = table_entry;
} else if (entry == top_device_info_entry_engine_type_v()) {
entry_engine = table_entry;
} else {
nvgpu_err(g, "Invalid entry type in device_info table");
return NULL;
}
/*
* If we need to chain then we need to read the register in the
* table. Otherwise, if chain is false, then we have parsed all
* the relevant registers for this table entry.
*/
if (top_device_info_chain_v(table_entry) ==
top_device_info_chain_enable_v()) {
continue;
}
/*
* If we get here we have sufficient data to parse a device, i.e.
* chain was set to 0.
*/
dev = nvgpu_kzalloc(g, sizeof(*dev));
if (dev == NULL) {
nvgpu_err(g, "TOP: OOM allocating nvgpu_device struct");
return NULL;
}
dev->type = top_device_info_type_enum_v(entry_engine);
g->ops.top.device_info_parse_enum(g,
entry_enum,
&dev->engine_id,
&dev->runlist_id,
&dev->intr_id,
&dev->reset_id);
ret = g->ops.top.device_info_parse_data(g,
entry_data,
&dev->inst_id,
&dev->pri_base,
&dev->fault_id);
if (ret != 0) {
nvgpu_err(g,
"TOP: error parsing Data Entry 0x%x",
entry_data);
nvgpu_kfree(g, dev);
return NULL;
}
break;
}
return dev;
} }
u32 gm20b_top_get_max_gpc_count(struct gk20a *g) u32 gm20b_top_get_max_gpc_count(struct gk20a *g)


@@ -28,13 +28,10 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct nvgpu_device_info; struct nvgpu_device;
int gp10b_device_info_parse_data(struct gk20a *g, u32 table_entry, u32 *inst_id, int gp10b_device_info_parse_data(struct gk20a *g, u32 table_entry, u32 *inst_id,
u32 *pri_base, u32 *fault_id); u32 *pri_base, u32 *fault_id);
u32 gp10b_get_num_engine_type_entries(struct gk20a *g, u32 engine_type); int gp10b_get_device_info(struct gk20a *g, struct nvgpu_device *dev_info,
int gp10b_get_device_info(struct gk20a *g, struct nvgpu_device_info *dev_info,
u32 engine_type, u32 inst_id); u32 engine_type, u32 inst_id);
bool gp10b_is_engine_ce(struct gk20a *g, u32 engine_type);
#endif #endif


@@ -1,143 +0,0 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/device.h>
#include <nvgpu/types.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include "top_gp10b.h"
#include <nvgpu/hw/gp10b/hw_top_gp10b.h>
u32 gp10b_get_num_engine_type_entries(struct gk20a *g, u32 engine_type)
{
u32 i = 0;
u32 max_info_entries = top_device_info__size_1_v();
u32 num_entries = 0;
u32 table_entry;
u32 entry;
for (i = 0; i < max_info_entries; i++) {
table_entry = nvgpu_readl(g, top_device_info_r(i));
entry = top_device_info_entry_v(table_entry);
if (entry == top_device_info_entry_engine_type_v()) {
nvgpu_log_info(g, "table_entry: 0x%x engine type: 0x%x",
table_entry,
top_device_info_type_enum_v(table_entry));
if (top_device_info_type_enum_v(table_entry) ==
engine_type) {
num_entries = nvgpu_safe_add_u32(num_entries,
1U);
}
}
}
return num_entries;
}
static int gp10b_check_device_match(struct gk20a *g,
struct nvgpu_device_info *dev_info,
u32 entry_engine, u32 engine_type,
u32 entry_data, u32 inst_id, u32 entry_enum)
{
int ret;
if ((top_device_info_type_enum_v(entry_engine) == engine_type)
&& (top_device_info_data_inst_id_v(entry_data) ==
inst_id)) {
dev_info->engine_type = engine_type;
g->ops.top.device_info_parse_enum(g,
entry_enum,
&dev_info->engine_id,
&dev_info->runlist_id,
&dev_info->intr_id,
&dev_info->reset_id);
ret = g->ops.top.device_info_parse_data(g,
entry_data,
&dev_info->inst_id,
&dev_info->pri_base,
&dev_info->fault_id);
if (ret != 0) {
nvgpu_err(g, "Error parsing Data Entry 0x%x",
entry_data);
return ret;
}
}
return 0;
}
int gp10b_get_device_info(struct gk20a *g, struct nvgpu_device_info *dev_info,
u32 engine_type, u32 inst_id)
{
int ret = 0;
u32 i = 0;
u32 table_entry;
u32 entry;
u32 entry_engine = 0;
u32 entry_enum = 0;
u32 entry_data = 0;
u32 max_info_entries = top_device_info__size_1_v();
if (dev_info == NULL) {
nvgpu_err(g, "Null device_info pointer passed.");
return -EINVAL;
}
if ((g->ops.top.device_info_parse_enum == NULL) ||
(g->ops.top.device_info_parse_data == NULL)) {
nvgpu_err(g, "Dev_info parsing functions ptrs not set.");
return -EINVAL;
}
for (i = 0; i < max_info_entries; i++) {
table_entry = nvgpu_readl(g, top_device_info_r(i));
entry = top_device_info_entry_v(table_entry);
if (entry == top_device_info_entry_not_valid_v()) {
continue;
} else if (entry == top_device_info_entry_enum_v()) {
entry_enum = table_entry;
} else if (entry == top_device_info_entry_data_v()) {
entry_data = table_entry;
} else {
entry_engine = table_entry;
}
if (top_device_info_chain_v(table_entry) ==
top_device_info_chain_enable_v()) {
continue;
}
ret = gp10b_check_device_match(g, dev_info, entry_engine,
engine_type, entry_data, inst_id, entry_enum);
if (ret != 0) {
return ret;
}
}
return ret;
}
bool gp10b_is_engine_ce(struct gk20a *g, u32 engine_type)
{
return (engine_type == top_device_info_type_enum_lce_v());
}


@@ -22,100 +22,194 @@
#ifndef NVGPU_DEVICE_H #ifndef NVGPU_DEVICE_H
#define NVGPU_DEVICE_H #define NVGPU_DEVICE_H
/** /**
* @file * @file
* *
* Declare device info specific struct and defines. * Declare device info specific struct and defines.
*/ */
#include <nvgpu/types.h> #include <nvgpu/types.h>
#include <nvgpu/list.h>
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT) #if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#include "include/nvgpu/nvgpu_next_top.h" #include "include/nvgpu/nvgpu_next_device.h"
#endif #endif
struct gk20a; struct gk20a;
/** /**
* @defgroup NVGPU_TOP_DEVICE_INFO_DEFINES * @defgroup NVGPU_DEVTYPE_DEFINES
* *
* List of engine enumeration values supported for device_info parsing * List of engine enumeration values supported for device_info parsing.
*/ */
/** /**
* @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES * @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES
* Engine type enum for graphics engine as defined by h/w. *
* Device type for all graphics engine instances.
*/ */
#define NVGPU_ENGINE_GRAPHICS 0U #define NVGPU_DEVTYPE_GRAPHICS 0U
/** /**
* @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES * @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES
* Engine type enum for copy engine instance 0 as defined by h/w. *
* Obsolete for Pascal and chips beyond it. * Copy Engine 0; obsolete on pascal+. For Pascal+ use the LCE type and relevant
* instance ID.
*
* This describes the 0th copy engine.
*/ */
#define NVGPU_ENGINE_COPY0 1U #define NVGPU_DEVTYPE_COPY0 1U
/** /**
* @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES * @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES
* Engine type enum for copy engine instance 1 as defined by h/w. *
* Obsolete for Pascal and chips beyond it. * See #NVGPU_DEVTYPE_COPY0.
*/ */
#define NVGPU_ENGINE_COPY1 2U #define NVGPU_DEVTYPE_COPY1 2U
/** /**
* @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES * @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES
* Engine type enum for copy engine instance 2 as defined by h/w. *
* Obsolete for Pascal and chips beyond it. * See #NVGPU_DEVTYPE_COPY0.
*/ */
#define NVGPU_ENGINE_COPY2 3U #define NVGPU_DEVTYPE_COPY2 3U
#define NVGPU_ENGINE_IOCTRL 18U
/** /**
* @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES * @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES
* Engine type enum for all copy engine as defined by h/w. *
* This enum type is used for copy engines on Pascal and chips beyond it. * NVLINK IOCTRL device - used by NVLINK on dGPUs.
*/ */
/** Engine enum type lce as defined by h/w. */ #define NVGPU_DEVTYPE_IOCTRL 18U
#define NVGPU_ENGINE_LCE 19U /**
* @ingroup NVGPU_TOP_DEVICE_INFO_DEFINES
*
* Logical Copy Engine devices.
*/
#define NVGPU_DEVTYPE_LCE 19U
#define NVGPU_MAX_DEVTYPE 24U
#define NVGPU_DEVICE_TOKEN_INIT 0U
/** /**
* Structure definition for storing information for the devices and the engines * Structure definition for storing information for the devices and the engines
* available on the chip. * available on the chip.
*/ */
struct nvgpu_device_info { struct nvgpu_device {
/** Engine enum type defined by device info h/w register. */ struct nvgpu_list_node dev_list_node;
u32 engine_type;
/** /**
* Specifies instance of a device, allowing s/w to distinguish between * Engine type for this device.
*/
u32 type;
/**
* Specifies instance of a device, allowing SW to distinguish between
* multiple copies of a device present on the chip. * multiple copies of a device present on the chip.
*/ */
u32 inst_id; u32 inst_id;
/** /**
* Used to determine the start of the h/w register address space * PRI base register offset for the 0th device instance of this type.
* for #inst_id 0.
*/ */
u32 pri_base; u32 pri_base;
/** /**
* Contains valid mmu fault id read from device info h/w register or * MMU fault ID for this device or the invalid fault ID: U32_MAX.
* invalid mmu fault id, U32_MAX.
*/ */
u32 fault_id; u32 fault_id;
/** Engine id read from device info h/w register. */
/**
* The unique per-device ID that host uses to identify any given engine.
*/
u32 engine_id; u32 engine_id;
/** Runlist id read from device info h/w register. */
/**
* The ID of the runlist that serves this engine.
*/
u32 runlist_id; u32 runlist_id;
/** Intr id read from device info h/w register. */
/**
* Interrupt ID for determining if this device has a pending interrupt.
*/
u32 intr_id; u32 intr_id;
/** Reset id read from device info h/w register. */
/**
* Reset ID for resetting the device in MC.
*/
u32 reset_id; u32 reset_id;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */ /** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT) #if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
/* nvgpu next device info additions */ /* nvgpu next device info additions */
struct nvgpu_next_device_info nvgpu_next; struct nvgpu_device_next next;
#endif #endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */ /** @endcond DOXYGEN_SHOULD_SKIP_THIS */
}; };
/**
* @brief Initialize the SW device list from the HW device list.
*
* @param g [in] The GPU.
*
* @return 0 on success; a negative error code otherwise.
*/
int nvgpu_device_init(struct gk20a *g);
/**
* @brief Cleanup the device list on power down.
*
* @param g [in] The GPU.
*/
void nvgpu_device_cleanup(struct gk20a *g);
/**
* @brief Read device info from SW device table.
*
* @param g [in] GPU device struct pointer
* @param dev_info [out] Pointer to device information struct
* which gets populated with all the
* engine related information.
* @param type [in] Device type; one of the NVGPU_DEVTYPE_* defines.
* @param inst_id [in] Engine's instance identification number
*
* This will copy the contents of the requested device into the passed
* device pointer. The device copied is chosen based on the \a type and
* \a inst_id fields provided.
*
* @return 0 on success; -EINVAL if \a type is out of range; -ENODEV if the
* requested device instance does not exist.
*/
int nvgpu_device_get(struct gk20a *g,
struct nvgpu_device *dev_info,
u32 type, u32 inst_id);
/**
* @brief Return number of devices of type \a type.
*
* @param g [in] The GPU.
* @param type [in] The type of device.
*
* @return The number of devices of type \a type present on the chip, or 0 if
* \a type is invalid.
*/
u32 nvgpu_device_count(struct gk20a *g, u32 type);
/**
* @brief Return true if dev is a copy engine device.
*
* @param g [in] The GPU.
* @param dev [in] A device.
*
* Returns true if \a dev matches a copy engine device type. For pre-Pascal
* chips this is COPY[0, 1, 2], for Pascal and onward this is LCE.
*/
bool nvgpu_device_is_ce(struct gk20a *g, struct nvgpu_device *dev);
/**
* @brief Return true if dev is a graphics device.
*
* @param g [in] The GPU.
* @param dev [in] A device.
*
* Returns true if \a dev matches the graphics device type.
*/
bool nvgpu_device_is_graphics(struct gk20a *g, struct nvgpu_device *dev);
#endif /* NVGPU_DEVICE_H */ #endif /* NVGPU_DEVICE_H */


@@ -43,6 +43,8 @@
struct gk20a; struct gk20a;
struct nvgpu_fifo; struct nvgpu_fifo;
struct nvgpu_device;
/** /**
* Engine enum types used for s/w purpose. These enum values are * Engine enum types used for s/w purpose. These enum values are
* different as compared to engine enum types defined by h/w. * different as compared to engine enum types defined by h/w.
@@ -113,7 +115,7 @@ struct nvgpu_engine_info {
* See device.h for engine enum types defined by h/w. * See device.h for engine enum types defined by h/w.
* *
* @param g [in] The GPU driver struct. * @param g [in] The GPU driver struct.
* @param engine_type [in] Engine enum type defined by h/w. * @param dev [in] Device to check.
* *
* This is used to map engine enum type defined by h/w to engine enum type * This is used to map engine enum type defined by h/w to engine enum type
* defined by s/w. * defined by s/w.
@@ -124,8 +126,8 @@ struct nvgpu_engine_info {
* types or if #engine_type does not match with h/w defined engine enum * types or if #engine_type does not match with h/w defined engine enum
* types for gr and/or ce engines. * types for gr and/or ce engines.
*/ */
enum nvgpu_fifo_engine nvgpu_engine_enum_from_type(struct gk20a *g, enum nvgpu_fifo_engine nvgpu_engine_enum_from_dev(struct gk20a *g,
u32 engine_type); struct nvgpu_device *dev);
/** /**
* @brief Get pointer to #nvgpu_engine_info for the h/w engine id. * @brief Get pointer to #nvgpu_engine_info for the h/w engine id.
* *
@@ -421,9 +423,9 @@ u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg);
* @param f [in] Pointer to #nvgpu_fifo struct. * @param f [in] Pointer to #nvgpu_fifo struct.
* *
* - Get device info related info for h/w engine enum type, * - Get device info related info for h/w engine enum type,
* #NVGPU_ENGINE_GRAPHICS. * #NVGPU_DEVTYPE_GRAPHICS.
* - Get PBDMA id serving the runlist id of the h/w engine enum type, * - Get PBDMA id serving the runlist id of the h/w engine enum type,
* #NVGPU_ENGINE_GRAPHICS. * #NVGPU_DEVTYPE_GRAPHICS.
* - Get s/w defined engine enum type for the h/w engine enum type read * - Get s/w defined engine enum type for the h/w engine enum type read
* from device info registers. * from device info registers.
* - Initialize #nvgpu_fifo.engine_info and #nvgpu_fifo.active_engines_list * - Initialize #nvgpu_fifo.engine_info and #nvgpu_fifo.active_engines_list
@@ -435,9 +437,9 @@ u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg);
* *
* @return 0 upon success. * @return 0 upon success.
* @retval -EINVAL if call to function to get device info related info for * @retval -EINVAL if call to function to get device info related info for
* h/w engine enum type, #NVGPU_ENGINE_GRAPHICS returned failure. * h/w engine enum type, #NVGPU_DEVTYPE_GRAPHICS returned failure.
* @retval -EINVAL if call to function to get pbdma id for runlist id of * @retval -EINVAL if call to function to get pbdma id for runlist id of
* h/w engine enum type, #NVGPU_ENGINE_GRAPHICS returned failure. * h/w engine enum type, #NVGPU_DEVTYPE_GRAPHICS returned failure.
* @retval Return value of function called to initialize CE engine info. * @retval Return value of function called to initialize CE engine info.
*/ */
int nvgpu_engine_init_info(struct nvgpu_fifo *f); int nvgpu_engine_init_info(struct nvgpu_fifo *f);


@@ -736,6 +736,7 @@ struct gk20a {
#ifdef CONFIG_NVGPU_SIM #ifdef CONFIG_NVGPU_SIM
struct sim_nvgpu *sim; struct sim_nvgpu *sim;
#endif #endif
struct nvgpu_device_list *devs;
struct mm_gk20a mm; struct mm_gk20a mm;
struct nvgpu_pmu *pmu; struct nvgpu_pmu *pmu;
struct nvgpu_acr *acr; struct nvgpu_acr *acr;


@@ -213,7 +213,7 @@ struct gops_mc {
* - #NVGPU_UNIT_BLG * - #NVGPU_UNIT_BLG
* - Reset id of supported engines from the * - Reset id of supported engines from the
* device info. For e.g. GR engine has reset * device info. For e.g. GR engine has reset
* id of 12. @see #nvgpu_device_info. * id of 12. @see #nvgpu_device.
* *
* This function is invoked to reset the engines while initializing * This function is invoked to reset the engines while initializing
* FIFO, GR and other engines during #nvgpu_finalize_poweron. * FIFO, GR and other engines during #nvgpu_finalize_poweron.


@@ -31,7 +31,7 @@
* *
*/ */
struct gk20a; struct gk20a;
struct nvgpu_device_info; struct nvgpu_device;
/** /**
* TOP unit HAL operations * TOP unit HAL operations
@@ -40,135 +40,30 @@ struct nvgpu_device_info;
*/ */
struct gops_top { struct gops_top {
/** /**
* @brief Get the number of entries of particular engine type in * @brief Parse the GPU device table into a SW representation.
* device_info table.
*
* @param g [in] GPU driver struct pointer.
* @param engine_type [in] Engine enumeration value.
*
* 1. Some engines have multiple entries in device_info table
* corresponding to each instance of the engine. All such entries
* corresponding to same engine will have same \a engine_type, but
* a unique instance id.
* 2. Traverse through the device_info table and get the total
* number of entries corresponding to input \a engine_type.
* 3. This HAL is valid for Pascal and chips beyond.
* 4. Prior to Pascal, each instance of the engine was denoted by a
* different engine_type.
*
* List of valid engine enumeration values:
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* NVGPU_ENGINE_GRAPHICS - 0
* NVGPU_ENGINE_COPY0 - 1
* NVGPU_ENGINE_COPY1 - 2
* NVGPU_ENGINE_COPY2 - 3
* NVGPU_ENGINE_IOCTRL - 18
* NVGPU_ENGINE_LCE - 19
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* @return Number of instances of \a engine_type in device_info
* table
*/
u32 (*get_num_engine_type_entries)(struct gk20a *g, u32 engine_type);
/**
* @brief Get all the engine related information from device_info table
* *
* @param g [in] GPU device struct pointer * @param g [in] GPU device struct pointer
* @param dev_info [out] Pointer to device information struct * @param token [in,out] Token to pass into table parsing code.
* which gets populated with all the
* engine related information.
* @param engine_type [in] Engine enumeration value
* @param inst_id [in] Engine's instance identification number
* *
* 1. Device_info table is an array of registers which contains engine * The devinfo table is an array of registers which contains a list of
* specific data like interrupt enum, reset enum, pri_base etc. * all devices in the GPU. This list can be parsed by SW to dynamically
* 2. This HAL reads such engine information from table after matching * determine the presence of HW devices on the GPU.
* the \a engine_type and \a inst_id and then populates the read
* information in \a dev_info struct.
* 3. In the device_info table, more than one register is required to
* denote information for a specific engine. So they use multiple
* consecutive registers in the array to represent a specific engine.
* 4. The MSB (called chain bit) in each register denotes if the next
* register talks the same engine as present the one. All the
* registers in the device info table can be classified in one of 4
* types:
* - Not_valid: Ignore these registers
* - Data: This type of register contains pri_base, fault_id etc
* - Enum: This type of register contains intr_enum, reset_enum
* - Engine_type: This type of register contains the engine name
* which is being described.
* 5. So, in the parsing code,
* - Loop through the array
* - Ignore the invalid entries
* - Store the “linked” register values in temporary variables
* until chain_bit is set. This helps us get all the data for
* particular engine type. [This is needed because the engine
* name may not be part of the first register representing the
* engine. So, reading the first register is not sufficient to
* determine if the group represents the \a engine_type.
* Chain_bit being disabled implies the next register read
* would represent a new engine.
* - Parse the stored variables to get engine_name,
* intr/reset_enums, pri base etc. Check if the engine
* type read from the table equals \a engine_type.
* *
* List of valid engine enumeration values: * Each table entry is a sequence of registers that SW can read. The table
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * format varies from chip to chip in subtle ways; this particular HAL
* NVGPU_ENGINE_GRAPHICS - 0 * is responsible for reading a single device from the table.
* NVGPU_ENGINE_COPY0 - 1
* NVGPU_ENGINE_COPY1 - 2
* NVGPU_ENGINE_COPY2 - 3
* NVGPU_ENGINE_IOCTRL - 18
* NVGPU_ENGINE_LCE - 19
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* *
* @return 0 in case of success and < 0 in case of failure * \a token is an opaque argument the parser can use for storing state
* as the table is parsed. This function is intended to be called
* repeatedly to parse all devices in the chip. It will return devices
* until there are no more devices to return, at which point it will
* return NULL. To begin the parsing, \a token should be set to
* #NVGPU_DEVICE_TOKEN_INIT.
*
* @return A valid pointer to an nvgpu_device or NULL if no device was
* parsed or an error occurred.
*/ */
int (*get_device_info)(struct gk20a *g, struct nvgpu_device *(*parse_next_device)(struct gk20a *g, u32 *token);
struct nvgpu_device_info *dev_info,
u32 engine_type, u32 inst_id);
/**
* @brief Checks if \a engine_type corresponds to graphics engine
*
* @param g [in] GPU device struct pointer
* @param engine_type [in] Engine enumeration value
*
* 1. This HAL checks if the input \a engine_type is the enumeration
* value corresponding to graphics engine.
* 2. The enumeration value for graphics engine for device_info table
* is 0.
*
* @return true if \a engine_type is equal to 0, false otherwise
*/
bool (*is_engine_gr)(struct gk20a *g, u32 engine_type);
/**
* @brief Checks if \a engine_type corresponds to copy engine
*
* @param g [in] GPU device struct pointer
* @param engine_type [in] Engine enumeration value
*
* 1. This HAL checks if the input \a engine_type is the enumeration value
* corresponding to copy engine.
* 2. Prior to Pascal, each instance of copy engine was denoted by
* different engine_type as below:
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* COPY_ENGINE_INSTANCE0 enum value - 1
* COPY_ENGINE_INSTANCE1 enum value - 2
* COPY_ENGINE_INSTANCE2 enum value - 3
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* 3. For Pascal and chips beyond, all instances of copy engine have same
* engine_type as below:
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* COPY_ENGINE enum value - 19
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* @return true if \a engine_type is equal to enum value specified above
* or false otherwise
*/
bool (*is_engine_ce)(struct gk20a *g, u32 engine_type);
/** /**
* @brief Get the instance ID for particular copy engine * @brief Get the instance ID for particular copy engine
@@ -306,10 +201,6 @@ struct gops_top {
int (*device_info_parse_data)(struct gk20a *g, int (*device_info_parse_data)(struct gk20a *g,
u32 table_entry, u32 *inst_id, u32 table_entry, u32 *inst_id,
u32 *pri_base, u32 *fault_id); u32 *pri_base, u32 *fault_id);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#include "include/nvgpu/nvgpu_next_gops_top.h"
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */ /** @endcond DOXYGEN_SHOULD_SKIP_THIS */
}; };


@@ -67,6 +67,7 @@ enum nvgpu_log_type {
#define gpu_dbg_event BIT(27) /* Events to User debugging. */ #define gpu_dbg_event BIT(27) /* Events to User debugging. */
#define gpu_dbg_vsrv BIT(28) /* server debugging. */ #define gpu_dbg_vsrv BIT(28) /* server debugging. */
#define gpu_dbg_mem BIT(31) /* memory accesses; very verbose. */ #define gpu_dbg_mem BIT(31) /* memory accesses; very verbose. */
#define gpu_dbg_device BIT(32) /* Device initialization and querying. */
#endif #endif
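Assuming nvgpu's existing nvgpu_log() helper, the new bit gates device-related messages along these lines; the call site and the num_devices variable are illustrative only:

/* Emitted only when gpu_dbg_device is enabled in the log mask. */
nvgpu_log(g, gpu_dbg_device, "device list parsed: %u entries", num_devices);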


@@ -49,7 +49,6 @@ gm20b_fb_read_wpr_info
gm20b_fb_tlb_invalidate gm20b_fb_tlb_invalidate
gm20b_fb_vpr_info_fetch gm20b_fb_vpr_info_fetch
gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id
gm20b_is_engine_gr
gm20b_mm_get_big_page_sizes gm20b_mm_get_big_page_sizes
gm20b_pbdma_acquire_val gm20b_pbdma_acquire_val
gm20b_pbdma_device_fatal_0_intr_descs gm20b_pbdma_device_fatal_0_intr_descs
@@ -95,9 +94,6 @@ gp10b_pbdma_get_fc_runlist_timeslice
gp10b_pbdma_get_signature gp10b_pbdma_get_signature
gp10b_ramin_init_pdb gp10b_ramin_init_pdb
gp10b_engine_init_ce_info gp10b_engine_init_ce_info
gp10b_get_num_engine_type_entries
gp10b_get_device_info
gp10b_is_engine_ce
gp10b_priv_ring_isr gp10b_priv_ring_isr
gp10b_priv_ring_decode_error_code gp10b_priv_ring_decode_error_code
gp10b_ramfc_commit_userd gp10b_ramfc_commit_userd
@@ -351,7 +347,6 @@ nvgpu_ecc_init_support
nvgpu_engine_act_interrupt_mask nvgpu_engine_act_interrupt_mask
nvgpu_engine_check_valid_id nvgpu_engine_check_valid_id
nvgpu_engine_cleanup_sw nvgpu_engine_cleanup_sw
nvgpu_engine_enum_from_type
nvgpu_engine_find_busy_doing_ctxsw nvgpu_engine_find_busy_doing_ctxsw
nvgpu_engine_get_active_eng_info nvgpu_engine_get_active_eng_info
nvgpu_engine_get_all_ce_reset_mask nvgpu_engine_get_all_ce_reset_mask


@@ -49,7 +49,6 @@ gm20b_fb_read_wpr_info
gm20b_fb_tlb_invalidate gm20b_fb_tlb_invalidate
gm20b_fb_vpr_info_fetch gm20b_fb_vpr_info_fetch
gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id
gm20b_is_engine_gr
gm20b_mm_get_big_page_sizes gm20b_mm_get_big_page_sizes
gm20b_pbdma_acquire_val gm20b_pbdma_acquire_val
gm20b_pbdma_device_fatal_0_intr_descs gm20b_pbdma_device_fatal_0_intr_descs
@@ -95,9 +94,6 @@ gp10b_pbdma_get_fc_runlist_timeslice
gp10b_pbdma_get_signature gp10b_pbdma_get_signature
gp10b_ramin_init_pdb gp10b_ramin_init_pdb
gp10b_engine_init_ce_info gp10b_engine_init_ce_info
gp10b_get_num_engine_type_entries
gp10b_get_device_info
gp10b_is_engine_ce
gp10b_priv_ring_isr gp10b_priv_ring_isr
gp10b_priv_ring_decode_error_code gp10b_priv_ring_decode_error_code
gp10b_ramfc_commit_userd gp10b_ramfc_commit_userd
@@ -348,6 +344,7 @@ nvgpu_current_time_ms
nvgpu_current_time_ns nvgpu_current_time_ns
nvgpu_current_time_us nvgpu_current_time_us
nvgpu_detect_chip nvgpu_detect_chip
nvgpu_device_init
nvgpu_dma_alloc nvgpu_dma_alloc
nvgpu_dma_alloc_get_fault_injection nvgpu_dma_alloc_get_fault_injection
nvgpu_dma_alloc_flags_sys nvgpu_dma_alloc_flags_sys
@@ -362,7 +359,6 @@ nvgpu_ecc_init_support
nvgpu_engine_act_interrupt_mask nvgpu_engine_act_interrupt_mask
nvgpu_engine_check_valid_id nvgpu_engine_check_valid_id
nvgpu_engine_cleanup_sw nvgpu_engine_cleanup_sw
nvgpu_engine_enum_from_type
nvgpu_engine_find_busy_doing_ctxsw nvgpu_engine_find_busy_doing_ctxsw
nvgpu_engine_get_active_eng_info nvgpu_engine_get_active_eng_info
nvgpu_engine_get_all_ce_reset_mask nvgpu_engine_get_all_ce_reset_mask


@@ -71,82 +71,15 @@ static inline void subtest_setup(struct unit_module *m, u32 branches)
u.branches = branches; u.branches = branches;
} }
#define F_ENGINE_INIT_CE_INFO_GET_NUM_ENGINES_NULL BIT(0)
#define F_ENGINE_INIT_CE_INFO_NO_LCE BIT(1)
#define F_ENGINE_INIT_CE_INFO_GET_DEV_INFO_FAIL BIT(2)
#define F_ENGINE_INIT_CE_INFO_PBDMA_FIND_FAIL BIT(3)
#define F_ENGINE_INIT_CE_INFO_ASYNC_CE BIT(4)
#define F_ENGINE_INIT_CE_INFO_GRCE BIT(5)
#define F_ENGINE_INIT_CE_INFO_FAULT_ID_0 BIT(6)
#define F_ENGINE_INIT_CE_INFO_GET_INST_NULL BIT(7)
#define F_ENGINE_INIT_CE_INFO_INVAL_ENUM BIT(8)
#define F_ENGINE_INIT_CE_INFO_LAST BIT(9)
#define F_ENGINE_INIT_CE_INFO_NO_LCE BIT(0)
#define F_ENGINE_INIT_CE_INFO_GET_DEV_INFO_FAIL BIT(1)
#define F_ENGINE_INIT_CE_INFO_PBDMA_FIND_FAIL BIT(2)
#define F_ENGINE_INIT_CE_INFO_ASYNC_CE BIT(3)
#define F_ENGINE_INIT_CE_INFO_GRCE BIT(4)
#define F_ENGINE_INIT_CE_INFO_FAULT_ID_0 BIT(5)
#define F_ENGINE_INIT_CE_INFO_GET_INST_NULL BIT(6)
#define F_ENGINE_INIT_CE_INFO_INVAL_ENUM BIT(7)
#define F_ENGINE_INIT_CE_INFO_LAST BIT(8)
static u32 wrap_top_get_num_engine_type_entries(struct gk20a *g,
u32 engine_type)
{
u32 branches = u.branches;
if (engine_type != NVGPU_ENGINE_LCE)
goto done;
if (branches & F_ENGINE_INIT_CE_INFO_NO_LCE) {
return 0;
}
if ((branches & F_ENGINE_INIT_CE_INFO_GRCE) ||
(branches & F_ENGINE_INIT_CE_INFO_ASYNC_CE)) {
return 1;
}
done:
return u.gops.top.get_num_engine_type_entries(g, engine_type);
}
static int wrap_top_get_device_info(struct gk20a *g,
struct nvgpu_device_info *dev_info,
u32 engine_type, u32 inst_id)
{
u32 branches = u.branches;
if (engine_type != NVGPU_ENGINE_LCE)
goto done;
if (branches & F_ENGINE_INIT_CE_INFO_GET_DEV_INFO_FAIL) {
return -EINVAL;
}
if (branches & F_ENGINE_INIT_CE_INFO_FAULT_ID_0) {
dev_info->fault_id = 0;
} else {
dev_info->fault_id = 1;
}
if (branches & F_ENGINE_INIT_CE_INFO_GRCE) {
dev_info->runlist_id = nvgpu_engine_get_gr_runlist_id(g);
dev_info->engine_id = 1;
dev_info->engine_type = top_device_info_type_enum_lce_v();
return 0;
}
if (branches & F_ENGINE_INIT_CE_INFO_ASYNC_CE) {
dev_info->runlist_id = 1;
dev_info->engine_id = 1;
dev_info->engine_type = top_device_info_type_enum_lce_v();
return 0;
}
if (branches & F_ENGINE_INIT_CE_INFO_INVAL_ENUM) {
dev_info->runlist_id = 1;
dev_info->engine_id = 1;
dev_info->engine_type = 5;
return 0;
}
done:
return u.gops.top.get_device_info(g, dev_info, engine_type, inst_id);
}
static bool wrap_pbdma_find_for_runlist(struct gk20a *g, static bool wrap_pbdma_find_for_runlist(struct gk20a *g,
u32 runlist_id, u32 *pbdma_id) u32 runlist_id, u32 *pbdma_id)
@@ -176,7 +109,6 @@ int test_gp10b_engine_init_ce_info(struct unit_module *m,
F_ENGINE_INIT_CE_INFO_GET_DEV_INFO_FAIL | F_ENGINE_INIT_CE_INFO_GET_DEV_INFO_FAIL |
F_ENGINE_INIT_CE_INFO_PBDMA_FIND_FAIL; F_ENGINE_INIT_CE_INFO_PBDMA_FIND_FAIL;
const char *labels[] = { const char *labels[] = {
"get_num_engines_null",
"no_lce", "no_lce",
"get_dev_info_fail", "get_dev_info_fail",
"pbdma_find_fail", "pbdma_find_fail",
@@ -187,7 +119,6 @@ int test_gp10b_engine_init_ce_info(struct unit_module *m,
"inval_enum" "inval_enum"
}; };
u32 prune = u32 prune =
F_ENGINE_INIT_CE_INFO_GET_NUM_ENGINES_NULL |
F_ENGINE_INIT_CE_INFO_NO_LCE | F_ENGINE_INIT_CE_INFO_NO_LCE |
F_ENGINE_INIT_CE_INFO_INVAL_ENUM | fail; F_ENGINE_INIT_CE_INFO_INVAL_ENUM | fail;
u32 branches = 0; u32 branches = 0;
@@ -197,10 +128,9 @@ int test_gp10b_engine_init_ce_info(struct unit_module *m,
u.gops = g->ops; u.gops = g->ops;
unit_assert(f->num_engines > 0, goto done); unit_assert(f->num_engines > 0, goto done);
	unit_assert(f->engine_info[0].engine_enum == NVGPU_ENGINE_GR,
		goto done);
	unit_assert(f->engine_info[0].engine_enum == NVGPU_DEVTYPE_GRAPHICS,
		goto done);
g->ops.top.get_device_info = wrap_top_get_device_info;
g->ops.pbdma.find_for_runlist = wrap_pbdma_find_for_runlist; g->ops.pbdma.find_for_runlist = wrap_pbdma_find_for_runlist;
for (branches = 0U; branches < F_ENGINE_INIT_CE_INFO_LAST; branches++) { for (branches = 0U; branches < F_ENGINE_INIT_CE_INFO_LAST; branches++) {
@@ -214,10 +144,6 @@ int test_gp10b_engine_init_ce_info(struct unit_module *m,
unit_verbose(m, "%s branches=%s\n", __func__, unit_verbose(m, "%s branches=%s\n", __func__,
branches_str(branches, labels)); branches_str(branches, labels));
g->ops.top.get_num_engine_type_entries =
branches & F_ENGINE_INIT_CE_INFO_GET_NUM_ENGINES_NULL ?
NULL : wrap_top_get_num_engine_type_entries;
g->ops.top.get_ce_inst_id = g->ops.top.get_ce_inst_id =
branches & F_ENGINE_INIT_CE_INFO_GET_INST_NULL ? branches & F_ENGINE_INIT_CE_INFO_GET_INST_NULL ?
NULL : wrap_top_get_ce_inst_id; NULL : wrap_top_get_ce_inst_id;
@@ -227,12 +153,9 @@ int test_gp10b_engine_init_ce_info(struct unit_module *m,
err = gp10b_engine_init_ce_info(f); err = gp10b_engine_init_ce_info(f);
if ((branches & F_ENGINE_INIT_CE_INFO_GET_NUM_ENGINES_NULL) || if ((branches & F_ENGINE_INIT_CE_INFO_NO_LCE) ||
(branches & F_ENGINE_INIT_CE_INFO_NO_LCE) ||
(branches & F_ENGINE_INIT_CE_INFO_INVAL_ENUM)) { (branches & F_ENGINE_INIT_CE_INFO_INVAL_ENUM)) {
num_lce = 0; num_lce = 0;
} else {
num_lce = g->ops.top.get_num_engine_type_entries(g, NVGPU_ENGINE_LCE);
} }
if (branches & fail) { if (branches & fail) {


@@ -177,13 +177,6 @@ done:
#define F_ENGINE_INIT_INFO_INIT_CE_FAIL BIT(3) #define F_ENGINE_INIT_INFO_INIT_CE_FAIL BIT(3)
#define F_ENGINE_INIT_INFO_LAST BIT(4) #define F_ENGINE_INIT_INFO_LAST BIT(4)
static int stub_top_get_device_info_EINVAL(struct gk20a *g,
struct nvgpu_device_info *dev_info,
u32 engine_type, u32 inst_id)
{
return -EINVAL;
}
static bool stub_pbdma_find_for_runlist_none(struct gk20a *g, static bool stub_pbdma_find_for_runlist_none(struct gk20a *g,
u32 runlist_id, u32 *pbdma_id) u32 runlist_id, u32 *pbdma_id)
{ {
@@ -228,15 +221,6 @@ int test_engine_init_info(struct unit_module *m,
unit_verbose(m, "%s branches=%s\n", __func__, unit_verbose(m, "%s branches=%s\n", __func__,
branches_str(branches, labels)); branches_str(branches, labels));
if (branches & F_ENGINE_INIT_INFO_GET_DEV_INFO_NULL) {
g->ops.top.get_device_info = NULL;
} else {
g->ops.top.get_device_info =
branches & F_ENGINE_INIT_INFO_GET_DEV_INFO_FAIL ?
stub_top_get_device_info_EINVAL :
gops.top.get_device_info;
}
g->ops.pbdma.find_for_runlist = g->ops.pbdma.find_for_runlist =
branches & F_ENGINE_INIT_INFO_PBDMA_FIND_FAIL ? branches & F_ENGINE_INIT_INFO_PBDMA_FIND_FAIL ?
stub_pbdma_find_for_runlist_none : stub_pbdma_find_for_runlist_none :
@@ -445,41 +429,6 @@ done:
return ret; return ret;
} }
int test_engine_enum_from_type(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
int engine_enum;
struct gpu_ops gops = g->ops;
engine_enum = nvgpu_engine_enum_from_type(g,
top_device_info_type_enum_graphics_v());
unit_assert(engine_enum == NVGPU_ENGINE_GR, goto done);
engine_enum = nvgpu_engine_enum_from_type(g,
top_device_info_type_enum_lce_v());
unit_assert(engine_enum == NVGPU_ENGINE_ASYNC_CE, goto done);
engine_enum = nvgpu_engine_enum_from_type(g, 0xff);
unit_assert(engine_enum == NVGPU_ENGINE_INVAL, goto done);
g->ops.top.is_engine_gr = NULL;
engine_enum = nvgpu_engine_enum_from_type(g,
top_device_info_type_enum_graphics_v());
unit_assert(engine_enum == NVGPU_ENGINE_INVAL, goto done);
g->ops = gops;
g->ops.top.is_engine_ce = NULL;
engine_enum = nvgpu_engine_enum_from_type(g,
top_device_info_type_enum_graphics_v());
unit_assert(engine_enum == NVGPU_ENGINE_INVAL, goto done);
ret = UNIT_SUCCESS;
done:
g->ops = gops;
return ret;
}
int test_engine_interrupt_mask(struct unit_module *m, int test_engine_interrupt_mask(struct unit_module *m,
struct gk20a *g, void *args) struct gk20a *g, void *args)
{ {
@@ -980,7 +929,6 @@ struct unit_module_test nvgpu_engine_tests[] = {
UNIT_TEST(init_info, test_engine_init_info, &u, 0), UNIT_TEST(init_info, test_engine_init_info, &u, 0),
UNIT_TEST(ids, test_engine_ids, &u, 0), UNIT_TEST(ids, test_engine_ids, &u, 0),
UNIT_TEST(get_active_eng_info, test_engine_get_active_eng_info, &u, 0), UNIT_TEST(get_active_eng_info, test_engine_get_active_eng_info, &u, 0),
UNIT_TEST(enum_from_type, test_engine_enum_from_type, &u, 0),
UNIT_TEST(interrupt_mask, test_engine_interrupt_mask, &u, 0), UNIT_TEST(interrupt_mask, test_engine_interrupt_mask, &u, 0),
UNIT_TEST(get_fast_ce_runlist_id, UNIT_TEST(get_fast_ce_runlist_id,
test_engine_get_fast_ce_runlist_id, &u, 0), test_engine_get_fast_ce_runlist_id, &u, 0),


@@ -138,41 +138,12 @@ static void reset_ctx(void)
u.priv_ring_isr = false; u.priv_ring_isr = false;
} }
/*
* Replacement functions that can be assigned to function pointers
*/
static int mock_get_device_info(struct gk20a *g,
struct nvgpu_device_info *dev_info,
u32 engine_type, u32 inst_id)
{
if (engine_type == NVGPU_ENGINE_GRAPHICS) {
dev_info->intr_id = ACTIVE_GR_ID;
dev_info->engine_id = 0;
dev_info->engine_type = 0;
} else if (engine_type == NVGPU_ENGINE_LCE) {
dev_info->intr_id = ACTIVE_CE_ID;
dev_info->engine_id = 1;
dev_info->engine_type = 0x13;
dev_info->reset_id = ffs(mc_enable_ce2_enabled_f()) - 1;
}
return 0;
}
static bool mock_pbdma_find_for_runlist(struct gk20a *g, u32 runlist_id, static bool mock_pbdma_find_for_runlist(struct gk20a *g, u32 runlist_id,
u32 *pbdma_id) u32 *pbdma_id)
{ {
return true; return true;
} }
static u32 mock_get_num_engine_type_entries(struct gk20a *g, u32 engine_type)
{
if (engine_type == NVGPU_ENGINE_LCE) {
return 1;
}
return 0;
}
static void mock_bus_isr(struct gk20a *g) static void mock_bus_isr(struct gk20a *g)
{ {
u.bus_isr = true; u.bus_isr = true;
@@ -259,10 +230,7 @@ int test_setup_env(struct unit_module *m,
} }
/* override HALs */ /* override HALs */
g->ops.top.get_device_info = mock_get_device_info;
g->ops.pbdma.find_for_runlist = mock_pbdma_find_for_runlist; g->ops.pbdma.find_for_runlist = mock_pbdma_find_for_runlist;
g->ops.top.get_num_engine_type_entries =
mock_get_num_engine_type_entries;
g->ops.bus.isr = mock_bus_isr; g->ops.bus.isr = mock_bus_isr;
g->ops.ce.isr_stall = mock_ce_stall_isr; g->ops.ce.isr_stall = mock_ce_stall_isr;
g->ops.ce.isr_nonstall = mock_ce_nonstall_isr; g->ops.ce.isr_nonstall = mock_ce_nonstall_isr;
@@ -448,17 +416,6 @@ int test_pause_resume_mask(struct unit_module *m, struct gk20a *g, void *args)
return UNIT_SUCCESS; return UNIT_SUCCESS;
} }
static void switch_ce_engine_type(struct nvgpu_engine_info *info)
{
if (info->engine_enum == NVGPU_ENGINE_ASYNC_CE) {
info->engine_enum = NVGPU_ENGINE_GRCE;
} else if (info->engine_enum == NVGPU_ENGINE_GRCE) {
info->engine_enum = NVGPU_ENGINE_ASYNC_CE;
} else {
BUG();
}
}
int test_intr_stall(struct unit_module *m, struct gk20a *g, void *args) int test_intr_stall(struct unit_module *m, struct gk20a *g, void *args)
{ {
u32 i, pend, val; u32 i, pend, val;
@@ -571,18 +528,6 @@ int test_isr_stall(struct unit_module *m, struct gk20a *g, void *args)
g->ops.mc.isr_stall(g); g->ops.mc.isr_stall(g);
g->ops.ce.isr_stall = mock_ce_stall_isr; g->ops.ce.isr_stall = mock_ce_stall_isr;
/* for branch coverage set CE engine to other type */
switch_ce_engine_type(&g->fifo.engine_info[1]);
for (i = 0; i < NUM_MC_UNITS; i++) {
intrs_pending |= mc_units[i].bit;
}
nvgpu_posix_io_writel_reg_space(g, STALL_PENDING_REG, intrs_pending);
reset_ctx();
g->ops.mc.isr_stall(g);
if (!u.ce_isr) {
unit_return_fail(m, "ISR not called\n");
}
	/*
	 * for branch coverage set LTC intr in main intr reg, but not ltc
	 * intr reg
@@ -669,18 +614,6 @@ int test_isr_nonstall(struct unit_module *m, struct gk20a *g, void *args)
g->ops.mc.isr_nonstall(g); g->ops.mc.isr_nonstall(g);
g->ops.ce.isr_nonstall = mock_ce_nonstall_isr; g->ops.ce.isr_nonstall = mock_ce_nonstall_isr;
/* for branch coverage set CE engine to the opposite type */
switch_ce_engine_type(&g->fifo.engine_info[1]);
for (i = 0; i < NUM_MC_UNITS; i++) {
intrs_pending |= mc_units[i].bit;
}
nvgpu_posix_io_writel_reg_space(g, NONSTALL_PENDING_REG, intrs_pending);
reset_ctx();
g->ops.mc.isr_nonstall(g);
if (!u.ce_isr) {
unit_return_fail(m, "ISR not called\n");
}
return UNIT_SUCCESS; return UNIT_SUCCESS;
} }


@@ -76,11 +76,6 @@ int test_top_setup(struct unit_module *m, struct gk20a *g, void *args)
/* Init HAL */ /* Init HAL */
g->ops.top.device_info_parse_enum = gm20b_device_info_parse_enum; g->ops.top.device_info_parse_enum = gm20b_device_info_parse_enum;
g->ops.top.device_info_parse_data = gv11b_device_info_parse_data; g->ops.top.device_info_parse_data = gv11b_device_info_parse_data;
g->ops.top.get_num_engine_type_entries =
gp10b_get_num_engine_type_entries;
g->ops.top.get_device_info = gp10b_get_device_info;
g->ops.top.is_engine_gr = gm20b_is_engine_gr;
g->ops.top.is_engine_ce = gp10b_is_engine_ce;
g->ops.top.get_max_gpc_count = gm20b_top_get_max_gpc_count; g->ops.top.get_max_gpc_count = gm20b_top_get_max_gpc_count;
g->ops.top.get_max_tpc_per_gpc_count = g->ops.top.get_max_tpc_per_gpc_count =
gm20b_top_get_max_tpc_per_gpc_count; gm20b_top_get_max_tpc_per_gpc_count;
@@ -235,28 +230,6 @@ int test_get_max_gpc_count(struct unit_module *m, struct gk20a *g,
return ret; return ret;
} }
int test_is_engine_gr(struct unit_module *m, struct gk20a *g, void *args)
{
int ret = UNIT_SUCCESS;
bool val;
/* Set engine_type = 0 = graphics_enum */
val = g->ops.top.is_engine_gr(g, 0U);
if (!val) {
unit_err(m, "API to check if engine is GR is incorrect.\n");
ret = UNIT_FAIL;
}
/* Set engine_type = 1 != graphics_enum */
val = g->ops.top.is_engine_gr(g, 1U);
if (val) {
unit_err(m, "API to check if engine is GR is incorrect.\n");
ret = UNIT_FAIL;
}
return ret;
}
int test_get_max_tpc_per_gpc_count(struct unit_module *m, struct gk20a *g, int test_get_max_tpc_per_gpc_count(struct unit_module *m, struct gk20a *g,
void *args) void *args)
{ {
@@ -467,103 +440,6 @@ int test_device_info_parse_data(struct unit_module *m, struct gk20a *g,
return ret; return ret;
} }
int test_get_num_engine_type_entries(struct unit_module *m, struct gk20a *g,
void *args)
{
int ret = UNIT_SUCCESS;
int val = 0;
u32 engine_type = 19U;
/* The device_info table is setup during test_setup(). We directly call
* get_num_engine_type_entries HAL to parse number of copy engine
* related entries in the device_info table.
*/
val = g->ops.top.get_num_engine_type_entries(g, engine_type);
if (val != 2) {
unit_err(m, "top.get_num_engine_type_entries() failed.\n");
ret = UNIT_FAIL;
}
return ret;
}
int test_get_device_info(struct unit_module *m, struct gk20a *g, void *args)
{
int ret = UNIT_SUCCESS;
int val = 0;
struct nvgpu_device_info dev_info_1;
struct nvgpu_device_info *dev_info_2 = NULL;
u32 engine_type = 19U;
u32 inst_id = 3U;
/* The device_info table is setup during test_setup(). We directly call
* get_device_info HAL to parse copy engine related information from the
* device_info table.
*/
val = g->ops.top.get_device_info(g, &dev_info_1, engine_type, inst_id);
if (val != 0) {
unit_err(m, "Call to top.get_device_info() failed.\n");
ret = UNIT_FAIL;
}
/* Call HAL again to cover the error paths due to incorrect entry */
inst_id = 2U;
val = g->ops.top.get_device_info(g, &dev_info_1, engine_type, inst_id);
/* Verify if the retval is as expected */
if (val != -EINVAL) {
unit_err(m,
"get_device_info() failed to handle incorrect entry.\n");
ret = UNIT_FAIL;
}
/* Call top.get_device_info with NULL pointer to cover error path */
val = g->ops.top.get_device_info(g, dev_info_2, engine_type, inst_id);
/* Verify if the retval is as expected */
if (val != -EINVAL) {
unit_err(m,
"get_device_info() failed to handle NULL pointer.\n");
ret = UNIT_FAIL;
}
/* Call top.get_device_info with NULL function pointers */
g->ops.top.device_info_parse_enum = NULL;
g->ops.top.device_info_parse_data = NULL;
val = g->ops.top.get_device_info(g, &dev_info_1, engine_type, inst_id);
if (val != -EINVAL) {
unit_err(m,
"get_device_info() failed to handle NULL function pointers.\n");
ret = UNIT_FAIL;
}
return ret;
}
int test_is_engine_ce(struct unit_module *m, struct gk20a *g, void *args)
{
int ret = UNIT_SUCCESS;
bool val;
/* Set engine_type = 19 = copy engine enum */
val = g->ops.top.is_engine_ce(g, 19U);
if (!val) {
unit_err(m, "API to check if engine is CE is incorrect.\n");
ret = UNIT_FAIL;
}
/* Set engine_type = 0 != copy engine enum */
val = g->ops.top.is_engine_ce(g, 1U);
if (val) {
unit_err(m, "API to check if engine is CE is incorrect.\n");
ret = UNIT_FAIL;
}
return ret;
}
int test_get_num_lce(struct unit_module *m, struct gk20a *g, void *args) int test_get_num_lce(struct unit_module *m, struct gk20a *g, void *args)
{ {
int ret = UNIT_SUCCESS; int ret = UNIT_SUCCESS;
@@ -590,9 +466,6 @@ int test_get_num_lce(struct unit_module *m, struct gk20a *g, void *args)
struct unit_module_test top_tests[] = { struct unit_module_test top_tests[] = {
UNIT_TEST(top_setup, test_top_setup, NULL, 0), UNIT_TEST(top_setup, test_top_setup, NULL, 0),
UNIT_TEST(top_device_info_parse_enum,
test_device_info_parse_enum, NULL, 0),
UNIT_TEST(top_is_engine_gr, test_is_engine_gr, NULL, 0),
UNIT_TEST(top_get_max_gpc_count, test_get_max_gpc_count, NULL, 0), UNIT_TEST(top_get_max_gpc_count, test_get_max_gpc_count, NULL, 0),
UNIT_TEST(top_get_max_tpc_per_gpc_count, UNIT_TEST(top_get_max_tpc_per_gpc_count,
test_get_max_tpc_per_gpc_count, NULL, 0), test_get_max_tpc_per_gpc_count, NULL, 0),
@@ -604,10 +477,6 @@ struct unit_module_test top_tests[] = {
UNIT_TEST(top_get_num_ltcs, test_get_num_ltcs, NULL, 0), UNIT_TEST(top_get_num_ltcs, test_get_num_ltcs, NULL, 0),
UNIT_TEST(top_device_info_parse_data, UNIT_TEST(top_device_info_parse_data,
test_device_info_parse_data, NULL, 0), test_device_info_parse_data, NULL, 0),
UNIT_TEST(top_get_num_engine_type_entries,
test_get_num_engine_type_entries, NULL, 0),
UNIT_TEST(top_get_device_info, test_get_device_info, NULL, 0),
UNIT_TEST(top_is_engine_ce, test_is_engine_ce, NULL, 0),
UNIT_TEST(top_get_num_lce, test_get_num_lce, NULL, 0), UNIT_TEST(top_get_num_lce, test_get_num_lce, NULL, 0),
UNIT_TEST(top_free_reg_space, test_top_free_reg_space, NULL, 0), UNIT_TEST(top_free_reg_space, test_top_free_reg_space, NULL, 0),
}; };