gpu: nvgpu: Rework engine management to work with vGPU
Currently the vGPU engine management rewrites a lot of the common, device-agnostic engine management code. With the new top HAL parsing one device at a time, it is now much easier to tie the vGPU into the common device framework: implement the top HAL, but backed by the vGPU engine list. This lets the vGPU inherit all of the common engine and device management code, and the vGPU side only needs a trivial top HAL implementation.

This also gets us a step closer to merging all of the CE init code: logically it just iterates through all CE engines, whatever they may be. The only reason this differs between chips is the swap from CE0-2 to LCEs in the Pascal generation, which the unit code could abstract easily enough.

Also, the pbdma_id for each engine has to be added to the device struct. Eventually this was going to happen anyway, since the device struct will soon replace the nvgpu_engine_info struct. It is a bit of an abuse, but may be worth it long term. If not, it should not be difficult to replace uses of dev->pbdma_id with a proper lookup of the PBDMA ID based on the device info.

JIRA NVGPU-5421

Change-Id: Ie8dcd3b0150184d58ca0f78940c2e7ca72994e64
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2351877
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
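The last point above suggests a possible follow-up. As a minimal sketch only, mirroring how this change populates dev->pbdma_id (native lookup via the find_pbdma_for_runlist HAL, server-provided value on vGPU), a lookup-based replacement might look like the following; the helper name nvgpu_device_lookup_pbdma_id() is hypothetical and not part of this patch:

/*
 * Hypothetical sketch (not part of this change): derive the PBDMA ID for a
 * device on demand instead of caching it in struct nvgpu_device. Mirrors how
 * this patch fills in dev->pbdma_id for native chips and falls back to the
 * server-provided value for vGPU, where the PBDMA lookup HAL is not set.
 */
static int nvgpu_device_lookup_pbdma_id(struct gk20a *g,
					const struct nvgpu_device *dev,
					u32 *pbdma_id)
{
	if (g->ops.fifo.find_pbdma_for_runlist != NULL) {
		/* Native: map the device's runlist to the PBDMA serving it. */
		if (!g->ops.fifo.find_pbdma_for_runlist(g, dev->runlist_id,
							pbdma_id)) {
			nvgpu_err(g, "busted pbdma map");
			return -EINVAL;
		}
		return 0;
	}

	/* vGPU: the pbdma_id arrives with the rest of the device info. */
	*pbdma_id = dev->pbdma_id;
	return 0;
}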
@@ -22,14 +22,14 @@ all:
 common/vgpu/pm_reservation_vgpu.h,
 common/vgpu/ecc_vgpu.c,
 common/vgpu/ecc_vgpu.h,
+common/vgpu/top/top_vgpu.c,
+common/vgpu/top/top_vgpu.h,
 common/vgpu/fifo/fifo_vgpu.c,
 common/vgpu/fifo/fifo_vgpu.h,
 common/vgpu/fifo/channel_vgpu.c,
 common/vgpu/fifo/channel_vgpu.h,
 common/vgpu/fifo/tsg_vgpu.c,
 common/vgpu/fifo/tsg_vgpu.h,
-common/vgpu/fifo/engines_vgpu.c,
-common/vgpu/fifo/engines_vgpu.h,
 common/vgpu/fifo/preempt_vgpu.c,
 common/vgpu/fifo/preempt_vgpu.h,
 common/vgpu/fifo/ramfc_vgpu.c,
@@ -553,10 +553,10 @@ nvgpu-$(CONFIG_NVGPU_GR_VIRTUALIZATION) += \
 	common/vgpu/gr/gr_vgpu.o \
 	common/vgpu/gr/ctx_vgpu.o \
 	common/vgpu/gr/subctx_vgpu.o \
+	common/vgpu/top/top_vgpu.o \
 	common/vgpu/fifo/fifo_vgpu.o \
 	common/vgpu/fifo/channel_vgpu.o \
 	common/vgpu/fifo/tsg_vgpu.o \
-	common/vgpu/fifo/engines_vgpu.o \
 	common/vgpu/fifo/preempt_vgpu.o \
 	common/vgpu/fifo/runlist_vgpu.o \
 	common/vgpu/fifo/ramfc_vgpu.o \
@@ -535,10 +535,10 @@ srcs += common/vgpu/init/init_vgpu.c \
 	common/vgpu/ivc/comm_vgpu.c \
 	common/vgpu/intr/intr_vgpu.c \
 	common/vgpu/ptimer/ptimer_vgpu.c \
+	common/vgpu/top/top_vgpu.c \
 	common/vgpu/fifo/fifo_vgpu.c \
 	common/vgpu/fifo/channel_vgpu.c \
 	common/vgpu/fifo/tsg_vgpu.c \
-	common/vgpu/fifo/engines_vgpu.c \
 	common/vgpu/fifo/preempt_vgpu.c \
 	common/vgpu/fifo/runlist_vgpu.c \
 	common/vgpu/fifo/ramfc_vgpu.c \
@@ -2361,8 +2361,7 @@ int nvgpu_channel_deferred_reset_engines(struct gk20a *g,

	tsg = nvgpu_tsg_from_ch(ch);
	if (tsg != NULL) {
-		engines = nvgpu_engine_get_mask_on_id(g,
-				tsg->tsgid, true);
+		engines = nvgpu_engine_get_mask_on_id(g, tsg->tsgid, true);
	} else {
		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
	}
@@ -516,7 +516,7 @@ int nvgpu_engine_setup_sw(struct gk20a *g)
	}
	(void) memset(f->active_engines_list, 0xff, size);

-	err = g->ops.engine.init_info(f);
+	err = nvgpu_engine_init_info(f);
	if (err != 0) {
		nvgpu_err(g, "init engine info failed");
		goto clean_up;
@@ -795,60 +795,94 @@ u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg)
	return engines;
 }

+static int nvgpu_engine_init_from_device_info(struct gk20a *g,
+		struct nvgpu_engine_info *info,
+		const struct nvgpu_device *dev)
+{
+	bool found;
+	struct nvgpu_device *dev_rw = (struct nvgpu_device *)dev;
+
+	info->engine_id = dev->engine_id;
+	info->intr_mask |= BIT32(dev->intr_id);
+	info->reset_mask |= BIT32(dev->reset_id);
+	info->runlist_id = dev->runlist_id;
+	info->inst_id = dev->inst_id;
+	info->pri_base = dev->pri_base;
+	info->engine_enum = nvgpu_engine_enum_from_dev(g, dev);
+	info->fault_id = dev->fault_id;
+
+	/*
+	 * Populate the PBDMA info for this device; ideally it'd be done during
+	 * device init, but the FIFO unit is not out of reset that early in the
+	 * nvgpu_finalize_poweron() sequence.
+	 *
+	 * We only need to do this for native; vGPU already has pbdma_id populated
+	 * during device initialization.
+	 */
+	if (g->ops.fifo.find_pbdma_for_runlist != NULL) {
+		found = g->ops.fifo.find_pbdma_for_runlist(g,
+				dev->runlist_id,
+				&dev_rw->pbdma_id);
+		if (!found) {
+			nvgpu_err(g, "busted pbdma map");
+			return -EINVAL;
+		}
+	}
+	info->pbdma_id = dev->pbdma_id;
+
+#if defined(CONFIG_NVGPU_NEXT)
+	return nvgpu_next_engine_init_from_device_info(g, info, dev);
+#else
+	return 0;
+#endif
+}
+
+static int nvgpu_engine_populate_gr_info(struct nvgpu_fifo *f,
+		u32 gr_inst)
+{
+	struct gk20a *g = f->g;
+	const struct nvgpu_device *dev;
+	int ret;
+
+	dev = nvgpu_device_get(g, NVGPU_DEVTYPE_GRAPHICS, gr_inst);
+	if (dev == NULL) {
+		nvgpu_err(g, "Failed to get graphics engine inst: %d", gr_inst);
+		return -EINVAL;
+	}
+
+	ret = nvgpu_engine_init_from_device_info(g,
+			&g->fifo.engine_info[dev->engine_id],
+			dev);
+	if (ret != 0) {
+		nvgpu_err(g, "Failed to init engine_info for engine_id: %d",
+				dev->engine_id);
+		return -EINVAL;
+	}
+
+	/* engine_id starts from 0 to NV_HOST_NUM_ENGINES */
+	f->active_engines_list[f->num_engines] = dev->engine_id;
+	f->num_engines = nvgpu_safe_add_u32(f->num_engines, 1U);
+
+	return 0;
+}
+
 int nvgpu_engine_init_info(struct nvgpu_fifo *f)
 {
	struct gk20a *g = f->g;
	int ret = 0;
-	enum nvgpu_fifo_engine engine_enum;
-	u32 pbdma_mask = 0U;
-	bool found = false;
-	struct nvgpu_engine_info *info;
-	const struct nvgpu_device *dev;
+	u32 gr_inst;

	f->num_engines = 0;

-	dev = nvgpu_device_get(g, NVGPU_DEVTYPE_GRAPHICS, 0);
-	if (dev == NULL) {
-		nvgpu_err(g, "Failed to get graphics engine %d", 0);
-		return -EINVAL;
-	}
-
-	found = g->ops.fifo.find_pbdma_for_runlist(g,
-			dev->runlist_id,
-			&pbdma_mask);
-	if (!found) {
-		nvgpu_err(g, "busted pbdma map");
-		return -EINVAL;
-	}
-
-	engine_enum = nvgpu_engine_enum_from_dev(g, dev);
-
-	info = &g->fifo.engine_info[dev->engine_id];
-
-	info->intr_mask |= BIT32(dev->intr_id);
-	info->reset_mask |= BIT32(dev->reset_id);
-	info->runlist_id = dev->runlist_id;
-	info->pbdma_id = nvgpu_safe_sub_u32(
-		nvgpu_safe_cast_u64_to_u32(nvgpu_ffs(pbdma_mask)), 1U);
-	info->inst_id = dev->inst_id;
-	info->pri_base = dev->pri_base;
-	info->engine_enum = engine_enum;
-	info->fault_id = dev->fault_id;
-
-	/* engine_id starts from 0 to NV_HOST_NUM_ENGINES */
-	f->active_engines_list[f->num_engines] = dev->engine_id;
-	++f->num_engines;
-	nvgpu_log_info(g,
-		"gr info: engine_id %d runlist_id %d intr_id %d "
-		"reset_id %d engine_type %d engine_enum %d inst_id %d",
-		dev->engine_id,
-		dev->runlist_id,
-		dev->intr_id,
-		dev->reset_id,
-		dev->type,
-		engine_enum,
-		dev->inst_id);
+	for (gr_inst = 0U;
+	     gr_inst < nvgpu_device_count(g, NVGPU_DEVTYPE_GRAPHICS);
+	     gr_inst++) {
+		ret = nvgpu_engine_populate_gr_info(f, gr_inst);
+		if (ret != 0) {
+			return ret;
+		}
+	}

	ret = g->ops.engine.init_ce_info(f);

	return ret;
@@ -250,8 +250,7 @@ void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct nvgpu_tsg *tsg,
		nvgpu_err(g, "failed to disable ctxsw");
	} else {
		/* recover engines if tsg is loaded on the engines */
-		eng_bitmask = nvgpu_engine_get_mask_on_id(g,
-				tsg->tsgid, true);
+		eng_bitmask = nvgpu_engine_get_mask_on_id(g, tsg->tsgid, true);

		/*
		 * it is ok to enable ctxsw before tsg is recovered. If engines
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <nvgpu/gk20a.h>
-#include <nvgpu/engines.h>
-#include <nvgpu/fifo.h>
-
-#include <nvgpu/vgpu/vgpu.h>
-
-#include "engines_vgpu.h"
-
-int vgpu_engine_init_info(struct nvgpu_fifo *f)
-{
-	struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g);
-	struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
-	u32 i;
-	struct gk20a *g = f->g;
-
-	nvgpu_log_fn(g, " ");
-
-	if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) {
-		nvgpu_err(f->g, "num_engines %d larger than max %d",
-			engines->num_engines, TEGRA_VGPU_MAX_ENGINES);
-		return -EINVAL;
-	}
-
-	f->num_engines = engines->num_engines;
-	for (i = 0; i < f->num_engines; i++) {
-		struct nvgpu_engine_info *info =
-			&f->engine_info[engines->info[i].engine_id];
-
-		if (engines->info[i].engine_id >= f->max_engines) {
-			nvgpu_err(f->g, "engine id %d larger than max %d",
-				engines->info[i].engine_id,
-				f->max_engines);
-			return -EINVAL;
-		}
-
-		info->intr_mask = engines->info[i].intr_mask;
-		info->reset_mask = engines->info[i].reset_mask;
-		info->runlist_id = engines->info[i].runlist_id;
-		info->pbdma_id = engines->info[i].pbdma_id;
-		info->inst_id = engines->info[i].inst_id;
-		info->pri_base = engines->info[i].pri_base;
-		info->engine_enum = engines->info[i].engine_enum;
-		info->fault_id = engines->info[i].fault_id;
-		f->active_engines_list[i] = engines->info[i].engine_id;
-	}
-
-	nvgpu_log_fn(g, "done");
-
-	return 0;
-}
@@ -36,6 +36,7 @@
 #include <nvgpu/cyclestats_snapshot.h>
 #include <nvgpu/gr/gr.h>
 #include <nvgpu/nvgpu_init.h>
+#include <nvgpu/device.h>

 #include "init_vgpu.h"
 #include "hal/vgpu/init/init_hal_vgpu.h"
@@ -172,6 +173,12 @@ int vgpu_finalize_poweron_common(struct gk20a *g)
		return err;
	}

+	err = nvgpu_device_init(g);
+	if (err != 0) {
+		nvgpu_err(g, "failed to init devices");
+		return err;
+	}
+
	err = nvgpu_init_ltc_support(g);
	if (err != 0) {
		nvgpu_err(g, "failed to init ltc");
drivers/gpu/nvgpu/common/vgpu/top/top_vgpu.c (new file, 92 lines)
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <nvgpu/kmem.h>
+#include <nvgpu/device.h>
+#include <nvgpu/engines.h>
+
+#include <nvgpu/vgpu/vgpu.h>
+
+#include "top_vgpu.h"
+
+/*
+ * Similar to how the real HW version works, just read a device out of the vGPU
+ * device list one at a time. The core device management code will manage the
+ * actual device lists for us.
+ */
+struct nvgpu_device *vgpu_top_parse_next_dev(struct gk20a *g, u32 *token)
+{
+	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
+	struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
+	struct nvgpu_device *dev;
+
+	/*
+	 * Check to see if we are done parsing engines.
+	 */
+	if (*token >= engines->num_engines) {
+		return NULL;
+	}
+
+	dev = nvgpu_kzalloc(g, sizeof(*dev));
+	if (!dev) {
+		return NULL;
+	}
+
+	/*
+	 * Copy the engine data into the device and return it to our caller.
+	 */
+	dev->engine_id = engines->info[*token].engine_id;
+	dev->intr_id = nvgpu_ffs(engines->info[*token].intr_mask) - 1;
+	dev->reset_id = nvgpu_ffs(engines->info[*token].reset_mask) - 1;
+	dev->runlist_id = engines->info[*token].runlist_id;
+	dev->pbdma_id = engines->info[*token].pbdma_id;
+	dev->inst_id = engines->info[*token].inst_id;
+	dev->pri_base = engines->info[*token].pri_base;
+	dev->fault_id = engines->info[*token].fault_id;
+
+	/*
+	 * vGPU sends us an engine enum; this'll be fixed once we remove
+	 * the engine_info struct. For now just do a quick reverse map.
+	 *
+	 * GRCEs and ASYNC_CEs are both LCEs in terms of engine types.
+	 */
+	switch (engines->info[*token].engine_enum) {
+	case NVGPU_ENGINE_GR:
+		dev->type = NVGPU_DEVTYPE_GRAPHICS;
+		break;
+	case NVGPU_ENGINE_GRCE:
+		dev->type = NVGPU_DEVTYPE_LCE;
+		break;
+	case NVGPU_ENGINE_ASYNC_CE:
+		dev->type = NVGPU_DEVTYPE_LCE;
+		break;
+	default:
+		nvgpu_err(g, "Unknown engine_enum: %d",
+			engines->info[*token].engine_enum);
+		nvgpu_assert(true);
+		break;
+	}
+
+	(*token)++;
+
+	return dev;
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,9 +20,14 @@
  * DEALINGS IN THE SOFTWARE.
  */

-#ifndef NVGPU_ENGINES_VGPU_H
-#define NVGPU_ENGINES_VGPU_H
+#ifndef COMMON_VGPU_TOP_VGPU_H
+#define COMMON_VGPU_TOP_VGPU_H

-int vgpu_engine_init_info(struct nvgpu_fifo *f);
+#include <nvgpu/types.h>
+
+struct gk20a;
+struct nvgpu_device;
+
+struct nvgpu_device *vgpu_top_parse_next_dev(struct gk20a *g, u32 *token);

 #endif
@@ -37,7 +37,6 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
	enum nvgpu_fifo_engine engine_enum;
	u32 i;
	u32 gr_runlist_id;
-	u32 pbdma_mask = 0U;
	u32 lce_num_entries = 0;
	bool found;

@@ -49,6 +48,7 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)

	for (i = 0; i < lce_num_entries; i++) {
		const struct nvgpu_device *dev;
+		struct nvgpu_device *dev_rw;
		struct nvgpu_engine_info *info;

		dev = nvgpu_device_get(g, NVGPU_DEVTYPE_LCE, i);
@@ -56,17 +56,26 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
			nvgpu_err(g, "Failed to get LCE device %u", i);
			return -EINVAL;
		}
+		dev_rw = (struct nvgpu_device *)dev;

-		found = g->ops.fifo.find_pbdma_for_runlist(g,
-				dev->runlist_id,
-				&pbdma_mask);
-		if (!found) {
-			nvgpu_err(g, "busted pbdma map");
-			return -EINVAL;
-		}
-
		info = &g->fifo.engine_info[dev->engine_id];

+		/*
+		 * vGPU consideration. Not present in older chips. See
+		 * nvgpu_engine_init_from_device_info() for more details in the
+		 * comments.
+		 */
+		if (g->ops.fifo.find_pbdma_for_runlist != NULL) {
+			found = g->ops.fifo.find_pbdma_for_runlist(g,
+					dev->runlist_id,
+					&dev_rw->pbdma_id);
+			if (!found) {
+				nvgpu_err(g, "busted pbdma map");
+				return -EINVAL;
+			}
+		}
+		info->pbdma_id = dev->pbdma_id;
+
		engine_enum = nvgpu_engine_enum_from_dev(g, dev);
		/* GR and GR_COPY shares same runlist_id */
		if ((engine_enum == NVGPU_ENGINE_ASYNC_CE) &&
@@ -79,8 +88,6 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
		info->intr_mask |= BIT32(dev->intr_id);
		info->reset_mask |= BIT32(dev->reset_id);
		info->runlist_id = dev->runlist_id;
-		info->pbdma_id = nvgpu_safe_sub_u32(
-			nvgpu_safe_cast_u64_to_u32(nvgpu_ffs(pbdma_mask)), 1U);
		info->inst_id = dev->inst_id;
		info->pri_base = dev->pri_base;
		info->engine_id = dev->engine_id;
@@ -28,7 +28,6 @@
 #include <nvgpu/pm_reservation.h>
 #include <nvgpu/runlist.h>
 #include <nvgpu/pbdma.h>
-#include <nvgpu/engines.h>
 #include <nvgpu/perfbuf.h>
 #include <nvgpu/cyclestats_snapshot.h>
 #include <nvgpu/fifo/userd.h>
@@ -680,7 +679,6 @@ static const struct gpu_ops gm20b_ops = {
	},
	.engine = {
		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
-		.init_info = nvgpu_engine_init_info,
		.init_ce_info = gm20b_engine_init_ce_info,
	},
	.pbdma = {
@@ -27,7 +27,6 @@
 #include <nvgpu/debugger.h>
 #include <nvgpu/pm_reservation.h>
 #include <nvgpu/pbdma.h>
-#include <nvgpu/engines.h>
 #include <nvgpu/runlist.h>
 #include <nvgpu/fifo/userd.h>
 #include <nvgpu/perfbuf.h>
@@ -768,7 +767,6 @@ static const struct gpu_ops gp10b_ops = {
	},
	.engine = {
		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
-		.init_info = nvgpu_engine_init_info,
		.init_ce_info = gp10b_engine_init_ce_info,
	},
	.pbdma = {
@@ -34,7 +34,6 @@
 #include <nvgpu/clk_arb.h>
 #include <nvgpu/fuse.h>
 #include <nvgpu/pbdma.h>
-#include <nvgpu/engines.h>
 #include <nvgpu/preempt.h>
 #include <nvgpu/regops.h>
 #include <nvgpu/gr/gr_falcon.h>
@@ -980,7 +979,6 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
	},
	.engine = {
		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
-		.init_info = nvgpu_engine_init_info,
		.init_ce_info = gp10b_engine_init_ce_info,
	},
	.pbdma = {
@@ -221,7 +221,6 @@
 #include <nvgpu/debugger.h>
 #include <nvgpu/pm_reservation.h>
 #include <nvgpu/pbdma.h>
-#include <nvgpu/engines.h>
 #include <nvgpu/runlist.h>
 #include <nvgpu/fifo/userd.h>
 #include <nvgpu/perfbuf.h>
@@ -1012,7 +1011,6 @@ static const struct gpu_ops tu104_ops = {
	},
	.engine = {
		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
-		.init_info = nvgpu_engine_init_info,
		.init_ce_info = gp10b_engine_init_ce_info,
	},
	.pbdma = {
@@ -62,8 +62,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 eng_bitmask,
	nvgpu_runlist_lock_active_runlists(g);

	if (id_is_known) {
-		engine_ids = nvgpu_engine_get_mask_on_id(g,
-				hw_id, id_is_tsg);
+		engine_ids = nvgpu_engine_get_mask_on_id(g, hw_id, id_is_tsg);
		ref_id = hw_id;
		ref_type = id_is_tsg ?
			fifo_engine_status_id_type_tsgid_v() :
@@ -71,7 +71,7 @@ void gm20b_device_info_parse_enum(struct gk20a *g, u32 table_entry,
 }

 /*
- * Parse the device starting at *i. This will return a valid device struct
+ * Parse the device starting at *token. This will return a valid device struct
  * pointer if a device was detected and parsed, NULL otherwise.
  */
 struct nvgpu_device *gm20b_top_parse_next_dev(struct gk20a *g, u32 *token)
@@ -54,6 +54,7 @@
 #include "hal/class/class_gp10b.h"
 #include "hal/fifo/fifo_gk20a.h"
 #include "hal/fifo/engines_gm20b.h"
+#include "hal/fifo/engines_gp10b.h"
 #include "hal/fifo/pbdma_gm20b.h"
 #include "hal/fifo/pbdma_gp10b.h"
 #include "hal/fifo/ramin_gk20a.h"
@@ -92,10 +93,10 @@

 #include "common/vgpu/init/init_vgpu.h"
 #include "common/vgpu/fb/fb_vgpu.h"
+#include "common/vgpu/top/top_vgpu.h"
 #include "common/vgpu/fifo/fifo_vgpu.h"
 #include "common/vgpu/fifo/channel_vgpu.h"
 #include "common/vgpu/fifo/tsg_vgpu.h"
-#include "common/vgpu/fifo/engines_vgpu.h"
 #include "common/vgpu/fifo/preempt_vgpu.h"
 #include "common/vgpu/fifo/runlist_vgpu.h"
 #include "common/vgpu/fifo/ramfc_vgpu.h"
@@ -497,7 +498,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
	},
	.engine = {
		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
-		.init_info = vgpu_engine_init_info,
+		.init_ce_info = gp10b_engine_init_ce_info,
	},
	.pbdma = {
		.setup_sw = NULL,
@@ -852,6 +853,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
		.get_max_fbps_count = vgpu_gr_get_max_fbps_count,
		.get_max_ltc_per_fbp = vgpu_gr_get_max_ltc_per_fbp,
		.get_max_lts_per_ltc = vgpu_gr_get_max_lts_per_ltc,
+		.parse_next_device = vgpu_top_parse_next_dev,
	},
	.chip_init_gpu_characteristics = vgpu_init_gpu_characteristics,
	.get_litter_value = gp10b_get_litter_value,
@@ -33,6 +33,7 @@
 #include "hal/class/class_gv11b.h"
 #include "hal/fifo/fifo_gv11b.h"
 #include "hal/fifo/preempt_gv11b.h"
+#include "hal/fifo/engines_gp10b.h"
 #include "hal/fifo/engines_gv11b.h"
 #include "hal/fifo/pbdma_gm20b.h"
 #include "hal/fifo/pbdma_gp10b.h"
@@ -110,10 +111,10 @@

 #include "common/vgpu/init/init_vgpu.h"
 #include "common/vgpu/fb/fb_vgpu.h"
+#include "common/vgpu/top/top_vgpu.h"
 #include "common/vgpu/fifo/fifo_vgpu.h"
 #include "common/vgpu/fifo/channel_vgpu.h"
 #include "common/vgpu/fifo/tsg_vgpu.h"
-#include "common/vgpu/fifo/engines_vgpu.h"
 #include "common/vgpu/fifo/preempt_vgpu.h"
 #include "common/vgpu/fifo/runlist_vgpu.h"
 #include "common/vgpu/fifo/ramfc_vgpu.h"
@@ -616,7 +617,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
	},
	.engine = {
		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
-		.init_info = vgpu_engine_init_info,
+		.init_ce_info = gp10b_engine_init_ce_info,
	},
	.pbdma = {
		.setup_sw = NULL,
@@ -977,6 +978,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
		.get_max_fbps_count = vgpu_gr_get_max_fbps_count,
		.get_max_ltc_per_fbp = vgpu_gr_get_max_ltc_per_fbp,
		.get_max_lts_per_ltc = vgpu_gr_get_max_lts_per_ltc,
+		.parse_next_device = vgpu_top_parse_next_dev,
	},
	.chip_init_gpu_characteristics = vgpu_gv11b_init_gpu_characteristics,
	.get_litter_value = gv11b_get_litter_value,
@@ -141,6 +141,17 @@ struct nvgpu_device {
	 */
	u32 reset_id;

+	/**
+	 * PBDMA ID for this device. Technically not part of the dev_top array,
+	 * but it's computable from various registers when the other device info
+	 * is read.
+	 *
+	 * This also makes the vGPU support a little easier as this field gets
+	 * passed to the vGPU client in the same data structure as the rest of the
+	 * device info.
+	 */
+	u32 pbdma_id;
+
	/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 #if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
	/* nvgpu next device info additions */
@@ -76,7 +76,6 @@ struct gops_engine {

	bool (*is_fault_engine_subid_gpc)(struct gk20a *g,
			u32 engine_subid);
-	int (*init_info)(struct nvgpu_fifo *f);
	int (*init_ce_info)(struct nvgpu_fifo *f);
 };
 /** @endcond DOXYGEN_SHOULD_SKIP_THIS */
@@ -81,20 +81,8 @@ static void subtest_setup(u32 branches)

 #define F_ENGINE_SETUP_SW_ENGINE_INFO_ENOMEM BIT(0)
 #define F_ENGINE_SETUP_SW_ENGINE_LIST_ENOMEM BIT(1)
-#define F_ENGINE_SETUP_SW_INIT_INFO_FAIL BIT(2)
 #define F_ENGINE_SETUP_SW_LAST BIT(3)

-
-static int stub_engine_init_info_EINVAL(struct nvgpu_fifo *f)
-{
-	return -EINVAL;
-}
-
-static int stub_engine_init_info(struct nvgpu_fifo *f)
-{
-	return 0;
-}
-
 int test_engine_setup_sw(struct unit_module *m,
		struct gk20a *g, void *args)
 {
@@ -105,12 +93,10 @@ int test_engine_setup_sw(struct unit_module *m,
	int ret = UNIT_FAIL;
	int err;
	u32 fail = F_ENGINE_SETUP_SW_ENGINE_INFO_ENOMEM |
-		F_ENGINE_SETUP_SW_ENGINE_LIST_ENOMEM |
-		F_ENGINE_SETUP_SW_INIT_INFO_FAIL;
+		F_ENGINE_SETUP_SW_ENGINE_LIST_ENOMEM;
	const char *labels[] = {
		"engine_info_nomem",
		"engine_list_nomem",
-		"init_info_fail",
	};
	u32 prune = fail;

@@ -142,10 +128,6 @@ int test_engine_setup_sw(struct unit_module *m,
		nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);
	}

-	g->ops.engine.init_info =
-		branches & F_ENGINE_SETUP_SW_INIT_INFO_FAIL ?
-		stub_engine_init_info_EINVAL : stub_engine_init_info;
-
	err = nvgpu_engine_setup_sw(g);

	if (branches & fail) {
@@ -228,11 +228,6 @@ static const char *f_fifo_init[] = {
	"fifo setup hw fail",
 };

-static int stub_nvgpu_engine_init_info(struct nvgpu_fifo *f)
-{
-	return 0;
-}
-
 static int stub_init_fifo_setup_hw_fail(struct gk20a *g)
 {
	return -1;
@@ -299,15 +294,6 @@ int test_init_support(struct unit_module *m, struct gk20a *g, void *args)
		(branches & F_FIFO_SETUP_SW_PBDMA_NULL) ?
		NULL : gops.pbdma.setup_sw;

-	/*
-	 * Replace engine init_info with stub when
-	 * PBDMA setup_sw is NULL
-	 */
-	g->ops.engine.init_info =
-		(branches & F_FIFO_SETUP_SW_PBDMA_NULL) ?
-		stub_nvgpu_engine_init_info :
-		gops.engine.init_info;
-
	g->ops.pbdma.cleanup_sw =
		(branches & (F_FIFO_CLEANUP_SW_PBDMA_NULL |
			F_FIFO_SETUP_SW_PBDMA_NULL)) ?