mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Remove fifo->pbdma_map
The FIFO pbdma map is an array of bit maps that link PBDMAs to runlists. This array allows other software to query which PBDMA(s) serve a given runlist. The PBDMA map is read verbatim from an array of host registers and stored in a kmalloc()'ed array.

This causes a problem for the device management code: device management initialization executes well before the rest of the FIFO PBDMA initialization, so if the device management code queries the PBDMA mapping for a given device/runlist, the mapping has not yet been populated.

In the next patches in this series the engine management code is subsumed into the device management code. In other words, the device struct is reused by the engine management and all host SW does is pull pointers to the host-managed devices from the device manager. This means that all engine initialization that used to be done on top of the device management needs to move to the device code.

So, long story short, the PBDMA map needs to be read directly from the registers instead of from an array that gets allocated long after the device code has run. This patch removes the pbdma_map array, deletes the two HALs that managed it, and instead provides a new HAL to query the map directly from the registers so that the device code can use it.

JIRA NVGPU-5421

Change-Id: I5966d440903faee640e3b41494d2caf4cd177b6d
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2361134
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
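For orientation before the diff: the replacement HAL returns a bitmask of all PBDMAs serving a runlist rather than a single cached id, and call sites that still want one PBDMA id take the lowest set bit of that mask. A minimal sketch of the call-site pattern, assembled from the hunks below (not a complete function; error handling abbreviated):

	u32 pbdma_mask = 0U;

	/* Reads the fifo_pbdma_map_r(id) registers directly; returns true
	 * if any PBDMA serves this runlist. */
	if (!g->ops.fifo.find_pbdma_for_runlist(g, dev->runlist_id,
			&pbdma_mask)) {
		nvgpu_err(g, "busted pbdma map");
		return -EINVAL;
	}

	/* nvgpu_ffs() is 1-based, so subtract 1 to get the id of the
	 * lowest-numbered PBDMA in the mask. */
	info->pbdma_id = nvgpu_safe_sub_u32(
			nvgpu_safe_cast_u64_to_u32(nvgpu_ffs(pbdma_mask)), 1U);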
@@ -800,8 +800,8 @@ int nvgpu_engine_init_info(struct nvgpu_fifo *f)
 	struct gk20a *g = f->g;
 	int ret = 0;
 	enum nvgpu_fifo_engine engine_enum;
-	u32 pbdma_id = U32_MAX;
-	bool found_pbdma_for_runlist = false;
+	u32 pbdma_mask = 0U;
+	bool found = false;
 	struct nvgpu_engine_info *info;
 	const struct nvgpu_device *dev;
 
@@ -813,10 +813,10 @@ int nvgpu_engine_init_info(struct nvgpu_fifo *f)
 		return -EINVAL;
 	}
 
-	found_pbdma_for_runlist = g->ops.pbdma.find_for_runlist(g,
+	found = g->ops.fifo.find_pbdma_for_runlist(g,
 			dev->runlist_id,
-			&pbdma_id);
-	if (!found_pbdma_for_runlist) {
+			&pbdma_mask);
+	if (!found) {
 		nvgpu_err(g, "busted pbdma map");
 		return -EINVAL;
 	}
@@ -828,7 +828,8 @@ int nvgpu_engine_init_info(struct nvgpu_fifo *f)
 	info->intr_mask |= BIT32(dev->intr_id);
 	info->reset_mask |= BIT32(dev->reset_id);
 	info->runlist_id = dev->runlist_id;
-	info->pbdma_id = pbdma_id;
+	info->pbdma_id = nvgpu_safe_sub_u32(
+		nvgpu_safe_cast_u64_to_u32(nvgpu_ffs(pbdma_mask)), 1U);
 	info->inst_id = dev->inst_id;
 	info->pri_base = dev->pri_base;
 	info->engine_enum = engine_enum;
@@ -23,27 +23,6 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/pbdma.h>
 
-bool nvgpu_pbdma_find_for_runlist(struct gk20a *g,
-		u32 runlist_id, u32 *pbdma_id)
-{
-	struct nvgpu_fifo *f = &g->fifo;
-	bool found_pbdma_for_runlist = false;
-	u32 runlist_bit;
-	u32 id = U32_MAX;
-
-	runlist_bit = BIT32(runlist_id);
-	for (id = 0U; id < f->num_pbdma; id++) {
-		if ((f->pbdma_map[id] & runlist_bit) != 0U) {
-			nvgpu_log_info(g, "gr info: pbdma_map[%d]=%d",
-				id, f->pbdma_map[id]);
-			found_pbdma_for_runlist = true;
-			break;
-		}
-	}
-	*pbdma_id = id;
-	return found_pbdma_for_runlist;
-}
-
 static void nvgpu_pbdma_init_intr_descs(struct gk20a *g)
 {
 	struct nvgpu_fifo *f = &g->fifo;
@@ -65,21 +44,6 @@ static void nvgpu_pbdma_init_intr_descs(struct gk20a *g)
 
 int nvgpu_pbdma_setup_sw(struct gk20a *g)
 {
-	struct nvgpu_fifo *f = &g->fifo;
-
-	f->num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
-	f->pbdma_map = NULL;
-
-	if (g->ops.fifo.init_pbdma_map != NULL) {
-		f->pbdma_map = nvgpu_kzalloc(g,
-				f->num_pbdma * sizeof(*f->pbdma_map));
-		if (f->pbdma_map == NULL) {
-			return -ENOMEM;
-		}
-
-		g->ops.fifo.init_pbdma_map(g, f->pbdma_map, f->num_pbdma);
-	}
-
 	nvgpu_pbdma_init_intr_descs(g);
 
 	return 0;
@@ -87,10 +51,5 @@ int nvgpu_pbdma_setup_sw(struct gk20a *g)
 
 void nvgpu_pbdma_cleanup_sw(struct gk20a *g)
 {
-	struct nvgpu_fifo *f = &g->fifo;
-
-	if (f->pbdma_map != NULL) {
-		nvgpu_kfree(g, f->pbdma_map);
-		f->pbdma_map = NULL;
-	}
+	return;
 }
@@ -133,7 +133,8 @@ void nvgpu_preempt_poll_tsg_on_pbdma(struct gk20a *g,
 	runlist_id = tsg->runlist_id;
 	runlist_served_pbdmas = f->runlist_info[runlist_id]->pbdma_bitmask;
 
-	for_each_set_bit(pbdma_id_bit, &runlist_served_pbdmas, f->num_pbdma) {
+	for_each_set_bit(pbdma_id_bit, &runlist_served_pbdmas,
+			nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA)) {
 		pbdma_id = U32(pbdma_id_bit);
 		/*
 		 * If pbdma preempt fails the only option is to reset
@@ -682,7 +682,7 @@ void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
 {
 	struct nvgpu_runlist_info *runlist;
 	struct nvgpu_engine_info *engine_info;
-	u32 i, engine_id, pbdma_id, j;
+	u32 i, engine_id, j;
 
 	nvgpu_log_fn(g, " ");
 
@@ -693,12 +693,9 @@ void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
 	for (i = 0; i < f->num_runlists; i++) {
 		runlist = &f->active_runlist_info[i];
 
-		for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
-			if ((f->pbdma_map[pbdma_id] &
-				BIT32(runlist->runlist_id)) != 0U) {
-				runlist->pbdma_bitmask |= BIT32(pbdma_id);
-			}
-		}
+		(void) g->ops.fifo.find_pbdma_for_runlist(g,
+				runlist->runlist_id,
+				&runlist->pbdma_bitmask);
 		nvgpu_log(g, gpu_dbg_info, "runlist %d: pbdma bitmask 0x%x",
 			runlist->runlist_id, runlist->pbdma_bitmask);
 
@@ -40,9 +40,9 @@ int gm20b_engine_init_ce_info(struct nvgpu_fifo *f)
 	struct gk20a *g = f->g;
 	u32 i;
 	enum nvgpu_fifo_engine engine_enum;
-	u32 pbdma_id = U32_MAX;
+	u32 pbdma_mask = 0U;
 	u32 gr_runlist_id;
-	bool found_pbdma_for_runlist = false;
+	bool found;
 
 	gr_runlist_id = nvgpu_engine_get_gr_runlist_id(g);
 	nvgpu_log_info(g, "gr_runlist_id: %d", gr_runlist_id);
@@ -63,10 +63,10 @@ int gm20b_engine_init_ce_info(struct nvgpu_fifo *f)
 		continue;
 	}
 
-	found_pbdma_for_runlist = g->ops.pbdma.find_for_runlist(g,
+	found = g->ops.fifo.find_pbdma_for_runlist(g,
 			dev->runlist_id,
-			&pbdma_id);
-	if (!found_pbdma_for_runlist) {
+			&pbdma_mask);
+	if (!found) {
 		nvgpu_err(g, "busted pbdma map");
 		return -EINVAL;
 	}
@@ -95,7 +95,8 @@ int gm20b_engine_init_ce_info(struct nvgpu_fifo *f)
 	info->intr_mask |= BIT32(dev->intr_id);
 	info->reset_mask |= BIT32(dev->reset_id);
 	info->runlist_id = dev->runlist_id;
-	info->pbdma_id = pbdma_id;
+	info->pbdma_id = nvgpu_safe_sub_u32(
+		nvgpu_safe_cast_u64_to_u32(nvgpu_ffs(pbdma_mask)), 1U);
 	info->inst_id = dev->inst_id;
 	info->pri_base = dev->pri_base;
 
@@ -34,12 +34,12 @@
 int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
 {
 	struct gk20a *g = f->g;
-	u32 i;
 	enum nvgpu_fifo_engine engine_enum;
+	u32 i;
 	u32 gr_runlist_id;
-	u32 pbdma_id = U32_MAX;
-	bool found_pbdma_for_runlist = false;
+	u32 pbdma_mask = 0U;
 	u32 lce_num_entries = 0;
+	bool found;
 
 	gr_runlist_id = nvgpu_engine_get_gr_runlist_id(g);
 	nvgpu_log_info(g, "gr_runlist_id: %d", gr_runlist_id);
@@ -57,11 +57,10 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
 		return -EINVAL;
 	}
 
-	found_pbdma_for_runlist =
-		g->ops.pbdma.find_for_runlist(g,
+	found = g->ops.fifo.find_pbdma_for_runlist(g,
 			dev->runlist_id,
-			&pbdma_id);
-	if (!found_pbdma_for_runlist) {
+			&pbdma_mask);
+	if (!found) {
 		nvgpu_err(g, "busted pbdma map");
 		return -EINVAL;
 	}
@@ -80,7 +79,8 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
 	info->intr_mask |= BIT32(dev->intr_id);
 	info->reset_mask |= BIT32(dev->reset_id);
 	info->runlist_id = dev->runlist_id;
-	info->pbdma_id = pbdma_id;
+	info->pbdma_id = nvgpu_safe_sub_u32(
+		nvgpu_safe_cast_u64_to_u32(nvgpu_ffs(pbdma_mask)), 1U);
 	info->inst_id = dev->inst_id;
 	info->pri_base = dev->pri_base;
 	info->engine_id = dev->engine_id;
@@ -31,8 +31,9 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g);
 int gk20a_init_fifo_setup_hw(struct gk20a *g);
 void gk20a_fifo_bar1_snooping_disable(struct gk20a *g);
 #endif
-void gk20a_fifo_init_pbdma_map(struct gk20a *g, u32 *pbdma_map, u32 num_pbdma);
 u32 gk20a_fifo_get_runlist_timeslice(struct gk20a *g);
 u32 gk20a_fifo_get_pb_timeslice(struct gk20a *g);
+bool gk20a_fifo_find_pbdma_for_runlist(struct gk20a *g,
+		u32 runlist_id, u32 *pbdma_mask);
 
 #endif /* NVGPU_FIFO_GK20A_H */
@@ -32,15 +32,26 @@
 
 #include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
 
-void gk20a_fifo_init_pbdma_map(struct gk20a *g, u32 *pbdma_map, u32 num_pbdma)
+bool gk20a_fifo_find_pbdma_for_runlist(struct gk20a *g,
+		u32 runlist_id, u32 *pbdma_mask)
 {
+	u32 runlist_bit = BIT32(runlist_id);
+	u32 num_pbdmas = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
 	u32 id;
 
-	for (id = 0U; id < num_pbdma; ++id) {
-		pbdma_map[id] = nvgpu_readl(g, fifo_pbdma_map_r(id));
+	*pbdma_mask = 0U;
+	for (id = 0U; id < num_pbdmas; id++) {
+		u32 pbdma_map = nvgpu_readl(g, fifo_pbdma_map_r(id));
+
+		if ((pbdma_map & runlist_bit) != 0U) {
+			*pbdma_mask |= BIT32(id);
+		}
 	}
+
+	return *pbdma_mask != 0U;
 }
 
 u32 gk20a_fifo_get_runlist_timeslice(struct gk20a *g)
 {
 	return fifo_runlist_timeslice_timeout_128_f() |
@@ -62,6 +62,8 @@ u32 gm20b_pbdma_get_ctrl_hce_priv_mode_yes(void);
 u32 gm20b_pbdma_get_userd_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem);
 u32 gm20b_pbdma_get_userd_addr(u32 addr_lo);
 u32 gm20b_pbdma_get_userd_hi_addr(u32 addr_hi);
+bool gm20b_pbdma_find_for_runlist(struct gk20a *g,
+		u32 runlist_id, u32 *pbdma_mask);
 
 #ifdef CONFIG_NVGPU_HAL_NON_FUSA
 void gm20b_pbdma_intr_enable(struct gk20a *g, bool enable);
@@ -336,7 +336,8 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	runlist_served_pbdmas = f->runlist_info[runlist_id]->pbdma_bitmask;
 	runlist_served_engines = f->runlist_info[runlist_id]->eng_bitmask;
 
-	for_each_set_bit(bit, &runlist_served_pbdmas, f->num_pbdma) {
+	for_each_set_bit(bit, &runlist_served_pbdmas,
+			nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA)) {
 		pbdma_id = U32(bit);
 		err = gv11b_fifo_preempt_poll_pbdma(g, tsgid,
 				pbdma_id);
@@ -109,7 +109,7 @@ int gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct nvgpu_tsg *tsg)
 	int i;
 	unsigned int runque, buffer_size;
 	u32 page_size = U32(PAGE_SIZE);
-	unsigned int num_pbdma = g->fifo.num_pbdma;
+	unsigned int num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
 
 	if (tsg->eng_method_buffers != NULL) {
 		nvgpu_warn(g, "eng method buffers already allocated");
@@ -161,7 +161,9 @@ void gv11b_tsg_deinit_eng_method_buffers(struct gk20a *g,
 		return;
 	}
 
-	for (runque = 0; runque < g->fifo.num_pbdma; runque++) {
+	for (runque = 0;
+	     runque < nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
+	     runque++) {
 		nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[runque]);
 	}
 
@@ -647,7 +647,6 @@ static const struct gpu_ops gm20b_ops = {
 		.preempt_channel = gk20a_fifo_preempt_channel,
 		.preempt_tsg = gk20a_fifo_preempt_tsg,
 		.preempt_trigger = gk20a_fifo_preempt_trigger,
-		.init_pbdma_map = gk20a_fifo_init_pbdma_map,
 		.is_preempt_pending = gk20a_fifo_is_preempt_pending,
 		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
 #ifdef CONFIG_NVGPU_RECOVERY
@@ -677,6 +676,7 @@ static const struct gpu_ops gm20b_ops = {
 		.get_pb_timeslice = gk20a_fifo_get_pb_timeslice,
 		.is_mmu_fault_pending = gk20a_fifo_is_mmu_fault_pending,
 		.bar1_snooping_disable = gk20a_fifo_bar1_snooping_disable,
+		.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
@@ -703,7 +703,6 @@ static const struct gpu_ops gm20b_ops = {
 			gm20b_pbdma_channel_fatal_0_intr_descs,
 		.restartable_0_intr_descs =
 			gm20b_pbdma_restartable_0_intr_descs,
-		.find_for_runlist = nvgpu_pbdma_find_for_runlist,
 		.format_gpfifo_entry =
 			gm20b_pbdma_format_gpfifo_entry,
 		.get_gp_base = gm20b_pbdma_get_gp_base,
@@ -735,7 +735,6 @@ static const struct gpu_ops gp10b_ops = {
 		.preempt_channel = gk20a_fifo_preempt_channel,
 		.preempt_tsg = gk20a_fifo_preempt_tsg,
 		.preempt_trigger = gk20a_fifo_preempt_trigger,
-		.init_pbdma_map = gk20a_fifo_init_pbdma_map,
 		.is_preempt_pending = gk20a_fifo_is_preempt_pending,
 		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
 #ifdef CONFIG_NVGPU_RECOVERY
@@ -765,6 +764,7 @@ static const struct gpu_ops gp10b_ops = {
 		.get_pb_timeslice = gk20a_fifo_get_pb_timeslice,
 		.is_mmu_fault_pending = gk20a_fifo_is_mmu_fault_pending,
 		.bar1_snooping_disable = gk20a_fifo_bar1_snooping_disable,
+		.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
@@ -791,7 +791,6 @@ static const struct gpu_ops gp10b_ops = {
 			gp10b_pbdma_channel_fatal_0_intr_descs,
 		.restartable_0_intr_descs =
 			gm20b_pbdma_restartable_0_intr_descs,
-		.find_for_runlist = nvgpu_pbdma_find_for_runlist,
 		.format_gpfifo_entry =
 			gm20b_pbdma_format_gpfifo_entry,
 		.get_gp_base = gm20b_pbdma_get_gp_base,
@@ -950,7 +950,6 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
 		.preempt_tsg = nvgpu_fifo_preempt_tsg,
 		.preempt_trigger = gv11b_fifo_preempt_trigger,
 		.preempt_poll_pbdma = gv11b_fifo_preempt_poll_pbdma,
-		.init_pbdma_map = gk20a_fifo_init_pbdma_map,
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
 		.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
 #ifdef CONFIG_NVGPU_RECOVERY
@@ -978,6 +977,7 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
 		.get_runlist_timeslice = gk20a_fifo_get_runlist_timeslice,
 		.get_pb_timeslice = gk20a_fifo_get_pb_timeslice,
 		.mmu_fault_id_to_pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id,
+		.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
@@ -1006,7 +1006,6 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
 			gv11b_pbdma_channel_fatal_0_intr_descs,
 		.restartable_0_intr_descs =
 			gm20b_pbdma_restartable_0_intr_descs,
-		.find_for_runlist = nvgpu_pbdma_find_for_runlist,
 		.format_gpfifo_entry =
 			gm20b_pbdma_format_gpfifo_entry,
 		.get_gp_base = gm20b_pbdma_get_gp_base,
@@ -980,7 +980,6 @@ static const struct gpu_ops tu104_ops = {
 		.preempt_tsg = nvgpu_fifo_preempt_tsg,
 		.preempt_trigger = gv11b_fifo_preempt_trigger,
 		.preempt_poll_pbdma = gv11b_fifo_preempt_poll_pbdma,
-		.init_pbdma_map = gk20a_fifo_init_pbdma_map,
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
 		.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
 #ifdef CONFIG_NVGPU_RECOVERY
@@ -1008,6 +1007,7 @@ static const struct gpu_ops tu104_ops = {
 		.get_runlist_timeslice = gk20a_fifo_get_runlist_timeslice,
 		.get_pb_timeslice = gk20a_fifo_get_pb_timeslice,
 		.mmu_fault_id_to_pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id,
+		.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
@@ -1036,7 +1036,6 @@ static const struct gpu_ops tu104_ops = {
 			gv11b_pbdma_channel_fatal_0_intr_descs,
 		.restartable_0_intr_descs =
 			gm20b_pbdma_restartable_0_intr_descs,
-		.find_for_runlist = nvgpu_pbdma_find_for_runlist,
 		.format_gpfifo_entry =
 			gm20b_pbdma_format_gpfifo_entry,
 		.get_gp_base = gm20b_pbdma_get_gp_base,
@@ -245,18 +245,6 @@ struct nvgpu_fifo {
 	/** Number of runlist entries per runlist as supported by the h/w. */
 	unsigned int num_runlist_entries;
 
-	/** Number of PBDMA supported by the h/w. */
-	unsigned int num_pbdma;
-	/**
-	 * This is the area of memory allocated by kernel to store pbdma_map for
-	 * #num_pbdma supported by the chip. This area of memory is used to
-	 * store pbdma map value as read from h/w register. Pbdma_map value
-	 * gives a bitmask describing the runlists that the given pbdma
-	 * will service. Pointer is indexed by pbdma_id starting with 0 to
-	 * #num_pbdma - 1.
-	 */
-	u32 *pbdma_map;
-
 	/**
 	 * This is the area of memory allocated by kernel to keep information for
 	 * #max_engines supported by the chip. This information is filled up
@@ -177,8 +177,6 @@ struct gops_fifo {
 	void (*preempt_trigger)(struct gk20a *g, u32 id, unsigned int id_type);
 	int (*preempt_poll_pbdma)(struct gk20a *g, u32 tsgid,
 			u32 pbdma_id);
-	void (*init_pbdma_map)(struct gk20a *g,
-			u32 *pbdma_map, u32 num_pbdma);
 	int (*is_preempt_pending)(struct gk20a *g, u32 id,
 			unsigned int id_type);
 	void (*intr_set_recover_mask)(struct gk20a *g);
@@ -203,6 +201,8 @@ struct gops_fifo {
 	u32 (*mmu_fault_id_to_pbdma_id)(struct gk20a *g,
 			u32 mmu_fault_id);
 	void (*bar1_snooping_disable)(struct gk20a *g);
+	bool (*find_pbdma_for_runlist)(struct gk20a *g,
+			u32 runlist_id, u32 *pbdma_id);
 
 #ifdef CONFIG_NVGPU_RECOVERY
 	void (*recover)(struct gk20a *g, u32 act_eng_bitmask,
@@ -60,8 +60,6 @@ struct gops_pbdma {
 	u32 (*device_fatal_0_intr_descs)(void);
 	u32 (*channel_fatal_0_intr_descs)(void);
 	u32 (*restartable_0_intr_descs)(void);
-	bool (*find_for_runlist)(struct gk20a *g,
-			u32 runlist_id, u32 *pbdma_id);
 	void (*format_gpfifo_entry)(struct gk20a *g,
 			struct nvgpu_gpfifo_entry *gpfifo_entry,
 			u64 pb_gpu_va, u32 method_size);
@@ -64,22 +64,4 @@ int nvgpu_pbdma_setup_sw(struct gk20a *g);
  */
 void nvgpu_pbdma_cleanup_sw(struct gk20a *g);
 
-/**
- * @brief Find PBDMA servicing the runlist
- *
- * @param g [in]		The GPU driver struct owning the runlist.
- * @param runlist_id [in]	Runlist identifier.
- * @param pbdma_id [out]	Pointer to PBDMA identifier.
- *
- * Finds the PBDMA which is servicing #runlist_id.
- *
- * @return true if PBDMA was found, false otherwise.
- * @retval Sets #pbdma_id to valid value and returns true in case PBDMA
- *         could be found.
- * @retval Sets #pbdma_id to U32_MAX and returns false in case PBDMA could
- *         not be found.
- */
-bool nvgpu_pbdma_find_for_runlist(struct gk20a *g,
-		u32 runlist_id, u32 *pbdma_id);
-
 #endif /* NVGPU_PBDMA_COMMON_H */
@@ -12,7 +12,6 @@ gk20a_channel_enable
 gk20a_channel_read_state
 gk20a_fifo_get_pb_timeslice
 gk20a_fifo_get_runlist_timeslice
-gk20a_fifo_init_pbdma_map
 gk20a_fifo_intr_1_enable
 gk20a_fifo_intr_1_isr
 gk20a_fifo_intr_handle_chsw_error
@@ -592,7 +591,6 @@ nvgpu_netlist_get_gpccs_inst_list
 nvgpu_netlist_get_gpccs_data_list
 nvgpu_nvgpu_get_fault_injection
 nvgpu_pbdma_cleanup_sw
-nvgpu_pbdma_find_for_runlist
 nvgpu_pbdma_status_is_chsw_load
 nvgpu_pbdma_status_is_chsw_save
 nvgpu_pbdma_status_is_chsw_switch
@@ -12,7 +12,6 @@ gk20a_channel_enable
 gk20a_channel_read_state
 gk20a_fifo_get_pb_timeslice
 gk20a_fifo_get_runlist_timeslice
-gk20a_fifo_init_pbdma_map
 gk20a_fifo_intr_1_enable
 gk20a_fifo_intr_1_isr
 gk20a_fifo_intr_handle_chsw_error
@@ -607,7 +606,6 @@ nvgpu_netlist_get_gpccs_inst_list
 nvgpu_netlist_get_gpccs_data_list
 nvgpu_nvgpu_get_fault_injection
 nvgpu_pbdma_cleanup_sw
-nvgpu_pbdma_find_for_runlist
 nvgpu_pbdma_status_is_chsw_load
 nvgpu_pbdma_status_is_chsw_save
 nvgpu_pbdma_status_is_chsw_switch
@@ -405,7 +405,7 @@ test_gv11b_mm_mmu_fault_setup_hw.setup_hw=0
 test_gv11b_mm_mmu_fault_setup_sw.setup_sw_s0=0
 test_gv11b_mm_mmu_fault_setup_sw.setup_sw_s1=0
 test_handle_mmu_fault_common.handle_mmu_common_s0=0
-test_handle_mmu_fault_common.handle_mmu_common_s10=0
+test_handle_mmu_fault_common.handle_mmu_common_s10=2
 test_handle_mmu_fault_common.handle_mmu_common_s1=0
 test_handle_mmu_fault_common.handle_mmu_common_s2=0
 test_handle_mmu_fault_common.handle_mmu_common_s3=0
@@ -587,7 +587,6 @@ test_gk20a_fifo_intr_handle_chsw_error.intr_handle_chsw_error=0
 test_gk20a_fifo_intr_handle_runlist_event.intr_handle_runlist_event=0
 test_gk20a_fifo_pbdma_isr.pbdma_isr=0
 test_gk20a_get_timeslices.get_timeslices=0
-test_gk20a_init_pbdma_map.init_pbdma_map=0
 
 [nvgpu_fifo_gv11b]
 test_fifo_init_support.init_support=0
@@ -690,7 +689,6 @@ test_nvgpu_memset_sysmem.nvgpu_memset_sysmem=0
 [nvgpu_pbdma]
 test_fifo_init_support.init_support=0
 test_fifo_remove_support.remove_support=0
-test_pbdma_find_for_runlist.pbdma_find_for_runlist=0
 test_pbdma_setup_sw.setup_sw=0
 test_pbdma_status.pbdma_status=0
 
@@ -173,15 +173,8 @@ done:
 
 #define F_ENGINE_INIT_INFO_GET_DEV_INFO_NULL	BIT(0)
 #define F_ENGINE_INIT_INFO_GET_DEV_INFO_FAIL	BIT(1)
-#define F_ENGINE_INIT_INFO_PBDMA_FIND_FAIL	BIT(2)
-#define F_ENGINE_INIT_INFO_INIT_CE_FAIL		BIT(3)
-#define F_ENGINE_INIT_INFO_LAST			BIT(4)
+#define F_ENGINE_INIT_INFO_INIT_CE_FAIL		BIT(2)
+#define F_ENGINE_INIT_INFO_LAST			BIT(3)
 
-static bool stub_pbdma_find_for_runlist_none(struct gk20a *g,
-		u32 runlist_id, u32 *pbdma_id)
-{
-	return false;
-}
-
 static int stub_engine_init_ce_info_EINVAL(struct nvgpu_fifo *f)
 {
@@ -200,12 +193,10 @@ int test_engine_init_info(struct unit_module *m,
 	u32 fail =
 		F_ENGINE_INIT_INFO_GET_DEV_INFO_NULL |
 		F_ENGINE_INIT_INFO_GET_DEV_INFO_FAIL |
-		F_ENGINE_INIT_INFO_PBDMA_FIND_FAIL |
 		F_ENGINE_INIT_INFO_INIT_CE_FAIL;
 	const char *labels[] = {
 		"get_dev_info_null",
 		"get_dev_info_fail",
-		"pbdma_find_fail",
 		"init_ce_fail",
 	};
 	u32 prune = fail;
@@ -221,11 +212,6 @@ int test_engine_init_info(struct unit_module *m,
 		unit_verbose(m, "%s branches=%s\n", __func__,
 			branches_str(branches, labels));
 
-		g->ops.pbdma.find_for_runlist =
-			branches & F_ENGINE_INIT_INFO_PBDMA_FIND_FAIL ?
-			stub_pbdma_find_for_runlist_none :
-			gops.pbdma.find_for_runlist;
-
 		g->ops.engine.init_ce_info =
 			branches & F_ENGINE_INIT_INFO_INIT_CE_FAIL ?
 			stub_engine_init_ce_info_EINVAL :
@@ -58,28 +58,6 @@
 
 #define UNIT_MAX_PBDMA	32
 
-int test_gk20a_init_pbdma_map(struct unit_module *m,
-		struct gk20a *g, void *args)
-{
-	int ret = UNIT_FAIL;
-	u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
-	u32 pbdma_map[UNIT_MAX_PBDMA];
-	u32 id;
-	unit_assert(num_pbdma > 0, goto done);
-	unit_assert(num_pbdma <= UNIT_MAX_PBDMA, goto done);
-
-	memset(pbdma_map, 0, sizeof(pbdma_map));
-	gk20a_fifo_init_pbdma_map(g, pbdma_map, num_pbdma);
-	for (id = 0; id < num_pbdma; id++) {
-		unit_verbose(m, "id=%u map=%08x\n", id, pbdma_map[id]);
-		unit_assert(pbdma_map[id] != 0, goto done);
-	}
-
-	ret = UNIT_SUCCESS;
-done:
-	return ret;
-}
-
 int test_gk20a_get_timeslices(struct unit_module *m,
 		struct gk20a *g, void *args)
 {
@@ -106,7 +84,6 @@ struct unit_module_test nvgpu_fifo_gk20a_tests[] = {
 	UNIT_TEST(init_support, test_fifo_init_support, NULL, 0),
 
 	/* fifo gk20a */
-	UNIT_TEST(init_pbdma_map, test_gk20a_init_pbdma_map, NULL, 0),
 	UNIT_TEST(get_timeslices, test_gk20a_get_timeslices, NULL, 0),
 
 	/* fifo intr gk20a */
@@ -418,10 +418,11 @@ int test_gm20b_pbdma_read_data(struct unit_module *m,
 		struct gk20a *g, void *args)
 {
 	int ret = UNIT_FAIL;
-	struct nvgpu_fifo *f = &g->fifo;
 	u32 pbdma_id = 0;
 
-	for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
+	for (pbdma_id = 0;
+	     pbdma_id < nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
+	     pbdma_id++) {
 		u32 pattern = (0xbeef << 16) + pbdma_id;
 		nvgpu_writel(g, pbdma_hdr_shadow_r(pbdma_id), pattern);
 		unit_assert(gm20b_pbdma_read_data(g, pbdma_id) == pattern,
@@ -64,11 +64,10 @@ struct unit_ctx {
 
 static struct unit_ctx unit_ctx;
 
-#define F_PBDMA_SETUP_SW_KZALLOC_FAIL		BIT(0)
-#define F_PBDMA_SETUP_SW_DEVICE_FATAL_0		BIT(1)
-#define F_PBDMA_SETUP_SW_CHANNEL_FATAL_0	BIT(2)
-#define F_PBDMA_SETUP_SW_RESTARTABLE_0		BIT(3)
-#define F_PBDMA_SETUP_SW_LAST			BIT(4)
+#define F_PBDMA_SETUP_SW_DEVICE_FATAL_0		BIT(0)
+#define F_PBDMA_SETUP_SW_CHANNEL_FATAL_0	BIT(1)
+#define F_PBDMA_SETUP_SW_RESTARTABLE_0		BIT(2)
+#define F_PBDMA_SETUP_SW_LAST			BIT(3)
 
 static u32 stub_pbdma_device_fatal_0_intr_descs(void) {
 	return F_PBDMA_SETUP_SW_DEVICE_FATAL_0;
@@ -90,14 +89,12 @@ int test_pbdma_setup_sw(struct unit_module *m,
 	struct nvgpu_posix_fault_inj *kmem_fi;
 	u32 branches = 0U;
 	int ret = UNIT_FAIL;
-	u32 fail = F_PBDMA_SETUP_SW_KZALLOC_FAIL;
 	static const char *labels[] = {
-		"kzalloc_fail",
 		"device_fatal_0",
 		"channel_fatal_0",
 		"restartable_0",
 	};
-	u32 prune = fail;
+	u32 prune = 0U;
 	int err;
 
 	kmem_fi = nvgpu_kmem_get_fault_injection();
@@ -117,10 +114,6 @@ int test_pbdma_setup_sw(struct unit_module *m,
 		unit_verbose(m, "%s branches=%s\n", __func__,
 			branches_str(branches, labels));
 
-		nvgpu_posix_enable_fault_injection(kmem_fi,
-			branches & F_PBDMA_SETUP_SW_KZALLOC_FAIL ?
-			true : false, 0);
-
 		f->intr.pbdma.device_fatal_0 = 0;
 		f->intr.pbdma.channel_fatal_0 = 0;
 		f->intr.pbdma.restartable_0 = 0;
@@ -139,12 +132,7 @@ int test_pbdma_setup_sw(struct unit_module *m,
 
 		err = nvgpu_pbdma_setup_sw(g);
 
-		if (branches & fail) {
-			unit_assert(err != 0, goto done);
-			unit_assert(f->pbdma_map == NULL, goto done);
-		} else {
 		unit_assert(err == 0, goto done);
-		unit_assert(f->pbdma_map != NULL, goto done);
 		unit_assert(f->intr.pbdma.device_fatal_0 ==
 			(branches & F_PBDMA_SETUP_SW_DEVICE_FATAL_0),
 			goto done);
@@ -154,9 +142,8 @@ int test_pbdma_setup_sw(struct unit_module *m,
 		unit_assert(f->intr.pbdma.restartable_0 ==
 			(branches & F_PBDMA_SETUP_SW_RESTARTABLE_0),
 			goto done);
 
 		nvgpu_pbdma_cleanup_sw(g);
-		unit_assert(f->pbdma_map == NULL, goto done);
-		}
 	}
 	ret = UNIT_SUCCESS;
 
@@ -170,46 +157,6 @@ done:
 	return ret;
 }
 
-int test_pbdma_find_for_runlist(struct unit_module *m,
-		struct gk20a *g, void *args)
-{
-	struct nvgpu_fifo *f = &g->fifo;
-	struct nvgpu_fifo fifo = g->fifo;
-	u32 runlist_id;
-	bool active;
-	bool found;
-	u32 pbdma_id;
-	int ret = UNIT_FAIL;
-
-	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
-
-		active = nvgpu_engine_is_valid_runlist_id(g, runlist_id);
-
-		pbdma_id = U32_MAX;
-		found = nvgpu_pbdma_find_for_runlist(g, runlist_id, &pbdma_id);
-
-		if (active) {
-			unit_assert(found, goto done);
-			unit_assert(pbdma_id != U32_MAX, goto done);
-			unit_assert((f->pbdma_map[pbdma_id] &
-					BIT(runlist_id)) != 0, goto done);
-		} else {
-			unit_assert(!found, goto done);
-			unit_assert(pbdma_id == U32_MAX, goto done);
-		}
-	}
-
-	f->num_pbdma = 0;
-	unit_assert(!nvgpu_pbdma_find_for_runlist(g, 0, &pbdma_id), goto done);
-
-	ret = UNIT_SUCCESS;
-
-done:
-	g->fifo = fifo;
-
-	return ret;
-}
-
 int test_pbdma_status(struct unit_module *m,
 		struct gk20a *g, void *args)
 {
@@ -264,7 +211,6 @@ done:
 struct unit_module_test nvgpu_pbdma_tests[] = {
 	UNIT_TEST(setup_sw, test_pbdma_setup_sw, &unit_ctx, 0),
 	UNIT_TEST(init_support, test_fifo_init_support, &unit_ctx, 0),
-	UNIT_TEST(pbdma_find_for_runlist, test_pbdma_find_for_runlist, &unit_ctx, 0),
 	UNIT_TEST(pbdma_status, test_pbdma_status, &unit_ctx, 0),
 	UNIT_TEST(remove_support, test_fifo_remove_support, &unit_ctx, 0),
 };
@@ -61,30 +61,6 @@ struct gk20a;
 int test_pbdma_setup_sw(struct unit_module *m,
 		struct gk20a *g, void *args);
 
-/**
- * Test specification for: test_pbdma_find_for_runlist
- *
- * Description: Branch coverage for nvgpu_pbdma_find_for_runlist
- *
- * Test Type: Feature
- *
- * Targets: nvgpu_pbdma_find_for_runlist, nvgpu_engine_is_valid_runlist_id
- *
- * Input: test_fifo_init_support() run for this GPU.
- *
- * Steps:
- * - Check that nvgpu_pbdma_find_for_runlist does not find any valid
- *   PBDMA for inactive runlists.
- * - Check that nvgpu_pbdma_find_for_runlist finds a valid PBDMA for
- *   active runlists:
- *   - Function must return true for active runlists.
- *   - Check PBDMA actually services the runlist using pbdma_map.
- *
- * Output: Returns PASS if all branches gave expected results. FAIL otherwise.
- */
-int test_pbdma_find_for_runlist(struct unit_module *m,
-		struct gk20a *g, void *args);
-
 /**
  * Test specification for: test_pbdma_status
  *
@@ -138,12 +138,6 @@ static void reset_ctx(void)
 	u.priv_ring_isr = false;
 }
 
-static bool mock_pbdma_find_for_runlist(struct gk20a *g, u32 runlist_id,
-		u32 *pbdma_id)
-{
-	return true;
-}
-
 static void mock_bus_isr(struct gk20a *g)
 {
 	u.bus_isr = true;
@@ -230,7 +224,6 @@ int test_setup_env(struct unit_module *m,
 	}
 
 	/* override HALs */
-	g->ops.pbdma.find_for_runlist = mock_pbdma_find_for_runlist;
 	g->ops.bus.isr = mock_bus_isr;
 	g->ops.ce.isr_stall = mock_ce_stall_isr;
 	g->ops.ce.isr_nonstall = mock_ce_nonstall_isr;
@@ -826,7 +826,7 @@ struct unit_module_test mm_mmu_fault_gv11b_fusa_tests[] = {
 	UNIT_TEST(handle_mmu_common_s7, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_REFCH, 0),
 	UNIT_TEST(handle_mmu_common_s8, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_REFCH_NACK_HNDLD, 0),
 	UNIT_TEST(handle_mmu_common_s9, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_FAULTED_INVALID, 0),
-	UNIT_TEST(handle_mmu_common_s10, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_TSG, 0),
+	UNIT_TEST(handle_mmu_common_s10, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_TSG, 2),
 	UNIT_TEST(handle_nonreplay_s0, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_BUF_EMPTY, 0),
 	UNIT_TEST(handle_nonreplay_s1, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_INVALID_BUF_ENTRY, 0),
 	UNIT_TEST(handle_nonreplay_s2, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_VALID_BUF_ENTRY, 0),
|||||||