gpu: nvgpu: vgpu: move vgpu fifo files under vgpu/fifo

Create a new directory fifo under the common vgpu path, moving all
vgpu common fifo files under that directory.

Move vgpu runlist implementations to a new file runlist_vgpu.c
and create corresponding header file.

Also fix lines over 80 chars in fifo_vgpu.c

Jira GVSCI-334

Change-Id: Ic00535b22a6066a0d27435b9a987de7fa701ea05
Signed-off-by: Aparna Das <aparnad@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2011762
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Aparna Das
2019-02-04 15:24:58 -08:00
committed by mobile promotions
parent 8431b0b0ba
commit d8c5ce3c85
11 changed files with 274 additions and 207 deletions

View File

@@ -385,7 +385,9 @@ nvgpu-$(CONFIG_GK20A_VIDMEM) += \
nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
common/vgpu/ltc_vgpu.o \
common/vgpu/gr_vgpu.o \
common/vgpu/fifo_vgpu.o \
common/vgpu/fifo/fifo_vgpu.o \
common/vgpu/fifo/runlist_vgpu.o \
common/vgpu/fifo/vgpu_fifo_gv11b.o \
common/vgpu/ce_vgpu.o \
common/vgpu/mm_vgpu.o \
common/vgpu/vgpu.o \
@@ -404,7 +406,6 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
common/vgpu/gv11b/vgpu_gv11b.o \
common/vgpu/gv11b/vgpu_hal_gv11b.o \
common/vgpu/gv11b/vgpu_gr_gv11b.o \
common/vgpu/gv11b/vgpu_fifo_gv11b.o \
common/vgpu/gv11b/vgpu_subctx_gv11b.o \
common/vgpu/gv11b/vgpu_tsg_gv11b.o \

View File

@@ -308,7 +308,9 @@ srcs += common/sim.c \
tu104/func_tu104.c \
tu104/fecs_trace_tu104.c \
common/vgpu/vgpu.c \
common/vgpu/fifo_vgpu.c \
common/vgpu/fifo/fifo_vgpu.c \
common/vgpu/fifo/runlist_vgpu.c \
common/vgpu/fifo/vgpu_fifo_gv11b.c \
common/vgpu/tsg_vgpu.c \
common/vgpu/perf/cyclestats_snapshot_vgpu.c \
common/vgpu/perf/perf_vgpu.c \
@@ -321,7 +323,6 @@ srcs += common/sim.c \
common/vgpu/ce_vgpu.c \
common/vgpu/gv11b/vgpu_gv11b.c \
common/vgpu/gv11b/vgpu_hal_gv11b.c \
common/vgpu/gv11b/vgpu_fifo_gv11b.c \
common/vgpu/gv11b/vgpu_tsg_gv11b.c \
common/vgpu/gv11b/vgpu_subctx_gv11b.c \
common/vgpu/gv11b/vgpu_gr_gv11b.c \

View File

@@ -198,7 +198,7 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
f->num_engines = engines->num_engines;
for (i = 0; i < f->num_engines; i++) {
struct fifo_engine_info_gk20a *info =
&f->engine_info[engines->info[i].engine_id];
&f->engine_info[engines->info[i].engine_id];
if (engines->info[i].engine_id >= f->max_engines) {
nvgpu_err(f->g, "engine id %d larger than max %d",
@@ -263,7 +263,8 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
sizeof(*f->engine_info));
f->active_engines_list = nvgpu_kzalloc(g, f->max_engines * sizeof(u32));
if (!(f->channel && f->tsg && f->engine_info && f->active_engines_list)) {
if (!(f->channel && f->tsg && f->engine_info &&
f->active_engines_list)) {
err = -ENOMEM;
goto clean_up;
}
@@ -446,171 +447,6 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
return err;
}
static int vgpu_submit_runlist(struct gk20a *g, u64 handle, u8 runlist_id,
u16 *runlist, u32 num_entries)
{
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_runlist_params *p;
int err;
void *oob_handle;
void *oob;
size_t size, oob_size;
oob_handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
TEGRA_VGPU_QUEUE_CMD,
&oob, &oob_size);
if (!oob_handle) {
return -EINVAL;
}
size = sizeof(*runlist) * num_entries;
if (oob_size < size) {
err = -ENOMEM;
goto done;
}
msg.cmd = TEGRA_VGPU_CMD_SUBMIT_RUNLIST;
msg.handle = handle;
p = &msg.params.runlist;
p->runlist_id = runlist_id;
p->num_entries = num_entries;
nvgpu_memcpy((u8 *)oob, (u8 *)runlist, size);
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
err = (err || msg.ret) ? -1 : 0;
done:
vgpu_ivc_oob_put_ptr(oob_handle);
return err;
}
static bool vgpu_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, bool add)
{
struct fifo_gk20a *f = &g->fifo;
struct fifo_runlist_info_gk20a *runlist;
runlist = &f->runlist_info[runlist_id];
if (add) {
if (test_and_set_bit((int)ch->chid,
runlist->active_channels)) {
return false;
/* was already there */
}
} else {
if (!test_and_clear_bit((int)ch->chid,
runlist->active_channels)) {
/* wasn't there */
return false;
}
}
return true;
}
static void vgpu_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
bool add_entries)
{
struct fifo_gk20a *f = &g->fifo;
struct fifo_runlist_info_gk20a *runlist;
runlist = &f->runlist_info[runlist_id];
if (add_entries) {
u16 *runlist_entry;
u32 count = 0;
unsigned long chid;
runlist_entry = runlist->mem[0].cpu_va;
nvgpu_assert(f->num_channels <= (unsigned int)U16_MAX);
for_each_set_bit(chid,
runlist->active_channels, f->num_channels) {
nvgpu_log_info(g, "add channel %lu to runlist", chid);
*runlist_entry++ = (u16)chid;
count++;
}
runlist->count = count;
} else {
runlist->count = 0;
}
}
static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, bool add,
bool wait_for_finish)
{
struct fifo_gk20a *f = &g->fifo;
struct fifo_runlist_info_gk20a *runlist;
bool add_entries;
nvgpu_log_fn(g, " ");
if (ch != NULL) {
bool update = vgpu_runlist_modify_active_locked(g, runlist_id,
ch, add);
if (!update) {
/* no change in runlist contents */
return 0;
}
/* had a channel to update, so reconstruct */
add_entries = true;
} else {
/* no channel; add means update all, !add means clear all */
add_entries = add;
}
runlist = &f->runlist_info[runlist_id];
vgpu_runlist_reconstruct_locked(g, runlist_id, add_entries);
return vgpu_submit_runlist(g, vgpu_get_handle(g), runlist_id,
runlist->mem[0].cpu_va, runlist->count);
}
/* add/remove a channel from runlist
special cases below: runlist->active_channels will NOT be changed.
(ch == NULL && !add) means remove all active channels from runlist.
(ch == NULL && add) means restore all active channels on runlist. */
static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch,
bool add, bool wait_for_finish)
{
struct fifo_runlist_info_gk20a *runlist = NULL;
struct fifo_gk20a *f = &g->fifo;
int ret;
nvgpu_log_fn(g, " ");
runlist = &f->runlist_info[runlist_id];
nvgpu_mutex_acquire(&runlist->runlist_lock);
ret = vgpu_runlist_update_locked(g, runlist_id, ch, add,
wait_for_finish);
nvgpu_mutex_release(&runlist->runlist_lock);
return ret;
}
int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch,
bool add, bool wait_for_finish)
{
nvgpu_assert(ch != NULL);
return vgpu_runlist_update(g, runlist_id, ch, add, wait_for_finish);
}
int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish)
{
return vgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish);
}
int vgpu_fifo_wait_engine_idle(struct gk20a *g)
{
nvgpu_log_fn(g, " ");
@@ -618,27 +454,6 @@ int vgpu_fifo_wait_engine_idle(struct gk20a *g)
return 0;
}
int vgpu_runlist_set_interleave(struct gk20a *g,
u32 id,
u32 runlist_id,
u32 new_level)
{
struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_tsg_runlist_interleave_params *p =
&msg.params.tsg_interleave;
int err;
nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
msg.handle = vgpu_get_handle(g);
p->tsg_id = id;
p->level = new_level;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
WARN_ON(err || msg.ret);
return err ? err : msg.ret;
}
int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
u32 err_code, bool verbose)
{
@@ -747,7 +562,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
break;
case TEGRA_VGPU_FIFO_INTR_CTXSW_TIMEOUT:
g->ops.fifo.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
break;
case TEGRA_VGPU_FIFO_INTR_MMU_FAULT:
vgpu_fifo_set_ctx_mmu_error_ch_tsg(g, ch);

View File

@@ -43,16 +43,7 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
int vgpu_fifo_init_engine_info(struct fifo_gk20a *f);
int vgpu_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch);
int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch,
bool add, bool wait_for_finish);
int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish);
int vgpu_fifo_wait_engine_idle(struct gk20a *g);
int vgpu_runlist_set_interleave(struct gk20a *g,
u32 id,
u32 runlist_id,
u32 new_level);
int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice);
int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
u32 err_code, bool verbose);

View File

@@ -0,0 +1,220 @@
/*
* Virtualized GPU Runlist
*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/kmem.h>
#include <nvgpu/string.h>
#include <nvgpu/bug.h>
#include <nvgpu/vgpu/vgpu_ivc.h>
#include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>
#include <nvgpu/runlist.h>
#include "runlist_vgpu.h"
/*
 * Send the constructed runlist to the vgpu server for submission.
 *
 * The runlist entries are too large for the regular command message, so
 * they travel in the IVC out-of-band (OOB) buffer: the OOB area is mapped,
 * the entries are copied in, and the command message carries only the
 * runlist id and entry count.
 *
 * Returns 0 on success, -EINVAL if the OOB buffer cannot be mapped,
 * -ENOMEM if the runlist does not fit in it, -1 on a server-side failure.
 */
static int vgpu_submit_runlist(struct gk20a *g, u64 handle, u8 runlist_id,
u16 *runlist, u32 num_entries)
{
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_runlist_params *p;
int err;
void *oob_handle;
void *oob;
size_t size, oob_size;
/* Map the OOB area; every exit after this must release it. */
oob_handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
TEGRA_VGPU_QUEUE_CMD,
&oob, &oob_size);
if (!oob_handle) {
return -EINVAL;
}
size = sizeof(*runlist) * num_entries;
if (oob_size < size) {
/* runlist too large for the shared OOB buffer */
err = -ENOMEM;
goto done;
}
msg.cmd = TEGRA_VGPU_CMD_SUBMIT_RUNLIST;
msg.handle = handle;
p = &msg.params.runlist;
p->runlist_id = runlist_id;
p->num_entries = num_entries;
/* Entries go through OOB; the message itself carries only metadata. */
nvgpu_memcpy((u8 *)oob, (u8 *)runlist, size);
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
/* Collapse transport and server errors into a single failure code. */
err = (err || msg.ret) ? -1 : 0;
done:
vgpu_ivc_oob_put_ptr(oob_handle);
return err;
}
/*
 * Mark a channel as active (add) or inactive (!add) in the runlist's
 * active_channels bitmap. Caller must hold the runlist lock.
 *
 * Returns true if the bitmap changed, false if the channel was already
 * in the requested state (so the runlist does not need rebuilding).
 */
static bool vgpu_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, bool add)
{
struct fifo_gk20a *f = &g->fifo;
struct fifo_runlist_info_gk20a *runlist;
runlist = &f->runlist_info[runlist_id];
if (add) {
if (test_and_set_bit((int)ch->chid,
runlist->active_channels)) {
/* was already there */
return false;
}
} else {
if (!test_and_clear_bit((int)ch->chid,
runlist->active_channels)) {
/* wasn't there */
return false;
}
}
return true;
}
/*
 * Rebuild the runlist entry buffer from the active_channels bitmap.
 * Caller must hold the runlist lock.
 *
 * When add_entries is false the runlist is simply emptied (count = 0);
 * otherwise every set bit in active_channels becomes one u16 channel-id
 * entry in runlist->mem[0], and runlist->count reflects the total.
 */
static void vgpu_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
			bool add_entries)
{
	struct fifo_gk20a *f = &g->fifo;
	struct fifo_runlist_info_gk20a *runlist =
			&f->runlist_info[runlist_id];
	u16 *entries;
	u32 n = 0U;
	unsigned long chid;

	if (!add_entries) {
		/* clearing: leave the buffer alone, just report it empty */
		runlist->count = 0;
		return;
	}

	entries = runlist->mem[0].cpu_va;
	/* chids must fit in the u16 runlist entry format */
	nvgpu_assert(f->num_channels <= (unsigned int)U16_MAX);
	for_each_set_bit(chid, runlist->active_channels,
			f->num_channels) {
		nvgpu_log_info(g, "add channel %lu to runlist", chid);
		entries[n] = (u16)chid;
		n++;
	}
	runlist->count = n;
}
/*
 * Apply a single channel add/remove (or a bulk clear/restore when ch is
 * NULL), rebuild the runlist buffer, and submit it to the vgpu server.
 * Caller must hold the runlist lock.
 *
 * Returns 0 on success (including the no-op case where the channel was
 * already in the requested state), negative on submission failure.
 * wait_for_finish is accepted for interface parity but not used here.
 */
static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
			struct channel_gk20a *ch, bool add,
			bool wait_for_finish)
{
	struct fifo_gk20a *f = &g->fifo;
	struct fifo_runlist_info_gk20a *runlist;
	bool rebuild_with_entries;

	nvgpu_log_fn(g, " ");

	if (ch == NULL) {
		/* no channel; add means update all, !add means clear all */
		rebuild_with_entries = add;
	} else {
		if (!vgpu_runlist_modify_active_locked(g, runlist_id,
						ch, add)) {
			/* no change in runlist contents */
			return 0;
		}
		/* had a channel to update, so reconstruct */
		rebuild_with_entries = true;
	}

	runlist = &f->runlist_info[runlist_id];
	vgpu_runlist_reconstruct_locked(g, runlist_id, rebuild_with_entries);

	return vgpu_submit_runlist(g, vgpu_get_handle(g), runlist_id,
			runlist->mem[0].cpu_va, runlist->count);
}
/* add/remove a channel from runlist
   special cases below: runlist->active_channels will NOT be changed.
   (ch == NULL && !add) means remove all active channels from runlist.
   (ch == NULL && add) means restore all active channels on runlist. */
static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id,
			struct channel_gk20a *ch,
			bool add, bool wait_for_finish)
{
	struct fifo_runlist_info_gk20a *runlist = NULL;
	struct fifo_gk20a *f = &g->fifo;
	/*
	 * vgpu_runlist_update_locked() returns negative int error codes
	 * (-EINVAL/-ENOMEM/-1); keep a signed type so they round-trip
	 * unmodified. (Was "u32 ret", which funneled negative values
	 * through an unsigned intermediate.)
	 */
	int ret;

	nvgpu_log_fn(g, " ");

	runlist = &f->runlist_info[runlist_id];

	/* serialize against concurrent updates of the same runlist */
	nvgpu_mutex_acquire(&runlist->runlist_lock);
	ret = vgpu_runlist_update_locked(g, runlist_id, ch, add,
			wait_for_finish);
	nvgpu_mutex_release(&runlist->runlist_lock);

	return ret;
}
/*
 * Add (add == true) or remove (add == false) one specific channel from a
 * runlist. ch must be non-NULL; bulk operations go through
 * vgpu_runlist_reload() instead.
 */
int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch,
bool add, bool wait_for_finish)
{
nvgpu_assert(ch != NULL);
return vgpu_runlist_update(g, runlist_id, ch, add, wait_for_finish);
}
/*
 * Bulk runlist operation: add == true restores all active channels onto
 * the runlist, add == false removes them all (active_channels itself is
 * left untouched — see vgpu_runlist_update()).
 */
int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish)
{
return vgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish);
}
/*
 * Ask the vgpu server to change the runlist interleave level of a TSG.
 *
 * id is forwarded as the TSG id; runlist_id is accepted for interface
 * parity with the native implementation but not used by the vgpu path.
 * Returns 0 on success, a negative transport error, or the server's
 * error code.
 */
int vgpu_runlist_set_interleave(struct gk20a *g,
		u32 id,
		u32 runlist_id,
		u32 new_level)
{
	struct tegra_vgpu_cmd_msg msg = {
		.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE,
	};
	struct tegra_vgpu_tsg_runlist_interleave_params *params =
		&msg.params.tsg_interleave;
	int comm_err;

	nvgpu_log_fn(g, " ");

	msg.handle = vgpu_get_handle(g);
	params->tsg_id = id;
	params->level = new_level;

	comm_err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(comm_err || msg.ret);

	return (comm_err != 0) ? comm_err : msg.ret;
}

View File

@@ -0,0 +1,36 @@
/*
* Virtualized GPU Runlist
*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * NOTE(review): no include guard or <nvgpu/types.h> include is visible in
 * this chunk of the new header — confirm the full file has both (u32/bool
 * are used below).
 */
struct gk20a;
struct channel_gk20a;
/* Add or remove one channel (ch != NULL) from the given runlist. */
int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch,
bool add, bool wait_for_finish);
/* Bulk restore (add) or remove (!add) all active channels on a runlist. */
int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish);
/* Set the runlist interleave level for TSG `id` via the vgpu server. */
int vgpu_runlist_set_interleave(struct gk20a *g,
u32 id,
u32 runlist_id,
u32 new_level);

View File

@@ -28,7 +28,7 @@
#include <nvgpu/channel.h>
#include "gv11b/fifo_gv11b.h"
#include "vgpu_fifo_gv11b.h"
#include "common/vgpu/fifo/vgpu_fifo_gv11b.h"
#ifdef CONFIG_TEGRA_GK20A_NVHOST

View File

@@ -41,7 +41,8 @@
#include "common/fifo/runlist_gk20a.h"
#include "common/fifo/channel_gm20b.h"
#include "common/vgpu/fifo_vgpu.h"
#include "common/vgpu/fifo/fifo_vgpu.h"
#include "common/vgpu/fifo/runlist_vgpu.h"
#include "common/vgpu/gr_vgpu.h"
#include "common/vgpu/ltc_vgpu.h"
#include "common/vgpu/mm_vgpu.h"

View File

@@ -52,7 +52,8 @@
#include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/error_notifier.h>
#include "common/vgpu/fifo_vgpu.h"
#include "common/vgpu/fifo/fifo_vgpu.h"
#include "common/vgpu/fifo/runlist_vgpu.h"
#include "common/vgpu/gr_vgpu.h"
#include "common/vgpu/ltc_vgpu.h"
#include "common/vgpu/mm_vgpu.h"
@@ -60,6 +61,7 @@
#include "common/vgpu/perf/perf_vgpu.h"
#include "common/vgpu/fecs_trace_vgpu.h"
#include "common/vgpu/perf/cyclestats_snapshot_vgpu.h"
#include "common/vgpu/fifo/vgpu_fifo_gv11b.h"
#include "common/vgpu/gm20b/vgpu_gr_gm20b.h"
#include "common/vgpu/gp10b/vgpu_mm_gp10b.h"
#include "common/vgpu/gp10b/vgpu_gr_gp10b.h"
@@ -93,7 +95,7 @@
#include "vgpu_gv11b.h"
#include "vgpu_gr_gv11b.h"
#include "vgpu_fifo_gv11b.h"
#include "vgpu_subctx_gv11b.h"
#include "vgpu_tsg_gv11b.h"

View File

@@ -28,7 +28,7 @@
#include <nvgpu/vgpu/tegra_vgpu.h>
#include <nvgpu/vgpu/vgpu.h>
#include "fifo_vgpu.h"
#include "fifo/fifo_vgpu.h"
int vgpu_tsg_open(struct tsg_gk20a *tsg)
{