gpu: nvgpu: move post_event_id from fifo to tsg

Moved the following HAL from fifo to tsg:
- tsg.post_event_id

Implemented as
- nvgpu_tsg_post_event_id (see the sketch below)
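
For illustration, a minimal self-contained sketch of the resulting call pattern follows. The struct layouts, the stdio output, and the NVGPU_EVENT_ID_BLOCKING_SYNC value are simplified stand-ins for this sketch; only the names gpu_ops.tsg.post_event_id and nvgpu_tsg_post_event_id reflect the actual change.

    #include <stdio.h>

    struct tsg_gk20a {
            unsigned int tsgid;
    };

    /* Common implementation, renamed from gk20a_tsg_event_id_post_event. */
    static void nvgpu_tsg_post_event_id(struct tsg_gk20a *tsg, int event_id)
    {
            printf("tsg %u: post event id %d\n", tsg->tsgid, event_id);
    }

    struct gpu_ops {
            struct {
                    /* was gpu_ops.fifo.post_event_id before this change */
                    void (*post_event_id)(struct tsg_gk20a *tsg, int event_id);
            } tsg;
    };

    struct gk20a {
            struct gpu_ops ops;
    };

    #define NVGPU_EVENT_ID_BLOCKING_SYNC 3 /* illustrative value only */

    int main(void)
    {
            struct gk20a g = { .ops.tsg.post_event_id = nvgpu_tsg_post_event_id };
            struct tsg_gk20a tsg = { .tsgid = 0 };

            /* call sites change from g->ops.fifo.post_event_id(...) to: */
            g.ops.tsg.post_event_id(&tsg, NVGPU_EVENT_ID_BLOCKING_SYNC);
            return 0;
    }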

Jira NVGPU-2979

Change-Id: I074233aaf96b33d011b50b1465ea5d76e0a43689
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2087186
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Thomas Fleury
Date:      2019-04-01 17:32:11 -07:00
Committed: mobile promotions
Parent:    35e9663bd0
Commit:    9121f84e67
18 changed files with 483 additions and 20 deletions


@@ -2640,7 +2640,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
struct tsg_gk20a *tsg =
tsg_gk20a_from_ch(c);
if (tsg != NULL) {
- g->ops.fifo.post_event_id(tsg,
+ g->ops.tsg.post_event_id(tsg,
NVGPU_EVENT_ID_BLOCKING_SYNC);
}
}


@@ -336,7 +336,7 @@ int nvgpu_gr_intr_handle_semaphore_pending(struct gk20a *g,
tsg = tsg_gk20a_from_ch(ch);
if (tsg != NULL) {
- g->ops.fifo.post_event_id(tsg,
+ g->ops.tsg.post_event_id(tsg,
NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
nvgpu_cond_broadcast(&ch->semaphore_wq);


@@ -40,6 +40,8 @@
#include <nvgpu/string.h>
#include <nvgpu/vm_area.h>
#include <hal/fifo/tsg_gk20a.h>
#include "fifo_vgpu.h"
#include "common/vgpu/gr/subctx_vgpu.h"
#include "common/vgpu/ivc/comm_vgpu.h"
@@ -513,7 +515,7 @@ void vgpu_handle_channel_event(struct gk20a *g,
tsg = &g->fifo.tsg[info->id];
- gk20a_tsg_event_id_post_event(tsg, info->event_id);
+ nvgpu_tsg_post_event_id(tsg, info->event_id);
}
void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)


@@ -421,7 +421,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.is_preempt_pending = NULL,
.reset_enable_hw = NULL,
.teardown_ch_tsg = NULL,
- .post_event_id = gk20a_tsg_event_id_post_event,
.setup_sw = vgpu_fifo_setup_sw,
.cleanup_sw = vgpu_fifo_cleanup_sw,
.set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask,
@@ -553,6 +552,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.unbind_channel_check_eng_faulted = NULL,
.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
.force_reset = vgpu_tsg_force_reset_ch,
+ .post_event_id = nvgpu_tsg_post_event_id,
},
.netlist = {
.get_netlist_name = gp10b_netlist_get_name,


@@ -504,7 +504,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.is_preempt_pending = gv11b_fifo_is_preempt_pending,
.reset_enable_hw = NULL,
.teardown_ch_tsg = NULL,
- .post_event_id = gk20a_tsg_event_id_post_event,
.setup_sw = vgpu_fifo_setup_sw,
.cleanup_sw = vgpu_fifo_cleanup_sw,
.ring_channel_doorbell = gv11b_ring_channel_doorbell,
@@ -638,6 +637,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.unbind_channel_check_eng_faulted = NULL,
.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
.force_reset = vgpu_tsg_force_reset_ch,
+ .post_event_id = nvgpu_tsg_post_event_id,
},
.netlist = {
.get_netlist_name = gv11b_netlist_get_name,


@@ -0,0 +1,461 @@
/*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/enabled.h>
#include <nvgpu/ptimer.h>
#include <nvgpu/vgpu/vgpu_ivc.h>
#include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/timers.h>
#include <nvgpu/fifo.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>
#include <nvgpu/clk_arb.h>
#include <nvgpu/string.h>
#include <nvgpu/ltc.h>
#include <nvgpu/cbc.h>
#include "common/vgpu/gr/fecs_trace_vgpu.h"
int vgpu_comm_init(struct gk20a *g)
{
size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };
return vgpu_ivc_init(g, 3, queue_sizes, TEGRA_VGPU_QUEUE_CMD,
ARRAY_SIZE(queue_sizes));
}
void vgpu_comm_deinit(void)
{
size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };
vgpu_ivc_deinit(TEGRA_VGPU_QUEUE_CMD, ARRAY_SIZE(queue_sizes));
}
int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
size_t size_out)
{
void *handle;
size_t size = size_in;
void *data = msg;
int err;
err = vgpu_ivc_sendrecv(vgpu_ivc_get_server_vmid(),
TEGRA_VGPU_QUEUE_CMD, &handle, &data, &size);
if (!err) {
WARN_ON(size < size_out);
nvgpu_memcpy((u8 *)msg, (u8 *)data, size_out);
vgpu_ivc_release(handle);
}
return err;
}
u64 vgpu_connect(void)
{
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_connect_params *p = &msg.params.connect;
int err;
msg.cmd = TEGRA_VGPU_CMD_CONNECT;
p->module = TEGRA_VGPU_MODULE_GPU;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
return (err || msg.ret) ? 0 : p->handle;
}
int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
{
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_attrib_params *p = &msg.params.attrib;
int err;
msg.cmd = TEGRA_VGPU_CMD_GET_ATTRIBUTE;
msg.handle = handle;
p->attrib = attrib;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
if (err || msg.ret) {
return -1;
}
*value = p->value;
return 0;
}
static void vgpu_handle_channel_event(struct gk20a *g,
struct tegra_vgpu_channel_event_info *info)
{
struct tsg_gk20a *tsg;
if (!info->is_tsg) {
nvgpu_err(g, "channel event posted");
return;
}
if (info->id >= g->fifo.num_channels ||
info->event_id >= TEGRA_VGPU_CHANNEL_EVENT_ID_MAX) {
nvgpu_err(g, "invalid channel event");
return;
}
tsg = &g->fifo.tsg[info->id];
nvgpu_tsg_post_event_id(tsg, info->event_id);
}
static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
{
struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
if (ch == NULL) {
nvgpu_err(g, "invalid channel id %d", chid);
return;
}
gk20a_channel_set_unserviceable(ch);
g->ops.fifo.ch_abort_clean_up(ch);
gk20a_channel_put(ch);
}
static void vgpu_set_error_notifier(struct gk20a *g,
struct tegra_vgpu_channel_set_error_notifier *p)
{
struct channel_gk20a *ch;
if (p->chid >= g->fifo.num_channels) {
nvgpu_err(g, "invalid chid %d", p->chid);
return;
}
ch = &g->fifo.channel[p->chid];
g->ops.fifo.set_error_notifier(ch, p->error);
}
int vgpu_intr_thread(void *dev_id)
{
struct gk20a *g = dev_id;
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
while (true) {
struct tegra_vgpu_intr_msg *msg;
u32 sender;
void *handle;
size_t size;
int err;
err = vgpu_ivc_recv(TEGRA_VGPU_QUEUE_INTR, &handle,
(void **)&msg, &size, &sender);
if (err == -ETIME) {
continue;
}
if (err != 0) {
nvgpu_do_assert_print(g,
"Unexpected vgpu_ivc_recv err=%d", err);
continue;
}
if (msg->event == TEGRA_VGPU_EVENT_ABORT) {
vgpu_ivc_release(handle);
break;
}
switch (msg->event) {
case TEGRA_VGPU_EVENT_INTR:
if (msg->unit == TEGRA_VGPU_INTR_GR) {
vgpu_gr_isr(g, &msg->info.gr_intr);
} else if (msg->unit == TEGRA_VGPU_INTR_FIFO) {
vgpu_fifo_isr(g, &msg->info.fifo_intr);
}
break;
#ifdef CONFIG_GK20A_CTXSW_TRACE
case TEGRA_VGPU_EVENT_FECS_TRACE:
vgpu_fecs_trace_data_update(g);
break;
#endif
case TEGRA_VGPU_EVENT_CHANNEL:
vgpu_handle_channel_event(g, &msg->info.channel_event);
break;
case TEGRA_VGPU_EVENT_SM_ESR:
vgpu_gr_handle_sm_esr_event(g, &msg->info.sm_esr);
break;
case TEGRA_VGPU_EVENT_SEMAPHORE_WAKEUP:
g->ops.semaphore_wakeup(g,
!!msg->info.sem_wakeup.post_events);
break;
case TEGRA_VGPU_EVENT_CHANNEL_CLEANUP:
vgpu_channel_abort_cleanup(g,
msg->info.ch_cleanup.chid);
break;
case TEGRA_VGPU_EVENT_SET_ERROR_NOTIFIER:
vgpu_set_error_notifier(g,
&msg->info.set_error_notifier);
break;
default:
nvgpu_err(g, "unknown event %u", msg->event);
break;
}
vgpu_ivc_release(handle);
}
while (!nvgpu_thread_should_stop(&priv->intr_handler)) {
nvgpu_msleep(10);
}
return 0;
}
void vgpu_remove_support_common(struct gk20a *g)
{
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
struct tegra_vgpu_intr_msg msg;
int err;
if (g->dbg_regops_tmp_buf) {
nvgpu_kfree(g, g->dbg_regops_tmp_buf);
}
if (g->pmu.remove_support) {
g->pmu.remove_support(&g->pmu);
}
if (g->gr.remove_support) {
g->gr.remove_support(&g->gr);
}
if (g->fifo.remove_support) {
g->fifo.remove_support(&g->fifo);
}
if (g->mm.remove_support) {
g->mm.remove_support(&g->mm);
}
msg.event = TEGRA_VGPU_EVENT_ABORT;
err = vgpu_ivc_send(vgpu_ivc_get_peer_self(), TEGRA_VGPU_QUEUE_INTR,
&msg, sizeof(msg));
WARN_ON(err);
nvgpu_thread_stop(&priv->intr_handler);
nvgpu_clk_arb_cleanup_arbiter(g);
nvgpu_mutex_destroy(&g->clk_arb_enable_lock);
nvgpu_mutex_destroy(&priv->vgpu_clk_get_freq_lock);
nvgpu_kfree(g, priv->freqs);
}
void vgpu_detect_chip(struct gk20a *g)
{
struct nvgpu_gpu_params *p = &g->params;
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
p->gpu_arch = priv->constants.arch;
p->gpu_impl = priv->constants.impl;
p->gpu_rev = priv->constants.rev;
nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n",
p->gpu_arch,
p->gpu_impl,
p->gpu_rev);
}
void vgpu_init_gpu_characteristics(struct gk20a *g)
{
nvgpu_log_fn(g, " ");
gk20a_init_gpu_characteristics(g);
/* features vgpu does not support */
nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);
nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, false);
}
int vgpu_read_ptimer(struct gk20a *g, u64 *value)
{
struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
int err;
nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
msg.handle = vgpu_get_handle(g);
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
err = err ? err : msg.ret;
if (!err) {
*value = p->time;
} else {
nvgpu_err(g, "vgpu read ptimer failed, err=%d", err);
}
return err;
}
int vgpu_get_timestamps_zipper(struct gk20a *g,
u32 source_id, u32 count,
struct nvgpu_cpu_time_correlation_sample *samples)
{
struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_get_timestamps_zipper_params *p =
&msg.params.get_timestamps_zipper;
int err;
u32 i;
nvgpu_log_fn(g, " ");
if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) {
nvgpu_err(g, "count %u overflow", count);
return -EINVAL;
}
msg.cmd = TEGRA_VGPU_CMD_GET_TIMESTAMPS_ZIPPER;
msg.handle = vgpu_get_handle(g);
p->source_id = TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_SRC_ID_TSC;
p->count = count;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
err = err ? err : msg.ret;
if (err) {
nvgpu_err(g, "vgpu get timestamps zipper failed, err=%d", err);
return err;
}
for (i = 0; i < count; i++) {
samples[i].cpu_timestamp = p->samples[i].cpu_timestamp;
samples[i].gpu_timestamp = p->samples[i].gpu_timestamp;
}
return err;
}
int vgpu_init_hal(struct gk20a *g)
{
u32 ver = g->params.gpu_arch + g->params.gpu_impl;
int err;
switch (ver) {
case NVGPU_GPUID_GP10B:
nvgpu_log_info(g, "gp10b detected");
err = vgpu_gp10b_init_hal(g);
break;
case NVGPU_GPUID_GV11B:
err = vgpu_gv11b_init_hal(g);
break;
default:
nvgpu_err(g, "no support for %x", ver);
err = -ENODEV;
break;
}
if (err == 0) {
err = vgpu_init_hal_os(g);
}
return err;
}
int vgpu_get_constants(struct gk20a *g)
{
struct tegra_vgpu_cmd_msg msg = {};
struct tegra_vgpu_constants_params *p = &msg.params.constants;
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
int err;
nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_GET_CONSTANTS;
msg.handle = vgpu_get_handle(g);
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
err = err ? err : msg.ret;
if (unlikely(err)) {
nvgpu_err(g, "%s failed, err=%d", __func__, err);
return err;
}
if (unlikely(p->gpc_count > TEGRA_VGPU_MAX_GPC_COUNT ||
p->max_tpc_per_gpc_count > TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC)) {
nvgpu_err(g, "gpc_count %d max_tpc_per_gpc %d overflow",
(int)p->gpc_count, (int)p->max_tpc_per_gpc_count);
return -EINVAL;
}
priv->constants = *p;
return 0;
}
int vgpu_finalize_poweron_common(struct gk20a *g)
{
int err;
nvgpu_log_fn(g, " ");
vgpu_detect_chip(g);
err = vgpu_init_hal(g);
if (err != 0) {
return err;
}
err = nvgpu_init_ltc_support(g);
if (err != 0) {
nvgpu_err(g, "failed to init ltc");
return err;
}
err = vgpu_init_mm_support(g);
if (err != 0) {
nvgpu_err(g, "failed to init gk20a mm");
return err;
}
err = nvgpu_fifo_init_support(g);
if (err != 0) {
nvgpu_err(g, "failed to init gk20a fifo");
return err;
}
err = vgpu_init_gr_support(g);
if (err != 0) {
nvgpu_err(g, "failed to init gk20a gr");
return err;
}
err = nvgpu_clk_arb_init_arbiter(g);
if (err != 0) {
nvgpu_err(g, "failed to init clk arb");
return err;
}
err = nvgpu_cbc_init_support(g);
if (err != 0) {
nvgpu_err(g, "failed to init cbc");
return err;
}
g->ops.chip_init_gpu_characteristics(g);
g->ops.fifo.channel_resume(g);
return 0;
}


@@ -857,12 +857,12 @@ static int gk20a_gr_post_bpt_events(struct gk20a *g, struct tsg_gk20a *tsg,
{
if ((global_esr &
gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) != 0U) {
- g->ops.fifo.post_event_id(tsg, NVGPU_EVENT_ID_BPT_INT);
+ g->ops.tsg.post_event_id(tsg, NVGPU_EVENT_ID_BPT_INT);
}
if ((global_esr &
gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f()) != 0U) {
- g->ops.fifo.post_event_id(tsg, NVGPU_EVENT_ID_BPT_PAUSE);
+ g->ops.tsg.post_event_id(tsg, NVGPU_EVENT_ID_BPT_PAUSE);
}
return 0;


@@ -636,7 +636,6 @@ static const struct gpu_ops gm20b_ops = {
.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
.teardown_mask_intr = gk20a_fifo_teardown_mask_intr,
.teardown_unmask_intr = gk20a_fifo_teardown_unmask_intr,
- .post_event_id = gk20a_tsg_event_id_post_event,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
@@ -774,6 +773,7 @@ static const struct gpu_ops gm20b_ops = {
.unbind_channel_check_eng_faulted = NULL,
.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
.force_reset = nvgpu_tsg_force_reset_ch,
+ .post_event_id = nvgpu_tsg_post_event_id,
},
.netlist = {
.get_netlist_name = gm20b_netlist_get_name,


@@ -753,8 +753,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
gr_ctx->cilp_preempt_pending = true;
g->gr.cilp_preempt_pending_chid = fault_ch->chid;
- g->ops.fifo.post_event_id(tsg,
- NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED);
+ g->ops.tsg.post_event_id(tsg, NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED);
return 0;
}
@@ -992,8 +991,8 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
tsg = &g->fifo.tsg[ch->tsgid];
- g->ops.fifo.post_event_id(tsg,
- NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);
+ g->ops.tsg.post_event_id(tsg,
+ NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);
gk20a_channel_put(ch);
}


@@ -725,7 +725,6 @@ static const struct gpu_ops gp10b_ops = {
.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
.teardown_mask_intr = gk20a_fifo_teardown_mask_intr,
.teardown_unmask_intr = gk20a_fifo_teardown_unmask_intr,
- .post_event_id = gk20a_tsg_event_id_post_event,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
@@ -866,6 +865,7 @@ static const struct gpu_ops gp10b_ops = {
.unbind_channel_check_eng_faulted = NULL,
.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
.force_reset = nvgpu_tsg_force_reset_ch,
+ .post_event_id = nvgpu_tsg_post_event_id,
},
.netlist = {
.get_netlist_name = gp10b_netlist_get_name,


@@ -906,7 +906,6 @@ static const struct gpu_ops gv100_ops = {
.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
.teardown_mask_intr = gv100_fifo_teardown_mask_intr,
.teardown_unmask_intr = gv100_fifo_teardown_unmask_intr,
- .post_event_id = gk20a_tsg_event_id_post_event,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
.ring_channel_doorbell = gv11b_ring_channel_doorbell,
@@ -1054,6 +1053,7 @@ static const struct gpu_ops gv100_ops = {
gv11b_tsg_unbind_channel_check_eng_faulted,
.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
.force_reset = nvgpu_tsg_force_reset_ch,
+ .post_event_id = nvgpu_tsg_post_event_id,
},
.netlist = {
.get_netlist_name = gv100_netlist_get_name,


@@ -862,7 +862,6 @@ static const struct gpu_ops gv11b_ops = {
.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
.teardown_mask_intr = gv11b_fifo_teardown_mask_intr,
.teardown_unmask_intr = gv11b_fifo_teardown_unmask_intr,
- .post_event_id = gk20a_tsg_event_id_post_event,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
.ring_channel_doorbell = gv11b_ring_channel_doorbell,
@@ -1012,6 +1011,7 @@ static const struct gpu_ops gv11b_ops = {
gv11b_tsg_unbind_channel_check_eng_faulted,
.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
.force_reset = nvgpu_tsg_force_reset_ch,
+ .post_event_id = nvgpu_tsg_post_event_id,
},
.netlist = {
.get_netlist_name = gv11b_netlist_get_name,


@@ -26,5 +26,6 @@
struct tsg_gk20a;
void gk20a_tsg_enable(struct tsg_gk20a *tsg);
- void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg);
#endif /* NVGPU_TSG_GK20A_H */


@@ -971,7 +971,6 @@ struct gpu_ops {
void (*teardown_mask_intr)(struct gk20a *g);
void (*teardown_unmask_intr)(struct gk20a *g);
u32 (*get_preempt_timeout)(struct gk20a *g);
- void (*post_event_id)(struct tsg_gk20a *tsg, int event_id);
void (*ring_channel_doorbell)(struct channel_gk20a *c);
u64 (*usermode_base)(struct gk20a *g);
u32 (*doorbell_token)(struct channel_gk20a *c);
@@ -1170,6 +1169,7 @@ struct gpu_ops {
bool *verbose, u32 *ms);
int (*force_reset)(struct channel_gk20a *ch,
u32 err_code, bool verbose);
+ void (*post_event_id)(struct tsg_gk20a *tsg, int event_id);
} tsg;
struct {
void (*read_engine_status_info) (struct gk20a *g,


@@ -113,7 +113,7 @@ void nvgpu_tsg_set_ctx_mmu_error(struct gk20a *g,
struct tsg_gk20a *tsg);
bool nvgpu_tsg_mark_error(struct gk20a *g, struct tsg_gk20a *tsg);
- void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
+ void nvgpu_tsg_post_event_id(struct tsg_gk20a *tsg,
int event_id);
bool nvgpu_tsg_check_ctxsw_timeout(struct tsg_gk20a *tsg,
bool *debug_dump, u32 *ms);


@@ -200,7 +200,7 @@ static u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
}
- void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
+ void nvgpu_tsg_post_event_id(struct tsg_gk20a *tsg,
int event_id)
{
struct gk20a_event_id_data *channel_event_id_data;


@@ -22,7 +22,7 @@
#include <nvgpu/tsg.h>
- void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
+ void nvgpu_tsg_post_event_id(struct tsg_gk20a *tsg,
int event_id)
{
}


@@ -939,7 +939,6 @@ static const struct gpu_ops tu104_ops = {
.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
.teardown_mask_intr = gv11b_fifo_teardown_mask_intr,
.teardown_unmask_intr = gv11b_fifo_teardown_unmask_intr,
- .post_event_id = gk20a_tsg_event_id_post_event,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
.ring_channel_doorbell = tu104_ring_channel_doorbell,
@@ -1089,6 +1088,7 @@ static const struct gpu_ops tu104_ops = {
gv11b_tsg_unbind_channel_check_eng_faulted,
.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
.force_reset = nvgpu_tsg_force_reset_ch,
+ .post_event_id = nvgpu_tsg_post_event_id,
},
.netlist = {
.get_netlist_name = tu104_netlist_get_name,