Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: update .channel.enable/disable to use runlist_id and chid
Moving to use IDs rather than struct makes it reusable on server side.

Jira GVSCI-15770

Change-Id: Ibd94ab8c9f0492bd6d20243525905d637eb8de66
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2863438
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
commit c8d6a91de6
parent d9c8d317f0
committed by: mobile promotions
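The signature change at the heart of this commit, and the call pattern it enables, in brief (the helper below is hypothetical, shown only to illustrate the server-side use case named in the commit message):

	/* Before: the HAL hook required the full channel object. */
	void (*enable)(struct nvgpu_channel *ch);

	/* After: the GPU instance plus two IDs are enough. */
	void (*enable)(struct gk20a *g, u32 runlist_id, u32 chid);

	/* Hypothetical server-side caller that never holds the struct: */
	static void server_enable_channel(struct gk20a *g, u32 runlist_id,
					  u32 chid)
	{
		g->ops.channel.enable(g, runlist_id, chid);
	}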
@@ -157,6 +157,16 @@ int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add)
 	return c->g->ops.runlist.update(c->g, c->runlist, c, add, true);
 }
 
+void nvgpu_channel_enable(struct nvgpu_channel *ch)
+{
+	ch->g->ops.channel.enable(ch->g, ch->runlist->id, ch->chid);
+}
+
+void nvgpu_channel_disable(struct nvgpu_channel *ch)
+{
+	ch->g->ops.channel.disable(ch->g, ch->runlist->id, ch->chid);
+}
+
 int nvgpu_channel_enable_tsg(struct gk20a *g, struct nvgpu_channel *ch)
 {
 	struct nvgpu_tsg *tsg;
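The two wrappers added above keep existing kernel-side call sites source-compatible: anything that holds a struct nvgpu_channel keeps calling nvgpu_channel_enable()/nvgpu_channel_disable(), and the wrapper unpacks (g, runlist->id, chid) before reaching the HAL. The TSG loops updated later in this diff now read, for example:

	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
		/* forwards ch->g, ch->runlist->id and ch->chid to the HAL */
		nvgpu_channel_enable(ch);
	}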
@@ -193,12 +193,11 @@ int nvgpu_tsg_validate_cilp_config(struct nvgpu_channel *ch)
 
 void nvgpu_tsg_disable(struct nvgpu_tsg *tsg)
 {
-	struct gk20a *g = tsg->g;
 	struct nvgpu_channel *ch;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
-		g->ops.channel.disable(ch);
+		nvgpu_channel_disable(ch);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
@@ -639,7 +638,7 @@ static int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,
 	/* another thread could have re-enabled the channel because it was
 	 * still on the list at that time, so make sure it's truly disabled
 	 */
-	g->ops.channel.disable(ch);
+	nvgpu_channel_disable(ch);
 	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	/*
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -110,35 +110,35 @@ void vgpu_channel_free_inst(struct gk20a *g, struct nvgpu_channel *ch)
 	WARN_ON(err || msg.ret);
 }
 
-void vgpu_channel_enable(struct nvgpu_channel *ch)
+void vgpu_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
+	struct nvgpu_channel *ch = &g->fifo.channel[chid];
 	int err;
-	struct gk20a *g = ch->g;
 
 	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
-	msg.handle = vgpu_get_handle(ch->g);
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 }
 
-void vgpu_channel_disable(struct nvgpu_channel *ch)
+void vgpu_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
+	struct nvgpu_channel *ch = &g->fifo.channel[chid];
 	int err;
-	struct gk20a *g = ch->g;
 
 	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
-	msg.handle = vgpu_get_handle(ch->g);
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
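Note how the virtual backend adapts: it still needs the channel struct, because the RPC is keyed by the per-channel virt_ctx handle, so it rebuilds the pointer from the ID; runlist_id is accepted but unused. Condensed from the hunk above:

	/* Recover the struct from chid; chid is assumed valid here. */
	struct nvgpu_channel *ch = &g->fifo.channel[chid];
	p->handle = ch->virt_ctx;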
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,8 +30,8 @@ void vgpu_channel_bind(struct nvgpu_channel *ch);
 void vgpu_channel_unbind(struct nvgpu_channel *ch);
 int vgpu_channel_alloc_inst(struct gk20a *g, struct nvgpu_channel *ch);
 void vgpu_channel_free_inst(struct gk20a *g, struct nvgpu_channel *ch);
-void vgpu_channel_enable(struct nvgpu_channel *ch);
-void vgpu_channel_disable(struct nvgpu_channel *ch);
+void vgpu_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid);
+void vgpu_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid);
 u32 vgpu_channel_count(struct gk20a *g);
 void vgpu_channel_set_ctx_mmu_error(struct gk20a *g, struct nvgpu_channel *ch);
 void vgpu_channel_set_error_notifier(struct gk20a *g,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -81,12 +81,11 @@ void vgpu_tsg_release(struct nvgpu_tsg *tsg)
 
 void vgpu_tsg_enable(struct nvgpu_tsg *tsg)
 {
-	struct gk20a *g = tsg->g;
 	struct nvgpu_channel *ch;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
-		g->ops.channel.enable(ch);
+		nvgpu_channel_enable(ch);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
@@ -30,8 +30,8 @@ struct nvgpu_channel;
 struct nvgpu_channel_hw_state;
 
 u32 ga10b_channel_count(struct gk20a *g);
-void ga10b_channel_enable(struct nvgpu_channel *ch);
-void ga10b_channel_disable(struct nvgpu_channel *ch);
+void ga10b_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid);
+void ga10b_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid);
 void ga10b_channel_bind(struct nvgpu_channel *ch);
 void ga10b_channel_unbind(struct nvgpu_channel *ch);
 void ga10b_channel_clear(struct gk20a *g, u32 runlist_id, u32 chid);
@@ -56,26 +56,20 @@ u32 ga10b_channel_count(struct gk20a *g)
 	return num_channels;
 }
 
-void ga10b_channel_enable(struct nvgpu_channel *ch)
+void ga10b_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	struct gk20a *g = ch->g;
-	struct nvgpu_runlist *runlist = NULL;
-
-	runlist = ch->runlist;
-
-	nvgpu_chram_bar0_writel(g, runlist, runlist_chram_channel_r(ch->chid),
+	nvgpu_chram_bar0_writel(g,
+		g->fifo.runlists[runlist_id],
+		runlist_chram_channel_r(chid),
 		runlist_chram_channel_update_f(
 			runlist_chram_channel_update_enable_channel_v()));
 }
 
-void ga10b_channel_disable(struct nvgpu_channel *ch)
+void ga10b_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	struct gk20a *g = ch->g;
-	struct nvgpu_runlist *runlist = NULL;
-
-	runlist = ch->runlist;
-
-	nvgpu_chram_bar0_writel(g, runlist, runlist_chram_channel_r(ch->chid),
+	nvgpu_chram_bar0_writel(g,
+		g->fifo.runlists[runlist_id],
+		runlist_chram_channel_r(chid),
 		runlist_chram_channel_update_f(
 			runlist_chram_channel_update_disable_channel_v()));
 }
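With no channel struct to dereference, ga10b now looks the runlist up by ID instead of using ch->runlist. The implied caller contract (an assumption worth noting, since the hunk adds no check) is that runlist_id is a valid index:

	/* runlist_id must index a populated slot in g->fifo.runlists[];
	 * the HAL itself does not bounds-check it. */
	struct nvgpu_runlist *runlist = g->fifo.runlists[runlist_id];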
@@ -95,7 +89,7 @@ void ga10b_channel_bind(struct nvgpu_channel *ch)
 	}
 
 	/* Enable channel */
-	g->ops.channel.enable(ch);
+	nvgpu_channel_enable(ch);
 
 	nvgpu_atomic_set(&ch->bound, CHANNEL_BOUND);
 }
@@ -29,8 +29,8 @@ struct nvgpu_channel_hw_state;
 struct nvgpu_debug_context;
 struct nvgpu_channel_dump_info;
 
-void gk20a_channel_enable(struct nvgpu_channel *ch);
-void gk20a_channel_disable(struct nvgpu_channel *ch);
+void gk20a_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid);
+void gk20a_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid);
 void gk20a_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 	struct nvgpu_channel_hw_state *state);
@@ -35,18 +35,20 @@
 
 #include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
 
-void gk20a_channel_enable(struct nvgpu_channel *ch)
+void gk20a_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	nvgpu_writel(ch->g, ccsr_channel_r(ch->chid),
-		gk20a_readl(ch->g, ccsr_channel_r(ch->chid)) |
+	(void)runlist_id;
+	nvgpu_writel(g, ccsr_channel_r(chid),
+		gk20a_readl(g, ccsr_channel_r(chid)) |
 		ccsr_channel_enable_set_true_f());
 }
 
-void gk20a_channel_disable(struct nvgpu_channel *ch)
+void gk20a_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	nvgpu_writel(ch->g, ccsr_channel_r(ch->chid),
-		gk20a_readl(ch->g,
-			ccsr_channel_r(ch->chid)) |
+	(void)runlist_id;
+	nvgpu_writel(g, ccsr_channel_r(chid),
+		gk20a_readl(g,
+			ccsr_channel_r(chid)) |
 		ccsr_channel_enable_clr_true_f());
 }
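On gk20a-class chips the CCSR register is addressed by chid alone, so runlist_id is deliberately ignored; the (void) cast keeps the unified signature without an unused-parameter warning. That uniformity is the point: every chip's implementation can now sit behind the same HAL slot, along these lines (illustrative only; the real assignments live in each chip's HAL init, which is not part of this diff):

	g->ops.channel.enable  = gk20a_channel_enable;  /* ignores runlist_id */
	g->ops.channel.disable = gk20a_channel_disable;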
@@ -57,7 +57,7 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 			&hw_state);
 
 		if (hw_state.next || hw_state.ctx_reload) {
-			g->ops.channel.enable(ch);
+			nvgpu_channel_enable(ch);
 		}
 	}
@@ -71,7 +71,7 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 			continue;
 		}
 
-		g->ops.channel.enable(ch);
+		nvgpu_channel_enable(ch);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -46,7 +46,7 @@ void gv11b_tsg_enable(struct nvgpu_tsg *tsg)
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
-		g->ops.channel.enable(ch);
+		nvgpu_channel_enable(ch);
 		last_ch = ch;
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -1230,4 +1230,17 @@ static inline void nvgpu_channel_set_wdt_debug_dump(struct nvgpu_channel *ch,
  */
 u32 nvgpu_channel_get_max_subctx_count(struct nvgpu_channel *ch);
 
+/**
+ * @brief Enable the channel.
+ *
+ * @param ch [in] The channel to enable.
+ */
+void nvgpu_channel_enable(struct nvgpu_channel *ch);
+
+/**
+ * @brief Disable the channel.
+ *
+ * @param ch [in] The channel to disable.
+ */
+void nvgpu_channel_disable(struct nvgpu_channel *ch);
 #endif
@@ -43,24 +43,28 @@ struct gops_channel {
 	/**
 	 * @brief Enable channel for h/w scheduling.
 	 *
-	 * @param ch [in] Channel pointer.
+	 * @param g [in] The GPU driver struct.
+	 * @param runlist_id [in] ID of the runlist the channel belongs to.
+	 * @param chid [in] The channel to enable.
 	 *
 	 * The HAL writes CCSR register to enable channel for h/w scheduling.
 	 * Once enabled, the channel can be scheduled to run when this
 	 * channel is next on the runlist.
 	 */
-	void (*enable)(struct nvgpu_channel *ch);
+	void (*enable)(struct gk20a *g, u32 runlist_id, u32 chid);
 
 	/**
 	 * @brief Disable channel from h/w scheduling.
 	 *
-	 * @param ch [in] Channel pointer.
+	 * @param g [in] The GPU driver struct.
+	 * @param runlist_id [in] ID of the runlist the channel belongs to.
+	 * @param chid [in] The channel to disable.
 	 *
 	 * The HAL writes CCSR register to disable channel from h/w scheduling.
 	 * Once disabled, the channel is not scheduled to run even if it
 	 * is next on the runlist.
 	 */
-	void (*disable)(struct nvgpu_channel *ch);
+	void (*disable)(struct gk20a *g, u32 runlist_id, u32 chid);
 
 	/**
 	 * @brief Get number of channels.
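Given the parameter documentation above, a caller that tracks only IDs (the vGPU server case that motivated this change) can drive scheduling without ever materializing a channel struct. A minimal sketch, with a hypothetical helper name:

	static void sched_park_channel(struct gk20a *g, u32 runlist_id, u32 chid)
	{
		/* Remove the channel from h/w scheduling... */
		g->ops.channel.disable(g, runlist_id, chid);
		/* ...do maintenance work, then resume it. */
		g->ops.channel.enable(g, runlist_id, chid);
	}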
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics channel
  *
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1484,7 +1484,7 @@ long gk20a_channel_ioctl(struct file *filp,
 			break;
 		}
 		if (ch->g->ops.channel.enable)
-			ch->g->ops.channel.enable(ch);
+			nvgpu_channel_enable(ch);
 		else
 			err = -ENOSYS;
 		gk20a_idle(ch->g);
@@ -1498,7 +1498,7 @@ long gk20a_channel_ioctl(struct file *filp,
 			break;
 		}
 		if (ch->g->ops.channel.disable)
-			ch->g->ops.channel.disable(ch);
+			nvgpu_channel_disable(ch);
 		else
 			err = -ENOSYS;
 		gk20a_idle(ch->g);
@@ -68,7 +68,7 @@ int test_gk20a_channel_enable(struct unit_module *m,
 			privileged, getpid(), getpid());
 	unit_assert(ch, goto done);
 
-	gk20a_channel_enable(ch);
+	gk20a_channel_enable(g, ch->runlist->id, ch->chid);
 	unit_assert((nvgpu_readl(ch->g, ccsr_channel_r(ch->chid))
 		& ccsr_channel_enable_set_true_f()) != 0, goto done);
@@ -93,7 +93,7 @@ int test_gk20a_channel_disable(struct unit_module *m,
 			privileged, getpid(), getpid());
 	unit_assert(ch, goto done);
 
-	gk20a_channel_disable(ch);
+	gk20a_channel_disable(g, ch->runlist->id, ch->chid);
 	unit_assert((nvgpu_readl(ch->g, ccsr_channel_r(ch->chid))
 		& ccsr_channel_enable_clr_true_f()) != 0, goto done);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -96,9 +96,9 @@ static void subtest_setup(u32 branches)
 #define branches_str test_fifo_flags_str
 #define pruned test_fifo_subtest_pruned
 
-static void stub_channel_enable(struct nvgpu_channel *ch)
+static void stub_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	stub[0].chid = ch->chid;
+	stub[0].chid = chid;
 	stub[0].count++;
 }
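The stub records the chid it was invoked with, so a test can verify dispatch after swapping the stub into the HAL table. Roughly as follows (the assignment site is outside this diff and shown here as an assumption about the test fixture):

	g->ops.channel.enable = stub_channel_enable;
	/* ...run the code under test, then check the recording: */
	unit_assert(stub[0].chid == ch->chid, goto done);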
@@ -1090,10 +1090,10 @@ static const char *f_tsg_enable[] = {
 	"stub"
 };
 
-static void stub_channel_enable(struct nvgpu_channel *ch)
+static void stub_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
 	stub[0].name = __func__;
-	stub[0].chid = ch->chid;
+	stub[0].chid = chid;
 	stub[0].count++;
 }
@@ -1104,10 +1104,10 @@ static void stub_usermode_ring_doorbell(struct nvgpu_channel *ch)
 	stub[1].count++;
 }
 
-static void stub_channel_disable(struct nvgpu_channel *ch)
+static void stub_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
 	stub[2].name = __func__;
-	stub[2].chid = ch->chid;
+	stub[2].chid = chid;
 	stub[2].count++;
 }