diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index 252ffe480..80a1b2eab 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -157,6 +157,16 @@ int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add)
 	return c->g->ops.runlist.update(c->g, c->runlist, c, add, true);
 }
 
+void nvgpu_channel_enable(struct nvgpu_channel *ch)
+{
+	ch->g->ops.channel.enable(ch->g, ch->runlist->id, ch->chid);
+}
+
+void nvgpu_channel_disable(struct nvgpu_channel *ch)
+{
+	ch->g->ops.channel.disable(ch->g, ch->runlist->id, ch->chid);
+}
+
 int nvgpu_channel_enable_tsg(struct gk20a *g, struct nvgpu_channel *ch)
 {
 	struct nvgpu_tsg *tsg;
diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c
index 1eb24eeed..c5c2e50e8 100644
--- a/drivers/gpu/nvgpu/common/fifo/tsg.c
+++ b/drivers/gpu/nvgpu/common/fifo/tsg.c
@@ -193,12 +193,11 @@ int nvgpu_tsg_validate_cilp_config(struct nvgpu_channel *ch)
 
 void nvgpu_tsg_disable(struct nvgpu_tsg *tsg)
 {
-	struct gk20a *g = tsg->g;
 	struct nvgpu_channel *ch;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
-		g->ops.channel.disable(ch);
+		nvgpu_channel_disable(ch);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
@@ -639,7 +638,7 @@ static int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,
 	/* another thread could have re-enabled the channel because it was
 	 * still on the list at that time, so make sure it's truly disabled
 	 */
-	g->ops.channel.disable(ch);
+	nvgpu_channel_disable(ch);
 	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	/*
diff --git a/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.c
index 8eff9a145..580fd2b66 100644
--- a/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -110,35 +110,35 @@ void vgpu_channel_free_inst(struct gk20a *g, struct nvgpu_channel *ch)
 	WARN_ON(err || msg.ret);
 }
 
-void vgpu_channel_enable(struct nvgpu_channel *ch)
+void vgpu_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
+	struct nvgpu_channel *ch = &g->fifo.channel[chid];
 	int err;
-	struct gk20a *g = ch->g;
 
 	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
-	msg.handle = vgpu_get_handle(ch->g);
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 }
 
-void vgpu_channel_disable(struct nvgpu_channel *ch)
+void vgpu_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
+	struct nvgpu_channel *ch = &g->fifo.channel[chid];
 	int err;
-	struct gk20a *g = ch->g;
 
 	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
-	msg.handle = vgpu_get_handle(ch->g);
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
diff --git a/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.h b/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.h
index 7b3f33518..614a4f2b8 100644
--- a/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.h
+++ b/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,8 +30,8 @@ void vgpu_channel_bind(struct nvgpu_channel *ch);
 void vgpu_channel_unbind(struct nvgpu_channel *ch);
 int vgpu_channel_alloc_inst(struct gk20a *g, struct nvgpu_channel *ch);
 void vgpu_channel_free_inst(struct gk20a *g, struct nvgpu_channel *ch);
-void vgpu_channel_enable(struct nvgpu_channel *ch);
-void vgpu_channel_disable(struct nvgpu_channel *ch);
+void vgpu_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid);
+void vgpu_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid);
 u32 vgpu_channel_count(struct gk20a *g);
 void vgpu_channel_set_ctx_mmu_error(struct gk20a *g, struct nvgpu_channel *ch);
 void vgpu_channel_set_error_notifier(struct gk20a *g,
diff --git a/drivers/gpu/nvgpu/common/vgpu/fifo/tsg_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/fifo/tsg_vgpu.c
index 5f3d37d4b..8378b27c0 100644
--- a/drivers/gpu/nvgpu/common/vgpu/fifo/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/fifo/tsg_vgpu.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -81,12 +81,11 @@ void vgpu_tsg_release(struct nvgpu_tsg *tsg)
 
 void vgpu_tsg_enable(struct nvgpu_tsg *tsg)
 {
-	struct gk20a *g = tsg->g;
 	struct nvgpu_channel *ch;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
-		g->ops.channel.enable(ch);
+		nvgpu_channel_enable(ch);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
diff --git a/drivers/gpu/nvgpu/hal/fifo/channel_ga10b.h b/drivers/gpu/nvgpu/hal/fifo/channel_ga10b.h
index bc17fbefd..1283c12ae 100644
--- a/drivers/gpu/nvgpu/hal/fifo/channel_ga10b.h
+++ b/drivers/gpu/nvgpu/hal/fifo/channel_ga10b.h
@@ -30,8 +30,8 @@ struct nvgpu_channel;
 struct nvgpu_channel_hw_state;
 
 u32 ga10b_channel_count(struct gk20a *g);
-void ga10b_channel_enable(struct nvgpu_channel *ch);
-void ga10b_channel_disable(struct nvgpu_channel *ch);
+void ga10b_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid);
+void ga10b_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid);
 void ga10b_channel_bind(struct nvgpu_channel *ch);
 void ga10b_channel_unbind(struct nvgpu_channel *ch);
 void ga10b_channel_clear(struct gk20a *g, u32 runlist_id, u32 chid);
diff --git a/drivers/gpu/nvgpu/hal/fifo/channel_ga10b_fusa.c b/drivers/gpu/nvgpu/hal/fifo/channel_ga10b_fusa.c
index 735c96b53..4478ddda3 100644
--- a/drivers/gpu/nvgpu/hal/fifo/channel_ga10b_fusa.c
+++ b/drivers/gpu/nvgpu/hal/fifo/channel_ga10b_fusa.c
@@ -56,26 +56,20 @@ u32 ga10b_channel_count(struct gk20a *g)
 	return num_channels;
 }
 
-void ga10b_channel_enable(struct nvgpu_channel *ch)
+void ga10b_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	struct gk20a *g = ch->g;
-	struct nvgpu_runlist *runlist = NULL;
-
-	runlist = ch->runlist;
-
-	nvgpu_chram_bar0_writel(g, runlist, runlist_chram_channel_r(ch->chid),
+	nvgpu_chram_bar0_writel(g,
+			g->fifo.runlists[runlist_id],
+			runlist_chram_channel_r(chid),
 			runlist_chram_channel_update_f(
 				runlist_chram_channel_update_enable_channel_v()));
 }
 
-void ga10b_channel_disable(struct nvgpu_channel *ch)
+void ga10b_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	struct gk20a *g = ch->g;
-	struct nvgpu_runlist *runlist = NULL;
-
-	runlist = ch->runlist;
-
-	nvgpu_chram_bar0_writel(g, runlist, runlist_chram_channel_r(ch->chid),
+	nvgpu_chram_bar0_writel(g,
+			g->fifo.runlists[runlist_id],
+			runlist_chram_channel_r(chid),
 			runlist_chram_channel_update_f(
 				runlist_chram_channel_update_disable_channel_v()));
 }
@@ -95,7 +89,7 @@ void ga10b_channel_bind(struct nvgpu_channel *ch)
 	}
 
 	/* Enable channel */
-	g->ops.channel.enable(ch);
+	nvgpu_channel_enable(ch);
 	nvgpu_atomic_set(&ch->bound, CHANNEL_BOUND);
 }
diff --git a/drivers/gpu/nvgpu/hal/fifo/channel_gk20a.h b/drivers/gpu/nvgpu/hal/fifo/channel_gk20a.h
index babf77b89..96621a746 100644
--- a/drivers/gpu/nvgpu/hal/fifo/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/hal/fifo/channel_gk20a.h
@@ -29,8 +29,8 @@ struct nvgpu_channel_hw_state;
 struct nvgpu_debug_context;
 struct nvgpu_channel_dump_info;
 
-void gk20a_channel_enable(struct nvgpu_channel *ch);
-void gk20a_channel_disable(struct nvgpu_channel *ch);
+void gk20a_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid);
+void gk20a_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid);
 void gk20a_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 		struct nvgpu_channel_hw_state *state);
diff --git a/drivers/gpu/nvgpu/hal/fifo/channel_gk20a_fusa.c b/drivers/gpu/nvgpu/hal/fifo/channel_gk20a_fusa.c
index 6b60f76d1..3bd679d45 100644
--- a/drivers/gpu/nvgpu/hal/fifo/channel_gk20a_fusa.c
+++ b/drivers/gpu/nvgpu/hal/fifo/channel_gk20a_fusa.c
@@ -35,18 +35,20 @@
 
 #include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
 
-void gk20a_channel_enable(struct nvgpu_channel *ch)
+void gk20a_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	nvgpu_writel(ch->g, ccsr_channel_r(ch->chid),
-		gk20a_readl(ch->g, ccsr_channel_r(ch->chid)) |
+	(void)runlist_id;
+	nvgpu_writel(g, ccsr_channel_r(chid),
+		gk20a_readl(g, ccsr_channel_r(chid)) |
 		ccsr_channel_enable_set_true_f());
 }
 
-void gk20a_channel_disable(struct nvgpu_channel *ch)
+void gk20a_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	nvgpu_writel(ch->g, ccsr_channel_r(ch->chid),
-		gk20a_readl(ch->g,
-			ccsr_channel_r(ch->chid)) |
+	(void)runlist_id;
+	nvgpu_writel(g, ccsr_channel_r(chid),
+		gk20a_readl(g,
+			ccsr_channel_r(chid)) |
 		ccsr_channel_enable_clr_true_f());
 }
diff --git a/drivers/gpu/nvgpu/hal/fifo/tsg_gk20a.c b/drivers/gpu/nvgpu/hal/fifo/tsg_gk20a.c
index 84c2df17c..9405a7177 100644
--- a/drivers/gpu/nvgpu/hal/fifo/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/hal/fifo/tsg_gk20a.c
@@ -57,7 +57,7 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 				&hw_state);
 
 		if (hw_state.next || hw_state.ctx_reload) {
-			g->ops.channel.enable(ch);
+			nvgpu_channel_enable(ch);
 		}
 	}
 
@@ -71,7 +71,7 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 			continue;
 		}
 
-		g->ops.channel.enable(ch);
+		nvgpu_channel_enable(ch);
 	}
 
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
diff --git a/drivers/gpu/nvgpu/hal/fifo/tsg_gv11b_fusa.c b/drivers/gpu/nvgpu/hal/fifo/tsg_gv11b_fusa.c
index cbd77728d..246ec5bb0 100644
--- a/drivers/gpu/nvgpu/hal/fifo/tsg_gv11b_fusa.c
+++ b/drivers/gpu/nvgpu/hal/fifo/tsg_gv11b_fusa.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -46,7 +46,7 @@ void gv11b_tsg_enable(struct nvgpu_tsg *tsg)
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
-		g->ops.channel.enable(ch);
+		nvgpu_channel_enable(ch);
 		last_ch = ch;
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/channel.h b/drivers/gpu/nvgpu/include/nvgpu/channel.h
index 2880aaf07..f3512f354 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/channel.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/channel.h
@@ -1230,4 +1230,17 @@ static inline void nvgpu_channel_set_wdt_debug_dump(struct nvgpu_channel *ch,
  */
 u32 nvgpu_channel_get_max_subctx_count(struct nvgpu_channel *ch);
 
+/**
+ * @brief Enable the channel.
+ *
+ * @param ch [in] The channel to enable.
+ */
+void nvgpu_channel_enable(struct nvgpu_channel *ch);
+
+/**
+ * @brief Disable the channel.
+ *
+ * @param ch [in] The channel to disable.
+ */
+void nvgpu_channel_disable(struct nvgpu_channel *ch);
 #endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gops/channel.h b/drivers/gpu/nvgpu/include/nvgpu/gops/channel.h
index ce80214a7..cdd34fb49 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gops/channel.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gops/channel.h
@@ -43,24 +43,28 @@ struct gops_channel {
 	/**
 	 * @brief Enable channel for h/w scheduling.
 	 *
-	 * @param ch [in] Channel pointer.
+	 * @param g [in] The GPU driver struct.
+	 * @param runlist_id [in] ID of the runlist the channel belongs to.
+	 * @param chid [in] The channel to enable.
 	 *
 	 * The HAL writes CCSR register to enable channel for h/w scheduling.
 	 * Once enabled, the channel can be scheduled to run when this
 	 * channel is next on the runlist.
 	 */
-	void (*enable)(struct nvgpu_channel *ch);
+	void (*enable)(struct gk20a *g, u32 runlist_id, u32 chid);
 
 	/**
 	 * @brief Disable channel from h/w scheduling.
 	 *
-	 * @param ch [in] Channel pointer.
+	 * @param g [in] The GPU driver struct.
+	 * @param runlist_id [in] ID of the runlist the channel belongs to.
+	 * @param chid [in] The channel to disable.
 	 *
 	 * The HAL writes CCSR register to disable channel from h/w scheduling.
 	 * Once disabled, the channel is not scheduled to run even if it
 	 * is next on the runlist.
	 */
-	void (*disable)(struct nvgpu_channel *ch);
+	void (*disable)(struct gk20a *g, u32 runlist_id, u32 chid);
 
 	/**
 	 * @brief Get number of channels.
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
index 06d7a6767..d45248c29 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics channel
  *
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1484,7 +1484,7 @@ long gk20a_channel_ioctl(struct file *filp,
 			break;
 		}
 		if (ch->g->ops.channel.enable)
-			ch->g->ops.channel.enable(ch);
+			nvgpu_channel_enable(ch);
 		else
 			err = -ENOSYS;
 		gk20a_idle(ch->g);
@@ -1498,7 +1498,7 @@ long gk20a_channel_ioctl(struct file *filp,
 			break;
 		}
 		if (ch->g->ops.channel.disable)
-			ch->g->ops.channel.disable(ch);
+			nvgpu_channel_disable(ch);
 		else
 			err = -ENOSYS;
 		gk20a_idle(ch->g);
diff --git a/userspace/units/fifo/channel/gk20a/nvgpu-channel-gk20a.c b/userspace/units/fifo/channel/gk20a/nvgpu-channel-gk20a.c
index 41b8b9f30..a1318602e 100644
--- a/userspace/units/fifo/channel/gk20a/nvgpu-channel-gk20a.c
+++ b/userspace/units/fifo/channel/gk20a/nvgpu-channel-gk20a.c
@@ -68,7 +68,7 @@ int test_gk20a_channel_enable(struct unit_module *m,
 			privileged, getpid(), getpid());
 	unit_assert(ch, goto done);
 
-	gk20a_channel_enable(ch);
+	gk20a_channel_enable(g, ch->runlist->id, ch->chid);
 
 	unit_assert((nvgpu_readl(ch->g, ccsr_channel_r(ch->chid)) &
 			ccsr_channel_enable_set_true_f()) != 0, goto done);
@@ -93,7 +93,7 @@ int test_gk20a_channel_disable(struct unit_module *m,
 			privileged, getpid(), getpid());
 	unit_assert(ch, goto done);
 
-	gk20a_channel_disable(ch);
+	gk20a_channel_disable(g, ch->runlist->id, ch->chid);
 
 	unit_assert((nvgpu_readl(ch->g, ccsr_channel_r(ch->chid)) &
 			ccsr_channel_enable_clr_true_f()) != 0, goto done);
diff --git a/userspace/units/fifo/tsg/gv11b/nvgpu-tsg-gv11b.c b/userspace/units/fifo/tsg/gv11b/nvgpu-tsg-gv11b.c
index f8beba0d0..f07a3ec10 100644
--- a/userspace/units/fifo/tsg/gv11b/nvgpu-tsg-gv11b.c
+++ b/userspace/units/fifo/tsg/gv11b/nvgpu-tsg-gv11b.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -96,9 +96,9 @@ static void subtest_setup(u32 branches)
 #define branches_str test_fifo_flags_str
 #define pruned test_fifo_subtest_pruned
 
-static void stub_channel_enable(struct nvgpu_channel *ch)
+static void stub_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	stub[0].chid = ch->chid;
+	stub[0].chid = chid;
 	stub[0].count++;
 }
 
diff --git a/userspace/units/fifo/tsg/nvgpu-tsg.c b/userspace/units/fifo/tsg/nvgpu-tsg.c
index fb76fd697..633689b6f 100644
--- a/userspace/units/fifo/tsg/nvgpu-tsg.c
+++ b/userspace/units/fifo/tsg/nvgpu-tsg.c
@@ -1090,10 +1090,10 @@ static const char *f_tsg_enable[] = {
 	"stub"
 };
 
-static void stub_channel_enable(struct nvgpu_channel *ch)
+static void stub_channel_enable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
 	stub[0].name = __func__;
-	stub[0].chid = ch->chid;
+	stub[0].chid = chid;
 	stub[0].count++;
 }
 
@@ -1104,10 +1104,10 @@ static void stub_usermode_ring_doorbell(struct nvgpu_channel *ch)
 {
 	stub[1].name = __func__;
 	stub[1].chid = ch->chid;
 	stub[1].count++;
 }
 
-static void stub_channel_disable(struct nvgpu_channel *ch)
+static void stub_channel_disable(struct gk20a *g, u32 runlist_id, u32 chid)
 {
 	stub[2].name = __func__;
-	stub[2].chid = ch->chid;
+	stub[2].chid = chid;
 	stub[2].count++;
 }