mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: vgpu: suspend/resume contexts
Add the ability to suspend/resume contexts for a debug session
(NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_CONTEXTS) in the virtualized case:

- added a HAL function to resume contexts
- added vgpu support for suspending contexts, i.e. build a list of
  channel ids and send TEGRA_VGPU_CMD_SUSPEND_CONTEXTS
- added vgpu support for resuming contexts, i.e. build a list of
  channel ids and send TEGRA_VGPU_CMD_RESUME_CONTEXTS

Bug 1791111

Change-Id: Icc1c00d94a94dab6384ac263fb811c00fa4b07bf
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: http://git-master/r/1294761
(cherry picked from commit d17a38eda312ffa92ce92e5bafc30727a8b76c4e)
Reviewed-on: http://git-master/r/1299059
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Cory Perry <cperry@nvidia.com>
Tested-by: Cory Perry <cperry@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
commit 6c35cebdcb (committed by mobile promotions)
parent bc47d82229
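For orientation, here is a minimal userspace sketch of how a debugger might drive this path once the change is in place. The ioctl name comes from the commit message; the argument struct layout, the action value, and the helper below are assumptions for illustration, not taken from this diff.

/* Hypothetical usage sketch -- arg layout and constants are assumed. */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* assumed argument block for NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_CONTEXTS */
struct suspend_resume_args {
	uint32_t action;              /* assumed: suspend-all or resume-all */
	uint32_t is_resident_context; /* out: whether a context was resident */
	int32_t  resident_context_fd; /* out: fd of the resident channel */
	uint32_t padding;
};

/* dbg_fd: an open debug-session fd; req: the ioctl request number,
 * which is defined in the nvgpu uapi header and not invented here */
static int suspend_all(int dbg_fd, unsigned long req)
{
	struct suspend_resume_args args = { 0 };

	args.action = 1; /* hypothetical value for "suspend all contexts" */
	if (ioctl(dbg_fd, req, &args) < 0) {
		perror("suspend contexts");
		return -1;
	}
	if (args.is_resident_context)
		printf("resident channel fd: %d\n", args.resident_context_fd);
	return 0;
}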
@@ -777,7 +777,7 @@ nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
 		break;
 
 	case NVGPU_DBG_GPU_RESUME_ALL_CONTEXTS:
-		err = gr_gk20a_resume_contexts(g, dbg_s,
+		err = g->ops.gr.resume_contexts(g, dbg_s,
 				&ctx_resident_ch_fd);
 		break;
 	}

@@ -309,6 +309,9 @@ struct gpu_ops {
 	int (*suspend_contexts)(struct gk20a *g,
 			struct dbg_session_gk20a *dbg_s,
 			int *ctx_resident_ch_fd);
+	int (*resume_contexts)(struct gk20a *g,
+			struct dbg_session_gk20a *dbg_s,
+			int *ctx_resident_ch_fd);
 	int (*set_preemption_mode)(struct channel_gk20a *ch,
 			u32 graphics_preempt_mode,
 			u32 compute_preempt_mode);

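The new gpu_ops slot above is the HAL indirection point: per-chip init code (native gk20a/gm20b or vgpu) fills the pointer, and the ioctl path calls through g->ops.gr.* without knowing which backend it got. A minimal standalone sketch of the pattern, with simplified hypothetical types rather than real nvgpu ones:

#include <stdio.h>

struct gk20a; /* opaque in this sketch */

/* trimmed-down stand-in for the gr sub-table of struct gpu_ops */
struct gr_ops {
	int (*resume_contexts)(struct gk20a *g, int *ctx_resident_ch_fd);
};

static int native_resume_contexts(struct gk20a *g, int *fd)
{
	(void)g;
	*fd = -1; /* no resident context in this stub */
	return 0;
}

static int vgpu_resume_contexts(struct gk20a *g, int *fd)
{
	(void)g;
	*fd = -1; /* a real vgpu backend would RPC to the server instead */
	return 0;
}

int main(void)
{
	struct gr_ops ops;
	int fd;

	/* init code picks the backend once... */
	ops.resume_contexts = native_resume_contexts;
	ops.resume_contexts(NULL, &fd);
	/* ...or, on a virtualized setup, the vgpu variant */
	ops.resume_contexts = vgpu_resume_contexts;
	ops.resume_contexts(NULL, &fd);
	printf("resident fd: %d\n", fd);
	return 0;
}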
@@ -9304,6 +9304,7 @@ void gk20a_init_gr_ops(struct gpu_ops *gops)
 	gops->gr.update_sm_error_state = gk20a_gr_update_sm_error_state;
 	gops->gr.clear_sm_error_state = gk20a_gr_clear_sm_error_state;
 	gops->gr.suspend_contexts = gr_gk20a_suspend_contexts;
+	gops->gr.resume_contexts = gr_gk20a_resume_contexts;
 	gops->gr.get_preemption_mode_flags = gr_gk20a_get_preemption_mode_flags;
 	gops->gr.program_active_tpc_counts = gr_gk20a_program_active_tpc_counts;
 	gops->gr.program_sm_id_numbering = gr_gk20a_program_sm_id_numbering;

@@ -1589,6 +1589,7 @@ void gm20b_init_gr(struct gpu_ops *gops)
 	gops->gr.update_sm_error_state = gm20b_gr_update_sm_error_state;
 	gops->gr.clear_sm_error_state = gm20b_gr_clear_sm_error_state;
 	gops->gr.suspend_contexts = gr_gk20a_suspend_contexts;
+	gops->gr.resume_contexts = gr_gk20a_resume_contexts;
 	gops->gr.get_preemption_mode_flags = gr_gm20b_get_preemption_mode_flags;
 	gops->gr.fuse_override = gm20b_gr_fuse_override;
 	gops->gr.init_sm_id_table = gr_gk20a_init_sm_id_table;

@@ -405,6 +405,7 @@ int vgpu_init_fifo_support(struct gk20a *g)
 static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 {
 	struct fifo_gk20a *f = &g->fifo;
+	struct channel_gk20a *ch = &f->channel[hw_chid];
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;

@@ -412,9 +413,12 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 
 	gk20a_dbg_fn("");
 
+	if (!atomic_read(&ch->bound))
+		return 0;
+
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_PREEMPT;
 	msg.handle = vgpu_get_handle(g);
-	p->handle = f->channel[hw_chid].virt_ctx;
+	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
 	if (err || msg.ret) {

@@ -1085,6 +1085,83 @@ static int vgpu_gr_clear_sm_error_state(struct gk20a *g,
 	return 0;
 }
 
+static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
+		struct dbg_session_gk20a *dbg_s,
+		int *ctx_resident_ch_fd, u32 cmd)
+{
+	struct dbg_session_channel_data *ch_data;
+	struct tegra_vgpu_cmd_msg *msg;
+	struct tegra_vgpu_suspend_resume_contexts *p;
+	size_t size_out = offsetof(struct tegra_vgpu_cmd_msg,
+			params.suspend_contexts.chids);
+	size_t size_in;
+	size_t n;
+	int channel_fd = -1;
+	int err = 0;
+
+	mutex_lock(&g->dbg_sessions_lock);
+	mutex_lock(&dbg_s->ch_list_lock);
+
+	n = 0;
+	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry)
+		n++;
+
+	size_in = size_out + n * sizeof(u16);
+
+	msg = kmalloc(size_in, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->cmd = cmd;
+	msg->handle = vgpu_get_handle(g);
+	p = &msg->params.suspend_contexts;
+	p->num_channels = n;
+	n = 0;
+	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
+		p->chids[n++] = (u16)ch_data->chid;
+	}
+
+	err = vgpu_comm_sendrecv(msg, size_in, size_out);
+	if (err || msg->ret) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	if (p->resident_chid != (u16)~0) {
+		list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
+			if (ch_data->chid == p->resident_chid) {
+				channel_fd = ch_data->channel_fd;
+				break;
+			}
+		}
+	}
+
+fail:
+	mutex_unlock(&dbg_s->ch_list_lock);
+	mutex_unlock(&g->dbg_sessions_lock);
+
+	*ctx_resident_ch_fd = channel_fd;
+	kfree(msg);
+
+	return err;
+}
+
+static int vgpu_gr_suspend_contexts(struct gk20a *g,
+		struct dbg_session_gk20a *dbg_s,
+		int *ctx_resident_ch_fd)
+{
+	return vgpu_gr_suspend_resume_contexts(g, dbg_s,
+			ctx_resident_ch_fd, TEGRA_VGPU_CMD_SUSPEND_CONTEXTS);
+}
+
+static int vgpu_gr_resume_contexts(struct gk20a *g,
+		struct dbg_session_gk20a *dbg_s,
+		int *ctx_resident_ch_fd)
+{
+	return vgpu_gr_suspend_resume_contexts(g, dbg_s,
+			ctx_resident_ch_fd, TEGRA_VGPU_CMD_RESUME_CONTEXTS);
+}
+
 void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
 		struct tegra_vgpu_sm_esr_info *info)
 {

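The RPC message above is variable-length: size_out is the fixed header (everything up to the flexible array member chids[]) and size_in adds one u16 per channel, both computed with offsetof. A standalone sketch of the same sizing pattern follows; the names are hypothetical and the struct is flattened to one level, since strictly conforming C does not allow a flexible array member nested inside another struct the way the kernel's params union does. (Note in passing that the allocation-failure path above returns with both mutexes still held.)

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* hypothetical flattened message mirroring the tegra_vgpu layout */
struct demo_msg {
	uint32_t cmd;
	int32_t ret;
	uint32_t num_channels;
	uint16_t resident_chid;
	uint16_t chids[];             /* flexible array member */
};

int main(void)
{
	size_t n = 4;                 /* number of channel ids to send */
	/* header size: everything up to the flexible array */
	size_t size_out = offsetof(struct demo_msg, chids);
	/* full size: header plus one u16 per channel id */
	size_t size_in = size_out + n * sizeof(uint16_t);
	struct demo_msg *msg = malloc(size_in);

	if (!msg)
		return 1;
	msg->num_channels = (uint32_t)n;
	for (size_t i = 0; i < n; i++)
		msg->chids[i] = (uint16_t)i;
	/* send size_in bytes out; expect only the fixed header back */
	free(msg);
	return 0;
}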
@@ -1133,6 +1210,8 @@ void vgpu_init_gr_ops(struct gpu_ops *gops)
 	gops->gr.update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode;
 	gops->gr.update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode;
 	gops->gr.clear_sm_error_state = vgpu_gr_clear_sm_error_state;
+	gops->gr.suspend_contexts = vgpu_gr_suspend_contexts;
+	gops->gr.resume_contexts = vgpu_gr_resume_contexts;
 	gops->gr.dump_gr_regs = NULL;
 	gops->gr.set_boosted_ctx = NULL;
 	gops->gr.update_boosted_ctx = NULL;

@@ -99,6 +99,8 @@ enum {
 	TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT = 63,
 	TEGRA_VGPU_CMD_TSG_OPEN = 64,
 	TEGRA_VGPU_CMD_GET_GPU_LOAD = 65,
+	TEGRA_VGPU_CMD_SUSPEND_CONTEXTS = 66,
+	TEGRA_VGPU_CMD_RESUME_CONTEXTS = 67,
 };
 
 struct tegra_vgpu_connect_params {

@@ -454,6 +456,12 @@ struct tegra_vgpu_gpu_load_params {
 	u32 load;
 };
 
+struct tegra_vgpu_suspend_resume_contexts {
+	u32 num_channels;
+	u16 resident_chid;
+	u16 chids[];
+};
+
 struct tegra_vgpu_cmd_msg {
 	u32 cmd;
 	int ret;

@@ -500,6 +508,8 @@ struct tegra_vgpu_cmd_msg {
 		struct tegra_vgpu_constants_params constants;
 		struct tegra_vgpu_channel_cyclestats_snapshot_params cyclestats_snapshot;
 		struct tegra_vgpu_gpu_load_params gpu_load;
+		struct tegra_vgpu_suspend_resume_contexts suspend_contexts;
+		struct tegra_vgpu_suspend_resume_contexts resume_contexts;
 		char padding[192];
 	} params;
 };
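Since suspend_contexts and resume_contexts are the same struct declared twice inside the params union, they name the same bytes; the vgpu code above relies on this when it fills params.suspend_contexts regardless of whether cmd is the suspend or the resume command. A minimal illustration of that aliasing, with hypothetical simplified types:

#include <assert.h>
#include <stdint.h>

struct payload { uint32_t num_channels; };

/* hypothetical trimmed-down tegra_vgpu_cmd_msg */
struct msg {
	uint32_t cmd;
	union {
		struct payload suspend_contexts;
		struct payload resume_contexts;
	} params;
};

int main(void)
{
	struct msg m;

	m.params.suspend_contexts.num_channels = 7;
	/* both union members refer to the same storage, so either
	 * name can be used to read the payload back */
	assert(m.params.resume_contexts.num_channels == 7);
	return 0;
}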