gpu: nvgpu: vgpu: use ivm to send auxiliary data

The RM server retrieves auxiliary data only from IVM.
Modify the IVC commands to pass auxiliary data to the
RM server through IVM instead of embedding it as part
of the command message.

VFND-4166

Change-Id: I9bfe33cf9301f7c70709318b810c622ec57b1cdf
Signed-off-by: Aparna Das <aparnad@nvidia.com>
Reviewed-on: http://git-master/r/1484130
Reviewed-by: svcboomerang <svcboomerang@nvidia.com>
Tested-by: svcboomerang <svcboomerang@nvidia.com>
3 changed files with 53 additions and 39 deletions
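For context, both call sites below now follow the same shape: map the IVM-backed out-of-band (OOB) buffer shared with the RM server, check that the payload fits, copy the variable-length data into it, send only the fixed-size command message over IVC, then release the buffer. A minimal sketch of that pattern, using the tegra_gr_comm_* helpers and message types seen in the diff; the wrapper name vgpu_send_cmd_with_oob() and its error mapping are illustrative only, not part of this change:

static int vgpu_send_cmd_with_oob(struct tegra_vgpu_cmd_msg *msg,
				  const void *payload, size_t payload_size)
{
	void *oob_handle;
	void *oob;
	size_t oob_size;
	int err;

	/* Map the shared IVM (OOB) buffer for the command queue. */
	oob_handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
			tegra_gr_comm_get_server_vmid(), TEGRA_VGPU_QUEUE_CMD,
			&oob, &oob_size);
	if (!oob_handle)
		return -EINVAL;

	/* The auxiliary payload must fit in the shared buffer. */
	if (oob_size < payload_size) {
		err = -ENOMEM;
		goto done;
	}

	/* Auxiliary data travels via IVM; only the fixed-size
	 * command message goes over IVC. */
	memcpy(oob, payload, payload_size);
	err = vgpu_comm_sendrecv(msg, sizeof(*msg), sizeof(*msg));
	if (!err && msg->ret)
		err = -EINVAL;	/* illustrative mapping of server errors */

done:
	tegra_gr_comm_oob_put_ptr(oob_handle);
	return err;
}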


@@ -461,28 +461,38 @@ static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 static int vgpu_submit_runlist(struct gk20a *g, u64 handle, u8 runlist_id,
 			u16 *runlist, u32 num_entries)
 {
-	struct tegra_vgpu_cmd_msg *msg;
+	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_runlist_params *p;
-	size_t size = sizeof(*msg) + sizeof(*runlist) * num_entries;
-	char *ptr;
 	int err;
+	void *oob_handle;
+	void *oob;
+	size_t size, oob_size;
 
-	msg = nvgpu_kmalloc(g, size);
-	if (!msg)
-		return -1;
+	oob_handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
+			tegra_gr_comm_get_server_vmid(), TEGRA_VGPU_QUEUE_CMD,
+			&oob, &oob_size);
+	if (!oob_handle)
+		return -EINVAL;
 
-	msg->cmd = TEGRA_VGPU_CMD_SUBMIT_RUNLIST;
-	msg->handle = handle;
-	p = &msg->params.runlist;
+	size = sizeof(*runlist) * num_entries;
+	if (oob_size < size) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	msg.cmd = TEGRA_VGPU_CMD_SUBMIT_RUNLIST;
+	msg.handle = handle;
+	p = &msg.params.runlist;
 	p->runlist_id = runlist_id;
 	p->num_entries = num_entries;
-
-	ptr = (char *)msg + sizeof(*msg);
-	memcpy(ptr, runlist, sizeof(*runlist) * num_entries);
-	err = vgpu_comm_sendrecv(msg, size, sizeof(*msg));
+	memcpy(oob, runlist, size);
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
-	err = (err || msg->ret) ? -1 : 0;
-	nvgpu_kfree(g, msg);
+	err = (err || msg.ret) ? -1 : 0;
 
+done:
+	tegra_gr_comm_oob_put_ptr(oob_handle);
 	return err;
 }


@@ -1100,41 +1100,47 @@ static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
 		int *ctx_resident_ch_fd, u32 cmd)
 {
 	struct dbg_session_channel_data *ch_data;
-	struct tegra_vgpu_cmd_msg *msg;
+	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_suspend_resume_contexts *p;
-	size_t size_out = offsetof(struct tegra_vgpu_cmd_msg,
-			params.suspend_contexts.chids);
-	size_t size_in;
 	size_t n;
 	int channel_fd = -1;
 	int err = 0;
+	void *handle = NULL;
+	u16 *oob;
+	size_t oob_size;
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
 
+	handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
+			tegra_gr_comm_get_server_vmid(), TEGRA_VGPU_QUEUE_CMD,
+			(void **)&oob, &oob_size);
+	if (!handle) {
+		err = -EINVAL;
+		goto done;
+	}
+
 	n = 0;
 	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry)
 		n++;
 
-	size_in = size_out + n * sizeof(u16);
-
-	msg = nvgpu_kmalloc(g, size_in);
-	if (!msg)
-		return -ENOMEM;
-
-	msg->cmd = cmd;
-	msg->handle = vgpu_get_handle(g);
-	p = &msg->params.suspend_contexts;
-	p->num_channels = n;
-	n = 0;
-	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
-		p->chids[n++] = (u16)ch_data->chid;
+	if (oob_size < n * sizeof(u16)) {
+		err = -ENOMEM;
+		goto done;
 	}
 
-	err = vgpu_comm_sendrecv(msg, size_in, size_out);
-	if (err || msg->ret) {
+	msg.cmd = cmd;
+	msg.handle = vgpu_get_handle(g);
+	p = &msg.params.suspend_contexts;
+	p->num_channels = n;
+	n = 0;
+	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry)
+		oob[n++] = (u16)ch_data->chid;
+
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	if (err || msg.ret) {
 		err = -ENOMEM;
-		goto fail;
+		goto done;
 	}
 
 	if (p->resident_chid != (u16)~0) {
@@ -1146,13 +1152,12 @@ static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
 		}
 	}
 
-fail:
+done:
+	if (handle)
+		tegra_gr_comm_oob_put_ptr(handle);
 	nvgpu_mutex_release(&dbg_s->ch_list_lock);
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	*ctx_resident_ch_fd = channel_fd;
-	nvgpu_kfree(g, msg);
 	return err;
 }


@@ -475,7 +475,6 @@ struct tegra_vgpu_gpu_load_params {
 struct tegra_vgpu_suspend_resume_contexts {
 	u32 num_channels;
 	u16 resident_chid;
-	u16 chids[];
 };
 
 struct tegra_vgpu_clear_sm_error_state {