gpu: nvgpu: vgpu: remove cmd TSG_BIND_GR_CTX

The RM server only needed TSG_BIND_GR_CTX to set the vm and tsg for a
gr_ctx. Both can be set when the gr_ctx is allocated (see the sketch
below), so remove TSG_BIND_GR_CTX.

Jira GVSCI-179

Change-Id: Ic7fdcceecd2fa0ea1f29a50b797c8261d6e0720b
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1977576
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Aparna Das <aparnad@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Richard Zhao, 2018-12-20 15:50:13 -08:00
Committed: mobile promotions
Commit:    3b75042842 (parent d37187f1f8)
2 changed files with 0 additions and 30 deletions
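The removed bind step folds into the allocation RPC: tegra_vgpu_gr_ctx_params already carries a tsg_id (visible as context in the header diff below), so the server can associate the gr_ctx with its TSG and VM at allocation time. A minimal sketch of that idea, reusing identifiers that appear in this commit — the body of the vgpu alloc_gr_ctx implementation is an assumption for illustration, not code from the tree:

```c
/*
 * Sketch only: one RPC both allocates the gr_ctx and tells the RM
 * server which TSG it belongs to. Everything beyond tsg_id and the
 * message envelope is assumed for illustration.
 */
static int vgpu_gr_alloc_gr_ctx_sketch(struct gk20a *g,
                struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm)
{
    struct tegra_vgpu_cmd_msg msg = {0};
    struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
    int err;

    msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
    msg.handle = vgpu_get_handle(g);
    p->tsg_id = gr_ctx->tsgid;  /* caller stamps tsgid before calling */
    /* ... GPU VA/size parameters of the allocation elided ... */

    err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
    return err ? err : msg.ret;
}
```

One round trip replaces two, and there is no longer a window in which a gr_ctx exists on the server without a TSG/VM association.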

@@ -87,7 +87,6 @@ enum {
 	TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX = 47,
 	TEGRA_VGPU_CMD_GR_CTX_ALLOC = 48,
 	TEGRA_VGPU_CMD_GR_CTX_FREE = 49,
-	TEGRA_VGPU_CMD_TSG_BIND_GR_CTX = 51,
 	TEGRA_VGPU_CMD_TSG_BIND_CHANNEL = 52,
 	TEGRA_VGPU_CMD_TSG_UNBIND_CHANNEL = 53,
 	TEGRA_VGPU_CMD_TSG_PREEMPT = 54,
@@ -403,11 +402,6 @@ struct tegra_vgpu_gr_ctx_params {
 	u32 tsg_id;
 };
 
-struct tegra_vgpu_tsg_bind_gr_ctx_params {
-	u32 tsg_id;
-	u64 gr_ctx_handle;
-};
-
 struct tegra_vgpu_tsg_bind_unbind_channel_params {
 	u32 tsg_id;
 	u64 ch_handle;
@@ -655,7 +649,6 @@ struct tegra_vgpu_cmd_msg {
 		struct tegra_vgpu_channel_set_ctxsw_mode set_ctxsw_mode;
 		struct tegra_vgpu_channel_free_hwpm_ctx free_hwpm_ctx;
 		struct tegra_vgpu_gr_ctx_params gr_ctx;
-		struct tegra_vgpu_tsg_bind_gr_ctx_params tsg_bind_gr_ctx;
 		struct tegra_vgpu_tsg_bind_unbind_channel_params tsg_bind_unbind_channel;
 		struct tegra_vgpu_tsg_open_rel_params tsg_open;
 		struct tegra_vgpu_tsg_open_rel_params tsg_release;
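For orientation, every vgpu command above travels in the same envelope: a command selector, a connection handle, a server-filled return code, and a per-command params union — which is why dropping a command also drops its member from the union. A rough sketch of that layout (member order and unrelated fields abridged; see the full definition in the header):

```c
struct tegra_vgpu_cmd_msg {
    u32 cmd;     /* one of the TEGRA_VGPU_CMD_* values */
    int ret;     /* filled in by the RM server */
    u64 handle;  /* vgpu connection handle */
    union {
        struct tegra_vgpu_gr_ctx_params gr_ctx;
        struct tegra_vgpu_tsg_bind_unbind_channel_params
                tsg_bind_unbind_channel;
        /* ... one member per remaining command ... */
    } params;
};
```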

@@ -450,25 +450,6 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
 	}
 }
 
-static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
-{
-	struct nvgpu_gr_ctx *gr_ctx = tsg->gr_ctx;
-	struct tegra_vgpu_cmd_msg msg = {0};
-	struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
-				&msg.params.tsg_bind_gr_ctx;
-	int err;
-
-	msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_GR_CTX;
-	msg.handle = vgpu_get_handle(tsg->g);
-	p->tsg_id = tsg->tsgid;
-	p->gr_ctx_handle = gr_ctx->virt_ctx;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	err = err ? err : msg.ret;
-	WARN_ON(err);
-
-	return err;
-}
-
 int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 {
 	struct gk20a *g = c->g;
@@ -505,10 +486,6 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 		gr_ctx->tsgid = tsg->tsgid;
 		err = g->ops.gr.alloc_gr_ctx(g, gr_ctx,
 				c->vm);
-		if (!err) {
-			gr_ctx->tsgid = tsg->tsgid;
-			err = vgpu_gr_tsg_bind_gr_ctx(tsg);
-		}
 		if (err) {
 			nvgpu_err(g,
 				"fail to allocate TSG gr ctx buffer, err=%d", err);