mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: vgpu: remove virt_ctx from tegra_gr_comm
queue index can already index the queues. It also helps make the API more common. Jira EVLR-2364 Change-Id: I98a5014ba0510a2687fdf096a160c497bd1f6985 Signed-off-by: Richard Zhao <rizhao@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1646197 Reviewed-by: Damian Halas <dhalas@nvidia.com> Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> Reviewed-by: Aingara Paramakuru <aparamakuru@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: Nirav Patel <nipatel@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
1a7484c901
commit
1a9d4c1cfa
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
@@ -42,8 +42,7 @@ int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
|
||||
gk20a_dbg_fn("");
|
||||
BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op));
|
||||
|
||||
handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
|
||||
tegra_gr_comm_get_server_vmid(),
|
||||
handle = tegra_gr_comm_oob_get_ptr(tegra_gr_comm_get_server_vmid(),
|
||||
TEGRA_VGPU_QUEUE_CMD,
|
||||
&oob, &oob_size);
|
||||
if (!handle)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* Virtualized GPU Fifo
|
||||
*
|
||||
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
@@ -499,8 +499,8 @@ static int vgpu_submit_runlist(struct gk20a *g, u64 handle, u8 runlist_id,
|
||||
void *oob;
|
||||
size_t size, oob_size;
|
||||
|
||||
oob_handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
|
||||
tegra_gr_comm_get_server_vmid(), TEGRA_VGPU_QUEUE_CMD,
|
||||
oob_handle = tegra_gr_comm_oob_get_ptr(tegra_gr_comm_get_server_vmid(),
|
||||
TEGRA_VGPU_QUEUE_CMD,
|
||||
&oob, &oob_size);
|
||||
if (!oob_handle)
|
||||
return -EINVAL;
|
||||
|
||||
@@ -95,8 +95,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
|
||||
}
|
||||
}
|
||||
|
||||
handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
|
||||
tegra_gr_comm_get_server_vmid(),
|
||||
handle = tegra_gr_comm_oob_get_ptr(tegra_gr_comm_get_server_vmid(),
|
||||
TEGRA_VGPU_QUEUE_CMD,
|
||||
(void **)&mem_desc, &oob_size);
|
||||
if (!handle) {
|
||||
|
||||
@@ -1121,8 +1121,8 @@ static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
|
||||
nvgpu_mutex_acquire(&g->dbg_sessions_lock);
|
||||
nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
|
||||
|
||||
handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
|
||||
tegra_gr_comm_get_server_vmid(), TEGRA_VGPU_QUEUE_CMD,
|
||||
handle = tegra_gr_comm_oob_get_ptr(tegra_gr_comm_get_server_vmid(),
|
||||
TEGRA_VGPU_QUEUE_CMD,
|
||||
(void **)&oob, &oob_size);
|
||||
if (!handle) {
|
||||
err = -EINVAL;
|
||||
@@ -1238,8 +1238,7 @@ int vgpu_gr_init_sm_id_table(struct gk20a *g)
|
||||
return err;
|
||||
}
|
||||
|
||||
handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
|
||||
tegra_gr_comm_get_server_vmid(),
|
||||
handle = tegra_gr_comm_oob_get_ptr(tegra_gr_comm_get_server_vmid(),
|
||||
TEGRA_VGPU_QUEUE_CMD,
|
||||
(void **)&entry, &oob_size);
|
||||
if (!handle)
|
||||
|
||||
@@ -53,8 +53,7 @@ static inline int vgpu_comm_init(struct platform_device *pdev)
|
||||
{
|
||||
size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };
|
||||
|
||||
return tegra_gr_comm_init(pdev, TEGRA_GR_COMM_CTX_CLIENT, 3,
|
||||
queue_sizes, TEGRA_VGPU_QUEUE_CMD,
|
||||
return tegra_gr_comm_init(pdev, 3, queue_sizes, TEGRA_VGPU_QUEUE_CMD,
|
||||
ARRAY_SIZE(queue_sizes));
|
||||
}
|
||||
|
||||
@@ -62,8 +61,7 @@ static inline void vgpu_comm_deinit(void)
|
||||
{
|
||||
size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };
|
||||
|
||||
tegra_gr_comm_deinit(TEGRA_GR_COMM_CTX_CLIENT, TEGRA_VGPU_QUEUE_CMD,
|
||||
ARRAY_SIZE(queue_sizes));
|
||||
tegra_gr_comm_deinit(TEGRA_VGPU_QUEUE_CMD, ARRAY_SIZE(queue_sizes));
|
||||
}
|
||||
|
||||
int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
|
||||
@@ -74,8 +72,7 @@ int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
|
||||
void *data = msg;
|
||||
int err;
|
||||
|
||||
err = tegra_gr_comm_sendrecv(TEGRA_GR_COMM_CTX_CLIENT,
|
||||
tegra_gr_comm_get_server_vmid(),
|
||||
err = tegra_gr_comm_sendrecv(tegra_gr_comm_get_server_vmid(),
|
||||
TEGRA_VGPU_QUEUE_CMD, &handle, &data, &size);
|
||||
if (!err) {
|
||||
WARN_ON(size < size_out);
|
||||
@@ -152,8 +149,7 @@ static int vgpu_intr_thread(void *dev_id)
|
||||
size_t size;
|
||||
int err;
|
||||
|
||||
err = tegra_gr_comm_recv(TEGRA_GR_COMM_CTX_CLIENT,
|
||||
TEGRA_VGPU_QUEUE_INTR, &handle,
|
||||
err = tegra_gr_comm_recv(TEGRA_VGPU_QUEUE_INTR, &handle,
|
||||
(void **)&msg, &size, &sender);
|
||||
if (err == -ETIME)
|
||||
continue;
|
||||
@@ -229,8 +225,7 @@ static void vgpu_remove_support(struct gk20a *g)
|
||||
g->mm.remove_support(&g->mm);
|
||||
|
||||
msg.event = TEGRA_VGPU_EVENT_ABORT;
|
||||
err = tegra_gr_comm_send(TEGRA_GR_COMM_CTX_CLIENT,
|
||||
TEGRA_GR_COMM_ID_SELF, TEGRA_VGPU_QUEUE_INTR,
|
||||
err = tegra_gr_comm_send(TEGRA_GR_COMM_ID_SELF, TEGRA_VGPU_QUEUE_INTR,
|
||||
&msg, sizeof(msg));
|
||||
WARN_ON(err);
|
||||
nvgpu_thread_stop(&priv->intr_handler);
|
||||
|
||||
Reference in New Issue
Block a user