mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-25 02:52:51 +03:00
gpu: nvgpu: allow global regops before ctx is created
In nvgpu_ioctl_channel_reg_ops(), we currently first check whether a context is allocated, and if the context is not allocated we fail the regops operation. But it is possible that the regops operation includes only global regops, which do not need the global context to be allocated. So move this global context check from nvgpu_ioctl_channel_reg_ops() to exec_regops_gk20a(), and perform it only if context ops are included in the regops. Bug 200431958 Change-Id: Iaa4953235d95b2106d5f81a456141d3a57603fb9 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1789262 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
63e6e8ee3e
commit
7216f3dd71
@@ -100,9 +100,6 @@ static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
|
||||
struct gr_gk20a *gr);
|
||||
|
||||
static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset);
|
||||
|
||||
static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
|
||||
@@ -870,13 +867,6 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* be sure that ctx info is in place */
|
||||
if (!g->is_virtual &&
|
||||
!gr_context_info_available(dbg_s, &g->gr)) {
|
||||
nvgpu_err(g, "gr context data not available");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* since exec_reg_ops sends methods to the ucode, it must take the
|
||||
* global gpu lock to protect against mixing methods from debug sessions
|
||||
* on other channels */
|
||||
@@ -1653,29 +1643,6 @@ static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
|
||||
args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
|
||||
}
|
||||
|
||||
/* In order to perform a context relative op the context has
|
||||
* to be created already... which would imply that the
|
||||
* context switch mechanism has already been put in place.
|
||||
* So by the time we perform such an opertation it should always
|
||||
* be possible to query for the appropriate context offsets, etc.
|
||||
*
|
||||
* But note: while the dbg_gpu bind requires the a channel fd,
|
||||
* it doesn't require an allocated gr/compute obj at that point...
|
||||
*/
|
||||
static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
|
||||
struct gr_gk20a *gr)
|
||||
{
|
||||
int err;
|
||||
|
||||
nvgpu_mutex_acquire(&gr->ctx_mutex);
|
||||
err = !gr->ctx_vars.golden_image_initialized;
|
||||
nvgpu_mutex_release(&gr->ctx_mutex);
|
||||
if (err)
|
||||
return false;
|
||||
return true;
|
||||
|
||||
}
|
||||
|
||||
static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
|
||||
{
|
||||
struct mm_gk20a *mm = &g->mm;
|
||||
|
||||
Reference in New Issue
Block a user