gpu: nvgpu: move cg_enable after pmu_init is complete

This patch helps resolve the boot-time failures with pmu_exterr seen on
porg. cg_enable can race with the pmu_init thread, so cg_enable is moved
to run after the pmu_init thread completes in order to avoid that race.
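
A minimal standalone sketch of the ordering this change enforces, as a
pthread-based toy model: the names pg_init_task, gr_wait_initialized and
cg_*_enable_no_wait below only mirror the nvgpu functions touched by this
patch and are not the real nvgpu API.

/*
 * Toy model (illustrative only): the pg-init thread must not enable
 * clock gating until GR init has finished and signalled completion.
 * Build: cc -pthread ordering_sketch.c -o ordering_sketch
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  init_wq   = PTHREAD_COND_INITIALIZER;
static bool gr_initialized;

/* Stand-in for gk20a_gr_wait_initialized(): block until GR init signals. */
static void gr_wait_initialized(void)
{
	pthread_mutex_lock(&init_lock);
	while (!gr_initialized)
		pthread_cond_wait(&init_wq, &init_lock);
	pthread_mutex_unlock(&init_lock);
}

/* Stand-ins for the cg helpers; the real ones take g and cg_pg_lock. */
static void cg_blcg_enable_no_wait(void) { puts("BLCG enabled"); }
static void cg_elcg_enable_no_wait(void) { puts("ELCG enabled"); }

/* Models nvgpu_pg_init_task(): finish PMU boot, then enable cg only
 * after GR init is done, so cg enable cannot race with init. */
static void *pg_init_task(void *arg)
{
	(void)arg;
	puts("PMU booted, thread exiting");
	gr_wait_initialized();
	cg_blcg_enable_no_wait();
	cg_elcg_enable_no_wait();
	return NULL;
}

int main(void)
{
	pthread_t pg_thread;

	pthread_create(&pg_thread, NULL, pg_init_task, NULL);

	/* Models gk20a_init_gr_support(): finish GR init, then signal
	 * waiters, mirroring g->gr.initialized + nvgpu_cond_signal(). */
	sleep(1);
	pthread_mutex_lock(&init_lock);
	gr_initialized = true;
	pthread_cond_broadcast(&init_wq);
	pthread_mutex_unlock(&init_lock);

	pthread_join(pg_thread, NULL);
	return 0;
}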

Bug 200565050

Change-Id: I2192053eff8767847ea012ca20b3607d2f6cd26f
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2239959
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Debarshi Dutta
Date:      2019-11-15 14:59:31 +05:30
Committer: mobile promotions
Commit:    e45e7b5cf8 (parent 380e6b2c0c)

4 changed files with 45 additions and 5 deletions


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -526,6 +526,13 @@ static int nvgpu_pg_init_task(void *arg)
 		nvgpu_pmu_dbg(g, "loaded zbc");
 		pmu_setup_hw_enable_elpg(g);
 		nvgpu_pmu_dbg(g, "PMU booted, thread exiting");
+
+		gk20a_gr_wait_initialized(g);
+
+		nvgpu_cg_blcg_enable_no_wait(g);
+
+		nvgpu_cg_elcg_enable_no_wait(g);
+
 		return 0;
 	default:
 		nvgpu_pmu_dbg(g, "invalid state");


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -84,6 +84,34 @@ void nvgpu_cg_elcg_disable_no_wait(struct gk20a *g)
 	nvgpu_mutex_release(&g->cg_pg_lock);
 }
 
+void nvgpu_cg_blcg_disable_no_wait(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
+		return;
+	}
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (g->blcg_enabled) {
+		nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_RUN);
+	}
+	nvgpu_mutex_release(&g->cg_pg_lock);
+}
+
+void nvgpu_cg_blcg_enable_no_wait(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
+		return;
+	}
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (g->blcg_enabled) {
+		nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_AUTO);
+	}
+	nvgpu_mutex_release(&g->cg_pg_lock);
+}
+
 void nvgpu_cg_elcg_enable(struct gk20a *g)
 {
 	nvgpu_log_fn(g, " ");


@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics
  *
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -4692,6 +4692,9 @@ static int gk20a_init_gr_prepare(struct gk20a *g)
 	/* Disable elcg until it gets enabled later in the init*/
 	nvgpu_cg_elcg_disable_no_wait(g);
 
+	/* Disable blcg until it gets enabled later in the init*/
+	nvgpu_cg_blcg_disable_no_wait(g);
+
 	/* enable fifo access */
 	gk20a_writel(g, gr_gpfifo_ctl_r(),
 		gr_gpfifo_ctl_access_enabled_f() |
@@ -5012,7 +5015,6 @@ int gk20a_init_gr_support(struct gk20a *g)
 		}
 	}
 
-	nvgpu_cg_elcg_enable_no_wait(g);
 	/* GR is inialized, signal possible waiters */
 	g->gr.initialized = true;
 	nvgpu_cond_signal(&g->gr.init_wq);
@@ -5159,6 +5161,7 @@ int gk20a_gr_reset(struct gk20a *g)
 	nvgpu_cg_init_gr_load_gating_prod(g);
 
 	nvgpu_cg_elcg_enable_no_wait(g);
+	nvgpu_cg_blcg_enable_no_wait(g);
 
 	/* GR is inialized, signal possible waiters */
 	g->gr.initialized = true;


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,8 @@ void nvgpu_cg_elcg_disable(struct gk20a *g);
 void nvgpu_cg_elcg_enable_no_wait(struct gk20a *g);
 void nvgpu_cg_elcg_disable_no_wait(struct gk20a *g);
 void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable);
 
+void nvgpu_cg_blcg_disable_no_wait(struct gk20a *g);
+void nvgpu_cg_blcg_enable_no_wait(struct gk20a *g);
 void nvgpu_cg_blcg_mode_enable(struct gk20a *g);
 void nvgpu_cg_blcg_mode_disable(struct gk20a *g);