gpu: nvgpu: update all ctx headers in the tsg when update hwpm mode

FECS could use any ctx headers for context switch, so needs to update
all ctx headers in the same tsg with hwpm buffer address.

Bug 2404093
Bug 200454109

Change-Id: I99e74cd8c768c06c3d215779db899a1318522db0
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1917756
(cherry picked from commit f447a8d279b0d0abc8e2d3eb6f0596ea5cd87cbf)
Signed-off-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1920608
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Richard Zhao
2018-10-02 16:42:01 -07:00
committed by mobile promotions
parent 501156ba22
commit 12acc96687

View File

@@ -1857,7 +1857,13 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	nvgpu_mem_wr(g, gr_mem, ctxsw_prog_main_image_pm_o(), data);
 	if (ctxheader->gpu_va) {
-		g->ops.gr.write_pm_ptr(g, ctxheader, virt_addr);
+		struct channel_gk20a *ch;
+
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
+		nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+			g->ops.gr.write_pm_ptr(g, &ch->ctx_header, virt_addr);
+		}
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	} else {
 		g->ops.gr.write_pm_ptr(g, gr_mem, virt_addr);
 	}