gpu: nvgpu: add TSG enable/disable operations

Add TSG enable/disable operations for gv11b/gv100

To disable a TSG, we continue to use gk20a_disable_tsg().

To enable a TSG, add a new API gv11b_fifo_enable_tsg(), since the TSG
enable sequence on Volta differs from that on previous chips.
On Volta it is sufficient to loop over all the channels in the TSG and
enable them sequentially.

Bug 1739362

Change-Id: Id4b4684959204c6101ceda83487a41fbfcba8b5f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1560642
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
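
For context, a minimal sketch of how common code can drive these per-chip
hooks through gpu_ops. The wrapper function below and its
disable -> preempt -> re-enable flow are illustrative assumptions, not part
of this change; only the enable_tsg/disable_tsg hooks it calls are wired up
here.

/*
 * Illustrative only: a hypothetical caller that restarts a TSG via the
 * per-chip hooks. Not code added by this change.
 */
static int example_tsg_restart(struct gk20a *g, struct tsg_gk20a *tsg)
{
	int err;

	/* resolves to gk20a_disable_tsg() on gv100/gv11b */
	g->ops.fifo.disable_tsg(tsg);

	/* resolves to gv11b_fifo_preempt_tsg() on gv100/gv11b */
	err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
	if (err != 0)
		return err;

	/* resolves to gv11b_fifo_enable_tsg() on gv100/gv11b */
	return g->ops.fifo.enable_tsg(tsg);
}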


@@ -397,6 +397,8 @@ static const struct gpu_ops gv100_ops = {
 		.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
 		.preempt_channel = gv11b_fifo_preempt_channel,
 		.preempt_tsg = gv11b_fifo_preempt_tsg,
+		.enable_tsg = gv11b_fifo_enable_tsg,
+		.disable_tsg = gk20a_disable_tsg,
 		.update_runlist = gk20a_fifo_update_runlist,
 		.trigger_mmu_fault = NULL,
 		.get_mmu_fault_info = NULL,


@@ -785,6 +785,21 @@ static int __locked_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
 	return ret;
 }
 
+/* TSG enable sequence applicable for Volta and onwards */
+int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
+{
+	struct gk20a *g = tsg->g;
+	struct channel_gk20a *ch;
+
+	down_read(&tsg->ch_list_lock);
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+		g->ops.fifo.enable_channel(ch);
+	}
+	up_read(&tsg->ch_list_lock);
+
+	return 0;
+}
+
 int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 {
 	struct fifo_gk20a *f = &g->fifo;
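
For reference, the per-channel hook the new loop invokes,
g->ops.fifo.enable_channel(), essentially performs a read-modify-write of
the channel's CCSR register to set its enable bit. A minimal sketch, with
an illustrative helper name (gk20a_readl()/gk20a_writel() and the
ccsr_channel_*() accessors are existing nvgpu helpers; the chid field name
may vary across driver versions):

/* Illustrative stand-in for the per-channel enable hook */
static void example_enable_one_channel(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;
	u32 reg = ccsr_channel_r(ch->chid);

	/* set the channel enable bit, preserving the other fields */
	gk20a_writel(g, reg, gk20a_readl(g, reg) |
			ccsr_channel_enable_set_true_f());
}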


@@ -69,6 +69,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		unsigned int id_type, unsigned int timeout_rc_type);
 int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid);
 int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
+int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg);
 int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 		unsigned int id_type, unsigned int timeout_rc_type);
 void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,


@@ -430,6 +430,8 @@ static const struct gpu_ops gv11b_ops = {
 		.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
 		.preempt_channel = gv11b_fifo_preempt_channel,
 		.preempt_tsg = gv11b_fifo_preempt_tsg,
+		.enable_tsg = gv11b_fifo_enable_tsg,
+		.disable_tsg = gk20a_disable_tsg,
 		.update_runlist = gk20a_fifo_update_runlist,
 		.trigger_mmu_fault = NULL,
 		.get_mmu_fault_info = NULL,


@@ -110,6 +110,42 @@ static inline u32 ccsr_channel_status_v(u32 r)
 {
 	return (r >> 24) & 0xf;
 }
+static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
+{
+	return 0x00000002;
+}
+static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
+{
+	return 0x00000004;
+}
+static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
+{
+	return 0x0000000a;
+}
+static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
+{
+	return 0x0000000b;
+}
+static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
+{
+	return 0x0000000c;
+}
+static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
+{
+	return 0x0000000d;
+}
+static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
+{
+	return 0x0000000e;
+}
+static inline u32 ccsr_channel_next_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+static inline u32 ccsr_channel_next_true_v(void)
+{
+	return 0x00000001;
+}
 static inline u32 ccsr_channel_pbdma_faulted_f(u32 v)
 {
 	return (v & 0x1) << 22;
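
The new ctx_reload status values let common code determine, from a single
CCSR read, whether a channel is in or pending a context reload. A minimal
sketch of such a check, with an illustrative helper name:

/* Illustrative: decode the CCSR status field and test for reload states */
static bool example_status_is_ctx_reload(u32 ccsr_val)
{
	u32 status = ccsr_channel_status_v(ccsr_val);

	return status == ccsr_channel_status_pending_ctx_reload_v() ||
		status == ccsr_channel_status_pending_acq_ctx_reload_v() ||
		status == ccsr_channel_status_on_pbdma_ctx_reload_v() ||
		status == ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v() ||
		status == ccsr_channel_status_on_eng_ctx_reload_v() ||
		status == ccsr_channel_status_on_eng_pending_ctx_reload_v() ||
		status == ccsr_channel_status_on_eng_pending_acq_ctx_reload_v();
}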


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -110,6 +110,42 @@ static inline u32 ccsr_channel_status_v(u32 r)
 {
 	return (r >> 24) & 0xf;
 }
+static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
+{
+	return 0x00000002;
+}
+static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
+{
+	return 0x00000004;
+}
+static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
+{
+	return 0x0000000a;
+}
+static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
+{
+	return 0x0000000b;
+}
+static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
+{
+	return 0x0000000c;
+}
+static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
+{
+	return 0x0000000d;
+}
+static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
+{
+	return 0x0000000e;
+}
+static inline u32 ccsr_channel_next_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+static inline u32 ccsr_channel_next_true_v(void)
+{
+	return 0x00000001;
+}
 static inline u32 ccsr_channel_pbdma_faulted_f(u32 v)
 {
 	return (v & 0x1) << 22;