gpu: nvgpu: move set_interleave to tsg

Renamed
- gk20a_tsg_set_runlist_interleave -> nvgpu_tsg_set_interleave

Moved set_interleave from runlist to tsg
- runlist.set_interleave -> tsg.set_interleave

The existing HAL was only setting tsg->interleave_level, and was not
accessing any register. This is now done in nvgpu_tsg_set_interleave,
and tsg.set_interleave is only used in the vgpu case.

Jira NVGPU-3156

Change-Id: I5dac1305afcbd950214316289cf704ee8b43fc89
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2100610
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Thomas Fleury
2019-04-17 16:24:12 -07:00
committed by mobile promotions
parent 3fde3ae650
commit 124cdb4509
22 changed files with 57 additions and 83 deletions

View File

@@ -595,8 +595,7 @@ u32 nvgpu_ce_create_context(struct gk20a *g,
/* -1 means default channel runlist level */
if (runlist_level != -1) {
err = gk20a_tsg_set_runlist_interleave(ce_ctx->tsg,
runlist_level);
err = nvgpu_tsg_set_interleave(ce_ctx->tsg, runlist_level);
if (err != 0) {
nvgpu_err(g, "ce: set runlist interleave failed");
goto end;

View File

@@ -507,32 +507,34 @@ bool nvgpu_tsg_check_ctxsw_timeout(struct tsg_gk20a *tsg,
return recover;
}
int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
int nvgpu_tsg_set_interleave(struct tsg_gk20a *tsg, u32 level)
{
struct gk20a *g = tsg->g;
int ret;
nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level);
nvgpu_log(g, gpu_dbg_sched,
"tsgid=%u interleave=%u", tsg->tsgid, level);
nvgpu_speculation_barrier();
switch (level) {
case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW:
case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH:
ret = g->ops.runlist.set_interleave(g, tsg->tsgid,
0, level);
if (ret == 0) {
tsg->interleave_level = level;
ret = g->ops.runlist.reload(g, tsg->runlist_id,
true, true);
}
break;
default:
ret = -EINVAL;
break;
if ((level != NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW) &&
(level != NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM) &&
(level != NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH)) {
return -EINVAL;
}
return ret;
if (g->ops.tsg.set_interleave != NULL) {
ret = g->ops.tsg.set_interleave(tsg, level);
if (ret != 0) {
nvgpu_err(g,
"set interleave failed tsgid=%u", tsg->tsgid);
return ret;
}
}
tsg->interleave_level = level;
return g->ops.runlist.reload(g, tsg->runlist_id, true, true);
}
int nvgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice_us)

View File

@@ -55,6 +55,7 @@ void vgpu_tsg_release(struct tsg_gk20a *tsg);
int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch);
int vgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch);
int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice);
int vgpu_tsg_set_interleave(struct tsg_gk20a *tsg, u32 level);
void vgpu_tsg_enable(struct tsg_gk20a *tsg);
int vgpu_set_sm_exception_type_mask(struct channel_gk20a *ch, u32 mask);
void vgpu_channel_free_ctx_header(struct channel_gk20a *c);

View File

@@ -199,27 +199,6 @@ int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
return vgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish);
}
int vgpu_runlist_set_interleave(struct gk20a *g,
u32 id,
u32 runlist_id,
u32 new_level)
{
struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_tsg_runlist_interleave_params *p =
&msg.params.tsg_interleave;
int err;
nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
msg.handle = vgpu_get_handle(g);
p->tsg_id = id;
p->level = new_level;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
WARN_ON(err || msg.ret);
return err ? err : msg.ret;
}
u32 vgpu_runlist_length_max(struct gk20a *g)
{
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

View File

@@ -30,9 +30,5 @@ int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish);
int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish);
int vgpu_runlist_set_interleave(struct gk20a *g,
u32 id,
u32 runlist_id,
u32 new_level);
u32 vgpu_runlist_length_max(struct gk20a *g);
u32 vgpu_runlist_entry_size(struct gk20a *g);

View File

@@ -508,7 +508,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.reschedule = NULL,
.update_for_channel = vgpu_runlist_update_for_channel,
.reload = vgpu_runlist_reload,
.set_interleave = vgpu_runlist_set_interleave,
.count_max = gk20a_runlist_count_max,
.entry_size = vgpu_runlist_entry_size,
.length_max = vgpu_runlist_length_max,
@@ -562,6 +561,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.post_event_id = nvgpu_tsg_post_event_id,
.set_timeslice = vgpu_tsg_set_timeslice,
.default_timeslice_us = vgpu_tsg_default_timeslice_us,
.set_interleave = vgpu_tsg_set_interleave,
},
.netlist = {
.get_netlist_name = gp10b_netlist_get_name,

View File

@@ -595,7 +595,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.reschedule = NULL,
.update_for_channel = vgpu_runlist_update_for_channel,
.reload = vgpu_runlist_reload,
.set_interleave = vgpu_runlist_set_interleave,
.count_max = gv11b_runlist_count_max,
.entry_size = vgpu_runlist_entry_size,
.length_max = vgpu_runlist_length_max,
@@ -649,6 +648,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.post_event_id = nvgpu_tsg_post_event_id,
.set_timeslice = vgpu_tsg_set_timeslice,
.default_timeslice_us = vgpu_tsg_default_timeslice_us,
.set_interleave = vgpu_tsg_set_interleave,
},
.usermode = {
.setup_hw = NULL,

View File

@@ -178,3 +178,21 @@ int vgpu_set_sm_exception_type_mask(struct channel_gk20a *ch,
return err;
}
int vgpu_tsg_set_interleave(struct tsg_gk20a *tsg, u32 new_level)
{
struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_tsg_runlist_interleave_params *p =
&msg.params.tsg_interleave;
int err;
struct gk20a *g = tsg->g;
nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
msg.handle = vgpu_get_handle(g);
p->tsg_id = tsg->tsgid;
p->level = new_level;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
WARN_ON(err || msg.ret);
return err ? err : msg.ret;
}

View File

@@ -33,11 +33,6 @@ struct nvgpu_semaphore;
struct channel_gk20a;
struct tsg_gk20a;
#define NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW 0U
#define NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM 1U
#define NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH 2U
#define NVGPU_FIFO_RUNLIST_INTERLEAVE_NUM_LEVELS 3U
#define MAX_RUNLIST_BUFFERS 2U
#define FIFO_INVAL_ENGINE_ID (~U32(0U))

View File

@@ -118,18 +118,6 @@ int gk20a_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
return ret;
}
int gk20a_runlist_set_interleave(struct gk20a *g,
u32 id,
u32 runlist_id,
u32 new_level)
{
nvgpu_log_fn(g, " ");
g->fifo.tsg[id].interleave_level = new_level;
return 0;
}
u32 gk20a_runlist_count_max(void)
{
return fifo_eng_runlist_base__size_1_v();
@@ -160,7 +148,7 @@ void gk20a_runlist_get_tsg_entry(struct tsg_gk20a *tsg,
}
if (scale > RL_MAX_TIMESLICE_SCALE) {
nvgpu_err(g, "requested timeslice value is clamped\n");
nvgpu_err(g, "requested timeslice value is clamped");
timeout = RL_MAX_TIMESLICE_TIMEOUT;
scale = RL_MAX_TIMESLICE_SCALE;
}

View File

@@ -31,10 +31,6 @@ struct gk20a;
int gk20a_runlist_reschedule(struct channel_gk20a *ch, bool preempt_next);
int gk20a_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
bool wait_preempt);
int gk20a_runlist_set_interleave(struct gk20a *g,
u32 id,
u32 runlist_id,
u32 new_level);
u32 gk20a_runlist_count_max(void);
u32 gk20a_runlist_entry_size(struct gk20a *g);
u32 gk20a_runlist_length_max(struct gk20a *g);

View File

@@ -64,7 +64,7 @@ void gv11b_runlist_get_tsg_entry(struct tsg_gk20a *tsg,
}
if (scale > RL_MAX_TIMESLICE_SCALE) {
nvgpu_err(g, "requested timeslice value is clamped\n");
nvgpu_err(g, "requested timeslice value is clamped");
timeout = RL_MAX_TIMESLICE_TIMEOUT;
scale = RL_MAX_TIMESLICE_SCALE;
}
@@ -76,7 +76,7 @@ void gv11b_runlist_get_tsg_entry(struct tsg_gk20a *tsg,
runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid);
runlist[3] = 0;
nvgpu_log_info(g, "gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n",
nvgpu_log_info(g, "gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x",
runlist[0], runlist[1], runlist[2], runlist[3]);
}
@@ -116,7 +116,7 @@ void gv11b_runlist_get_ch_entry(struct channel_gk20a *ch, u32 *runlist)
ram_rl_entry_chid_f(ch->chid);
runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi);
nvgpu_log_info(g, "gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
nvgpu_log_info(g, "gv11b channel runlist [0] %x [1] %x [2] %x [3] %x",
runlist[0], runlist[1], runlist[2], runlist[3]);
}

View File

@@ -788,7 +788,6 @@ static const struct gpu_ops gm20b_ops = {
.runlist = {
.update_for_channel = gk20a_runlist_update_for_channel,
.reload = gk20a_runlist_reload,
.set_interleave = gk20a_runlist_set_interleave,
.count_max = gk20a_runlist_count_max,
.entry_size = gk20a_runlist_entry_size,
.length_max = gk20a_runlist_length_max,

View File

@@ -867,7 +867,6 @@ static const struct gpu_ops gp10b_ops = {
.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
.update_for_channel = gk20a_runlist_update_for_channel,
.reload = gk20a_runlist_reload,
.set_interleave = gk20a_runlist_set_interleave,
.count_max = gk20a_runlist_count_max,
.entry_size = gk20a_runlist_entry_size,
.length_max = gk20a_runlist_length_max,

View File

@@ -1043,7 +1043,6 @@ static const struct gpu_ops gv100_ops = {
.runlist = {
.update_for_channel = gk20a_runlist_update_for_channel,
.reload = gk20a_runlist_reload,
.set_interleave = gk20a_runlist_set_interleave,
.count_max = gv100_runlist_count_max,
.entry_size = gv11b_runlist_entry_size,
.length_max = gk20a_runlist_length_max,

View File

@@ -1018,7 +1018,6 @@ static const struct gpu_ops gv11b_ops = {
.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
.update_for_channel = gk20a_runlist_update_for_channel,
.reload = gk20a_runlist_reload,
.set_interleave = gk20a_runlist_set_interleave,
.count_max = gv11b_runlist_count_max,
.entry_size = gv11b_runlist_entry_size,
.length_max = gk20a_runlist_length_max,

View File

@@ -1079,7 +1079,6 @@ static const struct gpu_ops tu104_ops = {
.runlist = {
.update_for_channel = gk20a_runlist_update_for_channel,
.reload = gk20a_runlist_reload,
.set_interleave = gk20a_runlist_set_interleave,
.count_max = tu104_runlist_count_max,
.entry_size = tu104_runlist_entry_size,
.length_max = gk20a_runlist_length_max,

View File

@@ -1076,8 +1076,6 @@ struct gpu_ops {
bool wait_for_finish);
int (*reload)(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish);
int (*set_interleave)(struct gk20a *g, u32 id,
u32 runlist_id, u32 new_level);
u32 (*count_max)(void);
u32 (*entry_size)(struct gk20a *g);
u32 (*length_max)(struct gk20a *g);
@@ -1241,6 +1239,7 @@ struct gpu_ops {
enum nvgpu_event_id_type event_id);
int (*set_timeslice)(struct tsg_gk20a *tsg, u32 timeslice_us);
u32 (*default_timeslice_us)(struct gk20a *g);
int (*set_interleave)(struct tsg_gk20a *tsg, u32 new_level);
} tsg;
struct {
void (*setup_hw)(struct gk20a *g);

View File

@@ -31,8 +31,13 @@ struct tsg_gk20a;
struct fifo_gk20a;
struct channel_gk20a;
#define RUNLIST_APPEND_FAILURE U32_MAX
#define RUNLIST_INVALID_ID U32_MAX
#define NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW 0U
#define NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM 1U
#define NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH 2U
#define NVGPU_FIFO_RUNLIST_INTERLEAVE_NUM_LEVELS 3U
#define RUNLIST_APPEND_FAILURE U32_MAX
#define RUNLIST_INVALID_ID U32_MAX
u32 nvgpu_runlist_construct_locked(struct fifo_gk20a *f,
struct fifo_runlist_info_gk20a *runlist,

View File

@@ -129,6 +129,7 @@ u32 nvgpu_tsg_get_timeslice(struct tsg_gk20a *tsg);
u32 nvgpu_tsg_default_timeslice_us(struct gk20a *g);
void nvgpu_tsg_enable_sched(struct gk20a *g, struct tsg_gk20a *tsg);
void nvgpu_tsg_disable_sched(struct gk20a *g, struct tsg_gk20a *tsg);
int nvgpu_tsg_set_interleave(struct tsg_gk20a *tsg, u32 level);
int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
u32 priority);
int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,

View File

@@ -518,7 +518,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
}
level = nvgpu_get_common_runlist_level(level);
err = gk20a_tsg_set_runlist_interleave(tsg, level);
err = nvgpu_tsg_set_interleave(tsg, level);
gk20a_idle(g);
done:

View File

@@ -276,7 +276,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
if (err)
goto done;
err = gk20a_tsg_set_runlist_interleave(tsg, arg->runlist_interleave);
err = nvgpu_tsg_set_interleave(tsg, arg->runlist_interleave);
gk20a_idle(g);