gpu: nvgpu: make engine queue_head|tail APIs depend on queue id & index

Since we plan to split the engine DMEM/EMEM and FB queues into separate
implementations, make the engine queue_head and queue_tail APIs independent
of the nvgpu_falcon_queue parameter and pass the queue id and index instead.

JIRA NVGPU-1994

Change-Id: I389cc48d4045d9df8f768166f6a1d7074a69a309
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2016283
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Sagar Kamble, 2019-02-11 10:16:47 +05:30 (committed by mobile promotions)
Parent: e87161b807
Commit: c3ea3e283f

7 changed files with 22 additions and 34 deletions
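
For orientation, this is the shape of the interface change, assembled from the hunks below (continuation-line indentation is elided here, as it is in the rest of the diff):

/* Before: the engine-side accessors received the queue object itself. */
int (*queue_head)(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *head, bool set);

/* After: only the plain queue id and head/tail register index cross the
 * boundary, so the planned DMEM/EMEM and FB queue implementations can
 * diverge without engine code depending on the nvgpu_falcon_queue layout.
 */
int (*queue_head)(struct gk20a *g, u32 queue_id, u32 queue_index,
	u32 *head, bool set);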


@@ -162,9 +162,9 @@ struct nvgpu_falcon_queue {
/* ops which are falcon engine specific */
struct nvgpu_falcon_engine_dependency_ops {
int (*reset_eng)(struct gk20a *g);
-int (*queue_head)(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int (*queue_head)(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *head, bool set);
-int (*queue_tail)(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int (*queue_tail)(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *tail, bool set);
int (*copy_from_emem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst,
u32 size, u8 port);
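
These hooks are filled in per engine elsewhere in the driver; that wiring is not part of this diff, but roughly (the function name, the flcn_id member and the FALCON_ID_* constants below are assumptions for illustration), the id/index-based HAL helpers now plug in directly:

/* Illustrative wiring only -- assumed, not taken from this commit. */
static void falcon_engine_dep_ops_setup(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	struct nvgpu_falcon_engine_dependency_ops *dep_ops =
		&flcn->flcn_engine_dep_ops;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		dep_ops->queue_head = g->ops.pmu.pmu_queue_head;
		dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail;
		break;
	case FALCON_ID_SEC2:
		dep_ops->queue_head = g->ops.sec2.sec2_queue_head;
		dep_ops->queue_tail = g->ops.sec2.sec2_queue_tail;
		break;
	default:
		break;
	}
}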


@@ -33,8 +33,8 @@ static int falcon_queue_head(struct nvgpu_falcon *flcn,
int err = -ENOSYS;
if (flcn->flcn_engine_dep_ops.queue_head != NULL) {
-err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue,
-head, set);
+err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue->id,
+queue->index, head, set);
}
return err;
@@ -46,8 +46,8 @@ static int falcon_queue_tail(struct nvgpu_falcon *flcn,
int err = -ENOSYS;
if (flcn->flcn_engine_dep_ops.queue_tail != NULL) {
-err = flcn->flcn_engine_dep_ops.queue_tail(flcn->g, queue,
-tail, set);
+err = flcn->flcn_engine_dep_ops.queue_tail(flcn->g, queue->id,
+queue->index, tail, set);
}
return err;
@@ -149,7 +149,7 @@ static int falcon_queue_tail_fb(struct nvgpu_falcon *flcn,
} else {
if (flcn->flcn_engine_dep_ops.queue_tail != NULL) {
err = flcn->flcn_engine_dep_ops.queue_tail(g,
-queue, tail, set);
+queue->id, queue->index, tail, set);
}
}
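
Taken together, the generic DMEM/EMEM head accessor now reads roughly as below (the second line of the signature is an assumption; only the call site appears in the hunk). The queue object stops at this adapter layer, and engine code sees just two u32s; falcon_queue_tail() and the FB-queue tail path follow the same pattern:

static int falcon_queue_head(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, u32 *head, bool set)
{
	int err = -ENOSYS;

	if (flcn->flcn_engine_dep_ops.queue_head != NULL) {
		/* unpack id/index here so engine code never sees the queue */
		err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue->id,
			queue->index, head, set);
	}

	return err;
}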


@@ -393,12 +393,10 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
return 0;
}
-int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int gk20a_pmu_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *head, bool set)
{
u32 queue_head_size = 0;
-u32 queue_id = nvgpu_falcon_queue_get_id(queue);
-u32 queue_index = nvgpu_falcon_queue_get_index(queue);
if (g->ops.pmu.pmu_get_queue_head_size != NULL) {
queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
@@ -435,12 +433,10 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
return 0;
}
-int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int gk20a_pmu_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *tail, bool set)
{
u32 queue_tail_size = 0;
-u32 queue_id = nvgpu_falcon_queue_get_id(queue);
-u32 queue_index = nvgpu_falcon_queue_get_index(queue);
if (g->ops.pmu.pmu_get_queue_tail_size != NULL) {
queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
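
Both PMU helpers are truncated above right after the HAL size query; a rough sketch of how gk20a_pmu_queue_head() proceeds with the id/index now passed in directly (the command/message queue split and the register access are reduced to comments, since they are not visible in the hunk):

/* Sketch only: the actual register reads/writes from the truncated hunk
 * are not reproduced here.
 */
int gk20a_pmu_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
	u32 *head, bool set)
{
	u32 queue_head_size = 0;

	if (g->ops.pmu.pmu_get_queue_head_size != NULL) {
		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
	}

	if (head == NULL || queue_head_size == 0U) {
		return -EINVAL;
	}

	/* For command queues, queue_index selects one of the per-queue HEAD
	 * registers and must stay below the register-file size; message
	 * queues use a dedicated MSGQ HEAD register instead (branch elided).
	 */
	if (queue_index >= queue_head_size) {
		return -EINVAL;
	}

	if (set) {
		/* write *head into the HEAD register picked by queue_index */
	} else {
		/* read that HEAD register back into *head */
	}

	return 0;
}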


@@ -49,9 +49,9 @@ void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id);
int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
-int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int gk20a_pmu_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *head, bool set);
-int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int gk20a_pmu_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *tail, bool set);
void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set);


@@ -1189,10 +1189,10 @@ struct gpu_ops {
u32 (*pmu_get_queue_tail_size)(void);
u32 (*pmu_get_queue_tail)(u32 i);
int (*pmu_reset)(struct gk20a *g);
-int (*pmu_queue_head)(struct gk20a *g,
-struct nvgpu_falcon_queue *queue, u32 *head, bool set);
-int (*pmu_queue_tail)(struct gk20a *g,
-struct nvgpu_falcon_queue *queue, u32 *tail, bool set);
+int (*pmu_queue_head)(struct gk20a *g, u32 queue_id,
+u32 queue_index, u32 *head, bool set);
+int (*pmu_queue_tail)(struct gk20a *g, u32 queue_id,
+u32 queue_index, u32 *tail, bool set);
void (*pmu_msgq_tail)(struct nvgpu_pmu *pmu,
u32 *tail, bool set);
u32 (*pmu_mutex_size)(void);
@@ -1576,10 +1576,10 @@ struct gpu_ops {
int (*sec2_copy_from_emem)(struct nvgpu_falcon *flcn,
u32 src, u8 *dst, u32 size, u8 port);
int (*sec2_queue_head)(struct gk20a *g,
-struct nvgpu_falcon_queue *queue,
+u32 queue_id, u32 queue_index,
u32 *head, bool set);
int (*sec2_queue_tail)(struct gk20a *g,
-struct nvgpu_falcon_queue *queue,
+u32 queue_id, u32 queue_index,
u32 *tail, bool set);
} sec2;
struct {
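
Because pmu_queue_head/tail and sec2_queue_head/tail are HAL pointers, every per-chip ops table that populates them must adopt the new signature in the same change; a hypothetical initializer fragment (table name and the omitted neighbouring fields are assumptions) would bind the updated helpers like this:

/* Hypothetical HAL table fragment -- not part of this diff. */
static const struct gpu_ops tu104_ops = {
	.pmu = {
		.pmu_queue_head = gk20a_pmu_queue_head,
		.pmu_queue_tail = gk20a_pmu_queue_tail,
		/* ... */
	},
	.sec2 = {
		.sec2_queue_head = tu104_sec2_queue_head,
		.sec2_queue_tail = tu104_sec2_queue_tail,
		/* ... */
	},
};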


@@ -236,14 +236,10 @@ int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
return tu104_sec2_flcn_bl_bootstrap(g, bl_info);
}
-int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int tu104_sec2_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *head, bool set)
{
u32 queue_head_size = 8;
-u32 queue_id, queue_index;
-queue_id = nvgpu_falcon_queue_get_id(queue);
-queue_index = nvgpu_falcon_queue_get_index(queue);
if (queue_id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
if (queue_index >= queue_head_size) {
@@ -271,14 +267,10 @@ int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
return 0;
}
-int tu104_sec2_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int tu104_sec2_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *tail, bool set)
{
u32 queue_tail_size = 8;
-u32 queue_id, queue_index;
-queue_id = nvgpu_falcon_queue_get_id(queue);
-queue_index = nvgpu_falcon_queue_get_index(queue);
if (queue_id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
if (queue_index >= queue_tail_size) {
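
As on the PMU side, both SEC2 helpers are truncated before the register access; a sketch of tu104_sec2_queue_tail() with the new parameters (the actual TAIL register reads/writes and the exact message-queue id check are reduced to comments):

int tu104_sec2_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
	u32 *tail, bool set)
{
	u32 queue_tail_size = 8;

	if (queue_id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
		/* command queue: queue_index selects one of the eight
		 * per-queue TAIL registers */
		if (queue_index >= queue_tail_size) {
			return -EINVAL;
		}
		/* read or write the selected TAIL register, depending on
		 * 'set' (elided in the hunk above) */
	} else {
		/* message queue: single MSGQ TAIL register (elided) */
	}

	return 0;
}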


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -34,9 +34,9 @@ int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
struct hs_acr *acr_desc,
struct nvgpu_falcon_bl_info *bl_info);
-int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int tu104_sec2_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *head, bool set);
-int tu104_sec2_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int tu104_sec2_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
u32 *tail, bool set);
void tu104_sec2_msgq_tail(struct gk20a *g, struct nvgpu_sec2 *sec2,
u32 *tail, bool set);