diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_priv.h b/drivers/gpu/nvgpu/common/falcon/falcon_priv.h
index b4922b5f4..4f0a1e9ee 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon_priv.h
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_priv.h
@@ -162,9 +162,9 @@ struct nvgpu_falcon_queue {
 /* ops which are falcon engine specific */
 struct nvgpu_falcon_engine_dependency_ops {
 	int (*reset_eng)(struct gk20a *g);
-	int (*queue_head)(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+	int (*queue_head)(struct gk20a *g, u32 queue_id, u32 queue_index,
 		u32 *head, bool set);
-	int (*queue_tail)(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+	int (*queue_tail)(struct gk20a *g, u32 queue_id, u32 queue_index,
 		u32 *tail, bool set);
 	int (*copy_from_emem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst,
 		u32 size, u8 port);
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
index fe5fc2005..10ed1f496 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
@@ -33,8 +33,8 @@ static int falcon_queue_head(struct nvgpu_falcon *flcn,
 	int err = -ENOSYS;
 
 	if (flcn->flcn_engine_dep_ops.queue_head != NULL) {
-		err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue,
-			head, set);
+		err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue->id,
+			queue->index, head, set);
 	}
 
 	return err;
@@ -46,8 +46,8 @@ static int falcon_queue_tail(struct nvgpu_falcon *flcn,
 	int err = -ENOSYS;
 
 	if (flcn->flcn_engine_dep_ops.queue_tail != NULL) {
-		err = flcn->flcn_engine_dep_ops.queue_tail(flcn->g, queue,
-			tail, set);
+		err = flcn->flcn_engine_dep_ops.queue_tail(flcn->g, queue->id,
+			queue->index, tail, set);
 	}
 
 	return err;
@@ -149,7 +149,7 @@ static int falcon_queue_tail_fb(struct nvgpu_falcon *flcn,
 	} else {
 		if (flcn->flcn_engine_dep_ops.queue_tail != NULL) {
 			err = flcn->flcn_engine_dep_ops.queue_tail(g,
-				queue, tail, set);
+				queue->id, queue->index, tail, set);
 		}
 	}
 
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.c b/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.c
index fdcd5acde..77d923890 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.c
@@ -393,12 +393,10 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 	return 0;
 }
 
-int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int gk20a_pmu_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
 	u32 *head, bool set)
 {
 	u32 queue_head_size = 0;
-	u32 queue_id = nvgpu_falcon_queue_get_id(queue);
-	u32 queue_index = nvgpu_falcon_queue_get_index(queue);
 
 	if (g->ops.pmu.pmu_get_queue_head_size != NULL) {
 		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
@@ -435,12 +433,10 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	return 0;
 }
 
-int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int gk20a_pmu_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
 	u32 *tail, bool set)
 {
 	u32 queue_tail_size = 0;
-	u32 queue_id = nvgpu_falcon_queue_get_id(queue);
-	u32 queue_index = nvgpu_falcon_queue_get_index(queue);
 
 	if (g->ops.pmu.pmu_get_queue_tail_size != NULL) {
 		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.h b/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.h
index 08dd36e10..f57cfad3a 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.h
@@ -49,9 +49,9 @@ void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32
 		pg_engine_id);
 
 int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
 int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
-int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int gk20a_pmu_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
 	u32 *head, bool set);
-int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int gk20a_pmu_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
 	u32 *tail, bool set);
 void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
index 490ce6033..ea261ce7a 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
@@ -1189,10 +1189,10 @@ struct gpu_ops {
 		u32 (*pmu_get_queue_tail_size)(void);
 		u32 (*pmu_get_queue_tail)(u32 i);
 		int (*pmu_reset)(struct gk20a *g);
-		int (*pmu_queue_head)(struct gk20a *g,
-			struct nvgpu_falcon_queue *queue, u32 *head, bool set);
-		int (*pmu_queue_tail)(struct gk20a *g,
-			struct nvgpu_falcon_queue *queue, u32 *tail, bool set);
+		int (*pmu_queue_head)(struct gk20a *g, u32 queue_id,
+			u32 queue_index, u32 *head, bool set);
+		int (*pmu_queue_tail)(struct gk20a *g, u32 queue_id,
+			u32 queue_index, u32 *tail, bool set);
 		void (*pmu_msgq_tail)(struct nvgpu_pmu *pmu,
 			u32 *tail, bool set);
 		u32 (*pmu_mutex_size)(void);
@@ -1576,10 +1576,10 @@ struct gpu_ops {
 		int (*sec2_copy_from_emem)(struct nvgpu_falcon *flcn,
 			u32 src, u8 *dst, u32 size, u8 port);
 		int (*sec2_queue_head)(struct gk20a *g,
-			struct nvgpu_falcon_queue *queue,
+			u32 queue_id, u32 queue_index,
 			u32 *head, bool set);
 		int (*sec2_queue_tail)(struct gk20a *g,
-			struct nvgpu_falcon_queue *queue,
+			u32 queue_id, u32 queue_index,
 			u32 *tail, bool set);
 	} sec2;
 	struct {
diff --git a/drivers/gpu/nvgpu/tu104/sec2_tu104.c b/drivers/gpu/nvgpu/tu104/sec2_tu104.c
index 3b8e06dd9..9aaeee34b 100644
--- a/drivers/gpu/nvgpu/tu104/sec2_tu104.c
+++ b/drivers/gpu/nvgpu/tu104/sec2_tu104.c
@@ -236,14 +236,10 @@ int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
 	return tu104_sec2_flcn_bl_bootstrap(g, bl_info);
 }
 
-int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int tu104_sec2_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
 	u32 *head, bool set)
 {
 	u32 queue_head_size = 8;
-	u32 queue_id, queue_index;
-
-	queue_id = nvgpu_falcon_queue_get_id(queue);
-	queue_index = nvgpu_falcon_queue_get_index(queue);
 
 	if (queue_id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
 		if (queue_index >= queue_head_size) {
@@ -271,14 +267,10 @@ int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	return 0;
 }
 
-int tu104_sec2_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int tu104_sec2_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
 	u32 *tail, bool set)
 {
 	u32 queue_tail_size = 8;
-	u32 queue_id, queue_index;
-
-	queue_id = nvgpu_falcon_queue_get_id(queue);
-	queue_index = nvgpu_falcon_queue_get_index(queue);
 
 	if (queue_id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
 		if (queue_index >= queue_tail_size) {
diff --git a/drivers/gpu/nvgpu/tu104/sec2_tu104.h b/drivers/gpu/nvgpu/tu104/sec2_tu104.h
index ea025e659..42de6f756 100644
--- a/drivers/gpu/nvgpu/tu104/sec2_tu104.h
+++ b/drivers/gpu/nvgpu/tu104/sec2_tu104.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,9 +34,9 @@ int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
 	struct hs_acr *acr_desc,
 	struct nvgpu_falcon_bl_info *bl_info);
 
-int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int tu104_sec2_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
 	u32 *head, bool set);
-int tu104_sec2_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
+int tu104_sec2_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
 	u32 *tail, bool set);
 void tu104_sec2_msgq_tail(struct gk20a *g, struct nvgpu_sec2 *sec2,
 	u32 *tail, bool set);
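
For reviewers, a minimal standalone sketch of the calling convention this change moves to: the engine-dependency callbacks now take plain queue_id/queue_index values, and the common falcon queue code passes queue->id and queue->index at the call site. Everything below (struct queue, example_queue_head, generic_queue_head, fake_head_regs, the register count of 8) is a simplified stand-in rather than real nvgpu code; it only mirrors the -ENOSYS check for a missing op and the -EINVAL bounds check on queue_index visible in the diff.

/* sketch.c - build with: cc -std=c99 -o sketch sketch.c */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <errno.h>

typedef uint32_t u32;

/* Stand-in for the private queue bookkeeping kept by the common code. */
struct queue {
	u32 id;     /* logical queue ID (command vs. message queue) */
	u32 index;  /* HW head/tail register index for this queue */
};

/* New-style engine op: plain IDs, no dependency on the queue struct. */
typedef int (*queue_head_fn)(u32 queue_id, u32 queue_index, u32 *head, bool set);

static u32 fake_head_regs[8];	/* stand-in for the HW head registers */

/* Example engine-side implementation mirroring the bounds check
 * done in the PMU/SEC2 queue_head handlers. */
static int example_queue_head(u32 queue_id, u32 queue_index, u32 *head, bool set)
{
	const u32 queue_head_size = 8;	/* number of head registers (stand-in) */

	(void)queue_id;
	if (queue_index >= queue_head_size) {
		return -EINVAL;
	}

	/* Real code would access the HW head register here. */
	if (set) {
		fake_head_regs[queue_index] = *head;
	} else {
		*head = fake_head_regs[queue_index];
	}
	return 0;
}

/* Generic caller: extracts queue->id / queue->index before the call,
 * as falcon_queue_head() now does, instead of handing over the struct. */
static int generic_queue_head(queue_head_fn op, struct queue *q, u32 *head, bool set)
{
	if (op == NULL) {
		return -ENOSYS;
	}
	return op(q->id, q->index, head, set);
}

int main(void)
{
	struct queue q = { .id = 0, .index = 2 };
	u32 head = 0x40;

	generic_queue_head(example_queue_head, &q, &head, true);   /* set */
	head = 0;
	generic_queue_head(example_queue_head, &q, &head, false);  /* get */
	printf("head = 0x%x\n", (unsigned int)head);
	return 0;
}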