Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-25 02:52:51 +03:00)
gpu: nvgpu: make engine dependent functions hal ops
Engine falcon reset, emem copy and queue head/tail management have to be
accessed through HAL APIs. Introduce these for the PMU & SEC2 engines.

JIRA NVGPU-1459

Change-Id: I1d8f5103decb0bcba387886304d899ecc7b42cf1
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2016282
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit e87161b807
parent 00aeab6cca
committed by mobile promotions
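For readers skimming the diff below: the change routes the engine-dependent falcon helpers (engine reset, EMEM copy, queue head/tail access) through the per-chip gpu_ops HAL tables instead of calling chip-specific functions directly. The following is a minimal, self-contained C sketch of that indirection; every name here (demo_gpu, demo_pmu_reset, DEMO_FALCON_ID_PMU, etc.) is an illustrative stand-in, not nvgpu code, and it only models the pattern, not the real driver plumbing.

/* Minimal sketch of the HAL-ops indirection this change introduces.
 * All types and names below are illustrative stand-ins, not nvgpu code. */
#include <stdio.h>
#include <stddef.h>

struct demo_gpu;

/* Per-chip HAL table: engine-dependent functions live behind pointers. */
struct demo_gpu_ops {
	struct {
		int (*pmu_reset)(struct demo_gpu *g);
	} pmu;
};

/* Engine-dependency ops attached to a falcon instance. */
struct demo_flcn_eng_dep_ops {
	int (*reset_eng)(struct demo_gpu *g);
};

struct demo_gpu {
	struct demo_gpu_ops ops;
};

struct demo_falcon {
	struct demo_gpu *g;
	unsigned int flcn_id;
	struct demo_flcn_eng_dep_ops dep_ops;
};

/* A chip-specific implementation that the HAL table points at. */
static int demo_pmu_reset(struct demo_gpu *g)
{
	(void)g;
	printf("pmu reset via HAL op\n");
	return 0;
}

#define DEMO_FALCON_ID_PMU 0U

/* Mirrors the shape of the *_falcon_engine_dependency_ops() functions in the
 * diff: wire the falcon's engine-dependent ops from g->ops instead of
 * assigning chip functions directly. */
static void demo_falcon_engine_dependency_ops(struct demo_falcon *flcn)
{
	struct demo_gpu *g = flcn->g;

	switch (flcn->flcn_id) {
	case DEMO_FALCON_ID_PMU:
		flcn->dep_ops.reset_eng = g->ops.pmu.pmu_reset;
		break;
	default:
		flcn->dep_ops.reset_eng = NULL;
		break;
	}
}

int main(void)
{
	struct demo_gpu g = { .ops = { .pmu = { .pmu_reset = demo_pmu_reset } } };
	struct demo_falcon flcn = { .g = &g, .flcn_id = DEMO_FALCON_ID_PMU };

	demo_falcon_engine_dependency_ops(&flcn);

	/* Common falcon code only sees the function pointer. */
	if (flcn.dep_ops.reset_eng != NULL)
		return flcn.dep_ops.reset_eng(&g);
	return -1;
}

Wiring the pointer from g->ops at falcon init is what keeps the common falcon code chip-agnostic, and it also lets a table leave an op NULL, as the vgpu HAL tables at the end of the diff do.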
@@ -19,7 +19,6 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <nvgpu/pmu.h>
 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
 
@@ -593,7 +592,7 @@ static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 
 	switch (flcn->flcn_id) {
 	case FALCON_ID_PMU:
-		flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset;
+		flcn_eng_dep_ops->reset_eng = g->ops.pmu.pmu_reset;
 		flcn_eng_dep_ops->queue_head = g->ops.pmu.pmu_queue_head;
 		flcn_eng_dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail;
 		break;
@@ -24,7 +24,6 @@
 #include "falcon_gk20a.h"
 #include "falcon_gp106.h"
 #include "falcon_priv.h"
-#include "gp106/sec2_gp106.h"
 
 static void gp106_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 {
@@ -34,12 +33,12 @@ static void gp106_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 
 	switch (flcn->flcn_id) {
 	case FALCON_ID_PMU:
-		flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset;
+		flcn_eng_dep_ops->reset_eng = g->ops.pmu.pmu_reset;
 		flcn_eng_dep_ops->queue_head = g->ops.pmu.pmu_queue_head;
 		flcn_eng_dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail;
 		break;
 	case FALCON_ID_SEC2:
-		flcn_eng_dep_ops->reset_eng = gp106_sec2_reset;
+		flcn_eng_dep_ops->reset_eng = g->ops.sec2.sec2_reset;
 		break;
 	default:
 		flcn_eng_dep_ops->reset_eng = NULL;
@@ -25,16 +25,16 @@
 #include "falcon_gp106.h"
 #include "falcon_gv100.h"
 #include "falcon_priv.h"
-#include "gv100/gsp_gv100.h"
 
 static void gv100_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 {
 	struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops =
 		&flcn->flcn_engine_dep_ops;
+	struct gk20a *g = flcn->g;
 
 	switch (flcn->flcn_id) {
 	case FALCON_ID_GSPLITE:
-		flcn_eng_dep_ops->reset_eng = gv100_gsp_reset;
+		flcn_eng_dep_ops->reset_eng = g->ops.gsp.gsp_reset;
 		break;
 	default:
 		flcn_eng_dep_ops->reset_eng = NULL;
@@ -25,21 +25,21 @@
 #include "falcon_gv100.h"
 #include "falcon_tu104.h"
 #include "falcon_priv.h"
-#include "tu104/sec2_tu104.h"
 
 static void tu104_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 {
 	struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops =
 		&flcn->flcn_engine_dep_ops;
+	struct gk20a *g = flcn->g;
 
 	switch (flcn->flcn_id) {
 	case FALCON_ID_SEC2:
-		flcn_eng_dep_ops->reset_eng = tu104_sec2_reset;
-		flcn_eng_dep_ops->copy_to_emem = tu104_sec2_flcn_copy_to_emem;
+		flcn_eng_dep_ops->reset_eng = g->ops.sec2.sec2_reset;
+		flcn_eng_dep_ops->copy_to_emem = g->ops.sec2.sec2_copy_to_emem;
 		flcn_eng_dep_ops->copy_from_emem =
-			tu104_sec2_flcn_copy_from_emem;
-		flcn_eng_dep_ops->queue_head = tu104_sec2_queue_head;
-		flcn_eng_dep_ops->queue_tail = tu104_sec2_queue_tail;
+			g->ops.sec2.sec2_copy_from_emem;
+		flcn_eng_dep_ops->queue_head = g->ops.sec2.sec2_queue_head;
+		flcn_eng_dep_ops->queue_tail = g->ops.sec2.sec2_queue_tail;
 		break;
 	default:
 		flcn_eng_dep_ops->reset_eng = NULL;
@@ -626,6 +626,7 @@ static const struct gpu_ops gm20b_ops = {
 		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
 		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
 		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+		.pmu_reset = nvgpu_pmu_reset,
 		.pmu_queue_head = gk20a_pmu_queue_head,
 		.pmu_queue_tail = gk20a_pmu_queue_tail,
 		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
@@ -704,6 +704,7 @@ static const struct gpu_ops gp10b_ops = {
 		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
 		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
 		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+		.pmu_reset = nvgpu_pmu_reset,
 		.pmu_queue_head = gk20a_pmu_queue_head,
 		.pmu_queue_tail = gk20a_pmu_queue_tail,
 		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
@@ -903,6 +903,7 @@ static const struct gpu_ops gv100_ops = {
 		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
 		.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list,
 		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+		.pmu_reset = nvgpu_pmu_reset,
 		.pmu_queue_head = gk20a_pmu_queue_head,
 		.pmu_pg_param_post_init = nvgpu_lpwr_post_init,
 		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
@@ -1141,9 +1142,11 @@ static const struct gpu_ops gv100_ops = {
 	},
 	.sec2 = {
 		.falcon_base_addr = gp106_sec2_falcon_base_addr,
+		.sec2_reset = gp106_sec2_reset,
 	},
 	.gsp = {
 		.falcon_base_addr = gv100_gsp_falcon_base_addr,
+		.gsp_reset = gv100_gsp_reset,
 	},
 	.chip_init_gpu_characteristics = gv100_init_gpu_characteristics,
 	.get_litter_value = gv100_get_litter_value,
@@ -832,6 +832,7 @@ static const struct gpu_ops gv11b_ops = {
 		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
 		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
 		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+		.pmu_reset = nvgpu_pmu_reset,
 		.pmu_queue_head = gk20a_pmu_queue_head,
 		.pmu_queue_tail = gk20a_pmu_queue_tail,
 		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
@@ -1188,6 +1188,7 @@ struct gpu_ops {
 		u32 (*pmu_get_queue_head_size)(void);
 		u32 (*pmu_get_queue_tail_size)(void);
 		u32 (*pmu_get_queue_tail)(u32 i);
+		int (*pmu_reset)(struct gk20a *g);
 		int (*pmu_queue_head)(struct gk20a *g,
 			struct nvgpu_falcon_queue *queue, u32 *head, bool set);
 		int (*pmu_queue_tail)(struct gk20a *g,
@@ -1569,9 +1570,21 @@ struct gpu_ops {
 		void (*msgq_tail)(struct gk20a *g, struct nvgpu_sec2 *sec2,
 			u32 *tail, bool set);
 		u32 (*falcon_base_addr)(void);
+		int (*sec2_reset)(struct gk20a *g);
+		int (*sec2_copy_to_emem)(struct nvgpu_falcon *flcn, u32 dst,
+			u8 *src, u32 size, u8 port);
+		int (*sec2_copy_from_emem)(struct nvgpu_falcon *flcn,
+			u32 src, u8 *dst, u32 size, u8 port);
+		int (*sec2_queue_head)(struct gk20a *g,
+			struct nvgpu_falcon_queue *queue,
+			u32 *head, bool set);
+		int (*sec2_queue_tail)(struct gk20a *g,
+			struct nvgpu_falcon_queue *queue,
+			u32 *tail, bool set);
 	} sec2;
 	struct {
 		u32 (*falcon_base_addr)(void);
+		int (*gsp_reset)(struct gk20a *g);
 	} gsp;
 	void (*semaphore_wakeup)(struct gk20a *g, bool post_events);
 };
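The new sec2 ops above give common queue code a uniform head/tail accessor shape, with a bool selecting get versus set. As a rough illustration of that calling convention only — stand-in types, since the real op also takes the struct gk20a and struct nvgpu_falcon_queue arguments omitted here:

/* Illustrative stand-ins only; not the real nvgpu types or op. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_queue { uint32_t head_reg; };

/* Shape of a queue-head HAL op: one entry point for both
 * reading (set == false) and writing (set == true) the head. */
static int demo_sec2_queue_head(struct demo_queue *q, uint32_t *head, bool set)
{
	if (set)
		q->head_reg = *head;   /* write head pointer */
	else
		*head = q->head_reg;   /* read head pointer */
	return 0;
}

int main(void)
{
	struct demo_queue q = { .head_reg = 0 };
	uint32_t head = 0;

	demo_sec2_queue_head(&q, &head, false);  /* get current head */
	head += 16;                              /* advance past a written command */
	demo_sec2_queue_head(&q, &head, true);   /* publish the new head */

	printf("head now %u\n", q.head_reg);
	return 0;
}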
@@ -935,6 +935,7 @@ static const struct gpu_ops tu104_ops = {
 		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
 		.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list,
 		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+		.pmu_reset = nvgpu_pmu_reset,
 		.pmu_queue_head = gk20a_pmu_queue_head,
 		.pmu_pg_param_post_init = nvgpu_lpwr_post_init,
 		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
@@ -1163,9 +1164,15 @@ static const struct gpu_ops tu104_ops = {
 		.isr = tu104_sec2_isr,
 		.msgq_tail = tu104_sec2_msgq_tail,
 		.falcon_base_addr = tu104_sec2_falcon_base_addr,
+		.sec2_reset = tu104_sec2_reset,
+		.sec2_copy_to_emem = tu104_sec2_flcn_copy_to_emem,
+		.sec2_copy_from_emem = tu104_sec2_flcn_copy_from_emem,
+		.sec2_queue_head = tu104_sec2_queue_head,
+		.sec2_queue_tail = tu104_sec2_queue_tail,
 	},
 	.gsp = {
 		.falcon_base_addr = gv100_gsp_falcon_base_addr,
+		.gsp_reset = gv100_gsp_reset,
 	},
 	.top = {
 		.device_info_parse_enum = gm20b_device_info_parse_enum,
@@ -508,6 +508,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.pmu_get_queue_head_size = NULL,
 		.pmu_get_queue_tail = NULL,
 		.pmu_get_queue_tail_size = NULL,
+		.pmu_reset = NULL,
 		.pmu_queue_head = NULL,
 		.pmu_queue_tail = NULL,
 		.pmu_msgq_tail = NULL,
@@ -582,6 +582,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.pmu_get_queue_head_size = NULL,
 		.pmu_get_queue_tail = NULL,
 		.pmu_get_queue_tail_size = NULL,
+		.pmu_reset = NULL,
 		.pmu_queue_head = NULL,
 		.pmu_queue_tail = NULL,
 		.pmu_msgq_tail = NULL,