Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 09:12:24 +03:00)
gpu: nvgpu: update pbdma intr enable set/clear masks as hals
To avoid duplicating the entire pbdma_intr_enable sequence for future chips, expose the set and clear masks as HALs.

JIRA NVGPU-9325

Change-Id: Id8434fc15ca4bf542680a8452dc294f2c4068084
Signed-off-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2838036
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Ramalingam C <ramalingamc@nvidia.com>
Reviewed-by: Ankur Kishore <ankkishore@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Committed by: mobile promotions
Parent: 8c710694e8
Commit: 3e2eff564f
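To make the intent of the change concrete, here is a minimal, self-contained C sketch of the pattern the commit adopts. None of these names come from nvgpu (`chip_pbdma_ops`, `chip_a_*`, `chip_b_*`, and the mask values are invented for illustration): the interrupt-enable masks sit behind function pointers in a per-chip ops table, so the common enable path is written once and a future chip overrides only the mask providers.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a per-chip HAL table. */
struct chip_pbdma_ops {
    uint32_t (*intr_0_en_set_tree_mask)(void);
    uint32_t (*intr_0_en_clear_tree_mask)(void);
};

/* Chip A supplies its masks (values are arbitrary placeholders). */
static uint32_t chip_a_set_mask(void)   { return 0x000000ffu; }
static uint32_t chip_a_clear_mask(void) { return 0x000000ffu; }

/* A future chip provides only different mask functions. */
static uint32_t chip_b_set_mask(void)   { return 0x0000ffffu; }
static uint32_t chip_b_clear_mask(void) { return 0x0000ffffu; }

/* Common enable/disable path, shared by every chip via the ops table. */
static void pbdma_intr_enable(const struct chip_pbdma_ops *ops, int enable)
{
    uint32_t mask = enable ? ops->intr_0_en_set_tree_mask()
                           : ops->intr_0_en_clear_tree_mask();

    /* A real driver would write this mask to the en_set/en_clear register. */
    printf("program intr_0_en_%s_tree with 0x%08x\n",
           enable ? "set" : "clear", mask);
}

int main(void)
{
    const struct chip_pbdma_ops chip_a = {
        .intr_0_en_set_tree_mask   = chip_a_set_mask,
        .intr_0_en_clear_tree_mask = chip_a_clear_mask,
    };
    const struct chip_pbdma_ops chip_b = {
        .intr_0_en_set_tree_mask   = chip_b_set_mask,
        .intr_0_en_clear_tree_mask = chip_b_clear_mask,
    };

    pbdma_intr_enable(&chip_a, 1);  /* uses chip A's mask */
    pbdma_intr_enable(&chip_b, 1);  /* same code path, chip B's mask */
    return 0;
}

The diff below does exactly this inside nvgpu: the four GA10B mask helpers lose their static qualifier and are published through struct gops_pbdma, and the enable/disable paths read them via g->ops.pbdma.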
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -69,4 +69,8 @@ void ga10b_pbdma_dump_status(struct gk20a *g, struct nvgpu_debug_context *o);
 u32 ga10b_pbdma_get_mmu_fault_id(struct gk20a *g, u32 pbdma_id);
 u32 ga10b_pbdma_get_num_of_pbdmas(void);
 
+u32 ga10b_pbdma_intr_0_en_set_tree_mask(void);
+u32 ga10b_pbdma_intr_0_en_clear_tree_mask(void);
+u32 ga10b_pbdma_intr_1_en_set_tree_mask(void);
+u32 ga10b_pbdma_intr_1_en_clear_tree_mask(void);
 #endif /* NVGPU_PBDMA_GA10B_H */

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -40,7 +40,7 @@
 
 #include <nvgpu/hw/ga10b/hw_pbdma_ga10b.h>
 
-static u32 pbdma_intr_0_en_set_tree_mask(void)
+u32 ga10b_pbdma_intr_0_en_set_tree_mask(void)
 {
     u32 mask = pbdma_intr_0_en_set_tree_gpfifo_enabled_f() |
         pbdma_intr_0_en_set_tree_gpptr_enabled_f() |
@@ -61,7 +61,7 @@ static u32 pbdma_intr_0_en_set_tree_mask(void)
     return mask;
 }
 
-static u32 pbdma_intr_0_en_clear_tree_mask(void)
+u32 ga10b_pbdma_intr_0_en_clear_tree_mask(void)
 {
     u32 mask = pbdma_intr_0_en_clear_tree_gpfifo_enabled_f() |
         pbdma_intr_0_en_clear_tree_gpptr_enabled_f() |
@@ -82,7 +82,7 @@ static u32 pbdma_intr_0_en_clear_tree_mask(void)
     return mask;
 }
 
-static u32 pbdma_intr_1_en_set_tree_mask(void)
+u32 ga10b_pbdma_intr_1_en_set_tree_mask(void)
 { u32 mask = pbdma_intr_1_en_set_tree_hce_re_illegal_op_enabled_f() |
     pbdma_intr_1_en_set_tree_hce_re_alignb_enabled_f() |
     pbdma_intr_1_en_set_tree_hce_priv_enabled_f() |
@@ -93,7 +93,7 @@ static u32 pbdma_intr_1_en_set_tree_mask(void)
     return mask;
 }
 
-static u32 pbdma_intr_1_en_clear_tree_mask(void)
+u32 ga10b_pbdma_intr_1_en_clear_tree_mask(void)
 {
     u32 mask = pbdma_intr_1_en_clear_tree_hce_re_illegal_op_enabled_f() |
         pbdma_intr_1_en_clear_tree_hce_re_alignb_enabled_f() |
@@ -204,9 +204,9 @@ static void ga10b_pbdma_disable_all_intr(struct gk20a *g)
         for (tree = 0U; tree < pbdma_intr_0_en_clear_tree__size_2_v();
                 tree++) {
             nvgpu_writel(g, pbdma_intr_0_en_clear_tree_r(pbdma_id,
-                tree), pbdma_intr_0_en_clear_tree_mask());
+                tree), g->ops.pbdma.intr_0_en_clear_tree_mask());
             nvgpu_writel(g, pbdma_intr_1_en_clear_tree_r(pbdma_id,
-                tree), pbdma_intr_1_en_clear_tree_mask());
+                tree), g->ops.pbdma.intr_1_en_clear_tree_mask());
         }
     }
 }
@@ -352,9 +352,9 @@ void ga10b_pbdma_intr_enable(struct gk20a *g, bool enable)
 
             /* enable pbdma interrupts and route to tree_0 */
             nvgpu_writel(g, pbdma_intr_0_en_set_tree_r(pbdma_id,
-                tree), pbdma_intr_0_en_set_tree_mask());
+                tree), g->ops.pbdma.intr_0_en_set_tree_mask());
             nvgpu_writel(g, pbdma_intr_1_en_set_tree_r(pbdma_id,
-                tree), pbdma_intr_1_en_set_tree_mask());
+                tree), g->ops.pbdma.intr_1_en_set_tree_mask());
         }
     }
 

@@ -1,7 +1,7 @@
 /*
  * GA100 Tegra HAL interface
  *
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -1043,6 +1043,10 @@ static const struct gops_pbdma ga100_ops_pbdma = {
     .cleanup_sw = nvgpu_pbdma_cleanup_sw,
     .setup_hw = NULL,
     .intr_enable = ga10b_pbdma_intr_enable,
+    .intr_0_en_set_tree_mask = ga10b_pbdma_intr_0_en_set_tree_mask,
+    .intr_0_en_clear_tree_mask = ga10b_pbdma_intr_0_en_clear_tree_mask,
+    .intr_1_en_set_tree_mask = ga10b_pbdma_intr_1_en_set_tree_mask,
+    .intr_1_en_clear_tree_mask = ga10b_pbdma_intr_1_en_clear_tree_mask,
     .acquire_val = gm20b_pbdma_acquire_val,
     .get_signature = gp10b_pbdma_get_signature,
 #ifdef CONFIG_NVGPU_HAL_NON_FUSA

@@ -1061,6 +1061,10 @@ static const struct gops_pbdma ga10b_ops_pbdma = {
     .cleanup_sw = nvgpu_pbdma_cleanup_sw,
     .setup_hw = NULL,
     .intr_enable = ga10b_pbdma_intr_enable,
+    .intr_0_en_set_tree_mask = ga10b_pbdma_intr_0_en_set_tree_mask,
+    .intr_0_en_clear_tree_mask = ga10b_pbdma_intr_0_en_clear_tree_mask,
+    .intr_1_en_set_tree_mask = ga10b_pbdma_intr_1_en_set_tree_mask,
+    .intr_1_en_clear_tree_mask = ga10b_pbdma_intr_1_en_clear_tree_mask,
     .acquire_val = gm20b_pbdma_acquire_val,
     .get_signature = gp10b_pbdma_get_signature,
 #ifdef CONFIG_NVGPU_HAL_NON_FUSA

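Both the GA100 and GA10B ops tables above point the four new HAL entries at the GA10B helpers. Purely as an illustration of the duplication this avoids, a hypothetical later chip whose PBDMA units need different enable bits could keep reusing ga10b_pbdma_intr_enable and swap in only its own mask functions; the gaxxx_* names in this sketch are invented and are not part of the source.

/* Hypothetical future chip: keep the common GA10B enable/disable path and
 * override only the mask HALs (all "gaxxx" names are placeholders). */
static const struct gops_pbdma gaxxx_ops_pbdma = {
    .intr_enable               = ga10b_pbdma_intr_enable,
    .intr_0_en_set_tree_mask   = gaxxx_pbdma_intr_0_en_set_tree_mask,
    .intr_0_en_clear_tree_mask = gaxxx_pbdma_intr_0_en_clear_tree_mask,
    .intr_1_en_set_tree_mask   = gaxxx_pbdma_intr_1_en_set_tree_mask,
    .intr_1_en_clear_tree_mask = gaxxx_pbdma_intr_1_en_clear_tree_mask,
    /* ...remaining pbdma HALs as for GA10B... */
};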
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -45,6 +45,10 @@ struct gops_pbdma {
     void (*cleanup_sw)(struct gk20a *g);
     void (*setup_hw)(struct gk20a *g);
     void (*intr_enable)(struct gk20a *g, bool enable);
+    u32 (*intr_0_en_set_tree_mask)(void);
+    u32 (*intr_0_en_clear_tree_mask)(void);
+    u32 (*intr_1_en_set_tree_mask)(void);
+    u32 (*intr_1_en_clear_tree_mask)(void);
     bool (*handle_intr_0)(struct gk20a *g,
             u32 pbdma_id, u32 pbdma_intr_0,
             u32 *error_notifier);