mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-25 02:52:51 +03:00
gpu: nvgpu: Move gops.mc's non-fp members
Move non-function-pointer members out of the mc substruct of gpu_ops. Ideally gpu_ops will have only function pointers, better matching its intended purpose and improving readability.

gops.mc.intr_mask_restore is now mc_intr_mask_restore

Jira NVGPU-74

Change-Id: Iddf5e761012086e993c375d59e1bbdfb1e64db0c
Signed-off-by: Sunny He <suhe@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1509602
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
e78153ea1b
commit
47cb48f1e2
@@ -887,7 +887,6 @@ struct gpu_ops {
|
||||
void (*reset)(struct gk20a *g, u32 units);
|
||||
u32 (*boot_0)(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev);
|
||||
bool (*is_intr1_pending)(struct gk20a *g, enum nvgpu_unit unit, u32 mc_intr_1);
|
||||
u32 intr_mask_restore[4];
|
||||
} mc;
|
||||
struct {
|
||||
void (*show_dump)(struct gk20a *g,
|
||||
@@ -1204,6 +1203,7 @@ struct gk20a {
|
||||
int client_refcount; /* open channels and ctrl nodes */
|
||||
|
||||
struct gpu_ops ops;
|
||||
u32 mc_intr_mask_restore[4];
|
||||
|
||||
int irqs_enabled;
|
||||
int irq_stall; /* can be same as irq_nonstall in case of PCI */
|
||||
|
||||
@@ -29,7 +29,7 @@ void mc_gp10b_intr_enable(struct gk20a *g)
|
||||
|
||||
gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
|
||||
0xffffffff);
|
||||
g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_STALLING] =
|
||||
g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
|
||||
mc_intr_pfifo_pending_f() |
|
||||
mc_intr_priv_ring_pending_f() |
|
||||
mc_intr_pbus_pending_f() |
|
||||
@@ -37,15 +37,15 @@ void mc_gp10b_intr_enable(struct gk20a *g)
|
||||
mc_intr_replayable_fault_pending_f() |
|
||||
eng_intr_mask;
|
||||
gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
|
||||
g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_STALLING]);
|
||||
g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
|
||||
|
||||
gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
|
||||
0xffffffff);
|
||||
g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
|
||||
g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
|
||||
mc_intr_pfifo_pending_f() |
|
||||
eng_intr_mask;
|
||||
gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
|
||||
g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
|
||||
g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
|
||||
}
|
||||
|
||||
void mc_gp10b_intr_unit_config(struct gk20a *g, bool enable,
|
||||
@@ -58,11 +58,11 @@ void mc_gp10b_intr_unit_config(struct gk20a *g, bool enable,
|
||||
NVGPU_MC_INTR_NONSTALLING);
|
||||
if (enable) {
|
||||
reg = mc_intr_en_set_r(intr_index);
|
||||
g->ops.mc.intr_mask_restore[intr_index] |= mask;
|
||||
g->mc_intr_mask_restore[intr_index] |= mask;
|
||||
|
||||
} else {
|
||||
reg = mc_intr_en_clear_r(intr_index);
|
||||
g->ops.mc.intr_mask_restore[intr_index] &= ~mask;
|
||||
g->mc_intr_mask_restore[intr_index] &= ~mask;
|
||||
}
|
||||
|
||||
gk20a_writel(g, reg, mask);
|
||||
@@ -136,7 +136,7 @@ void mc_gp10b_intr_stall_pause(struct gk20a *g)
|
||||
void mc_gp10b_intr_stall_resume(struct gk20a *g)
|
||||
{
|
||||
gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
|
||||
g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_STALLING]);
|
||||
g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
|
||||
}
|
||||
|
||||
u32 mc_gp10b_intr_nonstall(struct gk20a *g)
|
||||
@@ -153,7 +153,7 @@ void mc_gp10b_intr_nonstall_pause(struct gk20a *g)
|
||||
void mc_gp10b_intr_nonstall_resume(struct gk20a *g)
|
||||
{
|
||||
gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
|
||||
g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
|
||||
g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
|
||||
}
|
||||
|
||||
bool mc_gp10b_is_intr1_pending(struct gk20a *g,
|
||||
|
||||
Reference in New Issue
Block a user