gpu: nvgpu: add intr_unit_bitmask input param for fb.intr.isr

From tu104 onwards, fb interrupt status/enable/disable moved from the
fb_niso_intr_* registers to the fb_*vector* registers. At the top level,
fb interrupt status/enable/disable is handled through the hub_intr bit
in the mc_intr registers.

Starting with nvgpu-next, this has changed, so fb.intr.isr now takes an
intr_unit_bitmask parameter that lets the caller indicate which mmu
intr_units are pending; the existing call sites simply pass 0U.
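
As a rough sketch of the intended call pattern (the FB_INTR_EXAMPLE_*
bits and example_hub_isr() below are illustrative assumptions, not
nvgpu definitions), a top-level handler that already knows which fb
vectors fired could forward that information instead of leaving the
fb ISR to rediscover it:

/* Hypothetical unit bits, for illustration only. */
#define FB_INTR_EXAMPLE_MMU_FAULT	0x1U
#define FB_INTR_EXAMPLE_ECC_ERROR	0x2U

static void example_hub_isr(struct gk20a *g, bool fault_pending,
		bool ecc_pending)
{
	u32 intr_unit_bitmask = 0U;

	if (fault_pending) {
		intr_unit_bitmask |= FB_INTR_EXAMPLE_MMU_FAULT;
	}
	if (ecc_pending) {
		intr_unit_bitmask |= FB_INTR_EXAMPLE_ECC_ERROR;
	}

	/* Pre-Turing call sites keep passing 0U, as this change does. */
	g->ops.fb.intr.isr(g, intr_unit_bitmask);
}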

JIRA NVGPU-5032

Change-Id: Ib54170b055b83e2696312c811c2e3ba678749359
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2330867
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

Authored by Seema Khowala on 2020-04-18 19:58:06 -07:00
Committed by Alex Waterman
parent 470fe3a6d4
commit aff5497907
8 changed files with 26 additions and 25 deletions

@@ -1,7 +1,7 @@
/*
* GV11B FB INTR
*
-* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,7 +29,7 @@ struct gk20a;
void gv11b_fb_intr_enable(struct gk20a *g);
void gv11b_fb_intr_disable(struct gk20a *g);
-void gv11b_fb_intr_isr(struct gk20a *g);
+void gv11b_fb_intr_isr(struct gk20a *g, u32 intr_unit_bitmask);
bool gv11b_fb_intr_is_mmu_fault_pending(struct gk20a *g);
#endif /* NVGPU_FB_INTR_GV11B_H */

@@ -1,7 +1,7 @@
/*
* GV11B FB
*
-* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -71,7 +71,7 @@ void gv11b_fb_intr_disable(struct gk20a *g)
nvgpu_mc_intr_stall_unit_config(g, MC_INTR_UNIT_HUB, MC_INTR_DISABLE);
}
-void gv11b_fb_intr_isr(struct gk20a *g)
+void gv11b_fb_intr_isr(struct gk20a *g, u32 intr_unit_bitmask)
{
u32 niso_intr;

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -81,7 +81,7 @@ void tu104_fb_intr_disable(struct gk20a *g)
fb_mmu_int_vector_ecc_error_vector_v(ecc_error));
}
-void tu104_fb_intr_isr(struct gk20a *g)
+void tu104_fb_intr_isr(struct gk20a *g, u32 intr_unit_bitmask)
{
u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
u32 nonreplay_fault = nvgpu_readl(g,

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,7 +29,7 @@ struct gk20a;
void tu104_fb_intr_enable(struct gk20a *g);
void tu104_fb_intr_disable(struct gk20a *g);
-void tu104_fb_intr_isr(struct gk20a *g);
+void tu104_fb_intr_isr(struct gk20a *g, u32 intr_unit_bitmask);
bool tu104_fb_intr_is_mmu_fault_pending(struct gk20a *g);
#endif /* NVGPU_FB_INTR_TU104_H */

@@ -104,7 +104,7 @@ static void mc_gp10b_isr_stall_secondary_0(struct gk20a *g, u32 mc_intr_0)
{
if ((g->ops.mc.is_intr_hub_pending != NULL) &&
g->ops.mc.is_intr_hub_pending(g, mc_intr_0)) {
-g->ops.fb.intr.isr(g);
+g->ops.fb.intr.isr(g, 0U);
}
if ((mc_intr_0 & mc_intr_pfifo_pending_f()) != 0U) {
g->ops.fifo.intr_0_isr(g);

@@ -69,11 +69,12 @@ struct gops_fb_intr {
* @brief ISR for fb hub interrupts.
*
* @param g [in] Pointer to GPU driver struct.
+* @param intr_unit_bitmask [in] Bitmask of the mmu intr_units.
*
* This is the entry point to handle fb hub interrupts. This function
-* handled all the interrupts enabled in enable function.
+* handles all the interrupts enabled in enable function.
*/
-void (*isr)(struct gk20a *g);
+void (*isr)(struct gk20a *g, u32 intr_unit_bitmask);
/**
* @brief Checks any mmu fault interrupt is pending
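
As a hedged sketch of how an implementation of this hook could consume
the new argument (the EXAMPLE_FB_INTR_UNIT_* bits and the handler below
are assumptions for illustration, not nvgpu definitions; in this change
the existing call sites simply pass 0U):

#define EXAMPLE_FB_INTR_UNIT_MMU_FAULT	0x1U
#define EXAMPLE_FB_INTR_UNIT_ECC	0x2U
#define EXAMPLE_FB_INTR_UNIT_ALL	0x3U

static void example_fb_intr_isr(struct gk20a *g, u32 intr_unit_bitmask)
{
	/* Treat 0U as "not specified" and fall back to checking every unit. */
	if (intr_unit_bitmask == 0U) {
		intr_unit_bitmask = EXAMPLE_FB_INTR_UNIT_ALL;
	}

	if ((intr_unit_bitmask & EXAMPLE_FB_INTR_UNIT_MMU_FAULT) != 0U) {
		/* query and handle mmu fault status here */
	}
	if ((intr_unit_bitmask & EXAMPLE_FB_INTR_UNIT_ECC) != 0U) {
		/* query and handle ecc error status here */
	}
}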

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -66,12 +66,12 @@ int fb_intr_gv11b_isr_test(struct unit_module *m, struct gk20a *g, void *args)
if (gv11b_fb_intr_is_mmu_fault_pending(g)) {
unit_return_fail(m, "MMU fault should NOT be pending\n");
}
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* Hub access counter notify/error: just causes a nvgpu_info call */
nvgpu_writel(g, fb_niso_intr_r(),
fb_niso_intr_hub_access_counter_notify_m());
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* MMU fault: testing of MMU fault handling is done in other tests */
nvgpu_writel(g, fb_niso_intr_r(),
@@ -79,12 +79,12 @@ int fb_intr_gv11b_isr_test(struct unit_module *m, struct gk20a *g, void *args)
if (!gv11b_fb_intr_is_mmu_fault_pending(g)) {
unit_return_fail(m, "MMU fault should be pending\n");
}
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* ECC fault: testing of ECC fault handling is done in other tests */
nvgpu_writel(g, fb_niso_intr_r(),
fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f());
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* Disable interrupts */
gv11b_fb_intr_disable(g);
@@ -163,35 +163,35 @@ int fb_intr_gv11b_ecc_test(struct unit_module *m, struct gk20a *g, void *args)
/* Set the interrupt status as corrected */
nvgpu_writel(g, p->status_reg, p->corrected_status);
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* Set the interrupt status as uncorrected */
nvgpu_writel(g, p->status_reg, p->uncorrected_status);
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* Set arbitrary number of corrected and uncorrected errors */
nvgpu_writel(g, p->corrected_err_reg, ECC_ERRORS);
nvgpu_writel(g, p->uncorrected_err_reg, ECC_ERRORS);
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* Same but with corrected overflow bit set */
nvgpu_writel(g, p->status_reg, 1 | p->corrected_overflow);
nvgpu_writel(g, p->corrected_err_reg, ECC_ERRORS);
nvgpu_writel(g, p->uncorrected_err_reg, ECC_ERRORS);
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* Same but with uncorrected overflow bit set */
nvgpu_writel(g, p->status_reg, 1 | p->uncorrected_overflow);
nvgpu_writel(g, p->corrected_err_reg, ECC_ERRORS);
nvgpu_writel(g, p->uncorrected_err_reg, ECC_ERRORS);
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* Both overflow but error counts at 0 */
nvgpu_writel(g, p->status_reg, 1 | p->corrected_overflow |
p->uncorrected_overflow);
nvgpu_writel(g, p->corrected_err_reg, 0);
nvgpu_writel(g, p->uncorrected_err_reg, 0);
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
/* Extra case for fillunit */
if (subcase == TEST_ECC_FILLUNIT) {
@@ -199,7 +199,7 @@ int fb_intr_gv11b_ecc_test(struct unit_module *m, struct gk20a *g, void *args)
nvgpu_writel(g, p->status_reg,
fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m() |
fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m());
-gv11b_fb_intr_isr(g);
+gv11b_fb_intr_isr(g, 0U);
}
/* Clear interrupt status */

@@ -189,7 +189,7 @@ static u32 mock_ce_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
return u.ce_isr_return;
}
-static void mock_fb_isr(struct gk20a *g)
+static void mock_fb_isr(struct gk20a *g, u32 intr_unit_bitmask)
{
u.fb_isr = true;
}