Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: add mm.mmu_fault.parse_mmu_fault_info gops
Add the mm.mmu_fault.parse_mmu_fault_info gops. This is required for
nvgpu-next. Also add an mmu_engine_id_type field to the mmu_fault_info
structure. This field is set in the parse_mmu_fault_info HAL so that
gv11b_mm_mmu_fault_handle_other_fault_notify does not depend upon any
chip-specific h/w header. This is needed because the BAR2 mmu engine id
has changed in nvgpu-next.

JIRA NVGPU-5032

Change-Id: I0c5e9ef607aff5b105f59582013cbfb31396290a
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2330693
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Lakshmanan M <lm@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: Lakshmanan M <lm@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Committed-by: Alex Waterman
Parent: 91401cc849
Commit: 68caee196a
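For context, the change routes all fault-info parsing through the new HAL pointer: common fault-handling code now reads only the s/w-defined mmufault->mmu_engine_id_type, so a later chip can classify its own engine ids without leaking chip-specific h/w headers into shared handlers. A minimal sketch of how a hypothetical follow-on chip might override the op (the nextchip_* name and NEXTCHIP_MMU_ENG_ID_* values are assumptions for illustration, not part of this change):

/*
 * Hypothetical per-chip override of the parse_mmu_fault_info gop.
 * Common code only consumes mmufault->mmu_engine_id_type, so a chip
 * whose BAR2 mmu engine id differs from gv11b can classify faults here
 * without exposing chip-specific defines to the shared handlers.
 */
static void nextchip_mm_mmu_fault_parse_mmu_fault_info(
				struct mmu_fault_info *mmufault)
{
	/* NEXTCHIP_MMU_ENG_ID_* are assumed chip-local h/w values. */
	if (mmufault->mmu_engine_id == NEXTCHIP_MMU_ENG_ID_BAR2) {
		mmufault->mmu_engine_id_type = NVGPU_MMU_ENGINE_ID_TYPE_BAR2;
	} else if (mmufault->mmu_engine_id == NEXTCHIP_MMU_ENG_ID_PHYSICAL) {
		mmufault->mmu_engine_id_type =
				NVGPU_MMU_ENGINE_ID_TYPE_PHYSICAL;
	} else {
		mmufault->mmu_engine_id_type = NVGPU_MMU_ENGINE_ID_TYPE_OTHER;
	}
}

/*
 * Installed from that chip's gpu_ops table, alongside the other
 * mm.mmu_fault hooks:
 *
 *	.parse_mmu_fault_info = nextchip_mm_mmu_fault_parse_mmu_fault_info,
 */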
@@ -411,7 +411,7 @@ void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 	fault_status &= ~(fb_mmu_fault_status_valid_m());
 	g->ops.fb.write_mmu_fault_status(g, fault_status);
 
-	gv11b_mm_mmu_fault_parse_mmu_fault_info(mmufault);
+	g->ops.mm.mmu_fault.parse_mmu_fault_info(mmufault);
 
 }
@@ -1179,6 +1179,8 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
 			.setup_hw = gv11b_mm_mmu_fault_setup_hw,
 			.info_mem_destroy = gv11b_mm_mmu_fault_info_mem_destroy,
 			.disable_hw = gv11b_mm_mmu_fault_disable_hw,
+			.parse_mmu_fault_info =
+				gv11b_mm_mmu_fault_parse_mmu_fault_info,
 		},
 		.cache = {
 			.fb_flush = gk20a_mm_fb_flush,
@@ -1204,6 +1204,8 @@ static const struct gpu_ops tu104_ops = {
 			.setup_hw = gv11b_mm_mmu_fault_setup_hw,
 			.info_mem_destroy = gv11b_mm_mmu_fault_info_mem_destroy,
 			.disable_hw = gv11b_mm_mmu_fault_disable_hw,
+			.parse_mmu_fault_info =
+				gv11b_mm_mmu_fault_parse_mmu_fault_info,
 		},
 		.cache = {
 			.fb_flush = gk20a_mm_fb_flush,
@@ -124,6 +124,16 @@ static const char *const gv11b_gpc_client_descs[] = {
 
 void gv11b_mm_mmu_fault_parse_mmu_fault_info(struct mmu_fault_info *mmufault)
 {
+	if (mmufault->mmu_engine_id == gmmu_fault_mmu_eng_id_bar2_v()) {
+		mmufault->mmu_engine_id_type = NVGPU_MMU_ENGINE_ID_TYPE_BAR2;
+
+	} else if (mmufault->mmu_engine_id ==
+			gmmu_fault_mmu_eng_id_physical_v()) {
+		mmufault->mmu_engine_id_type =
+			NVGPU_MMU_ENGINE_ID_TYPE_PHYSICAL;
+	} else {
+		mmufault->mmu_engine_id_type = NVGPU_MMU_ENGINE_ID_TYPE_OTHER;
+	}
 	if (mmufault->fault_type >= ARRAY_SIZE(gv11b_fault_type_descs)) {
 		nvgpu_do_assert();
 		mmufault->fault_type_desc = mmufault_invalid_str;
@@ -287,7 +297,7 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
 			gmmu_fault_buf_entry_valid_w()),
 			rd32_val);
 
-	gv11b_mm_mmu_fault_parse_mmu_fault_info(mmufault);
+	g->ops.mm.mmu_fault.parse_mmu_fault_info(mmufault);
 }
 
 static bool gv11b_mm_mmu_fault_handle_mmu_fault_ce(struct gk20a *g,
@@ -622,12 +632,12 @@ void gv11b_mm_mmu_fault_handle_other_fault_notify(struct gk20a *g,
 	gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmufault);
 
 	/* BAR2/Physical faults will not be snapped in hw fault buf */
-	if (mmufault->mmu_engine_id == gmmu_fault_mmu_eng_id_bar2_v()) {
+	if (mmufault->mmu_engine_id_type == NVGPU_MMU_ENGINE_ID_TYPE_BAR2) {
 		nvgpu_err(g, "BAR2 MMU FAULT");
 		gv11b_fb_handle_bar2_fault(g, mmufault, fault_status);
 
-	} else if (mmufault->mmu_engine_id ==
-			gmmu_fault_mmu_eng_id_physical_v()) {
+	} else if (mmufault->mmu_engine_id_type ==
+			NVGPU_MMU_ENGINE_ID_TYPE_PHYSICAL) {
 		/* usually means VPR or out of bounds physical accesses */
 		nvgpu_err(g, "PHYSICAL MMU FAULT");
 	} else {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -282,6 +282,16 @@ struct gops_mm {
 		 * Disable the hardware setup of GMMU fault buffer.
 		 */
 		void (*disable_hw)(struct gk20a *g);
+
+		/**
+		 * @brief HAL to parse mmu fault info read from h/w.
+		 *
+		 * @param mmufault [in] Pointer to memory containing info
+		 *                      to be parsed.
+		 *
+		 */
+		void (*parse_mmu_fault_info)(struct mmu_fault_info *mmufault);
+
 	} mmu_fault;
 
 	/**
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -52,6 +52,15 @@
 /** State which is used for enabling the GMMU fault hardware support. */
 #define NVGPU_MMU_FAULT_BUF_ENABLED		1U
 
+/** S/w defined mmu engine id type. */
+#define NVGPU_MMU_ENGINE_ID_TYPE_OTHER		0U
+
+/** S/w defined mmu engine id type. */
+#define NVGPU_MMU_ENGINE_ID_TYPE_BAR2		1U
+
+/** S/w defined mmu engine id type. */
+#define NVGPU_MMU_ENGINE_ID_TYPE_PHYSICAL	2U
+
 /**
  * Forward declared opaque placeholder type that does not really exist, but
  * helps the compiler about getting types right.
@@ -118,6 +127,12 @@ struct mmu_fault_info {
 	 */
 	u32 mmu_engine_id;
 
+	/**
+	 * The s/w defined mmu_engine_id type (BAR2, PHYSICAL).
+	 */
+	u32 mmu_engine_id_type;
+
 	/** GPC id if client type is gpc. For gv11b, NUM_GPCS = 1. */
 	u32 gpc_id;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,6 +34,7 @@
 #include "hal/fb/fb_gv11b.h"
 #include "hal/fb/fb_mmu_fault_gv11b.h"
 #include "hal/fb/intr/fb_intr_gv11b.h"
+#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
 #include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
 
 #include "fb_fusa.h"
@@ -102,6 +103,8 @@ int fb_mmu_fault_gv11b_init_test(struct unit_module *m, struct gk20a *g,
 	g->ops.bus.bar2_bind = hal_bar2_bind_nop;
 	g->ops.fifo.mmu_fault_id_to_pbdma_id =
 					hal_fifo_mmu_fault_id_to_pbdma_id;
+	g->ops.mm.mmu_fault.parse_mmu_fault_info =
+					gv11b_mm_mmu_fault_parse_mmu_fault_info;
 
 	return UNIT_SUCCESS;
 }
@@ -150,6 +150,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 					gp10b_get_max_page_table_levels;
 	g->ops.mm.mmu_fault.info_mem_destroy =
 					gv11b_mm_mmu_fault_info_mem_destroy;
+	g->ops.mm.mmu_fault.parse_mmu_fault_info =
+					gv11b_mm_mmu_fault_parse_mmu_fault_info;
 
 	nvgpu_posix_register_io(g, &mmu_faults_callbacks);
 	nvgpu_posix_io_init_reg_space(g);