gpu: nvgpu: Replace WAR keyword with "fix"

Replace or remove the "WAR" keyword in nvgpu driver comments,
substituting "fix" where a replacement is needed. Rename the functions
below, and their corresponding gops, to replace "war" with "errata"
(a short sketch of the resulting wiring follows the list):
- g.pdb_cache_war_mem
- ramin.init_pdb_cache_war
- ramin.deinit_pdb_cache_war
- tu104_ramin_init_pdb_cache_war
- tu104_ramin_deinit_pdb_cache_war
- fb.apply_pdb_cache_war
- tu104_fb_apply_pdb_cache_war
- nvgpu_init_mm_pdb_cache_war
- nvlink.set_sw_war
- gv100_nvlink_set_sw_war
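
A minimal before/after sketch of the renamed HAL wiring, as it appears
in the tu104 ops tables touched by this change (surrounding fields
elided):

    /* before */
    .init_pdb_cache_war = tu104_ramin_init_pdb_cache_war,
    .deinit_pdb_cache_war = tu104_ramin_deinit_pdb_cache_war,

    /* after */
    .init_pdb_cache_errata = tu104_ramin_init_pdb_cache_errata,
    .deinit_pdb_cache_errata = tu104_ramin_deinit_pdb_cache_errata,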

Jira NVGPU-6680

Change-Id: Ieaad2441fac87e4544eddbca3624b82076b2ee73
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2515700
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: Vedashree Vidwans, 2021-04-15 14:26:02 -07:00
Committed by: mobile promotions
Parent: aba26fa082
Commit: 86cb03d2f1
23 changed files with 77 additions and 77 deletions


@@ -179,7 +179,7 @@ static void nvgpu_remove_mm_support(struct mm_gk20a *mm)
nvgpu_vidmem_destroy(g);
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_INIT_PDB_CACHE)) {
-g->ops.ramin.deinit_pdb_cache_war(g);
+g->ops.ramin.deinit_pdb_cache_errata(g);
}
#endif
nvgpu_pd_cache_fini(g);
@@ -584,19 +584,19 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
}
#ifdef CONFIG_NVGPU_DGPU
-static int nvgpu_init_mm_pdb_cache_war(struct gk20a *g)
+static int nvgpu_init_mm_pdb_cache_errata(struct gk20a *g)
{
int err;
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_INIT_PDB_CACHE)) {
-err = g->ops.ramin.init_pdb_cache_war(g);
+err = g->ops.ramin.init_pdb_cache_errata(g);
if (err != 0) {
return err;
}
}
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_FB_PDB_CACHE)) {
-err = g->ops.fb.apply_pdb_cache_war(g);
+err = g->ops.fb.apply_pdb_cache_errata(g);
if (err != 0) {
return err;
}
@@ -662,7 +662,7 @@ int nvgpu_init_mm_support(struct gk20a *g)
int err;
#ifdef CONFIG_NVGPU_DGPU
-err = nvgpu_init_mm_pdb_cache_war(g);
+err = nvgpu_init_mm_pdb_cache_errata(g);
if (err != 0) {
return err;
}
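
The hunk above shows the shape of errata handling that this rename
standardizes: common code tests the erratum flag, then dispatches
through a HAL pointer. A condensed sketch of that shape, reusing only
calls visible in this diff (the wrapper name is illustrative, not part
of the driver):

    static int apply_pdb_cache_fixes_example(struct gk20a *g)
    {
        int err = 0;

        /* Chips that declare the erratum provide the HAL; others skip it. */
        if (nvgpu_is_errata_present(g, NVGPU_ERRATA_INIT_PDB_CACHE)) {
            err = g->ops.ramin.init_pdb_cache_errata(g);
        }
        return err;
    }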


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -26,7 +26,7 @@
#ifdef CONFIG_NVGPU_NVLINK
/*
-* WAR: use this function to find detault link, as only one is supported
+* Fix: use this function to find detault link, as only one is supported
* on the library for now
* Returns NVLINK_MAX_LINKS_SW on failure
*/


@@ -106,7 +106,7 @@ static int nvgpu_nvlink_enable_links_post_top(struct gk20a *g,
for_each_set_bit(bit, &enabled_links, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_1888034)) {
-g->ops.nvlink.set_sw_war(g, link_id);
+g->ops.nvlink.set_sw_errata(g, link_id);
}
g->ops.nvlink.intr.init_link_err_intr(g, link_id);
g->ops.nvlink.intr.enable_link_err_intr(g, link_id, true);
@@ -262,7 +262,7 @@ int nvgpu_nvlink_early_init(struct gk20a *g)
* node where we hardcode the link_id. DT method is not scalable as same
* DT node is used for different dGPUs connected over PCIE.
* Remove the DT parsing of link id and use HAL to get link_mask based
-* on the GPU. This is temporary WAR while we get the VBIOS updated with
+* on the GPU. This is temporary fix while we get the VBIOS updated with
* correct mask.
*/
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_VBIOS_NVLINK_MASK)) {


@@ -375,7 +375,7 @@ nvgpu_channel_sync_syncpt_create(struct nvgpu_channel *c)
syncpt_name);
/**
-* This is a WAR to handle invalid value of a syncpt.
+* This is a fix to handle invalid value of a syncpt.
* Once nvhost update the return value as NVGPU_INVALID_SYNCPT_ID,
* we can remove the zero check.
*/


@@ -90,7 +90,7 @@ nvgpu_channel_user_syncpt_create(struct nvgpu_channel *ch)
syncpt_name);
/**
-* This is a WAR to handle invalid value of a syncpt.
+* This is a fix to handle invalid value of a syncpt.
* Once nvhost update the return value as NVGPU_INVALID_SYNCPT_ID,
* we can remove the zero check.
*/


@@ -175,18 +175,18 @@ static int tu104_fb_wait_mmu_bind(struct gk20a *g)
return -ETIMEDOUT;
}
-int tu104_fb_apply_pdb_cache_war(struct gk20a *g)
+int tu104_fb_apply_pdb_cache_errata(struct gk20a *g)
{
u64 inst_blk_base_addr;
u32 inst_blk_addr;
u32 i;
int err;
-if (!nvgpu_mem_is_valid(&g->pdb_cache_war_mem)) {
+if (!nvgpu_mem_is_valid(&g->pdb_cache_errata_mem)) {
return -EINVAL;
}
-inst_blk_base_addr = nvgpu_mem_get_addr(g, &g->pdb_cache_war_mem);
+inst_blk_base_addr = nvgpu_mem_get_addr(g, &g->pdb_cache_errata_mem);
/* Bind 256 instance blocks to unused engine ID 0x0 */
for (i = 0U; i < 256U; i++) {
@@ -196,7 +196,7 @@ int tu104_fb_apply_pdb_cache_war(struct gk20a *g)
nvgpu_writel(g, fb_mmu_bind_imb_r(),
fb_mmu_bind_imb_addr_f(inst_blk_addr) |
-nvgpu_aperture_mask(g, &g->pdb_cache_war_mem,
+nvgpu_aperture_mask(g, &g->pdb_cache_errata_mem,
fb_mmu_bind_imb_aperture_sys_mem_nc_f(),
fb_mmu_bind_imb_aperture_sys_mem_c_f(),
fb_mmu_bind_imb_aperture_vid_mem_f()));
@@ -241,7 +241,7 @@ int tu104_fb_apply_pdb_cache_war(struct gk20a *g)
nvgpu_writel(g, fb_mmu_bind_imb_r(),
fb_mmu_bind_imb_addr_f(inst_blk_addr) |
-nvgpu_aperture_mask(g, &g->pdb_cache_war_mem,
+nvgpu_aperture_mask(g, &g->pdb_cache_errata_mem,
fb_mmu_bind_imb_aperture_sys_mem_nc_f(),
fb_mmu_bind_imb_aperture_sys_mem_c_f(),
fb_mmu_bind_imb_aperture_vid_mem_f()));


@@ -33,7 +33,7 @@ int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb);
struct nvgpu_cbc;
void tu104_fb_cbc_configure(struct gk20a *g, struct nvgpu_cbc *cbc);
#endif
-int tu104_fb_apply_pdb_cache_war(struct gk20a *g);
+int tu104_fb_apply_pdb_cache_errata(struct gk20a *g);
#ifdef CONFIG_NVGPU_DGPU
size_t tu104_fb_get_vidmem_size(struct gk20a *g);
#endif


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -30,7 +30,7 @@
#include <nvgpu/hw/tu104/hw_ram_tu104.h>
-int tu104_ramin_init_pdb_cache_war(struct gk20a *g)
+int tu104_ramin_init_pdb_cache_errata(struct gk20a *g)
{
u32 size = NVGPU_CPU_PAGE_SIZE * 258U;
u64 last_bind_pdb_addr;
@@ -39,7 +39,7 @@ int tu104_ramin_init_pdb_cache_war(struct gk20a *g)
u32 i;
int err;
-if (nvgpu_mem_is_valid(&g->pdb_cache_war_mem)) {
+if (nvgpu_mem_is_valid(&g->pdb_cache_errata_mem)) {
return 0;
}
@@ -47,7 +47,7 @@ int tu104_ramin_init_pdb_cache_war(struct gk20a *g)
* Allocate memory for 257 instance block binds +
* PDB bound to 257th instance block
*/
-err = nvgpu_dma_alloc_sys(g, size, &g->pdb_cache_war_mem);
+err = nvgpu_dma_alloc_sys(g, size, &g->pdb_cache_errata_mem);
if (err != 0) {
return err;
}
@@ -58,7 +58,7 @@ int tu104_ramin_init_pdb_cache_war(struct gk20a *g)
* First 256 binds can happen to dummy addresses
*/
pdb_addr = NVGPU_CPU_PAGE_SIZE;
-last_bind_pdb_addr = nvgpu_mem_get_addr(g, &g->pdb_cache_war_mem) +
+last_bind_pdb_addr = nvgpu_mem_get_addr(g, &g->pdb_cache_errata_mem) +
(257U * NVGPU_CPU_PAGE_SIZE);
/* Setup first 256 instance blocks */
@@ -66,9 +66,9 @@ int tu104_ramin_init_pdb_cache_war(struct gk20a *g)
pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
pdb_addr_hi = u64_hi32(pdb_addr);
-nvgpu_mem_wr32(g, &g->pdb_cache_war_mem,
+nvgpu_mem_wr32(g, &g->pdb_cache_errata_mem,
ram_in_page_dir_base_lo_w() + (i * NVGPU_CPU_PAGE_SIZE / 4U),
-nvgpu_aperture_mask(g, &g->pdb_cache_war_mem,
+nvgpu_aperture_mask(g, &g->pdb_cache_errata_mem,
ram_in_page_dir_base_target_sys_mem_ncoh_f(),
ram_in_page_dir_base_target_sys_mem_coh_f(),
ram_in_page_dir_base_target_vid_mem_f()) |
@@ -77,7 +77,7 @@ int tu104_ramin_init_pdb_cache_war(struct gk20a *g)
ram_in_page_dir_base_lo_f(pdb_addr_lo) |
ram_in_use_ver2_pt_format_true_f());
-nvgpu_mem_wr32(g, &g->pdb_cache_war_mem,
+nvgpu_mem_wr32(g, &g->pdb_cache_errata_mem,
ram_in_page_dir_base_hi_w() + (i * NVGPU_CPU_PAGE_SIZE / 4U),
ram_in_page_dir_base_hi_f(pdb_addr_hi));
@@ -88,9 +88,9 @@ int tu104_ramin_init_pdb_cache_war(struct gk20a *g)
pdb_addr_lo = u64_lo32(last_bind_pdb_addr >> ram_in_base_shift_v());
pdb_addr_hi = u64_hi32(last_bind_pdb_addr);
-nvgpu_mem_wr32(g, &g->pdb_cache_war_mem,
+nvgpu_mem_wr32(g, &g->pdb_cache_errata_mem,
ram_in_page_dir_base_lo_w() + (256U * NVGPU_CPU_PAGE_SIZE / 4U),
-nvgpu_aperture_mask(g, &g->pdb_cache_war_mem,
+nvgpu_aperture_mask(g, &g->pdb_cache_errata_mem,
ram_in_page_dir_base_target_sys_mem_ncoh_f(),
ram_in_page_dir_base_target_sys_mem_coh_f(),
ram_in_page_dir_base_target_vid_mem_f()) |
@@ -99,16 +99,16 @@ int tu104_ramin_init_pdb_cache_war(struct gk20a *g)
ram_in_page_dir_base_lo_f(pdb_addr_lo) |
ram_in_use_ver2_pt_format_true_f());
-nvgpu_mem_wr32(g, &g->pdb_cache_war_mem,
+nvgpu_mem_wr32(g, &g->pdb_cache_errata_mem,
ram_in_page_dir_base_hi_w() + (256U * NVGPU_CPU_PAGE_SIZE / 4U),
ram_in_page_dir_base_hi_f(pdb_addr_hi));
return 0;
}
-void tu104_ramin_deinit_pdb_cache_war(struct gk20a *g)
+void tu104_ramin_deinit_pdb_cache_errata(struct gk20a *g)
{
-if (nvgpu_mem_is_valid(&g->pdb_cache_war_mem)) {
-nvgpu_dma_free(g, &g->pdb_cache_war_mem);
+if (nvgpu_mem_is_valid(&g->pdb_cache_errata_mem)) {
+nvgpu_dma_free(g, &g->pdb_cache_errata_mem);
}
}
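
The init/deinit pair above follows the usual nvgpu DMA buffer
lifecycle: allocate once, tolerate repeated init calls, and free only a
valid buffer. A condensed sketch under the signatures visible in this
diff (the helper name is illustrative):

    static int pdb_cache_errata_mem_setup_example(struct gk20a *g)
    {
        /* Idempotent: a second call is a no-op. */
        if (nvgpu_mem_is_valid(&g->pdb_cache_errata_mem)) {
            return 0;
        }
        /* 257 instance blocks plus the PDB page bound to the last one. */
        return nvgpu_dma_alloc_sys(g, NVGPU_CPU_PAGE_SIZE * 258U,
                &g->pdb_cache_errata_mem);
    }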


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -27,7 +27,7 @@
struct gk20a;
-int tu104_ramin_init_pdb_cache_war(struct gk20a *g);
-void tu104_ramin_deinit_pdb_cache_war(struct gk20a *g);
+int tu104_ramin_init_pdb_cache_errata(struct gk20a *g);
+void tu104_ramin_deinit_pdb_cache_errata(struct gk20a *g);
#endif /* NVGPU_RAMIN_TU104_H */


@@ -1581,7 +1581,7 @@ static int gr_exec_ctx_ops(struct nvgpu_tsg *tsg,
if (current_mem == nvgpu_gr_ctx_get_ctx_mem(gr_ctx) &&
g->ops.gr.ctx_patch_smpc != NULL) {
-/* check to see if we need to add a special WAR
+/* check to see if we need to add a special fix
for some of the SMPC perf regs */
g->ops.gr.ctx_patch_smpc(g,
offset_addrs[j],


@@ -91,7 +91,7 @@ int gp10b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
return -EINVAL;
}
-static void gr_gp10b_sm_lrf_ecc_overcount_war(bool single_err,
+static void gr_gp10b_sm_lrf_ecc_overcount_errata(bool single_err,
u32 sed_status,
u32 ded_status,
u32 *count_to_adjust,
@@ -176,7 +176,7 @@ int gp10b_gr_intr_handle_sm_exception(struct gk20a *g,
if (nvgpu_is_errata_present(g,
NVGPU_ERRATA_LRF_ECC_OVERCOUNT)) {
-gr_gp10b_sm_lrf_ecc_overcount_war(true,
+gr_gp10b_sm_lrf_ecc_overcount_errata(true,
lrf_ecc_sed_status,
lrf_ecc_ded_status,
&lrf_single_count_delta,
@@ -193,7 +193,7 @@ int gp10b_gr_intr_handle_sm_exception(struct gk20a *g,
if (nvgpu_is_errata_present(g,
NVGPU_ERRATA_LRF_ECC_OVERCOUNT)) {
-gr_gp10b_sm_lrf_ecc_overcount_war(false,
+gr_gp10b_sm_lrf_ecc_overcount_errata(false,
lrf_ecc_sed_status,
lrf_ecc_ded_status,
&lrf_double_count_delta,


@@ -831,7 +831,7 @@ static const struct gops_fb tu104_ops_fb = {
#ifdef CONFIG_NVGPU_DGPU
.get_vidmem_size = tu104_fb_get_vidmem_size,
#endif
-.apply_pdb_cache_war = tu104_fb_apply_pdb_cache_war,
+.apply_pdb_cache_errata = tu104_fb_apply_pdb_cache_errata,
};
static const struct gops_nvdec tu104_ops_nvdec = {
@@ -996,8 +996,8 @@ static const struct gops_ramin tu104_ops_ramin = {
.base_shift = gk20a_ramin_base_shift,
.alloc_size = gk20a_ramin_alloc_size,
.set_eng_method_buffer = gv11b_ramin_set_eng_method_buffer,
-.init_pdb_cache_war = tu104_ramin_init_pdb_cache_war,
-.deinit_pdb_cache_war = tu104_ramin_deinit_pdb_cache_war,
+.init_pdb_cache_errata = tu104_ramin_init_pdb_cache_errata,
+.deinit_pdb_cache_errata = tu104_ramin_deinit_pdb_cache_errata,
};
static const struct gops_runlist tu104_ops_runlist = {
@@ -1548,7 +1548,7 @@ static const struct gops_nvlink tu104_ops_nvlink = {
.discover_link = gv100_nvlink_discover_link,
.rxdet = tu104_nvlink_rxdet,
.get_connected_link_mask = tu104_nvlink_get_connected_link_mask,
-.set_sw_war = NULL,
+.set_sw_errata = NULL,
.configure_ac_coupling = gv100_nvlink_configure_ac_coupling,
.prog_alt_clk = gv100_nvlink_prog_alt_clk,
.clear_link_reset = gv100_nvlink_clear_link_reset,


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -537,11 +537,10 @@ void gv100_nvlink_enable_link_an0(struct gk20a *g, u32 link_id)
}
-void gv100_nvlink_set_sw_war(struct gk20a *g, u32 link_id)
+void gv100_nvlink_set_sw_errata(struct gk20a *g, u32 link_id)
{
u32 reg;
-/* WAR for HW bug 1888034 */
reg = DLPL_REG_RD32(g, link_id, nvl_sl0_safe_ctrl2_tx_r());
reg = set_field(reg, nvl_sl0_safe_ctrl2_tx_ctr_init_m(),
nvl_sl0_safe_ctrl2_tx_ctr_init_init_f());


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -31,7 +31,7 @@ u32 gv100_nvlink_get_link_reset_mask(struct gk20a *g);
int gv100_nvlink_discover_link(struct gk20a *g);
int gv100_nvlink_init(struct gk20a *g);
void gv100_nvlink_get_connected_link_mask(u32 *link_mask);
-void gv100_nvlink_set_sw_war(struct gk20a *g, u32 link_id);
+void gv100_nvlink_set_sw_errata(struct gk20a *g, u32 link_id);
int gv100_nvlink_configure_ac_coupling(struct gk20a *g,
unsigned long mask, bool sync);
void gv100_nvlink_prog_alt_clk(struct gk20a *g);


@@ -778,7 +778,7 @@ struct gk20a {
struct nvgpu_list_node boardobj_head;
struct nvgpu_list_node boardobjgrp_head;
-struct nvgpu_mem pdb_cache_war_mem;
+struct nvgpu_mem pdb_cache_errata_mem;
/** @endcond */
#ifdef CONFIG_NVGPU_DGPU


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -44,7 +44,7 @@ struct gops_class {
*
* List of valid class numbers:
*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-* 1. Graphics classes: (WAR: Lot of qnx safety tests are still using
+* 1. Graphics classes: (Fix: Lot of qnx safety tests are still using
* graphics 3d class. Until these tests get fixed,
* allowing 3d graphics class as valid class for
* safety build.)


@@ -470,7 +470,7 @@ struct gops_fb {
int (*init_nvlink)(struct gk20a *g);
int (*enable_nvlink)(struct gk20a *g);
size_t (*get_vidmem_size)(struct gk20a *g);
-int (*apply_pdb_cache_war)(struct gk20a *g);
+int (*apply_pdb_cache_errata)(struct gk20a *g);
int (*init_fbpa)(struct gk20a *g);
void (*handle_fbpa_intr)(struct gk20a *g, u32 fbpa_id);
#endif


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -73,22 +73,22 @@ struct gops_nvlink_intr {
};
struct gops_nvlink {
-int (*init)(struct gk20a *g);
-u32 (*get_link_reset_mask)(struct gk20a *g);
-int (*discover_link)(struct gk20a *g);
-int (*rxdet)(struct gk20a *g, u32 link_id);
-void (*get_connected_link_mask)(u32 *link_mask);
-void (*set_sw_war)(struct gk20a *g, u32 link_id);
-int (*configure_ac_coupling)(struct gk20a *g,
-unsigned long mask, bool sync);
-void (*prog_alt_clk)(struct gk20a *g);
-void (*clear_link_reset)(struct gk20a *g, u32 link_id);
-void (*enable_link_an0)(struct gk20a *g, u32 link_id);
-/* API */
-struct gops_nvlink_link_mode_transitions link_mode_transitions;
-int (*reg_init)(struct gk20a *g);
-struct gops_nvlink_minion minion;
-struct gops_nvlink_intr intr;
+int (*init)(struct gk20a *g);
+u32 (*get_link_reset_mask)(struct gk20a *g);
+int (*discover_link)(struct gk20a *g);
+int (*rxdet)(struct gk20a *g, u32 link_id);
+void (*get_connected_link_mask)(u32 *link_mask);
+void (*set_sw_errata)(struct gk20a *g, u32 link_id);
+int (*configure_ac_coupling)(struct gk20a *g,
+unsigned long mask, bool sync);
+void (*prog_alt_clk)(struct gk20a *g);
+void (*clear_link_reset)(struct gk20a *g, u32 link_id);
+void (*enable_link_an0)(struct gk20a *g, u32 link_id);
+/* API */
+struct gops_nvlink_link_mode_transitions link_mode_transitions;
+int (*reg_init)(struct gk20a *g);
+struct gops_nvlink_minion minion;
+struct gops_nvlink_intr intr;
};
#endif /* NVGPU_GOPS_NVLINK_H */
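
Note that tu104 leaves set_sw_errata NULL in its ops table; that is
safe because the call site shown earlier in this change guards the
dispatch with the erratum flag rather than a NULL check:

    /* Sketch of the call site from the nvlink enable path above. */
    if (nvgpu_is_errata_present(g, NVGPU_ERRATA_1888034)) {
        g->ops.nvlink.set_sw_errata(g, link_id);
    }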


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -144,7 +144,7 @@ struct gops_ramin {
#ifdef CONFIG_NVGPU_DGPU
/**
-* @brief Init WAR for PDB cache.
+* @brief Init fix for PDB cache.
*
* @param g [in] Pointer to GPU driver struct.
*
@@ -153,17 +153,17 @@ struct gops_ramin {
*
* @return 0 in case of success, < 0 in case of failure.
*/
-int (*init_pdb_cache_war)(struct gk20a *g);
+int (*init_pdb_cache_errata)(struct gk20a *g);
/**
-* @brief Deinit WAR for PDB cache.
+* @brief Deinit fix for PDB cache.
*
* @param g [in] Pointer to GPU driver struct.
*
* This HAL allows implementing chip specific de-initialization
* related to PDB cache.
*/
-void (*deinit_pdb_cache_war)(struct gk20a *g);
+void (*deinit_pdb_cache_errata)(struct gk20a *g);
#endif
void (*set_adr_limit)(struct gk20a *g,


@@ -1161,7 +1161,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
if (err)
nvgpu_err(g,
"error (%d) during pm ctxsw mode update", err);
-/* gk20a would require a WAR to set the core PM_ENABLE bit, not
+/* gk20a would require a fix to set the core PM_ENABLE bit, not
* added here with gk20a being deprecated
*/
clean_up:


@@ -104,7 +104,8 @@ static struct gk20a_platform nvgpu_pci_device[] = {
.clk_round_rate = nvgpu_pci_clk_round_rate,
/*
-* WAR: PCIE X1 is very slow, set to very high value till nvlink is up
+* Fix: PCIE X1 is very slow, set to very high value till
+* nvlink is up
*/
.ch_wdt_init_limit_ms = 30000,


@@ -280,7 +280,7 @@ struct gk20a_platform {
bool honors_aperture;
/* unified or split memory with separate vidmem? */
bool unified_memory;
-/* WAR for gm20b chips. */
+/* Fix for gm20b chips. */
bool force_128K_pmu_vm;
/*


@@ -866,7 +866,7 @@ static int gk20a_tegra_probe(struct device *dev)
platform->g->clk.gpc_pll.id = GK20A_GPC_PLL;
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_1547668)) {
-/* WAR for bug 1547668: Disable railgating and scaling
+/* Disable railgating and scaling
irrespective of platform data if the rework was not made. */
np = of_find_node_by_path("/gpu-dvfs-rework");
if (!(np && of_device_is_available(np))) {