mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: hal: fix compile errors with new compile flags
This prepares for adding the CFLAGS below:
-Werror -Wall -Wextra \
-Wmissing-braces -Wpointer-arith -Wundef \
-Wconversion -Wsign-conversion \
-Wformat-security \
-Wmissing-declarations -Wredundant-decls -Wimplicit-fallthrough
Jira GVSCI-11640
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Change-Id: Ia16ef186da1e97badff9dd0bf8cbd6700dd77b15
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2555057
Reviewed-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-by: Aparna Das <aparnad@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
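Most of the non-copyright changes in the diff below exist to keep -Wconversion and -Wsign-conversion quiet: implicit signed/unsigned and narrowing conversions are made explicit at the point of use. A minimal sketch of the pattern, with hypothetical names (not functions from this driver), assuming the value is known to be non-negative and in range:

/* Before: an int expression returned from a function declared to
 * return an unsigned type is an implicit sign conversion, which the
 * new flags turn into a build error under -Werror. */
static unsigned int scale_base_before(int base)
{
	return base * 2;
}

/* After: the conversion is written out, mirroring changes such as
 * "return (u32)(base * 2);" in gm20b_cbc_fix_config(). */
static unsigned int scale_base_after(int base)
{
	return (unsigned int)(base * 2);
}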
committed by mobile promotions
parent 9ab1271269
commit e81a36e56a
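The other recurring change is the "(void)param;" statement added at the top of many HAL functions. These functions implement per-chip function-pointer ops with a common signature, so an individual implementation may legitimately ignore a parameter; with -Wextra's -Wunused-parameter promoted to an error by -Werror, the explicit void cast documents and silences that. A hedged, stand-alone illustration (names are made up, not from nvgpu):

struct gk20a;	/* opaque device handle, as in the driver */

/* The op signature requires the device pointer even though this
 * hypothetical chip variant returns a constant; casting it to void
 * marks the parameter as intentionally unused. */
static unsigned int example_channel_count(struct gk20a *g)
{
	(void)g;
	return 512U;
}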
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -34,5 +34,7 @@ void gv11b_bus_configure_debug_bus(struct gk20a *g)
 nvgpu_writel(g, bus_debug_sel_1_r(), 0U);
 nvgpu_writel(g, bus_debug_sel_2_r(), 0U);
 nvgpu_writel(g, bus_debug_sel_3_r(), 0U);
+#else
+ (void)g;
 #endif
 }
@@ -1,7 +1,7 @@
 /*
 * GM20B CBC
 *
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -221,12 +221,12 @@ u32 gm20b_cbc_fix_config(struct gk20a *g, int base)
 u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());

 if (val == 2U) {
- return base * 2;
+ return (u32)(base * 2);
 } else if (val != 1U) {
 nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
 }

- return base;
+ return (u32)base;
 }

@@ -268,7 +268,7 @@ void gm20b_cbc_init(struct gk20a *g, struct nvgpu_cbc *cbc)
 /* Bug 1477079 indicates sw adjustment on the posted divided base. */
 if (g->ops.cbc.fix_config != NULL) {
 compbit_base_post_divide =
- g->ops.cbc.fix_config(g, compbit_base_post_divide);
+ g->ops.cbc.fix_config(g, (int)compbit_base_post_divide);
 }

 gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
@@ -1,7 +1,7 @@
 /*
 * GK20A Graphics Copy Engine (gr host)
 *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -43,6 +43,9 @@ void gk20a_ce2_stall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 u32 ce2_intr = nvgpu_readl(g, ce2_intr_status_r());
 u32 clear_intr = 0U;

+ (void)inst_id;
+ (void)pri_base;
+
 nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x", ce2_intr);

 /* clear blocking interrupts: they exibit broken behavior */
@@ -64,6 +67,9 @@ u32 gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 u32 ops = 0U;
 u32 ce2_intr = nvgpu_readl(g, ce2_intr_status_r());

+ (void)inst_id;
+ (void)pri_base;
+
 nvgpu_log(g, gpu_dbg_intr, "ce2 nonstall isr %08x", ce2_intr);

 if ((ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) != 0U) {
@@ -1,7 +1,7 @@
 /*
 * Pascal GPU series Copy Engine.
 *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -37,6 +37,8 @@ void gp10b_ce_stall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 u32 ce_intr = nvgpu_readl(g, ce_intr_status_r(inst_id));
 u32 clear_intr = 0U;

+ (void)pri_base;
+
 nvgpu_log(g, gpu_dbg_intr, "ce isr %08x %08x", ce_intr, inst_id);

 /* clear blocking interrupts: they exibit broken behavior */
@@ -63,6 +65,8 @@ u32 gp10b_ce_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 u32 nonstall_ops = 0U;
 u32 ce_intr = nvgpu_readl(g, ce_intr_status_r(inst_id));

+ (void)pri_base;
+
 nvgpu_log(g, gpu_dbg_intr, "ce nonstall isr %08x %08x",
 ce_intr, inst_id);
@@ -1,7 +1,7 @@
 /*
 * GM20B Clocks
 *
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -68,6 +68,7 @@ static struct pll_parms gpc_pll_params_b1 = {
 500, /* Locking and ramping timeout */
 40, /* Lock delay in NA mode */
 5, /* IDDQ mode exit delay */
+ 0, /* DFS control settings */
 };

 static struct pll_parms gpc_pll_params_c1 = {
@@ -216,7 +217,7 @@ static void clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
 lwv = (vco_f + (nvgpu_pl_to_div(pl) / 2U))
 / nvgpu_pl_to_div(pl);
- delta = abs(S32(lwv) -
+ delta = (u32)abs(S32(lwv) -
 S32(target_clk_f));

 if (delta < best_delta) {
@@ -272,13 +273,13 @@ static inline u32 fuse_get_gpcpll_adc_rev(u32 val)
 static inline int fuse_get_gpcpll_adc_slope_uv(u32 val)
 {
 /* Integer part in mV * 1000 + fractional part in uV */
- return ((val >> 24) & 0x3fU) * 1000U + ((val >> 14) & 0x3ffU);
+ return (int)(((val >> 24) & 0x3fU) * 1000U + ((val >> 14) & 0x3ffU));
 }

 static inline int fuse_get_gpcpll_adc_intercept_uv(u32 val)
 {
 /* Integer part in mV * 1000 + fractional part in 100uV */
- return ((val >> 4) & 0x3ffU) * 1000U + ((val >> 0) & 0xfU) * 100U;
+ return (int)(((val >> 4) & 0x3ffU) * 1000U + ((val >> 0) & 0xfU) * 100U);
 }

 static int nvgpu_fuse_calib_gpcpll_get_adc(struct gk20a *g,
@@ -338,10 +339,11 @@ static void clk_config_dvfs_detection(int mv, struct na_dvfs *d)

 coeff_max = trim_sys_gpcpll_dvfs0_dfs_coeff_v(
 trim_sys_gpcpll_dvfs0_dfs_coeff_m());
- coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
- coeff = DIV_ROUND_CLOSEST(coeff, 1000);
+ coeff = (u32)(DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) +
+ p->coeff_offs);
+ coeff = DIV_ROUND_CLOSEST(coeff, 1000U);
 coeff = min(coeff, coeff_max);
- d->dfs_coeff = coeff;
+ d->dfs_coeff = (int)coeff;

 d->dfs_ext_cal = DIV_ROUND_CLOSEST(mv * 1000 - p->uvdet_offs,
 p->uvdet_slope);
@@ -473,14 +475,14 @@ static void clk_setup_dvfs_detection(struct gk20a *g, struct pll *gpll)
 data &= ~DFS_EXT_STROBE;
 gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

- clk_set_dfs_ext_cal(g, d->dfs_ext_cal);
+ clk_set_dfs_ext_cal(g, (u32)d->dfs_ext_cal);
 }

 /* Enable NA/DVFS mode */
 static int clk_enbale_pll_dvfs(struct gk20a *g)
 {
 u32 data, cfg = 0;
- int delay = gpc_pll_params.iddq_exit_delay; /* iddq & calib delay */
+ u32 delay = gpc_pll_params.iddq_exit_delay; /* iddq & calib delay */
 struct pll_parms *p = &gpc_pll_params;
 bool calibrated = (p->uvdet_slope != 0) && (p->uvdet_offs != 0);

@@ -605,6 +607,8 @@ static void clk_setup_slide(struct gk20a *g, u32 clk_u)
 default:
 nvgpu_err(g, "Unexpected reference rate %u kHz", clk_u);
 BUG();
+ step_a = 0U;
+ step_b = 0U;
 break;
 }

@@ -623,7 +627,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
 {
 u32 data, coeff;
 u32 nold, sdm_old;
- int ramp_timeout = gpc_pll_params.lock_timeout;
+ int ramp_timeout = (int)gpc_pll_params.lock_timeout;

 /* get old coefficients */
 coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
@@ -1114,8 +1118,8 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 * end-points.
 */
 clk_set_dfs_coeff(g, 0);
- clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal);
- clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff);
+ clk_set_dfs_ext_cal(g, (u32)gpll_new->dvfs.dfs_ext_cal);
+ clk_set_dfs_coeff(g, (u32)gpll_new->dvfs.dfs_coeff);

 gk20a_dbg_clk(g, "config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
 gpll_new->freq, gpll_new->M, gpll_new->N, gpll_new->PL,
@@ -1317,6 +1321,7 @@ int gm20b_clk_is_prepared(struct clk_gk20a *clk)

 unsigned long gm20b_recalc_rate(struct clk_gk20a *clk, unsigned long parent_rate)
 {
+ (void)parent_rate;
 return rate_gpc2clk_to_gpu(clk->gpc_pll.freq);
 }

@@ -1326,9 +1331,11 @@ int gm20b_gpcclk_set_rate(struct clk_gk20a *clk, unsigned long rate,
 u32 old_freq;
 int ret = -ENODATA;

+ (void)parent_rate;
+
 nvgpu_mutex_acquire(&clk->clk_mutex);
 old_freq = clk->gpc_pll.freq;
- ret = set_pll_target(clk->g, rate_gpu_to_gpc2clk(rate), old_freq);
+ ret = set_pll_target(clk->g, (u32)rate_gpu_to_gpc2clk(rate), old_freq);
 if ((ret == 0) && clk->gpc_pll.enabled && clk->clk_hw_on) {
 ret = set_pll_freq(clk->g, true);
 }
@@ -1345,13 +1352,15 @@ long gm20b_round_rate(struct clk_gk20a *clk, unsigned long rate,
 unsigned long maxrate;
 struct gk20a *g = clk->g;

+ (void)parent_rate;
+
 maxrate = g->ops.clk.get_maxrate(g, CTRL_CLK_DOMAIN_GPCCLK);
 if (rate > maxrate) {
 rate = maxrate;
 }

 nvgpu_mutex_acquire(&clk->clk_mutex);
- freq = rate_gpu_to_gpc2clk(rate);
+ freq = (u32)rate_gpu_to_gpc2clk(rate);
 if (freq > gpc_pll_params.max_freq) {
 freq = gpc_pll_params.max_freq;
 } else if (freq < gpc_pll_params.min_freq) {
@@ -1364,7 +1373,7 @@ long gm20b_round_rate(struct clk_gk20a *clk, unsigned long rate,
 clk_config_pll(clk, &tmp_pll, &gpc_pll_params, &freq, true);
 nvgpu_mutex_release(&clk->clk_mutex);

- return rate_gpc2clk_to_gpu(tmp_pll.freq);
+ return (long)rate_gpc2clk_to_gpu(tmp_pll.freq);
 }

 static int gm20b_init_clk_setup_hw(struct gk20a *g)
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GM20B GPC MMU
|
||||
*
|
||||
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -75,16 +75,19 @@ bool gm20b_fb_set_use_full_comp_tag_line(struct gk20a *g)
|
||||
|
||||
u64 gm20b_fb_compression_page_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return SZ_128K;
|
||||
}
|
||||
|
||||
unsigned int gm20b_fb_compressible_page_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return (unsigned int)SZ_64K;
|
||||
}
|
||||
|
||||
u64 gm20b_fb_compression_align_mask(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return SZ_64K - 1UL;
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GP10B FB
|
||||
*
|
||||
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -30,11 +30,13 @@
|
||||
#ifdef CONFIG_NVGPU_COMPRESSION
|
||||
u64 gp10b_fb_compression_page_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return SZ_64K;
|
||||
}
|
||||
|
||||
unsigned int gp10b_fb_compressible_page_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return (unsigned int)SZ_4K;
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GV11B FB
|
||||
*
|
||||
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -83,7 +83,7 @@ void gv11b_fb_cbc_configure(struct gk20a *g, struct nvgpu_cbc *cbc)
|
||||
|
||||
if (g->ops.cbc.fix_config != NULL) {
|
||||
compbit_base_post_divide =
|
||||
g->ops.cbc.fix_config(g, compbit_base_post_divide);
|
||||
g->ops.cbc.fix_config(g, (int)compbit_base_post_divide);
|
||||
}
|
||||
|
||||
nvgpu_writel(g, fb_mmu_cbc_base_r(),
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GV11B FB
|
||||
*
|
||||
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -48,6 +48,7 @@
|
||||
|
||||
static void gv11b_init_nvlink_soc_credits(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
#ifndef __NVGPU_POSIX__
|
||||
if (nvgpu_platform_is_silicon(g)) {
|
||||
nvgpu_log(g, gpu_dbg_info, "nvlink soc credits init done by bpmp");
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -39,6 +39,7 @@
|
||||
#include <nvgpu/nvgpu_err.h>
|
||||
#include <nvgpu/ltc.h>
|
||||
#include <nvgpu/rc.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include "hal/fb/fb_mmu_fault_gv11b.h"
|
||||
#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GV11B ECC INTR
|
||||
*
|
||||
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -38,6 +38,7 @@ void gv11b_fb_intr_inject_hubmmu_ecc_error(struct gk20a *g,
|
||||
{
|
||||
unsigned int reg_addr = err->get_reg_addr();
|
||||
|
||||
(void)error_info;
|
||||
nvgpu_info(g, "Injecting HUBMMU fault %s", err->name);
|
||||
nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
|
||||
}
|
||||
@@ -92,6 +93,8 @@ static struct nvgpu_hw_err_inject_info_desc hubmmu_err_desc;
|
||||
struct nvgpu_hw_err_inject_info_desc *
|
||||
gv11b_fb_intr_get_hubmmu_err_desc(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
hubmmu_err_desc.info_ptr = hubmmu_ecc_err_desc;
|
||||
hubmmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
|
||||
sizeof(hubmmu_ecc_err_desc) /
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GV11B FB
|
||||
*
|
||||
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -75,6 +75,8 @@ void gv11b_fb_intr_isr(struct gk20a *g, u32 intr_unit_bitmask)
|
||||
{
|
||||
u32 niso_intr;
|
||||
|
||||
(void)intr_unit_bitmask;
|
||||
|
||||
nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
|
||||
|
||||
niso_intr = nvgpu_readl(g, fb_niso_intr_r());
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -35,5 +35,6 @@
|
||||
|
||||
u32 gm20b_channel_count(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return ccsr_channel__size_1_v();
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -52,6 +52,7 @@ void gv11b_channel_unbind(struct nvgpu_channel *ch)
|
||||
|
||||
u32 gv11b_channel_count(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return ccsr_channel__size_1_v();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <nvgpu/engine_status.h>
|
||||
#include <nvgpu/engines.h>
|
||||
#include <nvgpu/fifo.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -32,6 +32,7 @@
|
||||
|
||||
bool gm20b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
|
||||
{
|
||||
(void)g;
|
||||
return (engine_subid == fifo_intr_mmu_fault_info_engine_subid_gpc_v());
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -26,5 +26,6 @@
|
||||
|
||||
bool gv11b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
|
||||
{
|
||||
(void)g;
|
||||
return (engine_subid == gmmu_fault_client_type_gpc_v());
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
 /*
 * GK20A Graphics FIFO (gr host)
 *
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -54,12 +54,15 @@ bool gk20a_fifo_find_pbdma_for_runlist(struct gk20a *g,

 u32 gk20a_fifo_get_runlist_timeslice(struct gk20a *g)
 {
+ (void)g;
 return fifo_runlist_timeslice_timeout_128_f() |
 fifo_runlist_timeslice_timescale_3_f() |
 fifo_runlist_timeslice_enable_true_f();
 }

- u32 gk20a_fifo_get_pb_timeslice(struct gk20a *g) {
+ u32 gk20a_fifo_get_pb_timeslice(struct gk20a *g)
+ {
+ (void)g;
 return fifo_pb_timeslice_timeout_16_f() |
 fifo_pb_timeslice_timescale_0_f() |
 fifo_pb_timeslice_enable_true_f();
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -273,6 +273,8 @@ void ga10b_fifo_intr_0_enable(struct gk20a *g, bool enable)
|
||||
|
||||
void ga10b_fifo_intr_1_enable(struct gk20a *g, bool enable)
|
||||
{
|
||||
(void)g;
|
||||
(void)enable;
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -50,6 +50,7 @@ static u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g)
|
||||
fifo_intr_0_lb_error_pending_f() |
|
||||
fifo_intr_0_pio_error_pending_f();
|
||||
|
||||
(void)g;
|
||||
return intr_0_error_mask;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -86,6 +86,7 @@ static u32 gv11b_fifo_intr_0_en_mask(struct gk20a *g)
|
||||
{
|
||||
u32 intr_0_en_mask = fifo_intr_0_err_mask();
|
||||
|
||||
(void)g;
|
||||
intr_0_en_mask |= fifo_intr_0_pbdma_intr_pending_f() |
|
||||
fifo_intr_0_ctxsw_timeout_pending_f();
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -36,6 +36,7 @@
|
||||
#include <nvgpu/gr/fecs_trace.h>
|
||||
#include <nvgpu/channel.h>
|
||||
#include <nvgpu/tsg.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include <hal/fifo/mmu_fault_gk20a.h>
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <nvgpu/io.h>
|
||||
#include <nvgpu/fifo.h>
|
||||
#include <nvgpu/engines.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include <hal/fifo/mmu_fault_gp10b.h>
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -522,6 +522,8 @@ bool ga10b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_1,
|
||||
|
||||
u32 pbdma_intr_1_current = nvgpu_readl(g, pbdma_intr_1_r(pbdma_id));
|
||||
|
||||
(void)error_notifier;
|
||||
|
||||
/* minimize race with the gpu clearing the pending interrupt */
|
||||
if ((pbdma_intr_1_current &
|
||||
pbdma_intr_1_ctxnotvalid_pending_f()) == 0U) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -79,6 +79,8 @@ bool gm20b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_1,
|
||||
u32 *error_notifier)
|
||||
{
|
||||
bool recover = true;
|
||||
|
||||
(void)error_notifier;
|
||||
/*
|
||||
* all of the interrupts in _intr_1 are "host copy engine"
|
||||
* related, which is not supported. For now just make them
|
||||
@@ -92,6 +94,7 @@ bool gm20b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_1,
|
||||
|
||||
u32 gm20b_pbdma_get_signature(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -282,6 +282,7 @@ void gm20b_pbdma_format_gpfifo_entry(struct gk20a *g,
|
||||
struct nvgpu_gpfifo_entry *gpfifo_entry,
|
||||
u64 pb_gpu_va, u32 method_size)
|
||||
{
|
||||
(void)g;
|
||||
gpfifo_entry->entry0 = u64_lo32(pb_gpu_va);
|
||||
gpfifo_entry->entry1 = u64_hi32(pb_gpu_va) |
|
||||
pbdma_gp_entry1_length_f(method_size);
|
||||
@@ -388,6 +389,7 @@ u32 gm20b_pbdma_get_fc_subdevice(void)
|
||||
|
||||
u32 gm20b_pbdma_get_fc_target(const struct nvgpu_device *dev)
|
||||
{
|
||||
(void)dev;
|
||||
return pbdma_target_engine_sw_f();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -177,6 +177,7 @@ bool gv11b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_1,
|
||||
|
||||
u32 pbdma_intr_1_current = gk20a_readl(g, pbdma_intr_1_r(pbdma_id));
|
||||
|
||||
(void)error_notifier;
|
||||
/* minimize race with the gpu clearing the pending interrupt */
|
||||
if ((pbdma_intr_1_current &
|
||||
pbdma_intr_1_ctxnotvalid_pending_f()) == 0U) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -23,6 +23,7 @@
|
||||
#include <nvgpu/io.h>
|
||||
#include <nvgpu/gk20a.h>
|
||||
#include <nvgpu/pbdma_status.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -72,6 +72,8 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
|
||||
u32 delay = POLL_DELAY_MIN_US;
|
||||
int ret;
|
||||
|
||||
(void)preempt_retries_left;
|
||||
|
||||
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_preempt_get_timeout(g));
|
||||
|
||||
ret = -EBUSY;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -162,6 +162,8 @@ static int gv11b_fifo_check_eng_intr_pending(struct gk20a *g, u32 id,
|
||||
bool check_preempt_retry = false;
|
||||
int ret = -EBUSY;
|
||||
|
||||
(void)g;
|
||||
|
||||
if (engine_status->ctxsw_status == NVGPU_CTX_STATUS_CTXSW_SWITCH) {
|
||||
/* Eng save hasn't started yet. Continue polling */
|
||||
if (eng_intr_pending != 0U) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -46,6 +46,8 @@ int ga10b_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
|
||||
u32 eng_bitmask = 0U;
|
||||
bool replayable = false;
|
||||
|
||||
(void)flags;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
/*
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -63,6 +63,8 @@ int gk20a_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
|
||||
struct gk20a *g = ch->g;
|
||||
struct nvgpu_mem *mem = &ch->inst_block;
|
||||
|
||||
(void)flags;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -39,6 +39,8 @@ int gp10b_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
|
||||
struct gk20a *g = ch->g;
|
||||
struct nvgpu_mem *mem = &ch->inst_block;
|
||||
|
||||
(void)flags;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -40,6 +40,8 @@ int gv11b_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
|
||||
u32 data;
|
||||
bool replayable = false;
|
||||
|
||||
(void)flags;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GA10B Runlist
|
||||
*
|
||||
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -39,12 +39,14 @@
|
||||
|
||||
u32 ga10b_runlist_count_max(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
/* TODO Needs to be read from litter values */
|
||||
return 4U;
|
||||
}
|
||||
|
||||
u32 ga10b_runlist_length_max(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return runlist_submit_length_max_v();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -38,6 +38,7 @@
|
||||
|
||||
u32 gk20a_runlist_count_max(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return fifo_eng_runlist_base__size_1_v();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -35,6 +35,7 @@
|
||||
|
||||
u32 gk20a_runlist_length_max(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return fifo_eng_runlist_length_max_v();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -29,6 +29,7 @@
|
||||
#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
|
||||
int gv11b_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next)
|
||||
{
|
||||
(void)preempt_next;
|
||||
/*
|
||||
* gv11b allows multiple outstanding preempts,
|
||||
* so always preempt next for best reschedule effect
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -29,5 +29,6 @@ struct gk20a;
|
||||
|
||||
u32 gv11b_runlist_count_max(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return fifo_eng_runlist_base__size_1_v();
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -40,6 +40,7 @@
|
||||
|
||||
u32 gk20a_runlist_entry_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return ram_rl_entry_size_v();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -34,6 +34,7 @@
|
||||
|
||||
u32 gv11b_runlist_entry_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return ram_rl_entry_size_v();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GK20A USERD
|
||||
*
|
||||
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -89,5 +89,6 @@ void gk20a_userd_gp_put(struct gk20a *g, struct nvgpu_channel *c)
|
||||
|
||||
u32 gk20a_userd_entry_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return BIT32(ram_userd_base_shift_v());
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -33,11 +33,13 @@
|
||||
|
||||
u64 gv11b_usermode_base(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return usermode_cfg0_r();
|
||||
}
|
||||
|
||||
u64 gv11b_usermode_bus_base(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return usermode_cfg0_r();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -36,11 +36,13 @@
|
||||
|
||||
u64 tu104_usermode_base(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return func_cfg0_r();
|
||||
}
|
||||
|
||||
u64 tu104_usermode_bus_base(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return U64(func_full_phys_offset_v() + func_cfg0_r());
|
||||
}
|
||||
|
||||
|
||||
@@ -29,5 +29,6 @@
|
||||
|
||||
u32 ga10b_func_get_full_phys_offset(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return func_full_phys_offset_v();
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
 /*
 * GA10B FUSE
 *
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -40,7 +40,7 @@
 int ga10b_fuse_read_gcplex_config_fuse(struct gk20a *g, u32 *val)
 {
 u32 reg_val = 0U;
- int fuse_val = 0;
+ u32 fuse_val = 0U;

 /*
 * SOC FUSE_GCPLEX_CONFIG_FUSE_0 bit(2) mapped to
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -40,6 +40,8 @@ int gm20b_gr_config_init_sm_id_table(struct gk20a *g,
|
||||
nvgpu_gr_config_get_sm_count_per_tpc(gr_config));
|
||||
nvgpu_gr_config_set_no_of_sm(gr_config, num_sm);
|
||||
|
||||
(void)g;
|
||||
|
||||
for (tpc = 0;
|
||||
tpc < nvgpu_gr_config_get_max_tpc_per_gpc_count(gr_config);
|
||||
tpc++) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -47,6 +47,8 @@ u32 gm20b_gr_config_get_tpc_count_in_gpc(struct gk20a *g,
|
||||
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
|
||||
u32 tmp, tmp1, tmp2;
|
||||
|
||||
(void)config;
|
||||
|
||||
tmp1 = nvgpu_safe_mult_u32(gpc_stride, gpc_index);
|
||||
tmp2 = nvgpu_safe_add_u32(gr_gpc0_fs_gpc_r(), tmp1);
|
||||
tmp = nvgpu_readl(g, tmp2);
|
||||
@@ -60,6 +62,8 @@ u32 gm20b_gr_config_get_pes_tpc_mask(struct gk20a *g,
|
||||
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
|
||||
u32 tmp, tmp1, tmp2;
|
||||
|
||||
(void)config;
|
||||
|
||||
tmp1 = nvgpu_safe_mult_u32(gpc_index, gpc_stride);
|
||||
tmp2 = nvgpu_safe_add_u32(gr_gpc0_gpm_pd_pes_tpc_id_mask_r(pes_index),
|
||||
tmp1);
|
||||
@@ -97,6 +101,8 @@ u32 gm20b_gr_config_get_zcull_count_in_gpc(struct gk20a *g,
|
||||
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
|
||||
u32 tmp, tmp1, tmp2;
|
||||
|
||||
(void)config;
|
||||
|
||||
tmp1 = nvgpu_safe_mult_u32(gpc_stride, gpc_index);
|
||||
tmp2 = nvgpu_safe_add_u32(gr_gpc0_fs_gpc_r(), tmp1);
|
||||
tmp = nvgpu_readl(g, tmp2);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -133,6 +133,8 @@ static int gr_gv100_remove_logical_tpc(struct nvgpu_gr_config *gr_config,
|
||||
int err = 0;
|
||||
u32 num_tpc_mask = gpc_tpc_mask[gpc_id];
|
||||
|
||||
(void)gr_config;
|
||||
|
||||
if ((gpc_id == disable_gpc_id) &&
|
||||
((num_tpc_mask & BIT32(disable_tpc_id)) != 0U)) {
|
||||
/* Safety check if a TPC is removed twice */
|
||||
@@ -173,6 +175,8 @@ static int gr_gr100_find_perf_reduction_rate_gpc(struct gk20a *g,
|
||||
bool is_tpc_removed_pes = false;
|
||||
u32 tpc_cnt = 0U;
|
||||
|
||||
(void)g;
|
||||
|
||||
for (gpc_id = 0;
|
||||
gpc_id < nvgpu_gr_config_get_gpc_count(gr_config);
|
||||
gpc_id++) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -32,6 +32,7 @@
|
||||
void gv11b_gr_intr_inject_fecs_ecc_error(struct gk20a *g,
|
||||
struct nvgpu_hw_err_inject_info *err, u32 error_info)
|
||||
{
|
||||
(void)error_info;
|
||||
nvgpu_info(g, "Injecting FECS fault %s", err->name);
|
||||
nvgpu_writel(g, err->get_reg_addr(), err->get_reg_val(1U));
|
||||
}
|
||||
@@ -123,6 +124,7 @@ static struct nvgpu_hw_err_inject_info_desc fecs_err_desc;
|
||||
struct nvgpu_hw_err_inject_info_desc *
|
||||
gv11b_gr_intr_get_fecs_err_desc(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
fecs_err_desc.info_ptr = fecs_ecc_err_desc;
|
||||
fecs_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
|
||||
sizeof(fecs_ecc_err_desc) /
|
||||
@@ -162,6 +164,7 @@ static struct nvgpu_hw_err_inject_info_desc gpccs_err_desc;
|
||||
struct nvgpu_hw_err_inject_info_desc *
|
||||
gv11b_gr_intr_get_gpccs_err_desc(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
gpccs_err_desc.info_ptr = gpccs_ecc_err_desc;
|
||||
gpccs_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
|
||||
sizeof(gpccs_ecc_err_desc) /
|
||||
@@ -277,6 +280,7 @@ static struct nvgpu_hw_err_inject_info_desc sm_err_desc;
|
||||
struct nvgpu_hw_err_inject_info_desc *
|
||||
gv11b_gr_intr_get_sm_err_desc(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
sm_err_desc.info_ptr = sm_ecc_err_desc;
|
||||
sm_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
|
||||
sizeof(sm_ecc_err_desc) /
|
||||
@@ -297,6 +301,7 @@ static struct nvgpu_hw_err_inject_info_desc mmu_err_desc;
|
||||
struct nvgpu_hw_err_inject_info_desc *
|
||||
gv11b_gr_intr_get_mmu_err_desc(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
mmu_err_desc.info_ptr = mmu_ecc_err_desc;
|
||||
mmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
|
||||
sizeof(mmu_ecc_err_desc) /
|
||||
@@ -317,6 +322,7 @@ static struct nvgpu_hw_err_inject_info_desc gcc_err_desc;
|
||||
struct nvgpu_hw_err_inject_info_desc *
|
||||
gv11b_gr_intr_get_gcc_err_desc(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
gcc_err_desc.info_ptr = gcc_ecc_err_desc;
|
||||
gcc_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
|
||||
sizeof(gcc_ecc_err_desc) /
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -30,6 +30,7 @@
|
||||
#include <nvgpu/gr/gr_utils.h>
|
||||
#include <nvgpu/gr/config.h>
|
||||
#include <nvgpu/pmu/clk/clk.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include "gr_falcon_gm20b.h"
|
||||
#include "common/gr/gr_falcon_priv.h"
|
||||
@@ -325,6 +326,7 @@ static bool gm20b_gr_falcon_gr_opcode_less(u32 opc_status, bool is_fail,
|
||||
static void gm20b_gr_falcon_gr_opcode_less_equal(u32 opc_status, bool is_fail,
|
||||
u32 mailbox_status, u32 reg, enum wait_ucode_status *check)
|
||||
{
|
||||
(void)opc_status;
|
||||
if (reg <= mailbox_status) {
|
||||
if (is_fail) {
|
||||
*check = WAIT_UCODE_ERROR;
|
||||
|
||||
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -48,20 +48,20 @@ int gm20b_fecs_trace_flush(struct gk20a *g)
 int gm20b_fecs_trace_get_read_index(struct gk20a *g)
 {
 return nvgpu_pg_elpg_protected_call(g,
- nvgpu_readl(g, gr_fecs_mailbox1_r()));
+ (int)nvgpu_readl(g, gr_fecs_mailbox1_r()));
 }

 int gm20b_fecs_trace_get_write_index(struct gk20a *g)
 {
 return nvgpu_pg_elpg_protected_call(g,
- nvgpu_readl(g, gr_fecs_mailbox0_r()));
+ (int)nvgpu_readl(g, gr_fecs_mailbox0_r()));
 }

 int gm20b_fecs_trace_set_read_index(struct gk20a *g, int index)
 {
 nvgpu_log(g, gpu_dbg_ctxsw, "set read=%d", index);
 return nvgpu_pg_elpg_protected_call(g,
- (nvgpu_writel(g, gr_fecs_mailbox1_r(), index), 0));
+ (nvgpu_writel(g, gr_fecs_mailbox1_r(), (u32)index), 0));
 }

 u32 gm20b_fecs_trace_get_buffer_full_mailbox_val(void)
|
||||
/*
|
||||
* GK20A Graphics
|
||||
*
|
||||
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -490,7 +490,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
|
||||
priv_registers[i],
|
||||
nvgpu_gr_obj_ctx_get_local_golden_image_ptr(
|
||||
gr->golden_image),
|
||||
nvgpu_gr_obj_ctx_get_golden_image_size(
|
||||
(u32)nvgpu_gr_obj_ctx_get_golden_image_size(
|
||||
gr->golden_image),
|
||||
&priv_offset);
|
||||
if (err != 0) {
|
||||
@@ -680,6 +680,7 @@ int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
|
||||
void gk20a_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
|
||||
u32 **ovr_perf_regs)
|
||||
{
|
||||
(void)g;
|
||||
*num_ovr_perf_regs = _num_ovr_perf_regs;
|
||||
*ovr_perf_regs = _ovr_perf_regs;
|
||||
}
|
||||
@@ -717,6 +718,8 @@ int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
|
||||
u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
|
||||
u32 tpc_gpc_mask = (tpc_in_gpc_stride - 1U);
|
||||
|
||||
(void)context_buffer_size;
|
||||
|
||||
/* Only have TPC registers in extended region, so if not a TPC reg,
|
||||
then return error so caller can look elsewhere. */
|
||||
if (pri_is_gpc_addr(g, addr)) {
|
||||
@@ -921,6 +924,8 @@ gr_gk20a_process_context_buffer_priv_segment(struct gk20a *g,
|
||||
u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE);
|
||||
u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
|
||||
|
||||
(void)ppc_mask;
|
||||
|
||||
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);
|
||||
|
||||
if (!g->netlist_valid) {
|
||||
@@ -1833,6 +1838,9 @@ void gk20a_gr_resume_single_sm(struct gk20a *g,
|
||||
{
|
||||
u32 dbgr_control0;
|
||||
u32 offset;
|
||||
|
||||
(void)sm;
|
||||
|
||||
/*
|
||||
* The following requires some clarification. Despite the fact that both
|
||||
* RUN_TRIGGER and STOP_TRIGGER have the word "TRIGGER" in their
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GM20B GPC MMU
|
||||
*
|
||||
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -39,6 +39,7 @@
|
||||
#include <nvgpu/gr/warpstate.h>
|
||||
#include <nvgpu/engines.h>
|
||||
#include <nvgpu/engine_status.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include "gr_gk20a.h"
|
||||
#include "gr_gm20b.h"
|
||||
@@ -198,6 +199,7 @@ void gr_gm20b_get_sm_dsm_perf_regs(struct gk20a *g,
|
||||
u32 **sm_dsm_perf_regs,
|
||||
u32 *perf_register_stride)
|
||||
{
|
||||
(void)g;
|
||||
*num_sm_dsm_perf_regs = _num_sm_dsm_perf_regs;
|
||||
*sm_dsm_perf_regs = _sm_dsm_perf_regs;
|
||||
*perf_register_stride = 0;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GP10B GPU GR
|
||||
*
|
||||
* Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -608,6 +608,8 @@ u32 gp10b_gr_get_sm_hww_warp_esr(struct gk20a *g,
|
||||
u32 hww_warp_esr = gk20a_readl(g,
|
||||
gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
|
||||
|
||||
(void)sm;
|
||||
|
||||
if ((hww_warp_esr & gr_gpc0_tpc0_sm_hww_warp_esr_addr_valid_m()) == 0U) {
|
||||
hww_warp_esr = set_field(hww_warp_esr,
|
||||
gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_m(),
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GV100 GPU GR
|
||||
*
|
||||
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -42,6 +42,8 @@
|
||||
#ifdef CONFIG_NVGPU_TEGRA_FUSE
|
||||
void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
|
||||
{
|
||||
(void)g;
|
||||
(void)gpc_index;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GV11b GPU GR
|
||||
*
|
||||
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -44,6 +44,7 @@
|
||||
#include <nvgpu/engine_status.h>
|
||||
#include <nvgpu/fbp.h>
|
||||
#include <nvgpu/nvgpu_err.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include "gr_pri_gk20a.h"
|
||||
#include "gr_pri_gv11b.h"
|
||||
@@ -1439,6 +1440,7 @@ void gv11b_gr_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
|
||||
void gv11b_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
|
||||
u32 **ovr_perf_regs)
|
||||
{
|
||||
(void)g;
|
||||
*num_ovr_perf_regs = _num_ovr_perf_regs;
|
||||
*ovr_perf_regs = _ovr_perf_regs;
|
||||
}
|
||||
@@ -1730,6 +1732,7 @@ void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr,
|
||||
|
||||
u32 gv11b_gr_get_egpc_base(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return EGPC_PRI_BASE;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -107,6 +107,7 @@ void gr_tu104_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
|
||||
u32 **sm_dsm_perf_ctrl_regs,
|
||||
u32 *ctrl_register_stride)
|
||||
{
|
||||
(void)g;
|
||||
*num_sm_dsm_perf_ctrl_regs = 0;
|
||||
*sm_dsm_perf_ctrl_regs = NULL;
|
||||
*ctrl_register_stride = 0;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -31,16 +31,19 @@
|
||||
#ifdef CONFIG_NVGPU_GRAPHICS
|
||||
u32 ga100_gr_init_get_attrib_cb_gfxp_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
|
||||
}
|
||||
|
||||
u32 ga100_gr_init_get_attrib_cb_gfxp_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
|
||||
}
|
||||
|
||||
u32 ga100_gr_init_get_ctx_spill_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return nvgpu_safe_mult_u32(
|
||||
gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(),
|
||||
gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v());
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -35,16 +35,19 @@
|
||||
#ifdef CONFIG_NVGPU_GRAPHICS
|
||||
u32 ga10b_gr_init_get_attrib_cb_gfxp_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
|
||||
}
|
||||
|
||||
u32 ga10b_gr_init_get_attrib_cb_gfxp_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
|
||||
}
|
||||
|
||||
u32 ga10b_gr_init_get_ctx_spill_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return nvgpu_safe_mult_u32(
|
||||
gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(),
|
||||
gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v());
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -240,16 +240,19 @@ void ga10b_gr_init_commit_global_bundle_cb(struct gk20a *g,
|
||||
|
||||
u32 ga10b_gr_init_get_min_gpm_fifo_depth(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v();
|
||||
}
|
||||
|
||||
u32 ga10b_gr_init_get_bundle_cb_token_limit(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_pd_ab_dist_cfg2_token_limit_init_v();
|
||||
}
|
||||
|
||||
u32 ga10b_gr_init_get_attrib_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -119,6 +119,7 @@ void gm20b_gr_init_get_access_map(struct gk20a *g,
|
||||
};
|
||||
size_t array_size;
|
||||
|
||||
(void)g;
|
||||
*whitelist = wl_addr_gm20b;
|
||||
array_size = ARRAY_SIZE(wl_addr_gm20b);
|
||||
*num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
|
||||
@@ -137,6 +138,10 @@ void gm20b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
|
||||
u32 tpc_offset = nvgpu_safe_mult_u32(tpc_in_gpc_stride, tpc);
|
||||
u32 offset_sum = nvgpu_safe_add_u32(gpc_offset, tpc_offset);
|
||||
|
||||
(void)gr_config;
|
||||
(void)gr_ctx;
|
||||
(void)patch;
|
||||
|
||||
nvgpu_writel(g,
|
||||
nvgpu_safe_add_u32(gr_gpc0_tpc0_sm_cfg_r(), offset_sum),
|
||||
gr_gpc0_tpc0_sm_cfg_sm_id_f(smid));
|
||||
@@ -165,6 +170,9 @@ int gm20b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
|
||||
u32 tpc_cnt = nvgpu_safe_sub_u32(
|
||||
nvgpu_gr_config_get_tpc_count(gr_config), 1U);
|
||||
|
||||
(void)gr_ctx;
|
||||
(void)patch;
|
||||
|
||||
/* Each NV_PGRAPH_PRI_CWD_GPC_TPC_ID can store 4 TPCs.*/
|
||||
for (i = 0U;
|
||||
i <= (tpc_cnt / 4U);
|
||||
@@ -211,6 +219,7 @@ int gm20b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
|
||||
|
||||
void gm20b_gr_init_tpc_mask(struct gk20a *g, u32 gpc_index, u32 pes_tpc_mask)
|
||||
{
|
||||
(void)gpc_index;
|
||||
nvgpu_writel(g, gr_fe_tpc_fs_r(), pes_tpc_mask);
|
||||
}
|
||||
|
||||
@@ -508,31 +517,37 @@ void gm20b_gr_init_commit_global_timeslice(struct gk20a *g)
|
||||
|
||||
u32 gm20b_gr_init_get_bundle_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_scc_bundle_cb_size_div_256b__prod_v();
|
||||
}
|
||||
|
||||
u32 gm20b_gr_init_get_min_gpm_fifo_depth(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v();
|
||||
}
|
||||
|
||||
u32 gm20b_gr_init_get_bundle_cb_token_limit(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_pd_ab_dist_cfg2_token_limit_init_v();
|
||||
}
|
||||
|
||||
u32 gm20b_gr_init_get_attrib_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
|
||||
}
|
||||
|
||||
u32 gm20b_gr_init_get_alpha_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
|
||||
}
|
||||
|
||||
u32 gm20b_gr_init_get_attrib_cb_size(struct gk20a *g, u32 tpc_count)
|
||||
{
|
||||
(void)tpc_count;
|
||||
return nvgpu_safe_add_u32(
|
||||
g->ops.gr.init.get_attrib_cb_default_size(g),
|
||||
(g->ops.gr.init.get_attrib_cb_default_size(g) >> 1));
|
||||
@@ -540,6 +555,7 @@ u32 gm20b_gr_init_get_attrib_cb_size(struct gk20a *g, u32 tpc_count)
|
||||
|
||||
u32 gm20b_gr_init_get_alpha_cb_size(struct gk20a *g, u32 tpc_count)
|
||||
{
|
||||
(void)tpc_count;
|
||||
return nvgpu_safe_add_u32(
|
||||
g->ops.gr.init.get_alpha_cb_default_size(g),
|
||||
(g->ops.gr.init.get_alpha_cb_default_size(g) >> 1));
|
||||
@@ -611,6 +627,7 @@ void gm20b_gr_init_commit_global_bundle_cb(struct gk20a *g,
|
||||
|
||||
u32 gm20b_gr_init_pagepool_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_scc_pagepool_total_pages_hwmax_value_v();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -407,6 +407,9 @@ void gm20b_gr_init_commit_global_attrib_cb(struct gk20a *g,
|
||||
{
|
||||
u32 cb_addr;
|
||||
|
||||
(void)tpc_count;
|
||||
(void)max_tpc;
|
||||
|
||||
addr = addr >> gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v();
|
||||
|
||||
nvgpu_log_info(g, "attrib cb addr : 0x%016llx", addr);
|
||||
@@ -428,6 +431,8 @@ void gm20b_gr_init_commit_global_attrib_cb(struct gk20a *g,
|
||||
u32 gm20b_gr_init_get_patch_slots(struct gk20a *g,
|
||||
struct nvgpu_gr_config *config)
|
||||
{
|
||||
(void)g;
|
||||
(void)config;
|
||||
return PATCH_CTX_SLOTS_PER_PAGE;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -78,6 +78,7 @@ void gp10b_gr_init_get_access_map(struct gk20a *g,
|
||||
};
|
||||
size_t array_size;
|
||||
|
||||
(void)g;
|
||||
*whitelist = wl_addr_gp10b;
|
||||
array_size = ARRAY_SIZE(wl_addr_gp10b);
|
||||
*num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
|
||||
@@ -95,6 +96,9 @@ int gp10b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
|
||||
u32 tpc_cnt = nvgpu_safe_sub_u32(
|
||||
nvgpu_gr_config_get_tpc_count(gr_config), 1U);
|
||||
|
||||
(void)gr_ctx;
|
||||
(void)patch;
|
||||
|
||||
/* Each NV_PGRAPH_PRI_CWD_GPC_TPC_ID can store 4 TPCs.*/
|
||||
for (i = 0U; i <= (tpc_cnt / 4U); i++) {
|
||||
u32 reg = 0;
|
||||
@@ -194,11 +198,13 @@ int gp10b_gr_init_preemption_state(struct gk20a *g)
|
||||
*/
|
||||
u32 gp10b_gr_init_get_attrib_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return GP10B_CBM_BETA_CB_NO_DEEP_TILING_SIZE_DEFAULT;
|
||||
}
|
||||
|
||||
u32 gp10b_gr_init_get_alpha_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
|
||||
}
|
||||
|
||||
@@ -312,6 +318,7 @@ u32 gp10b_gr_init_get_attrib_cb_gfxp_size(struct gk20a *g)
|
||||
|
||||
u32 gp10b_gr_init_get_ctx_spill_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return nvgpu_safe_mult_u32(
|
||||
gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(),
|
||||
gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v());
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -148,6 +148,7 @@ void gp10b_gr_init_commit_global_bundle_cb(struct gk20a *g,
|
||||
|
||||
u32 gp10b_gr_init_pagepool_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_scc_pagepool_total_pages_hwmax_value_v();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -92,6 +92,7 @@ void gv11b_gr_init_get_access_map(struct gk20a *g,
};
size_t array_size;

(void)g;
*whitelist = wl_addr_gv11b;
array_size = ARRAY_SIZE(wl_addr_gv11b);
*num_entries = nvgpu_safe_cast_u64_to_u32(array_size);

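The access-map hunk above shows the common shape of these getters: a static table of whitelisted register offsets is handed back through two out-parameters, with ARRAY_SIZE() producing a size_t count that is then narrowed to u32 via a checked cast. A simplified standalone version of the same pattern (the function name and the offsets are illustrative, not real whitelist entries):

	#include <stddef.h>
	#include <stdint.h>

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	/* Illustrative whitelist of register offsets. */
	static const uint32_t wl_addr_example[] = {
		0x404468U,
		0x404498U,
		0x405800U,
	};

	static void example_get_access_map(const uint32_t **whitelist,
					   uint32_t *num_entries)
	{
		size_t array_size = ARRAY_SIZE(wl_addr_example);

		*whitelist = wl_addr_example;
		/* The driver routes this narrowing through a checked cast helper. */
		*num_entries = (uint32_t)array_size;
	}

	int main(void)
	{
		const uint32_t *map;
		uint32_t n;

		example_get_access_map(&map, &n);
		return (n == 3U && map[0] == 0x404468U) ? 0 : 1;
	}
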
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -340,6 +340,8 @@ u32 gv11b_gr_init_get_nonpes_aware_tpc(struct gk20a *g, u32 gpc, u32 tpc,
|
||||
u32 temp;
|
||||
u32 pes;
|
||||
|
||||
(void)g;
|
||||
|
||||
for (pes = 0U;
|
||||
pes < nvgpu_gr_config_get_gpc_ppc_count(gr_config, gpc);
|
||||
pes++) {
|
||||
@@ -642,26 +644,31 @@ void gv11b_gr_init_commit_global_timeslice(struct gk20a *g)
|
||||
|
||||
u32 gv11b_gr_init_get_bundle_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_scc_bundle_cb_size_div_256b__prod_v();
|
||||
}
|
||||
|
||||
u32 gv11b_gr_init_get_min_gpm_fifo_depth(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v();
|
||||
}
|
||||
|
||||
u32 gv11b_gr_init_get_bundle_cb_token_limit(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_pd_ab_dist_cfg2_token_limit_init_v();
|
||||
}
|
||||
|
||||
u32 gv11b_gr_init_get_attrib_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
|
||||
}
|
||||
|
||||
u32 gv11b_gr_init_get_alpha_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
|
||||
}
|
||||
|
||||
@@ -1165,16 +1172,19 @@ void gv11b_gr_init_commit_cbes_reserve(struct gk20a *g,
|
||||
|
||||
u32 gv11b_gr_init_get_attrib_cb_gfxp_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
|
||||
}
|
||||
|
||||
u32 gv11b_gr_init_get_attrib_cb_gfxp_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
|
||||
}
|
||||
|
||||
u32 gv11b_gr_init_get_ctx_spill_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return nvgpu_safe_mult_u32(
|
||||
gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(),
|
||||
gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v());
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -34,26 +34,31 @@
|
||||
|
||||
u32 tu104_gr_init_get_bundle_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_scc_bundle_cb_size_div_256b__prod_v();
|
||||
}
|
||||
|
||||
u32 tu104_gr_init_get_min_gpm_fifo_depth(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v();
|
||||
}
|
||||
|
||||
u32 tu104_gr_init_get_bundle_cb_token_limit(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_pd_ab_dist_cfg2_token_limit_init_v();
|
||||
}
|
||||
|
||||
u32 tu104_gr_init_get_attrib_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
|
||||
}
|
||||
|
||||
u32 tu104_gr_init_get_alpha_cb_default_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
|
||||
}
|
||||
|
||||
@@ -150,6 +155,7 @@ int tu104_gr_init_load_sw_bundle64(struct gk20a *g,
|
||||
#ifdef CONFIG_NVGPU_GRAPHICS
|
||||
u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return nvgpu_safe_mult_u32(
|
||||
nvgpu_safe_add_u32(
|
||||
gr_scc_rm_rtv_cb_size_div_256b_default_f(),
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -91,6 +91,7 @@ static u32 get_sm_hww_global_esr_report_mask(void)
|
||||
|
||||
u32 ga10b_gr_intr_enable_mask(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_intr_en_mask();
|
||||
}
|
||||
|
||||
@@ -105,6 +106,11 @@ int ga10b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
|
||||
u32 left_shift_by_2 = 2U;
|
||||
#endif
|
||||
|
||||
(void)addr;
|
||||
(void)class_num;
|
||||
(void)offset;
|
||||
(void)data;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
|
||||
@@ -362,6 +368,8 @@ void ga10b_gr_intr_enable_exceptions(struct gk20a *g,
|
||||
static void ga10b_gr_intr_report_gpcmmu_ecc_err(struct gk20a *g,
|
||||
u32 ecc_status, u32 gpc, u32 correct_err, u32 uncorrect_err)
|
||||
{
|
||||
(void)correct_err;
|
||||
|
||||
if ((ecc_status &
|
||||
gr_gpc0_mmu0_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_m()) != 0U) {
|
||||
nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
|
||||
@@ -473,6 +481,7 @@ void ga10b_gr_intr_handle_gpc_gpcmmu_exception(struct gk20a *g, u32 gpc,
|
||||
static void ga10b_gr_intr_set_l1_tag_uncorrected_err(struct gk20a *g,
|
||||
u32 l1_tag_ecc_status, struct nvgpu_gr_sm_ecc_status *ecc_status)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
if ((l1_tag_ecc_status &
|
||||
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_0_m()) != 0U) {
|
||||
@@ -502,6 +511,7 @@ static void ga10b_gr_intr_set_l1_tag_uncorrected_err(struct gk20a *g,
|
||||
static void ga10b_gr_intr_set_l1_tag_corrected_err(struct gk20a *g,
|
||||
u32 l1_tag_ecc_status, struct nvgpu_gr_sm_ecc_status *ecc_status)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
if ((l1_tag_ecc_status &
|
||||
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_0_m()) != 0U) {
|
||||
@@ -548,6 +558,8 @@ static bool ga10b_gr_intr_sm_lrf_ecc_status_errors(struct gk20a *g,
|
||||
u32 corr_err, uncorr_err;
|
||||
bool err_status = true;
|
||||
|
||||
(void)g;
|
||||
|
||||
corr_err = lrf_ecc_status &
|
||||
(gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m() |
|
||||
gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m() |
|
||||
@@ -585,6 +597,8 @@ static bool ga10b_gr_intr_sm_cbu_ecc_status_errors(struct gk20a *g,
|
||||
u32 corr_err, uncorr_err;
|
||||
bool err_status = true;
|
||||
|
||||
(void)g;
|
||||
|
||||
corr_err = cbu_ecc_status &
|
||||
(gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m() |
|
||||
gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm0_m());
|
||||
@@ -618,6 +632,8 @@ static bool ga10b_gr_intr_sm_l1_data_ecc_status_errors(struct gk20a *g,
|
||||
u32 corr_err, uncorr_err;
|
||||
bool err_status = true;
|
||||
|
||||
(void)g;
|
||||
|
||||
corr_err = l1_data_ecc_status &
|
||||
gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m();
|
||||
uncorr_err = l1_data_ecc_status &
|
||||
@@ -648,6 +664,8 @@ static bool ga10b_gr_intr_sm_rams_ecc_status_errors(struct gk20a *g,
|
||||
u32 corr_err, uncorr_err;
|
||||
bool err_status = true;
|
||||
|
||||
(void)g;
|
||||
|
||||
corr_err = rams_ecc_status &\
|
||||
(gr_pri_gpc0_tpc0_sm_rams_ecc_status_corrected_err_l0ic_data_m() |\
|
||||
gr_pri_gpc0_tpc0_sm_rams_ecc_status_corrected_err_l0ic_predecode_m() |\
|
||||
@@ -689,6 +707,8 @@ static bool ga10b_gr_intr_sm_icache_ecc_status_errors(struct gk20a *g,
|
||||
u32 corr_err, uncorr_err;
|
||||
bool err_status = true;
|
||||
|
||||
(void)g;
|
||||
|
||||
corr_err = icache_ecc_status &
|
||||
gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_data_m();
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -111,6 +111,8 @@ void gm20b_gr_intr_handle_class_error(struct gk20a *g, u32 chid,
|
||||
int gm20b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
|
||||
u32 class_num, u32 offset, u32 data)
|
||||
{
|
||||
(void)addr;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
#ifdef CONFIG_NVGPU_NON_FUSA
|
||||
@@ -230,6 +232,7 @@ void gm20b_gr_intr_enable_exceptions(struct gk20a *g,
|
||||
{
|
||||
u32 reg_value = (enable) ? 0xFFFFFFFFU : 0U;
|
||||
|
||||
(void)gr_config;
|
||||
nvgpu_writel(g, gr_exception_en_r(), reg_value);
|
||||
nvgpu_writel(g, gr_exception1_en_r(), reg_value);
|
||||
nvgpu_writel(g, gr_exception2_en_r(), reg_value);
|
||||
@@ -288,6 +291,9 @@ void gm20b_gr_intr_set_hww_esr_report_mask(struct gk20a *g)
|
||||
void gm20b_gr_intr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
|
||||
u32 *esr_sm_sel)
|
||||
{
|
||||
(void)g;
|
||||
(void)gpc;
|
||||
(void)tpc;
|
||||
*esr_sm_sel = 1;
|
||||
}
|
||||
|
||||
@@ -297,6 +303,8 @@ void gm20b_gr_intr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
|
||||
u32 offset = nvgpu_safe_add_u32(nvgpu_gr_gpc_offset(g, gpc),
|
||||
nvgpu_gr_tpc_offset(g, tpc));
|
||||
|
||||
(void)sm;
|
||||
|
||||
gk20a_writel(g, nvgpu_safe_add_u32(
|
||||
gr_gpc0_tpc0_sm_hww_global_esr_r(), offset),
|
||||
global_esr);
|
||||
@@ -338,6 +346,8 @@ u32 gm20b_gr_intr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 s
|
||||
struct nvgpu_tsg *tsg = NULL;
|
||||
int err = 0;
|
||||
|
||||
(void)sm;
|
||||
|
||||
offset = nvgpu_safe_add_u32(
|
||||
nvgpu_safe_mult_u32(gpc_stride, gpc),
|
||||
nvgpu_safe_mult_u32(tpc_in_gpc_stride, tpc));
|
||||
@@ -381,6 +391,7 @@ u32 gm20b_gr_intr_get_sm_hww_global_esr(struct gk20a *g, u32 gpc, u32 tpc,
|
||||
u32 hww_global_esr = gk20a_readl(g, nvgpu_safe_add_u32(
|
||||
gr_gpc0_tpc0_sm_hww_global_esr_r(), offset));
|
||||
|
||||
(void)sm;
|
||||
return hww_global_esr;
|
||||
}
|
||||
|
||||
@@ -390,6 +401,7 @@ u32 gm20b_gr_intr_get_sm_hww_warp_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm)
|
||||
nvgpu_gr_tpc_offset(g, tpc));
|
||||
u32 hww_warp_esr = gk20a_readl(g, nvgpu_safe_add_u32(
|
||||
gr_gpc0_tpc0_sm_hww_warp_esr_r(), offset));
|
||||
(void)sm;
|
||||
return hww_warp_esr;
|
||||
}
|
||||
|
||||
@@ -407,6 +419,7 @@ u32 gm20b_gr_intr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g)
|
||||
gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f() |
|
||||
gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f();
|
||||
|
||||
(void)g;
|
||||
return global_esr_mask;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -27,6 +27,7 @@
|
||||
#include <nvgpu/class.h>
|
||||
#include <nvgpu/static_analysis.h>
|
||||
#include <nvgpu/nvgpu_err.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include <nvgpu/gr/config.h>
|
||||
#include <nvgpu/gr/gr.h>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -41,6 +41,8 @@
|
||||
int gp10b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
|
||||
u32 class_num, u32 offset, u32 data)
|
||||
{
|
||||
(void)addr;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
#ifdef CONFIG_NVGPU_NON_FUSA
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -25,6 +25,7 @@
|
||||
#include <nvgpu/class.h>
|
||||
#include <nvgpu/static_analysis.h>
|
||||
#include <nvgpu/nvgpu_err.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include <nvgpu/gr/config.h>
|
||||
#include <nvgpu/gr/gr.h>
|
||||
@@ -184,6 +185,11 @@ int gv11b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
|
||||
{
|
||||
int err = -EFAULT;
|
||||
|
||||
(void)addr;
|
||||
(void)class_num;
|
||||
(void)offset;
|
||||
(void)data;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
|
||||
@@ -273,6 +279,8 @@ void gv11b_gr_intr_handle_gcc_exception(struct gk20a *g, u32 gpc,
|
||||
bool is_gcc_l15_ecc_corrected_total_err_overflow = false;
|
||||
bool is_gcc_l15_ecc_uncorrected_total_err_overflow = false;
|
||||
|
||||
(void)corrected_err;
|
||||
|
||||
if (gr_gpc0_gpccs_gpc_exception_gcc_v(gpc_exception) == 0U) {
|
||||
return;
|
||||
}
|
||||
@@ -359,6 +367,8 @@ static void gv11b_gr_intr_report_gpcmmu_ecc_err(struct gk20a *g,
|
||||
u32 ecc_status, u32 gpc,
|
||||
u32 correct_err, u32 uncorrect_err)
|
||||
{
|
||||
(void)correct_err;
|
||||
|
||||
if ((ecc_status &
|
||||
gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_m()) !=
|
||||
0U) {
|
||||
@@ -974,6 +984,7 @@ static void gv11b_gr_intr_report_l1_tag_corrected_err(struct gk20a *g,
|
||||
static void gv11b_gr_intr_set_l1_tag_uncorrected_err(struct gk20a *g,
|
||||
u32 l1_tag_ecc_status, struct nvgpu_gr_sm_ecc_status *ecc_status)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
if ((l1_tag_ecc_status &
|
||||
(gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_0_m() |
|
||||
@@ -1004,6 +1015,7 @@ static void gv11b_gr_intr_set_l1_tag_uncorrected_err(struct gk20a *g,
|
||||
static void gv11b_gr_intr_set_l1_tag_corrected_err(struct gk20a *g,
|
||||
u32 l1_tag_ecc_status, struct nvgpu_gr_sm_ecc_status *ecc_status)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
if ((l1_tag_ecc_status &
|
||||
(gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_0_m() |
|
||||
@@ -1159,6 +1171,8 @@ static bool gv11b_gr_intr_sm_lrf_ecc_status_errors(struct gk20a *g,
|
||||
u32 corr_err, uncorr_err;
|
||||
bool err_status = true;
|
||||
|
||||
(void)g;
|
||||
|
||||
corr_err = lrf_ecc_status &
|
||||
(gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m() |
|
||||
gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m() |
|
||||
@@ -1302,6 +1316,8 @@ static bool gv11b_gr_intr_sm_cbu_ecc_status_errors(struct gk20a *g,
|
||||
u32 corr_err, uncorr_err;
|
||||
bool err_status = true;
|
||||
|
||||
(void)g;
|
||||
|
||||
corr_err = cbu_ecc_status &
|
||||
(gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m() |
|
||||
gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm1_m() |
|
||||
@@ -1435,6 +1451,8 @@ static bool gv11b_gr_intr_sm_l1_data_ecc_status_errors(struct gk20a *g,
|
||||
u32 corr_err, uncorr_err;
|
||||
bool err_status = true;
|
||||
|
||||
(void)g;
|
||||
|
||||
corr_err = l1_data_ecc_status &
|
||||
(gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m() |
|
||||
gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_1_m());
|
||||
@@ -1603,6 +1621,8 @@ static void gv11b_set_icache_ecc_status_uncorrected_errors(struct gk20a *g,
|
||||
u32 icache_ecc_status,
|
||||
struct nvgpu_gr_sm_ecc_status *ecc_status)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
if ((icache_ecc_status &
|
||||
gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_data_m()) != 0U) {
|
||||
ecc_status->err_id[ecc_status->err_count] =
|
||||
@@ -2001,6 +2021,8 @@ u32 gv11b_gr_intr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g)
|
||||
gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f() |
|
||||
gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f();
|
||||
|
||||
(void)g;
|
||||
|
||||
return global_esr_mask;
|
||||
}
|
||||
|
||||
|
||||
@@ -37,6 +37,8 @@
|
||||
void ga10b_gr_zbc_init_table_indices(struct gk20a *g,
|
||||
struct nvgpu_gr_zbc_table_indices *zbc_indices)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
/* Color indices */
|
||||
zbc_indices->min_color_index =
|
||||
gr_pri_gpcs_rops_crop_zbc_index_address_min_v();
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -31,6 +31,8 @@
|
||||
void gm20b_gr_zbc_init_table_indices(struct gk20a *g,
|
||||
struct nvgpu_gr_zbc_table_indices *zbc_indices)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
/* Color indices */
|
||||
zbc_indices->min_color_index = NVGPU_GR_ZBC_STARTOF_TABLE;
|
||||
zbc_indices->max_color_index = 15U;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -31,6 +31,8 @@
|
||||
void gp10b_gr_zbc_init_table_indices(struct gk20a *g,
|
||||
struct nvgpu_gr_zbc_table_indices *zbc_indices)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
/* Color indices */
|
||||
zbc_indices->min_color_index = NVGPU_GR_ZBC_STARTOF_TABLE;
|
||||
zbc_indices->max_color_index = gr_gpcs_swdx_dss_zbc_color_r__size_1_v();
|
||||
@@ -46,11 +48,13 @@ void gp10b_gr_zbc_init_table_indices(struct gk20a *g,
|
||||
|
||||
u32 gp10b_gr_zbc_get_gpcs_swdx_dss_zbc_c_format_reg(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r();
|
||||
}
|
||||
|
||||
u32 gp10b_gr_zbc_get_gpcs_swdx_dss_zbc_z_format_reg(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -31,6 +31,8 @@
|
||||
void gv11b_gr_zbc_init_table_indices(struct gk20a *g,
|
||||
struct nvgpu_gr_zbc_table_indices *zbc_indices)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
/* Color indices */
|
||||
zbc_indices->min_color_index = NVGPU_GR_ZBC_STARTOF_TABLE;
|
||||
zbc_indices->max_color_index = gr_gpcs_swdx_dss_zbc_color_r__size_1_v();
|
||||
@@ -46,11 +48,13 @@ void gv11b_gr_zbc_init_table_indices(struct gk20a *g,
|
||||
|
||||
u32 gv11b_gr_zbc_get_gpcs_swdx_dss_zbc_c_format_reg(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r();
|
||||
}
|
||||
|
||||
u32 gv11b_gr_zbc_get_gpcs_swdx_dss_zbc_z_format_reg(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GA10B LTC INTR
|
||||
*
|
||||
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -249,6 +249,8 @@ static void ga10b_ltc_intr2_configure(struct gk20a *g)
|
||||
|
||||
void ga10b_ltc_intr3_configure_extra(struct gk20a *g, u32 *reg)
|
||||
{
|
||||
(void)g;
|
||||
|
||||
/*
|
||||
* DTM_KIND_INVALID - If the kind of a comp stat req read or packed
|
||||
* read is invalid or pitch, the inter3_dtm_kind_invalid interrupt will
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GV11B LTC INTR
|
||||
*
|
||||
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -98,6 +98,11 @@ void gv11b_ltc_intr_handle_rstg_ecc_interrupts(struct gk20a *g,
|
||||
u32 ltc, u32 slice, u32 ecc_status, u32 ecc_addr,
|
||||
u32 uncorrected_delta)
|
||||
{
|
||||
(void)ltc;
|
||||
(void)slice;
|
||||
(void)ecc_addr;
|
||||
(void)uncorrected_delta;
|
||||
|
||||
if ((ecc_status &
|
||||
ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
|
||||
!= 0U) {
|
||||
|
||||
@@ -43,6 +43,7 @@
*/
bool gm20b_ltc_pri_is_ltc_addr(struct gk20a *g, u32 addr)
{
(void)g;
return ((addr >= ltc_pltcg_base_v()) && (addr < ltc_pltcg_extent_v()));
}

@@ -1,7 +1,7 @@
/*
* GV11B master
*
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,7 @@

bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0)
{
(void)g;
return ((mc_intr_0 & mc_intr_hub_pending_f()) != 0U);
}

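Predicates like gv11b_mc_is_intr_hub_pending() are built from generated register-field helpers (the *_f(), *_m(), *_v() functions seen throughout this diff) and use an explicit mask-and-compare rather than an implicit truth value. A sketch of that pattern with an invented bit position (the real values come from the hw_*.h headers, which are not shown here):

	#include <stdbool.h>
	#include <stdint.h>

	/* Generated-style helper: returns the field already shifted into place. */
	static inline uint32_t example_intr_hub_pending_f(void)
	{
		return (uint32_t)1U << 9;	/* assumed bit position, for illustration */
	}

	static bool example_is_intr_hub_pending(uint32_t mc_intr_0)
	{
		return (mc_intr_0 & example_intr_hub_pending_f()) != 0U;
	}

	int main(void)
	{
		return example_is_intr_hub_pending(0x200U) ? 0 : 1;
	}
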
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -194,8 +194,8 @@ static void ga10b_intr_subtree_clear_unhandled(struct gk20a *g,
"intr_leaf0 0x%08x intr_leaf1 0x%08x",
subtree, handled_subtree_mask, intr_leaf0, intr_leaf1);
ga10b_intr_subtree_clear(g, subtree,
hi32_lo32_to_u64(unhandled_intr_leaf1,
unhandled_intr_leaf0));
hi32_lo32_to_u64((u32)unhandled_intr_leaf1,
(u32)unhandled_intr_leaf0));
}
}

@@ -490,6 +490,8 @@ static u32 ga10b_intr_map_mc_stall_unit_to_intr_unit(struct gk20a *g,
{
u32 intr_unit = mc_intr_unit;

(void)g;

/**
* Different indices are used to store unit info for
* gr/ce stall/nostall intr.

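The visible change in ga10b_intr_subtree_clear_unhandled() is the pair of explicit (u32) casts on the arguments of hi32_lo32_to_u64(). Passing wider values into u32 parameters is exactly what conversion warnings flag, and the casts make the intended truncation to the low 32 bits explicit at the call site. A self-contained sketch of the hi/lo packing such a helper presumably performs (the body below is an assumption; only the call site appears in this diff):

	#include <stdint.h>

	/* Pack two 32-bit halves into one 64-bit word, high half first. */
	static uint64_t hi32_lo32_to_u64(uint32_t hi, uint32_t lo)
	{
		return ((uint64_t)hi << 32) | (uint64_t)lo;
	}

	int main(void)
	{
		uint64_t unhandled_leaf1 = 0x0000000000000010ULL;
		uint64_t unhandled_leaf0 = 0x0000000080000000ULL;

		/*
		 * Without the explicit casts, the implicit u64 -> u32 narrowing
		 * at this call would be flagged by conversion warnings.
		 */
		uint64_t packed = hi32_lo32_to_u64((uint32_t)unhandled_leaf1,
						   (uint32_t)unhandled_leaf0);

		return (packed == 0x0000001080000000ULL) ? 0 : 1;
	}
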
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -243,6 +243,8 @@ void intr_tu104_stall_unit_config(struct gk20a *g, u32 unit, bool enable)
|
||||
|
||||
void intr_tu104_nonstall_unit_config(struct gk20a *g, u32 unit, bool enable)
|
||||
{
|
||||
(void)unit;
|
||||
(void)enable;
|
||||
intr_tu104_nonstall_enable(g);
|
||||
}
|
||||
|
||||
@@ -386,6 +388,7 @@ u32 intr_tu104_stall(struct gk20a *g)
|
||||
/* Return true if HUB interrupt is pending */
|
||||
bool intr_tu104_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0)
|
||||
{
|
||||
(void)mc_intr_0;
|
||||
return g->ops.mc.is_mmu_fault_pending(g);
|
||||
}
|
||||
|
||||
|
||||
@@ -74,6 +74,7 @@
|
||||
|
||||
u32 ga10b_mm_get_iommu_bit(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return GA10B_MM_IOMMU_BIT;
|
||||
}
|
||||
|
||||
@@ -382,10 +383,13 @@ static const struct gk20a_mmu_level ga10b_mm_levels[] = {
|
||||
const struct gk20a_mmu_level *ga10b_mm_get_mmu_levels(struct gk20a *g,
|
||||
u64 big_page_size)
|
||||
{
|
||||
(void)g;
|
||||
(void)big_page_size;
|
||||
return ga10b_mm_levels;
|
||||
}
|
||||
|
||||
u32 ga10b_get_max_page_table_levels(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return GA10B_MAX_PAGE_TABLE_LEVELS;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -266,16 +266,19 @@ const struct gk20a_mmu_level gk20a_mm_levels_128k[] = {
|
||||
const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
|
||||
u64 big_page_size)
|
||||
{
|
||||
(void)g;
|
||||
return (big_page_size == SZ_64K) ?
|
||||
gk20a_mm_levels_64k : gk20a_mm_levels_128k;
|
||||
}
|
||||
|
||||
u32 gk20a_get_max_page_table_levels(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return 2U;
|
||||
}
|
||||
|
||||
u32 gk20a_mm_get_iommu_bit(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return 34;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -28,6 +28,10 @@
|
||||
u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
|
||||
struct nvgpu_gmmu_pd *pd, u32 pd_idx)
|
||||
{
|
||||
(void)g;
|
||||
(void)l;
|
||||
(void)pd;
|
||||
(void)pd_idx;
|
||||
/*
|
||||
* big and small page sizes are the same
|
||||
*/
|
||||
@@ -37,6 +41,10 @@ u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
|
||||
u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
|
||||
struct nvgpu_gmmu_pd *pd, u32 pd_idx)
|
||||
{
|
||||
(void)g;
|
||||
(void)l;
|
||||
(void)pd;
|
||||
(void)pd_idx;
|
||||
/*
|
||||
* return invalid
|
||||
*/
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -30,5 +30,7 @@
|
||||
u64 gm20b_gpu_phys_addr(struct gk20a *g,
|
||||
struct nvgpu_gmmu_attrs *attrs, u64 phys)
|
||||
{
|
||||
(void)g;
|
||||
(void)attrs;
|
||||
return phys;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -39,6 +39,7 @@
|
||||
|
||||
u32 gp10b_mm_get_iommu_bit(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return GP10B_MM_IOMMU_BIT;
|
||||
}
|
||||
|
||||
@@ -346,10 +347,13 @@ static const struct gk20a_mmu_level gp10b_mm_levels[] = {
|
||||
const struct gk20a_mmu_level *gp10b_mm_get_mmu_levels(struct gk20a *g,
|
||||
u64 big_page_size)
|
||||
{
|
||||
(void)g;
|
||||
(void)big_page_size;
|
||||
return gp10b_mm_levels;
|
||||
}
|
||||
|
||||
u32 gp10b_get_max_page_table_levels(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return 5U;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -26,6 +26,7 @@
|
||||
|
||||
bool gm20b_mm_is_bar1_supported(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -53,4 +54,4 @@ void gm20b_mm_get_default_va_sizes(u64 *aperture_size,
|
||||
if (kernel_size != NULL) {
|
||||
*kernel_size = BIT64(32);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -71,5 +71,6 @@ void gv11b_mm_init_inst_block_for_subctxs(struct nvgpu_mem *inst_block,
|
||||
|
||||
bool gv11b_mm_is_bar1_supported(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -42,6 +42,7 @@
|
||||
#include <nvgpu/mmu_fault.h>
|
||||
#include <nvgpu/nvgpu_init.h>
|
||||
#include <nvgpu/power_features/pg.h>
|
||||
#include <nvgpu/string.h>
|
||||
|
||||
#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
|
||||
|
||||
@@ -331,6 +332,8 @@ static bool gv11b_mm_mmu_fault_handle_mmu_fault_ce(struct gk20a *g,
|
||||
}
|
||||
return true;
|
||||
}
|
||||
#else
|
||||
(void)invalidate_replay_val;
|
||||
#endif
|
||||
/* Do recovery */
|
||||
nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Not Fixed");
|
||||
@@ -711,6 +714,7 @@ void gv11b_mm_mmu_fault_info_mem_destroy(struct gk20a *g)
|
||||
|
||||
static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g)
|
||||
{
|
||||
(void)g;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -28,6 +28,8 @@ int ga10b_netlist_get_name(struct gk20a *g, int index, char *name)
{
int ret = 0;

(void)g;

switch (index) {
#ifdef GA10B_NETLIST_IMAGE_FW_NAME
case NETLIST_FINAL:

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,6 +21,7 @@
*/

#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>

#include "netlist_gm20b.h"

@@ -28,6 +29,8 @@ int gm20b_netlist_get_name(struct gk20a *g, int index, char *name)
{
int ret = 0;

(void)g;

switch (index) {
#ifdef GM20B_NETLIST_IMAGE_FW_NAME
case NETLIST_FINAL:

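Besides the usual (void)g;, these netlist hunks add #include <nvgpu/string.h>. The hunks do not show which call required it, but under stricter warning settings every function a file defines or calls needs a visible prototype from the header that owns it, rather than an ad-hoc or implicit declaration. A generic, invented illustration of that rule (copy_name() is not an nvgpu function):

	#include <stddef.h>
	#include <string.h>

	/* Prototype that would normally be pulled in from a header. */
	void copy_name(char *dst, const char *src, size_t len);

	/*
	 * With warnings about missing declarations enabled, defining this
	 * external function without the prototype above would be flagged.
	 */
	void copy_name(char *dst, const char *src, size_t len)
	{
		if (len == 0U) {
			return;
		}
		(void)strncpy(dst, src, len - 1U);
		dst[len - 1U] = '\0';
	}

	int main(void)
	{
		char buf[8];

		copy_name(buf, "gm20b", sizeof(buf));
		return (buf[0] == 'g') ? 0 : 1;
	}
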
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,6 +21,7 @@
*/

#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>

#include "netlist_gp10b.h"

@@ -28,6 +29,8 @@ int gp10b_netlist_get_name(struct gk20a *g, int index, char *name)
{
int ret = 0;

(void)g;

switch (index) {
#ifdef GP10B_NETLIST_IMAGE_FW_NAME
case NETLIST_FINAL:

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,6 +21,7 @@
*/

#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>

#include "netlist_gv11b.h"

@@ -28,6 +29,8 @@ int gv11b_netlist_get_name(struct gk20a *g, int index, char *name)
{
int ret = 0;

(void)g;

switch (index) {
#ifdef GV11B_NETLIST_IMAGE_FW_NAME
case NETLIST_FINAL:

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -111,7 +111,7 @@ int gv11b_perf_update_get_put(struct gk20a *g, u64 bytes_consumed,
u32 val;

if (bytes_consumed != 0U) {
nvgpu_writel(g, perf_pmasys_mem_bump_r(), bytes_consumed);
nvgpu_writel(g, perf_pmasys_mem_bump_r(), (u32)bytes_consumed);
}

if (update_available_bytes) {

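Here bytes_consumed is a u64 argument being written into a 32-bit register interface, so the new (u32) cast makes the truncation explicit rather than implicit. Where a narrowing must not lose bits, the driver instead goes through nvgpu_safe_cast_u64_to_u32(), as in the access-map hunks earlier; a sketch of what such a checked cast typically looks like (the body below is an assumption, not the nvgpu helper):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t checked_cast_u64_to_u32(uint64_t value)
	{
		assert(value <= (uint64_t)UINT32_MAX);	/* the real helper would flag an error */
		return (uint32_t)value;
	}

	int main(void)
	{
		return (checked_cast_u64_to_u32(4096ULL) == 4096U) ? 0 : 1;
	}

For the register bump above a plain cast is presumably acceptable, since the hardware register only holds 32 bits in the first place.
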
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -36,6 +36,7 @@

bool ga10b_is_pmu_supported(struct gk20a *g)
{
(void)g;
#ifdef CONFIG_NVGPU_LS_PMU
return nvgpu_platform_is_simulation(g) ? false : true;
#else
@@ -367,6 +368,8 @@ void ga10b_pmu_handle_swgen1_irq(struct gk20a *g, u32 intr)
}
}
#endif
(void)g;
(void)intr;
}

/*

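In ga10b_pmu_handle_swgen1_irq() the (void)g; and (void)intr; statements sit after the #endif because the parameters are only referenced inside the conditionally compiled block; placing the casts in the unconditional part keeps the function warning-clean in the configuration where that block is compiled out. A reduced illustration (CONFIG_EXAMPLE_FEATURE and the handler body are invented):

	#include <stdint.h>
	#include <stdio.h>

	static void example_handle_irq(void *dev, uint32_t intr)
	{
	#ifdef CONFIG_EXAMPLE_FEATURE
		/* Only this branch actually consumes the arguments. */
		printf("dev %p intr 0x%08x\n", dev, (unsigned int)intr);
	#endif
		/* Reached in every configuration, so both parameters always count as used. */
		(void)dev;
		(void)intr;
	}

	int main(void)
	{
		example_handle_irq(NULL, 0x10U);
		return 0;
	}
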
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -418,6 +418,8 @@ u32 gk20a_pmu_get_irqdest(struct gk20a *g)
{
u32 intr_dest;

(void)g;

/* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) |
pwr_falcon_irqdest_host_wdtmr_f(1) |
@@ -724,5 +726,6 @@ u32 gk20a_pmu_falcon_base_addr(void)

bool gk20a_is_pmu_supported(struct gk20a *g)
{
(void)g;
return true;
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -223,6 +223,7 @@ void gm20b_secured_pmu_start(struct gk20a *g)

bool gm20b_is_pmu_supported(struct gk20a *g)
{
(void)g;
return true;
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -161,6 +161,7 @@ void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr)

bool gp10b_is_pmu_supported(struct gk20a *g)
{
(void)g;
return true;
}

Some files were not shown because too many files have changed in this diff.