gpu: nvgpu: Implement clk_good and pll_lock check

Add clk_good and pll_lock checks as part of FMON polling.
These checks poll for clock-related faults at the FTTI interval.
Also add a new function that polls for VBIOS init completion.

NVGPU-4967
Bug 2849506
Bug 200564937

Change-Id: I5bc885329981e07376824e148edabe9be4120e1c
Signed-off-by: Abdul Salam <absalam@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2305782
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Abdul Salam, 2020-03-24 22:19:53 +05:30
Committer: Alex Waterman
Parent: b8a3e54dda
Commit: 4f5bd9e633

9 changed files with 116 additions and 10 deletions


@@ -26,6 +26,7 @@
#include <nvgpu/string.h>
#include <nvgpu/soc.h>
#include <nvgpu/static_analysis.h>
#include <nvgpu/timers.h>
#include "bios_sw_gv100.h"
#include "bios_sw_tu104.h"
@@ -833,3 +834,31 @@ u32 nvgpu_bios_read_u32(struct gk20a *g, u32 offset)
	return val;
}

#ifdef CONFIG_NVGPU_DGPU
bool nvgpu_bios_wait_for_init_done(struct gk20a *g)
{
	struct nvgpu_timeout timeout;
	int err;

	err = nvgpu_timeout_init(g, &timeout,
			NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
	if (err != 0) {
		return false;
	}

	/* Wait till vbios is completed */
	do {
		if (g->bios_is_init == true) {
			return true;
		}
		nvgpu_msleep(NVGPU_BIOS_DEVINIT_VERIFY_COMPLETION_MS);
	} while (nvgpu_timeout_expired(&timeout) == 0);

	if (g->bios_is_init == true) {
		return true;
	} else {
		return false;
	}
}
#endif
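
For context, a minimal sketch of how a caller might consume this helper through the HAL hook wired up later in this change (g->ops.bios.wait_for_bios_init_done). The wrapper function, its name, and the error handling are illustrative assumptions, not part of this commit:

/*
 * Hypothetical caller: block (up to NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS)
 * until devinit/VBIOS setup is observed complete before touching any state
 * that depends on it.
 */
static int example_vbios_dependent_setup(struct gk20a *g)
{
	if (g->ops.bios.wait_for_bios_init_done != NULL &&
			!g->ops.bios.wait_for_bios_init_done(g)) {
		nvgpu_err(g, "VBIOS init did not complete in time");
		return -ETIMEDOUT;
	}

	/* g->bios_is_init was observed true; safe to proceed */
	return 0;
}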


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -28,9 +28,6 @@
#include "bios_sw_gv100.h"
#include "bios_sw_tu104.h"
#define NV_DEVINIT_VERIFY_TIMEOUT_MS 1000U
#define NV_DEVINIT_VERIFY_TIMEOUT_DELAY_US 10U
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_MASK \
0xFFU
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_COMPLETED \
@@ -123,8 +120,8 @@ int tu104_bios_verify_devinit(struct gk20a *g)
	u32 aon_secure_scratch_reg;
	int err;
	err = nvgpu_timeout_init(g, &timeout, NV_DEVINIT_VERIFY_TIMEOUT_MS,
			NVGPU_TIMER_CPU_TIMER);
	err = nvgpu_timeout_init(g, &timeout,
			NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
	if (err != 0) {
		return err;
	}
@@ -139,7 +136,7 @@ int tu104_bios_verify_devinit(struct gk20a *g)
			return 0;
		}
		nvgpu_udelay(NV_DEVINIT_VERIFY_TIMEOUT_DELAY_US);
		nvgpu_udelay(NVGPU_BIOS_DEVINIT_VERIFY_DELAY_US);
	} while (nvgpu_timeout_expired(&timeout) == 0);
	return -ETIMEDOUT;


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,10 @@
#ifndef NVGPU_BIOS_SW_TU104_H
#define NVGPU_BIOS_SW_TU104_H
#define NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS 1000U
#define NVGPU_BIOS_DEVINIT_VERIFY_DELAY_US 10U
#define NVGPU_BIOS_DEVINIT_VERIFY_COMPLETION_MS 1U
struct gk20a;
int tu104_bios_verify_devinit(struct gk20a *g);


@@ -228,3 +228,52 @@ int nvgpu_clk_mon_check_status(struct gk20a *g,
	}
	return 0;
}

bool nvgpu_clk_mon_check_clk_good(struct gk20a *g)
{
	u32 clk_status = nvgpu_readl(g, trim_xtal4x_cfg5_r());

	if (trim_xtal4x_cfg5_curr_state_v(clk_status) !=
			trim_xtal4x_cfg5_curr_state_good_v()) {
		return true;
	}
	return false;
}

bool nvgpu_clk_mon_check_pll_lock(struct gk20a *g)
{
	u32 clk_status = nvgpu_readl(g, trim_xtal4x_cfg_r());

	/* check xtal4 */
	if (trim_xtal4x_cfg_pll_lock_v(clk_status) !=
			trim_xtal4x_cfg_pll_lock_true_v()) {
		return true;
	}

	/* check mem pll */
	clk_status = nvgpu_readl(g, trim_mem_pll_status_r());
	if (trim_mem_pll_status_dram_curr_state_v(clk_status) !=
			trim_mem_pll_status_dram_curr_state_good_v()) {
		return true;
	}
	if (trim_mem_pll_status_refm_curr_state_v(clk_status) !=
			trim_mem_pll_status_refm_curr_state_good_v()) {
		return true;
	}

	/* check sppll0,1 */
	clk_status = nvgpu_readl(g, trim_sppll0_cfg_r());
	if (trim_sppll0_cfg_curr_state_v(clk_status) !=
			trim_sppll0_cfg_curr_state_good_v()) {
		return true;
	}
	clk_status = nvgpu_readl(g, trim_sppll1_cfg_r());
	if (trim_sppll1_cfg_curr_state_v(clk_status) !=
			trim_sppll1_cfg_curr_state_good_v()) {
		return true;
	}
	return false;
}
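
As a usage sketch (not part of this commit), an FMON polling routine running at the FTTI interval could combine the two checks through the HAL ops registered in hal_tu104.c below; in gk20a.h these ops only exist under CONFIG_NVGPU_DGPU, so a real caller would sit behind that config. Note the return convention: both functions return true when a fault is detected, i.e. a clock is not in the "good" state or a PLL has lost lock. The wrapper below and its name are hypothetical:

/*
 * Hypothetical FTTI-interval poll: returns true if any clock or PLL
 * fault is seen via the new clk_mon HAL ops.
 */
static bool example_clk_fault_poll(struct gk20a *g)
{
	if (g->ops.clk.clk_mon_check_clk_good != NULL &&
			g->ops.clk.clk_mon_check_clk_good(g)) {
		return true;
	}
	if (g->ops.clk.clk_mon_check_pll_lock != NULL &&
			g->ops.clk.clk_mon_check_pll_lock(g)) {
		return true;
	}
	return false;
}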


@@ -36,5 +36,6 @@ bool nvgpu_clk_mon_check_master_fault_status(struct gk20a *g);
int nvgpu_clk_mon_check_status(struct gk20a *g, struct
		clk_domains_mon_status_params *clk_mon_status,
		u32 domain_mask);
bool nvgpu_clk_mon_check_clk_good(struct gk20a *g);
bool nvgpu_clk_mon_check_pll_lock(struct gk20a *g);
#endif /* CLK_MON_TU104_H */


@@ -273,6 +273,7 @@ static const struct gpu_ops tu104_ops = {
#ifdef CONFIG_NVGPU_DGPU
		.bios_sw_init = nvgpu_bios_sw_init,
		.bios_sw_deinit = nvgpu_bios_sw_deinit,
		.wait_for_bios_init_done = nvgpu_bios_wait_for_init_done,
#endif /* CONFIG_NVGPU_DGPU */
		.get_aon_secure_scratch_reg = tu104_get_aon_secure_scratch_reg,
	},
@@ -1294,6 +1295,8 @@ static const struct gpu_ops tu104_ops = {
			nvgpu_clk_mon_check_master_fault_status,
		.clk_mon_check_status = nvgpu_clk_mon_check_status,
		.clk_mon_init_domains = nvgpu_pmu_clk_mon_init_domains,
		.clk_mon_check_clk_good = nvgpu_clk_mon_check_clk_good,
		.clk_mon_check_pll_lock = nvgpu_clk_mon_check_pll_lock,
		.perf_pmu_vfe_load = nvgpu_pmu_perf_load,
	},
#ifdef CONFIG_NVGPU_CLK_ARB


@@ -1505,4 +1505,5 @@ u32 nvgpu_bios_get_vbios_version(struct gk20a *g);
u8 nvgpu_bios_get_vbios_oem_version(struct gk20a *g);
struct bit_token *nvgpu_bios_get_bit_token(struct gk20a *g,
u8 token_id);
bool nvgpu_bios_wait_for_init_done(struct gk20a *g);
#endif


@@ -373,6 +373,10 @@ struct gpu_ops {
			struct clk_domains_mon_status_params *clk_mon_status,
			u32 domain_mask);
		u32 (*clk_mon_init_domains)(struct gk20a *g);
#ifdef CONFIG_NVGPU_DGPU
		bool (*clk_mon_check_clk_good)(struct gk20a *g);
		bool (*clk_mon_check_pll_lock)(struct gk20a *g);
#endif
	} clk;
#ifdef CONFIG_NVGPU_CLK_ARB
	struct {
@@ -468,6 +472,7 @@ struct gpu_ops {
		void (*bios_sw_deinit)(struct gk20a *g,
				struct nvgpu_bios *bios);
		u32 (*get_aon_secure_scratch_reg)(struct gk20a *g, u32 i);
		bool (*wait_for_bios_init_done)(struct gk20a *g);
	} bios;
#if defined(CONFIG_NVGPU_CYCLESTATS)


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -210,4 +210,21 @@
#define trim_fmon_master_status_r() (0x00137a00U)
#define trim_fmon_master_status_fault_out_v(r) (((r) >> 0U) & 0x1U)
#define trim_fmon_master_status_fault_out_true_v() (0x00000001U)
#define trim_xtal4x_cfg5_r() (0x001370c0U)
#define trim_xtal4x_cfg5_curr_state_v(r) (((r) >> 16U) & 0xfU)
#define trim_xtal4x_cfg5_curr_state_good_v() (0x00000006U)
#define trim_xtal4x_cfg_r() (0x001370a0U)
#define trim_xtal4x_cfg_pll_lock_v(r) (((r) >> 17U) & 0x1U)
#define trim_xtal4x_cfg_pll_lock_true_v() (0x00000001U)
#define trim_mem_pll_status_r() (0x00137390U)
#define trim_mem_pll_status_dram_curr_state_v(r) (((r) >> 1U) & 0x1U)
#define trim_mem_pll_status_dram_curr_state_good_v() (0x00000001U)
#define trim_mem_pll_status_refm_curr_state_v(r) (((r) >> 17U) & 0x1U)
#define trim_mem_pll_status_refm_curr_state_good_v() (0x00000001U)
#define trim_sppll0_cfg_r() (0x0000e800U)
#define trim_sppll0_cfg_curr_state_v(r) (((r) >> 17U) & 0x1U)
#define trim_sppll0_cfg_curr_state_good_v() (0x00000001U)
#define trim_sppll1_cfg_r() (0x0000e820U)
#define trim_sppll1_cfg_curr_state_v(r) (((r) >> 17U) & 0x1U)
#define trim_sppll1_cfg_curr_state_good_v() (0x00000001U)
#endif
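
These generated accessors follow the usual nvgpu hw-header pattern: *_r() gives the register offset, *_v(r) extracts a field from a raw read, and *_good_v()/*_true_v() give the expected healthy value, so each health check reduces to one register read plus one compare. A minimal sketch under that assumption (the wrapper function is hypothetical; the macros are the ones defined above):

/* Hypothetical helper: true when the XTAL4X clock reports the "good" state. */
static bool example_xtal4x_is_good(struct gk20a *g)
{
	u32 cfg5 = nvgpu_readl(g, trim_xtal4x_cfg5_r());

	return trim_xtal4x_cfg5_curr_state_v(cfg5) ==
			trim_xtal4x_cfg5_curr_state_good_v();
}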