mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: program ltc cg prod values after acr boot
Separate the nvgpu_cg_blcg/slcg_fb_ltc_load_enable functions into
nvgpu_cg_blcg/slcg_fb_load_enable and nvgpu_cg_blcg/slcg_ltc_load_enable.
Program the FB SLCG/BLCG prod values during FB init, and program the LTC
SLCG/BLCG prod values after ACR boot so that LTC clock-gating programming
runs with the correct privilege. Update the unit tests to use separate
BLCG/SLCG HALs for FB and LTC programming.

Bug 3423549

Change-Id: Icdb45528abe1a3ab68a47f689310dee9a4fe9366
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2646039
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 4e98b53944
commit a7c1052024
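Before the diff: the change replaces the combined FB+LTC clock-gating helpers with per-domain helpers and defers the LTC pair until after ACR boot through a new init-table entry. A minimal sketch of the resulting call pattern, condensed from the diff below (all names are taken from the diff itself; locking inside the helpers is omitted):

    /* FB prod values: loaded from nvgpu_init_fb_support(), during FB init. */
    nvgpu_cg_slcg_fb_load_enable(g);
    nvgpu_cg_blcg_fb_load_enable(g);

    /*
     * LTC prod values: loaded via the poweron init table only after ACR boot,
     * once ACR has lowered the PLM of the LTC clock-gating registers so that
     * nvgpu (PL0) has write access (Bug 3469873).
     */
    static int nvgpu_init_cg_ltc_load_gating_prod(struct gk20a *g)
    {
            nvgpu_cg_slcg_ltc_load_enable(g);
            nvgpu_cg_blcg_ltc_load_enable(g);
            return 0;
    }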
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,9 +30,9 @@ int nvgpu_init_fb_support(struct gk20a *g)
 		g->ops.mc.fb_reset(g);
 	}
 
-	nvgpu_cg_slcg_fb_ltc_load_enable(g);
+	nvgpu_cg_slcg_fb_load_enable(g);
 
-	nvgpu_cg_blcg_fb_ltc_load_enable(g);
+	nvgpu_cg_blcg_fb_load_enable(g);
 
 	if (g->ops.fb.init_fs_state != NULL) {
 		g->ops.fb.init_fs_state(g);
@@ -41,6 +41,7 @@
 #include <nvgpu/fb.h>
 #include <nvgpu/device.h>
 #include <nvgpu/gr/gr.h>
+#include <nvgpu/power_features/cg.h>
 #ifdef CONFIG_NVGPU_GSP_SCHEDULER
 #include <nvgpu/gsp.h>
 #endif
@@ -642,6 +643,14 @@ static int nvgpu_init_slcg_acb_load_gating_prod(struct gk20a *g)
 	return 0;
 }
 
+static int nvgpu_init_cg_ltc_load_gating_prod(struct gk20a *g)
+{
+	nvgpu_cg_slcg_ltc_load_enable(g);
+	nvgpu_cg_blcg_ltc_load_enable(g);
+
+	return 0;
+}
+
 static int nvgpu_ipa_pa_rwsem_init(struct gk20a *g)
 {
 	nvgpu_rwsem_init(&(g->ipa_pa_cache.ipa_pa_rw_lock));
@@ -879,6 +888,15 @@ int nvgpu_finalize_poweron(struct gk20a *g)
 	 * in the init sequence and called after acr boot.
 	 */
 	NVGPU_INIT_TABLE_ENTRY(g->ops.fb.set_atomic_mode, NO_FLAG),
+
+	/**
+	 * During acr boot, PLM for ltc clock gating registers
+	 * will be lowered for nvgpu(PL0) write access. So,
+	 * ltc clock gating programming is done after acr boot.
+	 * Bug 3469873
+	 */
+	NVGPU_INIT_TABLE_ENTRY(&nvgpu_init_cg_ltc_load_gating_prod,
+			NO_FLAG),
 #ifdef CONFIG_NVGPU_DGPU
 	NVGPU_INIT_TABLE_ENTRY(g->ops.sec2.init_sec2_support,
 			NVGPU_SUPPORT_SEC2_RTOS),
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -81,7 +81,7 @@ void nvgpu_cg_elcg_disable_no_wait(struct gk20a *g)
 	nvgpu_mutex_release(&g->cg_pg_lock);
 }
 
-void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g)
+void nvgpu_cg_blcg_fb_load_enable(struct gk20a *g)
 {
 	nvgpu_log_fn(g, " ");
 
@@ -92,6 +92,18 @@ void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g)
 	if (g->ops.cg.blcg_fb_load_gating_prod != NULL) {
 		g->ops.cg.blcg_fb_load_gating_prod(g, true);
 	}
+done:
+	nvgpu_mutex_release(&g->cg_pg_lock);
+}
+
+void nvgpu_cg_blcg_ltc_load_enable(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (!g->blcg_enabled) {
+		goto done;
+	}
 	if (g->ops.cg.blcg_ltc_load_gating_prod != NULL) {
 		g->ops.cg.blcg_ltc_load_gating_prod(g, true);
 	}
@@ -162,7 +174,7 @@ done:
 	nvgpu_mutex_release(&g->cg_pg_lock);
 }
 
-void nvgpu_cg_slcg_fb_ltc_load_enable(struct gk20a *g)
+void nvgpu_cg_slcg_fb_load_enable(struct gk20a *g)
 {
 	nvgpu_log_fn(g, " ");
 
@@ -173,6 +185,18 @@ void nvgpu_cg_slcg_fb_ltc_load_enable(struct gk20a *g)
 	if (g->ops.cg.slcg_fb_load_gating_prod != NULL) {
 		g->ops.cg.slcg_fb_load_gating_prod(g, true);
 	}
+done:
+	nvgpu_mutex_release(&g->cg_pg_lock);
+}
+
+void nvgpu_cg_slcg_ltc_load_enable(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (!g->slcg_enabled) {
+		goto done;
+	}
 	if (g->ops.cg.slcg_ltc_load_gating_prod != NULL) {
 		g->ops.cg.slcg_ltc_load_gating_prod(g, true);
 	}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -108,12 +108,14 @@
  * + nvgpu_cg_init_gr_load_gating_prod()
  * + nvgpu_cg_elcg_enable_no_wait()
  * + nvgpu_cg_elcg_disable_no_wait()
- * + nvgpu_cg_blcg_fb_ltc_load_enable()
+ * + nvgpu_cg_blcg_fb_load_enable()
+ * + nvgpu_cg_blcg_ltc_load_enable()
  * + nvgpu_cg_blcg_fifo_load_enable()
  * + nvgpu_cg_blcg_pmu_load_enable()
  * + nvgpu_cg_blcg_ce_load_enable()
  * + nvgpu_cg_blcg_gr_load_enable()
- * + nvgpu_cg_slcg_fb_ltc_load_enable()
+ * + nvgpu_cg_slcg_fb_load_enable()
+ * + nvgpu_cg_slcg_ltc_load_enable()
  * + nvgpu_cg_slcg_priring_load_enable()
  * + nvgpu_cg_slcg_fifo_load_enable()
  * + nvgpu_cg_slcg_pmu_load_enable()
@@ -256,21 +258,39 @@ void nvgpu_cg_elcg_disable_no_wait(struct gk20a *g);
 /**
  * @brief During nvgpu power-on, as part of MM initialization,
  *        this function is called to load register configuration
- *        for BLCG for FB and LTC.
+ *        for BLCG for FB.
  *
  * @param g [in] The GPU driver struct.
  *
  * Checks the platform software capability blcg_enabled and programs registers
- * for configuring production gating values for BLCG for FB and LTC. This is
+ * for configuring production gating values for BLCG for FB. This is
  * called in #nvgpu_init_mm_support.
  *
  * Steps:
 * - Acquire the mutex #cg_pg_lock.
 * - Check if #blcg_enabled is set, else skip BLCG programming.
- * - Load BLCG prod settings for fb and ltc.
+ * - Load BLCG prod settings for fb.
 * - Release the mutex #cg_pg_lock.
 */
-void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g);
+void nvgpu_cg_blcg_fb_load_enable(struct gk20a *g);
 
+/**
+ * @brief During nvgpu power-on, as part of initialization,
+ *        this function is called to load register configuration
+ *        for BLCG for LTC.
+ *
+ * @param g [in] The GPU driver struct.
+ *
+ * Checks the platform software capability blcg_enabled and programs registers
+ * for configuring production gating values for BLCG for LTC.
+ *
+ * Steps:
+ * - Acquire the mutex #cg_pg_lock.
+ * - Check if #blcg_enabled is set, else skip BLCG programming.
+ * - Load BLCG prod settings for ltc.
+ * - Release the mutex #cg_pg_lock.
+ */
+void nvgpu_cg_blcg_ltc_load_enable(struct gk20a *g);
 
 /**
  * @brief During nvgpu power-on, while enabling FIFO, hardware
@@ -349,21 +369,39 @@ void nvgpu_cg_blcg_gr_load_enable(struct gk20a *g);
 /**
  * @brief During nvgpu power-on, as part of MM initialization,
  *        this function is called to load register configuration
- *        for SLCG for FB and LTC.
+ *        for SLCG for FB.
  *
  * @param g [in] The GPU driver struct.
  *
 * Checks the platform software capability slcg_enabled and programs registers
- * for configuring production gating values for SLCG for FB and LTC. This is
+ * for configuring production gating values for SLCG for FB. This is
 * called in #nvgpu_init_mm_support.
 *
 * Steps:
 * - Acquire the mutex #cg_pg_lock.
 * - Check if #slcg_enabled is set, else skip SLCG programming.
- * - Load SLCG prod settings for fb and ltc.
+ * - Load SLCG prod settings for fb.
 * - Release the mutex #cg_pg_lock.
 */
-void nvgpu_cg_slcg_fb_ltc_load_enable(struct gk20a *g);
+void nvgpu_cg_slcg_fb_load_enable(struct gk20a *g);
 
+/**
+ * @brief During nvgpu power-on, as part of initialization,
+ *        this function is called to load register configuration
+ *        for SLCG for LTC.
+ *
+ * @param g [in] The GPU driver struct.
+ *
+ * Checks the platform software capability slcg_enabled and programs registers
+ * for configuring production gating values for SLCG for LTC. This is
+ *
+ * Steps:
+ * - Acquire the mutex #cg_pg_lock.
+ * - Check if #slcg_enabled is set, else skip SLCG programming.
+ * - Load SLCG prod settings for ltc.
+ * - Release the mutex #cg_pg_lock.
+ */
+void nvgpu_cg_slcg_ltc_load_enable(struct gk20a *g);
 
 /**
  * @brief To enable privilege ring (PRI) to access h/w functionality,
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
 
 bitmap_find_next_zero_area
 fb_gv11b_write_mmu_fault_buffer_get
@@ -276,12 +276,14 @@ nvgpu_bug_unregister_cb
 nvgpu_can_busy
 nvgpu_ce_engine_interrupt_mask
 nvgpu_ce_init_support
-nvgpu_cg_blcg_fb_ltc_load_enable
+nvgpu_cg_blcg_fb_load_enable
+nvgpu_cg_blcg_ltc_load_enable
 nvgpu_cg_blcg_fifo_load_enable
 nvgpu_cg_blcg_pmu_load_enable
 nvgpu_cg_blcg_ce_load_enable
 nvgpu_cg_blcg_gr_load_enable
-nvgpu_cg_slcg_fb_ltc_load_enable
+nvgpu_cg_slcg_fb_load_enable
+nvgpu_cg_slcg_ltc_load_enable
 nvgpu_cg_slcg_priring_load_enable
 nvgpu_cg_slcg_fifo_load_enable
 nvgpu_cg_slcg_pmu_load_enable
@@ -1,4 +1,4 @@
-# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 
 bitmap_find_next_zero_area
 fb_gv11b_write_mmu_fault_buffer_get
@@ -284,12 +284,14 @@ nvgpu_bug_unregister_cb
 nvgpu_can_busy
 nvgpu_ce_engine_interrupt_mask
 nvgpu_ce_init_support
-nvgpu_cg_blcg_fb_ltc_load_enable
+nvgpu_cg_blcg_fb_load_enable
+nvgpu_cg_blcg_ltc_load_enable
 nvgpu_cg_blcg_fifo_load_enable
 nvgpu_cg_blcg_pmu_load_enable
 nvgpu_cg_blcg_ce_load_enable
 nvgpu_cg_blcg_gr_load_enable
-nvgpu_cg_slcg_fb_ltc_load_enable
+nvgpu_cg_slcg_fb_load_enable
+nvgpu_cg_slcg_ltc_load_enable
 nvgpu_cg_slcg_priring_load_enable
 nvgpu_cg_slcg_fifo_load_enable
 nvgpu_cg_slcg_pmu_load_enable
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2020-2021, NVIDIA CORPORATION. All Rights Reserved.
+# Copyright (c) 2020-2022, NVIDIA CORPORATION. All Rights Reserved.
 #
 # Permission is hereby granted, free of charge, to any person obtaining a
 # copy of this software and associated documentation files (the "Software"),
@@ -108,13 +108,15 @@ test_mthd_buffer_fault_in_bar2_fault.mthd_buffer_fault_in_bar2_fault=0
 [cg]
 init_test_env.init=0
 test_cg.blcg_ce=0
-test_cg.blcg_fb_ltc=0
+test_cg.blcg_fb=0
+test_cg.blcg_ltc=0
 test_cg.blcg_fifo=0
 test_cg.blcg_gr=0
 test_cg.blcg_gr_load_gating_prod=0
 test_cg.blcg_pmu=0
 test_cg.slcg_ce2=0
-test_cg.slcg_fb_ltc=0
+test_cg.slcg_fb=0
+test_cg.slcg_ltc=0
 test_cg.slcg_fifo=0
 test_cg.slcg_gr_load_gating_prod=0
 test_cg.slcg_pmu=0
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -49,10 +49,16 @@ struct cg_test_data {
 	u32 domain_desc_sizes[16];
 };
 
-static struct cg_test_data blcg_fb_ltc = {
+static struct cg_test_data blcg_fb = {
 	.cg_type = NVGPU_GPU_CAN_BLCG,
-	.load_enable = nvgpu_cg_blcg_fb_ltc_load_enable,
-	.domain_count = 2,
+	.load_enable = nvgpu_cg_blcg_fb_load_enable,
+	.domain_count = 1,
 };
 
+static struct cg_test_data blcg_ltc = {
+	.cg_type = NVGPU_GPU_CAN_BLCG,
+	.load_enable = nvgpu_cg_blcg_ltc_load_enable,
+	.domain_count = 1,
+};
+
 static struct cg_test_data blcg_fifo = {
@@ -79,10 +85,16 @@ static struct cg_test_data blcg_gr = {
 	.domain_count = 1,
 };
 
-static struct cg_test_data slcg_fb_ltc = {
+static struct cg_test_data slcg_fb = {
 	.cg_type = NVGPU_GPU_CAN_SLCG,
-	.load_enable = nvgpu_cg_slcg_fb_ltc_load_enable,
-	.domain_count = 2,
+	.load_enable = nvgpu_cg_slcg_fb_load_enable,
+	.domain_count = 1,
 };
 
+static struct cg_test_data slcg_ltc = {
+	.cg_type = NVGPU_GPU_CAN_SLCG,
+	.load_enable = nvgpu_cg_slcg_ltc_load_enable,
+	.domain_count = 1,
+};
+
 static struct cg_test_data slcg_priring = {
@@ -134,14 +146,14 @@ struct cg_test_data blcg_gr_load_gating_prod = {
 	tmp->domain_desc_sizes[0] = gv11b_blcg_##param##_gating_prod_size(); \
 })
 
-static void init_blcg_fb_ltc_data(struct gk20a *g)
+static void init_blcg_fb_data(struct gk20a *g)
 {
-	blcg_fb_ltc.domain_descs[0] = gv11b_blcg_fb_get_gating_prod();
-	blcg_fb_ltc.gating_funcs[0] = g->ops.cg.blcg_fb_load_gating_prod;
-	blcg_fb_ltc.domain_desc_sizes[0] = gv11b_blcg_fb_gating_prod_size();
-	blcg_fb_ltc.domain_descs[1] = gv11b_blcg_ltc_get_gating_prod();
-	blcg_fb_ltc.gating_funcs[1] = g->ops.cg.blcg_ltc_load_gating_prod;
-	blcg_fb_ltc.domain_desc_sizes[1] = gv11b_blcg_ltc_gating_prod_size();
+	INIT_BLCG_DOMAIN_TEST_DATA(fb);
 }
 
+static void init_blcg_ltc_data(struct gk20a *g)
+{
+	INIT_BLCG_DOMAIN_TEST_DATA(ltc);
+}
+
 static void init_blcg_fifo_data(struct gk20a *g)
@@ -199,14 +211,14 @@ static void init_blcg_gr_load_gating_data(struct gk20a *g)
 	tmp->domain_desc_sizes[0] = gv11b_slcg_##param##_gating_prod_size(); \
 })
 
-static void init_slcg_fb_ltc_data(struct gk20a *g)
+static void init_slcg_fb_data(struct gk20a *g)
 {
-	slcg_fb_ltc.domain_descs[0] = gv11b_slcg_fb_get_gating_prod();
-	slcg_fb_ltc.gating_funcs[0] = g->ops.cg.slcg_fb_load_gating_prod;
-	slcg_fb_ltc.domain_desc_sizes[0] = gv11b_slcg_fb_gating_prod_size();
-	slcg_fb_ltc.domain_descs[1] = gv11b_slcg_ltc_get_gating_prod();
-	slcg_fb_ltc.gating_funcs[1] = g->ops.cg.slcg_ltc_load_gating_prod;
-	slcg_fb_ltc.domain_desc_sizes[1] = gv11b_slcg_ltc_gating_prod_size();
+	INIT_SLCG_DOMAIN_TEST_DATA(fb);
 }
 
+static void init_slcg_ltc_data(struct gk20a *g)
+{
+	INIT_SLCG_DOMAIN_TEST_DATA(ltc);
+}
+
 static void init_slcg_priring_data(struct gk20a *g)
@@ -341,14 +353,16 @@ static int init_test_env(struct unit_module *m, struct gk20a *g, void *args)
 
 	gv11b_init_hal(g);
 
-	init_blcg_fb_ltc_data(g);
+	init_blcg_fb_data(g);
+	init_blcg_ltc_data(g);
 	init_blcg_fifo_data(g);
 	init_blcg_pmu_data(g);
 	init_blcg_ce_data(g);
 	init_blcg_gr_data(g);
 	init_blcg_gr_load_gating_data(g);
 
-	init_slcg_fb_ltc_data(g);
+	init_slcg_fb_data(g);
+	init_slcg_ltc_data(g);
 	init_slcg_priring_data(g);
 	init_slcg_fifo_data(g);
 	init_slcg_pmu_data(g);
@@ -686,13 +700,15 @@ int test_elcg(struct unit_module *m, struct gk20a *g, void *args)
 struct unit_module_test cg_tests[] = {
 	UNIT_TEST(init, init_test_env, NULL, 0),
 
-	UNIT_TEST(blcg_fb_ltc, test_cg, &blcg_fb_ltc, 0),
+	UNIT_TEST(blcg_fb, test_cg, &blcg_fb, 0),
+	UNIT_TEST(blcg_ltc, test_cg, &blcg_ltc, 0),
 	UNIT_TEST(blcg_fifo, test_cg, &blcg_fifo, 0),
 	UNIT_TEST(blcg_ce, test_cg, &blcg_ce, 0),
 	UNIT_TEST(blcg_pmu, test_cg, &blcg_pmu, 0),
 	UNIT_TEST(blcg_gr, test_cg, &blcg_gr, 0),
 
-	UNIT_TEST(slcg_fb_ltc, test_cg, &slcg_fb_ltc, 0),
+	UNIT_TEST(slcg_fb, test_cg, &slcg_fb, 0),
+	UNIT_TEST(slcg_ltc, test_cg, &slcg_ltc, 0),
 	UNIT_TEST(slcg_priring, test_cg, &slcg_priring, 0),
 	UNIT_TEST(slcg_fifo, test_cg, &slcg_fifo, 0),
 	UNIT_TEST(slcg_pmu, test_cg, &slcg_pmu, 0),
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -37,12 +37,13 @@ struct unit_module;
 *
 * Test Type: Feature
 *
- * Targets: nvgpu_cg_blcg_fb_ltc_load_enable, nvgpu_cg_blcg_fifo_load_enable,
+ * Targets: nvgpu_cg_blcg_fb_load_enable, nvgpu_cg_blcg_fifo_load_enable,
 * nvgpu_cg_blcg_ce_load_enable, nvgpu_cg_blcg_pmu_load_enable,
- * nvgpu_cg_blcg_gr_load_enable, nvgpu_cg_slcg_fb_ltc_load_enable,
+ * nvgpu_cg_blcg_gr_load_enable, nvgpu_cg_slcg_fb_load_enable,
 * nvgpu_cg_slcg_priring_load_enable, nvgpu_cg_slcg_fifo_load_enable,
 * nvgpu_cg_slcg_pmu_load_enable, nvgpu_cg_slcg_therm_load_enable,
- * nvgpu_cg_slcg_ce2_load_enable, nvgpu_cg_init_gr_load_gating_prod
+ * nvgpu_cg_slcg_ce2_load_enable, nvgpu_cg_init_gr_load_gating_prod,
+ * nvgpu_cg_blcg_ltc_load_enable, nvgpu_cg_slcg_ltc_load_enable
 *
 * Input: The struct specifying type of clock gating, target nvgpu routine
 * that handles the setup, clock gating domain descriptors.