gpu: nvgpu: Remove support for GP106

Delete gp106 HALs and GPUIDs
As a first part, the following are removed:
1. HAL files
2. GPUIDs and their checks in HAL init (see the sketch below)
3. Unused _gp106 files
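
For illustration, here is a minimal, self-contained sketch of what the GPUID
dispatch in nvgpu_init_hal() reduces to once the GP104/GP106 cases are gone.
Only the GPUID values are taken from the header touched by this change; the
*_init_hal() stubs, init_hal_sketch() and main() are stand-ins for
illustration, not the in-tree code.

/*
 * Sketch only: unknown GPU IDs (now including GP104/GP106) fall through
 * to the default branch and HAL init fails with -ENODEV.
 */
#include <errno.h>
#include <stdio.h>

#define NVGPU_GPUID_GP10B 0x0000013BU
#define NVGPU_GPUID_GV11B 0x0000015BU
#define NVGPU_GPUID_GV100 0x00000140U
#define NVGPU_GPUID_TU104 0x00000164U

/* stand-ins for the per-chip HAL init hooks */
static int gp10b_init_hal(void) { return 0; }
static int gv11b_init_hal(void) { return 0; }
static int gv100_init_hal(void) { return 0; }
static int tu104_init_hal(void) { return 0; }

static int init_hal_sketch(unsigned int ver)
{
    switch (ver) {
    case NVGPU_GPUID_GP10B:
        return gp10b_init_hal();
    case NVGPU_GPUID_GV11B:
        return gv11b_init_hal();
    case NVGPU_GPUID_GV100:
        return gv100_init_hal();
    case NVGPU_GPUID_TU104:
        return tu104_init_hal();
    default:
        /* GP104 (0x134) and GP106 (0x136) now land here */
        return -ENODEV;
    }
}

int main(void)
{
    /* 0x136 was NVGPU_GPUID_GP106; it is no longer recognized */
    printf("%d\n", init_hal_sketch(0x00000136U));
    return 0;
}

With the GPUID defines, the HAL init cases and the GP106/GP104 PCI device
table entries all removed, these boards no longer bind and any leftover ID
probe fails with -ENODEV, which is the point of the removal.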

Bug 200457373

Change-Id: Ic713e3ef728c006d5935ab638d6ff0e1583486d3
Signed-off-by: Abdul Salam <absalam@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1949495
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Abdul Salam
Date: 2018-12-13 11:33:50 +05:30
Committed by: mobile promotions
Parent: 66729df1bb
Commit: 8d2c1141d3
21 changed files with 7 additions and 4187 deletions


@@ -74,7 +74,6 @@ nvgpu-y += common/bus/bus_gk20a.o \
     common/netlist/netlist.o \
     common/netlist/netlist_sim.o \
     common/netlist/netlist_gm20b.o \
-    common/netlist/netlist_gp106.o \
     common/netlist/netlist_gp10b.o \
     common/netlist/netlist_gv100.o \
     common/netlist/netlist_gv11b.o \
@@ -87,7 +86,6 @@ nvgpu-y += \
     os/linux/os_ops.o \
     os/linux/os_ops_gm20b.o \
     os/linux/os_ops_gp10b.o \
-    os/linux/os_ops_gp106.o \
     os/linux/os_ops_gv100.o \
     os/linux/os_ops_gv11b.o \
     os/linux/kmem.o \
@@ -334,12 +332,9 @@ nvgpu-y += \
     gp10b/fecs_trace_gp10b.o \
     gp10b/gp10b.o \
     gp10b/ecc_gp10b.o \
-    gp106/hal_gp106.o \
     gp106/mclk_gp106.o \
     gp106/gr_gp106.o \
     gp106/sec2_gp106.o \
-    gp106/fifo_gp106.o \
-    gp106/regops_gp106.o \
     gp106/bios_gp106.o \
     gv11b/gv11b.o \
     gv11b/hal_gv11b.o \
@@ -385,7 +380,6 @@ nvgpu-y += \
     pmu_perf/change_seq.o \
     clk/clk.o \
     gp106/clk_gp106.o \
-    gp106/clk_arb_gp106.o \
     pmgr/pwrdev.o \
     pmgr/pmgr.o \
     pmgr/pmgrpmu.o \


@@ -112,7 +112,6 @@ srcs := os/posix/nvgpu.c \
     common/netlist/netlist.c \
     common/netlist/netlist_sim.c \
     common/netlist/netlist_gm20b.c \
-    common/netlist/netlist_gp106.c \
     common/netlist/netlist_gp10b.c \
     common/netlist/netlist_gv100.c \
     common/netlist/netlist_gv11b.c \
@@ -221,15 +220,11 @@ srcs := os/posix/nvgpu.c \
     gv11b/subctx_gv11b.c \
     gv11b/regops_gv11b.c \
     gv11b/ecc_gv11b.c \
-    gp106/hal_gp106.c \
     gp106/mclk_gp106.c \
     gp106/gr_gp106.c \
     gp106/sec2_gp106.c \
-    gp106/fifo_gp106.c \
-    gp106/regops_gp106.c \
     gp106/bios_gp106.c \
     gp106/clk_gp106.c \
-    gp106/clk_arb_gp106.c \
     gv100/mm_gv100.c \
     gv100/bios_gv100.c \
     gv100/fifo_gv100.c \


@@ -30,7 +30,6 @@
 #include "gm20b/hal_gm20b.h"
 #include "gp10b/hal_gp10b.h"
-#include "gp106/hal_gp106.h"
 #include "gv100/hal_gv100.h"
 #include "gv11b/hal_gv11b.h"
 #include "tu104/hal_tu104.h"
@@ -58,16 +57,6 @@ int nvgpu_init_hal(struct gk20a *g)
             return -ENODEV;
         }
         break;
-    case NVGPU_GPUID_GP104:
-        if (gp106_init_hal(g) != 0) {
-            return -ENODEV;
-        }
-        break;
-    case NVGPU_GPUID_GP106:
-        if (gp106_init_hal(g) != 0) {
-            return -ENODEV;
-        }
-        break;
     case NVGPU_GPUID_GV11B:
         if (gv11b_init_hal(g) != 0) {
             return -ENODEV;


@@ -1,51 +0,0 @@
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/netlist.h>
#include "netlist_gp106.h"
int gp106_netlist_get_name(struct gk20a *g, int index, char *name)
{
u32 ver = g->params.gpu_arch + g->params.gpu_impl;
switch (ver) {
case NVGPU_GPUID_GP104:
(void) sprintf(name, "%s/%s", "gp104",
GP104_NETLIST_IMAGE_FW_NAME);
break;
case NVGPU_GPUID_GP106:
(void) sprintf(name, "%s/%s", "gp106",
GP106_NETLIST_IMAGE_FW_NAME);
break;
default:
nvgpu_err(g, "no support for GPUID %x", ver);
}
return 0;
}
bool gp106_netlist_is_firmware_defined(void)
{
return true;
}


@@ -1,34 +0,0 @@
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NETLIST_GP106_H
#define NVGPU_NETLIST_GP106_H
#include <nvgpu/netlist.h>
/* production netlist, one and only one from below */
#define GP106_NETLIST_IMAGE_FW_NAME NVGPU_NETLIST_IMAGE_C
#define GP104_NETLIST_IMAGE_FW_NAME NVGPU_NETLIST_IMAGE_D
int gp106_netlist_get_name(struct gk20a *g, int index, char *name);
bool gp106_netlist_is_firmware_defined(void);
#endif /* NVGPU_NETLIST_GP106_H */


@@ -194,16 +194,6 @@ int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
     int err;
     switch (ver) {
-    case NVGPU_GPUID_GP104:
-        fecs_sig = nvgpu_request_firmware(g,
-                GP104_FECS_UCODE_SIG,
-                NVGPU_REQUEST_FIRMWARE_NO_SOC);
-        break;
-    case NVGPU_GPUID_GP106:
-        fecs_sig = nvgpu_request_firmware(g,
-                GP106_FECS_UCODE_SIG,
-                NVGPU_REQUEST_FIRMWARE_NO_SOC);
-        break;
     case NVGPU_GPUID_GV11B:
         fecs_sig = nvgpu_request_firmware(g,
                 GM20B_FECS_UCODE_SIG, 0);
@@ -296,16 +286,6 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
     }
     switch (ver) {
-    case NVGPU_GPUID_GP104:
-        gpccs_sig = nvgpu_request_firmware(g,
-                GP104_GPCCS_UCODE_SIG,
-                NVGPU_REQUEST_FIRMWARE_NO_SOC);
-        break;
-    case NVGPU_GPUID_GP106:
-        gpccs_sig = nvgpu_request_firmware(g,
-                GP106_GPCCS_UCODE_SIG,
-                NVGPU_REQUEST_FIRMWARE_NO_SOC);
-        break;
     case NVGPU_GPUID_GV11B:
         gpccs_sig = nvgpu_request_firmware(g,
                 T18x_GPCCS_UCODE_SIG, 0);


@@ -23,12 +23,6 @@
 #ifndef NVGPU_ACR_GP106_H
 #define NVGPU_ACR_GP106_H
-#define GP106_FECS_UCODE_SIG "gp106/fecs_sig.bin"
-#define GP106_GPCCS_UCODE_SIG "gp106/gpccs_sig.bin"
-#define GP104_FECS_UCODE_SIG "gp104/fecs_sig.bin"
-#define GP104_GPCCS_UCODE_SIG "gp104/gpccs_sig.bin"
 int gp106_bootstrap_hs_flcn(struct gk20a *g);
 int gp106_prepare_ucode_blob(struct gk20a *g);
 int gp106_alloc_blob_space(struct gk20a *g,


@@ -40,10 +40,6 @@ static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
         unit_id = PMU_UNIT_PERFMON;
         break;
     case NVGPU_GPUID_GP10B:
-    case NVGPU_GPUID_GP104:
-    case NVGPU_GPUID_GP106:
-        unit_id = PMU_UNIT_PERFMON_T18X;
-        break;
     case NVGPU_GPUID_GV11B:
         unit_id = PMU_UNIT_PERFMON_T18X;
         break;


@@ -1,792 +0,0 @@
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/clk_arb.h>
#include <nvgpu/string.h>
#include "clk_arb_gp106.h"
u32 gp106_get_arbiter_clk_domains(struct gk20a *g)
{
(void)g;
return (CTRL_CLK_DOMAIN_MCLK|CTRL_CLK_DOMAIN_GPC2CLK);
}
int gp106_get_arbiter_f_points(struct gk20a *g,u32 api_domain,
u32 *num_points, u16 *freqs_in_mhz)
{
return g->ops.clk.clk_domain_get_f_points(g,
api_domain, num_points, freqs_in_mhz);
}
int gp106_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
u16 *min_mhz, u16 *max_mhz)
{
u32 clkwhich;
struct clk_set_info *p0_info;
struct clk_set_info *p5_info;
struct avfsfllobjs *pfllobjs = &(g->clk_pmu->avfs_fllobjs);
u16 limit_min_mhz;
switch (api_domain) {
case CTRL_CLK_DOMAIN_MCLK:
clkwhich = CLKWHICH_MCLK;
break;
case CTRL_CLK_DOMAIN_GPC2CLK:
clkwhich = CLKWHICH_GPC2CLK;
break;
default:
return -EINVAL;
}
p5_info = pstate_get_clk_set_info(g,
CTRL_PERF_PSTATE_P5, clkwhich);
if (p5_info == NULL) {
return -EINVAL;
}
p0_info = pstate_get_clk_set_info(g,
CTRL_PERF_PSTATE_P0, clkwhich);
if (p0_info == NULL) {
return -EINVAL;
}
limit_min_mhz = p5_info->min_mhz;
/* WAR for DVCO min */
if (api_domain == CTRL_CLK_DOMAIN_GPC2CLK) {
if ((pfllobjs->max_min_freq_mhz != 0U) &&
(pfllobjs->max_min_freq_mhz >= limit_min_mhz)) {
limit_min_mhz = pfllobjs->max_min_freq_mhz + 1U;
}
}
*min_mhz = limit_min_mhz;
*max_mhz = p0_info->max_mhz;
return 0;
}
int gp106_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
u16 *default_mhz)
{
u32 clkwhich;
struct clk_set_info *p0_info;
switch (api_domain) {
case CTRL_CLK_DOMAIN_MCLK:
clkwhich = CLKWHICH_MCLK;
break;
case CTRL_CLK_DOMAIN_GPC2CLK:
clkwhich = CLKWHICH_GPC2CLK;
break;
default:
return -EINVAL;
}
p0_info = pstate_get_clk_set_info(g,
CTRL_PERF_PSTATE_P0, clkwhich);
if (p0_info == NULL) {
return -EINVAL;
}
*default_mhz = p0_info->max_mhz;
return 0;
}
int gp106_init_clk_arbiter(struct gk20a *g)
{
struct nvgpu_clk_arb *arb;
u16 default_mhz;
int err;
int index;
struct nvgpu_clk_vf_table *table;
clk_arb_dbg(g, " ");
if (g->clk_arb != NULL) {
return 0;
}
arb = nvgpu_kzalloc(g, sizeof(struct nvgpu_clk_arb));
if (arb == NULL) {
return -ENOMEM;
}
err = nvgpu_mutex_init(&arb->pstate_lock);
if (err != 0) {
goto mutex_fail;
}
nvgpu_spinlock_init(&arb->sessions_lock);
nvgpu_spinlock_init(&arb->users_lock);
nvgpu_spinlock_init(&arb->requests_lock);
arb->mclk_f_points = nvgpu_kcalloc(g, MAX_F_POINTS, sizeof(u16));
if (arb->mclk_f_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
arb->gpc2clk_f_points = nvgpu_kcalloc(g, MAX_F_POINTS, sizeof(u16));
if (arb->gpc2clk_f_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
for (index = 0; index < 2; index++) {
table = &arb->vf_table_pool[index];
table->gpc2clk_num_points = MAX_F_POINTS;
table->mclk_num_points = MAX_F_POINTS;
table->gpc2clk_points = nvgpu_kcalloc(g, MAX_F_POINTS,
sizeof(struct nvgpu_clk_vf_point));
if (table->gpc2clk_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
table->mclk_points = nvgpu_kcalloc(g, MAX_F_POINTS,
sizeof(struct nvgpu_clk_vf_point));
if (table->mclk_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
}
g->clk_arb = arb;
arb->g = g;
err = g->ops.clk_arb.get_arbiter_clk_default(g,
CTRL_CLK_DOMAIN_MCLK, &default_mhz);
if (err < 0) {
err = -EINVAL;
goto init_fail;
}
arb->mclk_default_mhz = default_mhz;
err = g->ops.clk_arb.get_arbiter_clk_default(g,
CTRL_CLK_DOMAIN_GPC2CLK, &default_mhz);
if (err < 0) {
err = -EINVAL;
goto init_fail;
}
arb->gpc2clk_default_mhz = default_mhz;
arb->actual = &arb->actual_pool[0];
nvgpu_atomic_set(&arb->req_nr, 0);
nvgpu_atomic64_set(&arb->alarm_mask, 0);
err = nvgpu_clk_notification_queue_alloc(g, &arb->notification_queue,
DEFAULT_EVENT_NUMBER);
if (err < 0) {
goto init_fail;
}
nvgpu_init_list_node(&arb->users);
nvgpu_init_list_node(&arb->sessions);
nvgpu_init_list_node(&arb->requests);
nvgpu_cond_init(&arb->request_wq);
nvgpu_init_list_node(&arb->update_vf_table_work_item.worker_item);
nvgpu_init_list_node(&arb->update_arb_work_item.worker_item);
arb->update_vf_table_work_item.arb = arb;
arb->update_arb_work_item.arb = arb;
arb->update_vf_table_work_item.item_type = CLK_ARB_WORK_UPDATE_VF_TABLE;
arb->update_arb_work_item.item_type = CLK_ARB_WORK_UPDATE_ARB;
err = nvgpu_clk_arb_worker_init(g);
if (err < 0) {
goto init_fail;
}
#ifdef CONFIG_DEBUG_FS
arb->debug = &arb->debug_pool[0];
if (!arb->debugfs_set) {
if (nvgpu_clk_arb_debugfs_init(g))
arb->debugfs_set = true;
}
#endif
err = clk_vf_point_cache(g);
if (err < 0) {
goto init_fail;
}
err = nvgpu_clk_arb_update_vf_table(arb);
if (err < 0) {
goto init_fail;
}
do {
/* Check that first run is completed */
nvgpu_smp_mb();
NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
nvgpu_atomic_read(&arb->req_nr), 0);
} while (nvgpu_atomic_read(&arb->req_nr) == 0);
return arb->status;
init_fail:
nvgpu_kfree(g, arb->gpc2clk_f_points);
nvgpu_kfree(g, arb->mclk_f_points);
for (index = 0; index < 2; index++) {
nvgpu_kfree(g, arb->vf_table_pool[index].gpc2clk_points);
nvgpu_kfree(g, arb->vf_table_pool[index].mclk_points);
}
nvgpu_mutex_destroy(&arb->pstate_lock);
mutex_fail:
nvgpu_kfree(g, arb);
return err;
}
static u8 nvgpu_clk_arb_find_vf_point(struct nvgpu_clk_arb *arb,
u16 *gpc2clk, u16 *sys2clk, u16 *xbar2clk, u16 *mclk,
u32 *voltuv, u32 *voltuv_sram, u32 *nuvmin, u32 *nuvmin_sram)
{
u16 gpc2clk_target, mclk_target;
u32 gpc2clk_voltuv, gpc2clk_voltuv_sram;
u32 mclk_voltuv, mclk_voltuv_sram;
u32 pstate = VF_POINT_INVALID_PSTATE;
struct nvgpu_clk_vf_table *table;
u32 index, index_mclk;
struct nvgpu_clk_vf_point *mclk_vf = NULL;
do {
gpc2clk_target = *gpc2clk;
mclk_target = *mclk;
gpc2clk_voltuv = 0;
gpc2clk_voltuv_sram = 0;
mclk_voltuv = 0;
mclk_voltuv_sram = 0;
table = NV_ACCESS_ONCE(arb->current_vf_table);
/* pointer to table can be updated by callback */
nvgpu_smp_rmb();
if (table == NULL) {
continue;
}
if ((table->gpc2clk_num_points == 0U) ||
(table->mclk_num_points == 0U)) {
nvgpu_err(arb->g, "found empty table");
goto find_exit;
}
/* First we check MCLK to find out which PSTATE we are
* are requesting, and from there try to find the minimum
* GPC2CLK on the same PSTATE that satisfies the request.
* If no GPC2CLK can be found, then we need to up the PSTATE
*/
recalculate_vf_point:
for (index = 0; index < table->mclk_num_points; index++) {
if (table->mclk_points[index].mem_mhz >= mclk_target) {
mclk_vf = &table->mclk_points[index];
break;
}
}
if (index == table->mclk_num_points) {
mclk_vf = &table->mclk_points[index-1U];
index = table->mclk_num_points - 1U;
}
index_mclk = index;
/* round up the freq requests */
for (index = 0; index < table->gpc2clk_num_points; index++) {
pstate = VF_POINT_COMMON_PSTATE(
&table->gpc2clk_points[index], mclk_vf);
if ((table->gpc2clk_points[index].gpc_mhz >=
gpc2clk_target) &&
(pstate != VF_POINT_INVALID_PSTATE)) {
gpc2clk_target =
table->gpc2clk_points[index].gpc_mhz;
*sys2clk =
table->gpc2clk_points[index].sys_mhz;
*xbar2clk =
table->gpc2clk_points[index].xbar_mhz;
gpc2clk_voltuv =
table->gpc2clk_points[index].uvolt;
gpc2clk_voltuv_sram =
table->gpc2clk_points[index].uvolt_sram;
break;
}
}
if (index == table->gpc2clk_num_points) {
pstate = VF_POINT_COMMON_PSTATE(
&table->gpc2clk_points[index-1U], mclk_vf);
if (pstate != VF_POINT_INVALID_PSTATE) {
gpc2clk_target =
table->gpc2clk_points[index-1U].gpc_mhz;
*sys2clk =
table->gpc2clk_points[index-1U].sys_mhz;
*xbar2clk =
table->gpc2clk_points[index-1U].xbar_mhz;
gpc2clk_voltuv =
table->gpc2clk_points[index-1U].uvolt;
gpc2clk_voltuv_sram =
table->gpc2clk_points[index-1U].
uvolt_sram;
} else if (index_mclk >= table->mclk_num_points - 1U) {
/* There is no available combination of MCLK
* and GPC2CLK, we need to fail this
*/
gpc2clk_target = 0;
mclk_target = 0;
pstate = VF_POINT_INVALID_PSTATE;
goto find_exit;
} else {
/* recalculate with higher PSTATE */
gpc2clk_target = *gpc2clk;
mclk_target = table->mclk_points[index_mclk+1U].
mem_mhz;
goto recalculate_vf_point;
}
}
mclk_target = mclk_vf->mem_mhz;
mclk_voltuv = mclk_vf->uvolt;
mclk_voltuv_sram = mclk_vf->uvolt_sram;
} while ((table == NULL) ||
(NV_ACCESS_ONCE(arb->current_vf_table) != table));
find_exit:
*voltuv = gpc2clk_voltuv > mclk_voltuv ? gpc2clk_voltuv : mclk_voltuv;
*voltuv_sram = gpc2clk_voltuv_sram > mclk_voltuv_sram ?
gpc2clk_voltuv_sram : mclk_voltuv_sram;
/* noise unaware vmin */
*nuvmin = mclk_voltuv;
*nuvmin_sram = mclk_voltuv_sram;
*gpc2clk = gpc2clk_target < *gpc2clk ? gpc2clk_target : *gpc2clk;
*mclk = mclk_target;
return pstate;
}
static int nvgpu_clk_arb_change_vf_point(struct gk20a *g, u16 gpc2clk_target,
u16 sys2clk_target, u16 xbar2clk_target, u16 mclk_target, u32 voltuv,
u32 voltuv_sram)
{
struct set_fll_clk fllclk;
struct nvgpu_clk_arb *arb = g->clk_arb;
int status;
fllclk.gpc2clkmhz = gpc2clk_target;
fllclk.sys2clkmhz = sys2clk_target;
fllclk.xbar2clkmhz = xbar2clk_target;
fllclk.voltuv = voltuv;
/* if voltage ascends we do:
* (1) FLL change
* (2) Voltage change
* (3) MCLK change
* If it goes down
* (1) MCLK change
* (2) Voltage change
* (3) FLL change
*/
/* descending */
if (voltuv < arb->voltuv_actual) {
status = g->ops.clk.mclk_change(g, mclk_target);
if (status < 0) {
return status;
}
status = volt_set_voltage(g, voltuv, voltuv_sram);
if (status < 0) {
return status;
}
status = clk_set_fll_clks(g, &fllclk);
if (status < 0) {
return status;
}
} else {
status = clk_set_fll_clks(g, &fllclk);
if (status < 0) {
return status;
}
status = volt_set_voltage(g, voltuv, voltuv_sram);
if (status < 0) {
return status;
}
status = g->ops.clk.mclk_change(g, mclk_target);
if (status < 0) {
return status;
}
}
return 0;
}
void gp106_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
{
struct nvgpu_clk_session *session;
struct nvgpu_clk_dev *dev;
struct nvgpu_clk_dev *tmp;
struct nvgpu_clk_arb_target *target, *actual;
struct gk20a *g = arb->g;
u32 pstate = VF_POINT_INVALID_PSTATE;
u32 voltuv, voltuv_sram;
bool mclk_set, gpc2clk_set;
u32 nuvmin, nuvmin_sram;
u32 alarms_notified = 0;
u32 current_alarm;
int status = 0;
/* Temporary variables for checking target frequency */
u16 gpc2clk_target, sys2clk_target, xbar2clk_target, mclk_target;
u16 gpc2clk_session_target, mclk_session_target;
#ifdef CONFIG_DEBUG_FS
u64 t0, t1;
struct nvgpu_clk_arb_debug *debug;
#endif
clk_arb_dbg(g, " ");
/* bail out if gpu is down */
if ((unsigned long)nvgpu_atomic64_read(&arb->alarm_mask) &
EVENT(ALARM_GPU_LOST)) {
goto exit_arb;
}
#ifdef CONFIG_DEBUG_FS
g->ops.ptimer.read_ptimer(g, &t0);
#endif
/* Only one arbiter should be running */
gpc2clk_target = 0;
mclk_target = 0;
nvgpu_spinlock_acquire(&arb->sessions_lock);
nvgpu_list_for_each_entry(session, &arb->sessions,
nvgpu_clk_session, link) {
if (!session->zombie) {
mclk_set = false;
gpc2clk_set = false;
target = (session->target == &session->target_pool[0] ?
&session->target_pool[1] :
&session->target_pool[0]);
nvgpu_spinlock_acquire(&session->session_lock);
if (!nvgpu_list_empty(&session->targets)) {
/* Copy over state */
target->mclk = session->target->mclk;
target->gpc2clk = session->target->gpc2clk;
/* Query the latest committed request */
nvgpu_list_for_each_entry_safe(dev, tmp,
&session->targets, nvgpu_clk_dev, node) {
if (!mclk_set &&
(dev->mclk_target_mhz != 0U)) {
target->mclk =
dev->mclk_target_mhz;
mclk_set = true;
}
if (!gpc2clk_set &&
(dev->gpc2clk_target_mhz != 0U)) {
target->gpc2clk =
dev->gpc2clk_target_mhz;
gpc2clk_set = true;
}
nvgpu_ref_get(&dev->refcount);
nvgpu_list_del(&dev->node);
nvgpu_spinlock_acquire(
&arb->requests_lock);
nvgpu_list_add(
&dev->node, &arb->requests);
nvgpu_spinlock_release(&arb->requests_lock);
}
session->target = target;
}
nvgpu_spinlock_release(
&session->session_lock);
mclk_target = mclk_target > session->target->mclk ?
mclk_target : session->target->mclk;
gpc2clk_target =
gpc2clk_target > session->target->gpc2clk ?
gpc2clk_target : session->target->gpc2clk;
}
}
nvgpu_spinlock_release(&arb->sessions_lock);
gpc2clk_target = (gpc2clk_target > 0U) ? gpc2clk_target :
arb->gpc2clk_default_mhz;
if (gpc2clk_target < arb->gpc2clk_min) {
gpc2clk_target = arb->gpc2clk_min;
}
if (gpc2clk_target > arb->gpc2clk_max) {
gpc2clk_target = arb->gpc2clk_max;
}
mclk_target = (mclk_target > 0U) ? mclk_target :
arb->mclk_default_mhz;
if (mclk_target < arb->mclk_min) {
mclk_target = arb->mclk_min;
}
if (mclk_target > arb->mclk_max) {
mclk_target = arb->mclk_max;
}
sys2clk_target = 0;
xbar2clk_target = 0;
gpc2clk_session_target = gpc2clk_target;
mclk_session_target = mclk_target;
/* Query the table for the closest vf point to program */
pstate = nvgpu_clk_arb_find_vf_point(arb, &gpc2clk_target,
&sys2clk_target, &xbar2clk_target, &mclk_target, &voltuv,
&voltuv_sram, &nuvmin, &nuvmin_sram);
if (pstate == VF_POINT_INVALID_PSTATE) {
arb->status = -EINVAL;
/* make status visible */
nvgpu_smp_mb();
goto exit_arb;
}
if ((gpc2clk_target < gpc2clk_session_target) ||
(mclk_target < mclk_session_target)) {
nvgpu_clk_arb_set_global_alarm(g,
EVENT(ALARM_TARGET_VF_NOT_POSSIBLE));
}
if ((arb->actual->gpc2clk == gpc2clk_target) &&
(arb->actual->mclk == mclk_target) &&
(arb->voltuv_actual == voltuv)) {
goto exit_arb;
}
/* Program clocks */
/* A change in both mclk of gpc2clk may require a change in voltage */
nvgpu_mutex_acquire(&arb->pstate_lock);
status = nvgpu_lpwr_disable_pg(g, false);
status = clk_pmu_freq_controller_load(g, false,
CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL);
if (status < 0) {
arb->status = status;
nvgpu_mutex_release(&arb->pstate_lock);
/* make status visible */
nvgpu_smp_mb();
goto exit_arb;
}
status = volt_set_noiseaware_vmin(g, nuvmin, nuvmin_sram);
if (status < 0) {
arb->status = status;
nvgpu_mutex_release(&arb->pstate_lock);
/* make status visible */
nvgpu_smp_mb();
goto exit_arb;
}
status = nvgpu_clk_arb_change_vf_point(g, gpc2clk_target,
sys2clk_target, xbar2clk_target, mclk_target, voltuv,
voltuv_sram);
if (status < 0) {
arb->status = status;
nvgpu_mutex_release(&arb->pstate_lock);
/* make status visible */
nvgpu_smp_mb();
goto exit_arb;
}
status = clk_pmu_freq_controller_load(g, true,
CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL);
if (status < 0) {
arb->status = status;
nvgpu_mutex_release(&arb->pstate_lock);
/* make status visible */
nvgpu_smp_mb();
goto exit_arb;
}
status = nvgpu_lwpr_mclk_change(g, pstate);
if (status < 0) {
arb->status = status;
nvgpu_mutex_release(&arb->pstate_lock);
/* make status visible */
nvgpu_smp_mb();
goto exit_arb;
}
actual = NV_ACCESS_ONCE(arb->actual) == &arb->actual_pool[0] ?
&arb->actual_pool[1] : &arb->actual_pool[0];
/* do not reorder this pointer */
nvgpu_smp_rmb();
actual->gpc2clk = gpc2clk_target;
actual->mclk = mclk_target;
arb->voltuv_actual = voltuv;
actual->pstate = pstate;
arb->status = status;
/* Make changes visible to other threads */
nvgpu_smp_wmb();
arb->actual = actual;
status = nvgpu_lpwr_enable_pg(g, false);
if (status < 0) {
arb->status = status;
nvgpu_mutex_release(&arb->pstate_lock);
/* make status visible */
nvgpu_smp_mb();
goto exit_arb;
}
/* status must be visible before atomic inc */
nvgpu_smp_wmb();
nvgpu_atomic_inc(&arb->req_nr);
/* Unlock pstate change for PG */
nvgpu_mutex_release(&arb->pstate_lock);
/* VF Update complete */
nvgpu_clk_arb_set_global_alarm(g, EVENT(VF_UPDATE));
nvgpu_cond_signal_interruptible(&arb->request_wq);
#ifdef CONFIG_DEBUG_FS
g->ops.ptimer.read_ptimer(g, &t1);
debug = arb->debug == &arb->debug_pool[0] ?
&arb->debug_pool[1] : &arb->debug_pool[0];
nvgpu_memcpy((u8 *)debug, (u8 *)arb->debug, sizeof(arb->debug_pool[0]));
debug->switch_num++;
if (debug->switch_num == 1) {
debug->switch_max = debug->switch_min =
debug->switch_avg = (t1-t0)/1000;
debug->switch_std = 0;
} else {
s64 prev_avg;
s64 curr = (t1-t0)/1000;
debug->switch_max = curr > debug->switch_max ?
curr : debug->switch_max;
debug->switch_min = debug->switch_min ?
(curr < debug->switch_min ?
curr : debug->switch_min) : curr;
prev_avg = debug->switch_avg;
debug->switch_avg = (curr +
(debug->switch_avg * (debug->switch_num-1))) /
debug->switch_num;
debug->switch_std +=
(curr - debug->switch_avg) * (curr - prev_avg);
}
/* commit changes before exchanging debug pointer */
nvgpu_smp_wmb();
arb->debug = debug;
#endif
exit_arb:
if (status < 0) {
nvgpu_err(g, "Error in arbiter update");
nvgpu_clk_arb_set_global_alarm(g,
EVENT(ALARM_CLOCK_ARBITER_FAILED));
}
current_alarm = (u32) nvgpu_atomic64_read(&arb->alarm_mask);
/* notify completion for all requests */
nvgpu_spinlock_acquire(&arb->requests_lock);
nvgpu_list_for_each_entry_safe(dev, tmp, &arb->requests,
nvgpu_clk_dev, node) {
nvgpu_atomic_set(&dev->poll_mask,
NVGPU_POLLIN | NVGPU_POLLRDNORM);
nvgpu_clk_arb_event_post_event(dev);
nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
nvgpu_list_del(&dev->node);
}
nvgpu_spinlock_release(&arb->requests_lock);
nvgpu_atomic_set(&arb->notification_queue.head,
nvgpu_atomic_read(&arb->notification_queue.tail));
/* notify event for all users */
nvgpu_spinlock_acquire(&arb->users_lock);
nvgpu_list_for_each_entry(dev, &arb->users, nvgpu_clk_dev, link) {
alarms_notified |=
nvgpu_clk_arb_notify(dev, arb->actual, current_alarm);
}
nvgpu_spinlock_release(&arb->users_lock);
/* clear alarms */
nvgpu_clk_arb_clear_global_alarm(g, alarms_notified &
~EVENT(ALARM_GPU_LOST));
}
void gp106_clk_arb_cleanup(struct nvgpu_clk_arb *arb)
{
struct gk20a *g = arb->g;
int index;
nvgpu_kfree(g, arb->gpc2clk_f_points);
nvgpu_kfree(g, arb->mclk_f_points);
for (index = 0; index < 2; index++) {
nvgpu_kfree(g,
arb->vf_table_pool[index].gpc2clk_points);
nvgpu_kfree(g, arb->vf_table_pool[index].mclk_points);
}
nvgpu_mutex_destroy(&g->clk_arb->pstate_lock);
nvgpu_kfree(g, g->clk_arb);
g->clk_arb = NULL;
}


@@ -1,39 +0,0 @@
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef CLK_ARB_GP106_H
#define CLK_ARB_GP106_H
struct nvgpu_clk_session;
struct nvgpu_clk_arb;
u32 gp106_get_arbiter_clk_domains(struct gk20a *g);
int gp106_get_arbiter_f_points(struct gk20a *g,u32 api_domain,
u32 *num_points, u16 *freqs_in_mhz);
int gp106_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
u16 *min_mhz, u16 *max_mhz);
int gp106_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
u16 *default_mhz);
int gp106_init_clk_arbiter(struct gk20a *g);
void gp106_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb);
void gp106_clk_arb_cleanup(struct nvgpu_clk_arb *arb);
#endif /* CLK_ARB_GP106_H */


@@ -1,107 +0,0 @@
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>
#include "gp10b/fifo_gp10b.h"
#include "fifo_gp106.h"
#include <nvgpu/hw/gp106/hw_ccsr_gp106.h>
#include <nvgpu/hw/gp106/hw_fifo_gp106.h>
u32 gp106_fifo_get_num_fifos(struct gk20a *g)
{
return ccsr_channel__size_1_v();
}
static const char * const gp106_hub_client_descs[] = {
"vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
"host cpu nb", "iso", "mmu", "mspdec", "msppp", "msvld",
"niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
"scc nb", "sec", "ssync", "gr copy", "xv", "mmu nb",
"msenc", "d falcon", "sked", "a falcon", "n/a",
"hsce0", "hsce1", "hsce2", "hsce3", "hsce4", "hsce5",
"hsce6", "hsce7", "hsce8", "hsce9", "hshub",
"ptp x0", "ptp x1", "ptp x2", "ptp x3", "ptp x4",
"ptp x5", "ptp x6", "ptp x7", "vpr scrubber0", "vpr scrubber1",
"dwbif", "fbfalcon",
};
static const char * const gp106_gpc_client_descs[] = {
"l1 0", "t1 0", "pe 0",
"l1 1", "t1 1", "pe 1",
"l1 2", "t1 2", "pe 2",
"l1 3", "t1 3", "pe 3",
"rast", "gcc", "gpccs",
"prop 0", "prop 1", "prop 2", "prop 3",
"l1 4", "t1 4", "pe 4",
"l1 5", "t1 5", "pe 5",
"l1 6", "t1 6", "pe 6",
"l1 7", "t1 7", "pe 7",
"l1 9", "t1 9", "pe 9",
"l1 10", "t1 10", "pe 10",
"l1 11", "t1 11", "pe 11",
"unknown", "unknown", "unknown", "unknown",
"tpccs 0", "tpccs 1", "tpccs 2",
"tpccs 3", "tpccs 4", "tpccs 5",
"tpccs 6", "tpccs 7", "tpccs 8",
"tpccs 9", "tpccs 10", "tpccs 11",
"tpccs 12", "tpccs 13", "tpccs 14",
"tpccs 15", "tpccs 16", "tpccs 17",
"tpccs 18", "tpccs 19", "unknown", "unknown",
"unknown", "unknown", "unknown", "unknown",
"unknown", "unknown", "unknown", "unknown",
"unknown", "unknown",
"l1 12", "t1 12", "pe 12",
"l1 13", "t1 13", "pe 13",
"l1 14", "t1 14", "pe 14",
"l1 15", "t1 15", "pe 15",
"l1 16", "t1 16", "pe 16",
"l1 17", "t1 17", "pe 17",
"l1 18", "t1 18", "pe 18",
"l1 19", "t1 19", "pe 19",
};
void gp106_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->client_id >= ARRAY_SIZE(gp106_gpc_client_descs)) {
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gp106_gpc_client_descs));
} else {
mmfault->client_id_desc =
gp106_gpc_client_descs[mmfault->client_id];
}
}
/* fill in mmu fault client description */
void gp106_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->client_id >= ARRAY_SIZE(gp106_hub_client_descs)) {
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gp106_hub_client_descs));
} else {
mmfault->client_id_desc =
gp106_hub_client_descs[mmfault->client_id];
}
}


@@ -1,31 +0,0 @@
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FIFO_GP106_H
#define NVGPU_FIFO_GP106_H
struct gk20a;
struct mmu_fault_info;
u32 gp106_fifo_get_num_fifos(struct gk20a *g);
void gp106_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault);
void gp106_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault);
#endif


File diff suppressed because it is too large


@@ -1,30 +0,0 @@
/*
* GP106 Tegra HAL interface
*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_HAL_GP106_H
#define NVGPU_HAL_GP106_H
struct gk20a;
int gp106_init_hal(struct gk20a *g);
#endif /* NVGPU_HAL_GP106_H */


File diff suppressed because it is too large


@@ -1,42 +0,0 @@
/*
*
* Tegra GP106 GPU Debugger Driver Register Ops
*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_REGOPS_GP106_H
#define NVGPU_REGOPS_GP106_H
const struct regop_offset_range *gp106_get_global_whitelist_ranges(void);
u64 gp106_get_global_whitelist_ranges_count(void);
const struct regop_offset_range *gp106_get_context_whitelist_ranges(void);
u64 gp106_get_context_whitelist_ranges_count(void);
const u32 *gp106_get_runcontrol_whitelist(void);
u64 gp106_get_runcontrol_whitelist_count(void);
const struct regop_offset_range *gp106_get_runcontrol_whitelist_ranges(void);
u64 gp106_get_runcontrol_whitelist_ranges_count(void);
const u32 *gp106_get_qctl_whitelist(void);
u64 gp106_get_qctl_whitelist_count(void);
const struct regop_offset_range *gp106_get_qctl_whitelist_ranges(void);
u64 gp106_get_qctl_whitelist_ranges_count(void);
int gp106_apply_smpc_war(struct dbg_session_gk20a *dbg_s);
#endif /* NVGPU_REGOPS_GP106_H */


@@ -1906,8 +1906,6 @@ int gk20a_wait_for_idle(struct gk20a *g);
 #define GK20A_GPUID_GM20B   0x0000012BU
 #define GK20A_GPUID_GM20B_B 0x0000012EU
 #define NVGPU_GPUID_GP10B   0x0000013BU
-#define NVGPU_GPUID_GP104   0x00000134U
-#define NVGPU_GPUID_GP106   0x00000136U
 #define NVGPU_GPUID_GV11B   0x0000015BU
 #define NVGPU_GPUID_GV100   0x00000140U
 #define NVGPU_GPUID_TU104   0x00000164U


@@ -18,7 +18,6 @@
 #include "os_ops_gm20b.h"
 #include "os_ops_gp10b.h"
-#include "os_ops_gp106.h"
 #include "os_ops_gv11b.h"
 #include "os_ops_gv100.h"
 #include "os_ops_tu104.h"
@@ -36,9 +35,6 @@ int nvgpu_init_os_linux_ops(struct nvgpu_os_linux *l)
     case NVGPU_GPUID_GP10B:
         nvgpu_gp10b_init_os_ops(l);
         break;
-    case NVGPU_GPUID_GP106:
-        nvgpu_gp106_init_os_ops(l);
-        break;
     case NVGPU_GPUID_GV100:
         nvgpu_gv100_init_os_ops(l);
         break;


@@ -1,40 +0,0 @@
/*
* Copyright (c) 2018, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os_linux.h"
#include "debug_clk_gp106.h"
#include "debug_therm_gp106.h"
#include "debug_fecs_trace.h"
static struct nvgpu_os_linux_ops gp106_os_linux_ops = {
.clk = {
.init_debugfs = gp106_clk_init_debugfs,
},
.therm = {
.init_debugfs = gp106_therm_init_debugfs,
},
.fecs_trace = {
.init_debugfs = nvgpu_fecs_trace_init_debugfs,
},
};
void nvgpu_gp106_init_os_ops(struct nvgpu_os_linux *l)
{
l->ops.clk = gp106_os_linux_ops.clk;
l->ops.therm = gp106_os_linux_ops.therm;
l->ops.fecs_trace = gp106_os_linux_ops.fecs_trace;
}


@@ -1,22 +0,0 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_OS_OPS_GP106_H
#define __LINUX_OS_OPS_GP106_H
void nvgpu_gp106_init_os_ops(struct nvgpu_os_linux *l);
#endif


@@ -73,154 +73,6 @@ static long nvgpu_pci_clk_round_rate(struct device *dev, unsigned long rate)
 }
 static struct gk20a_platform nvgpu_pci_device[] = {
-    { /* DEVICE=0x1c35 */
-        /* ptimer src frequency in hz */
-        .ptimer_src_freq = 31250000,
-        .probe = nvgpu_pci_tegra_probe,
-        .remove = nvgpu_pci_tegra_remove,
-        /* power management configuration */
-        .railgate_delay_init = 500,
-        .can_railgate_init = false,
-        .can_elpg_init = true,
-        .enable_elpg = true,
-        .enable_elcg = false,
-        .enable_slcg = true,
-        .enable_blcg = true,
-        .enable_mscg = true,
-        .can_slcg = true,
-        .can_blcg = true,
-        .can_elcg = true,
-        .disable_aspm = true,
-        /* power management callbacks */
-        .is_railgated = nvgpu_pci_tegra_is_railgated,
-        .clk_round_rate = nvgpu_pci_clk_round_rate,
-        .ch_wdt_timeout_ms = 7000,
-        .unify_address_spaces = true,
-        .honors_aperture = true,
-        .dma_mask = DMA_BIT_MASK(40),
-        .vbios_min_version = 0x86063000,
-        .hardcode_sw_threshold = true,
-        .ina3221_dcb_index = 0,
-        .ina3221_i2c_address = 0x84,
-        .ina3221_i2c_port = 0x2,
-    },
-    { /* DEVICE=0x1c36 */
-        /* ptimer src frequency in hz */
-        .ptimer_src_freq = 31250000,
-        .probe = nvgpu_pci_tegra_probe,
-        .remove = nvgpu_pci_tegra_remove,
-        /* power management configuration */
-        .railgate_delay_init = 500,
-        .can_railgate_init = false,
-        .can_elpg_init = true,
-        .enable_elpg = true,
-        .enable_elcg = false,
-        .enable_slcg = true,
-        .enable_blcg = true,
-        .enable_mscg = true,
-        .can_slcg = true,
-        .can_blcg = true,
-        .can_elcg = true,
-        .disable_aspm = true,
-        /* power management callbacks */
-        .is_railgated = nvgpu_pci_tegra_is_railgated,
-        .clk_round_rate = nvgpu_pci_clk_round_rate,
-        .ch_wdt_timeout_ms = 7000,
-        .unify_address_spaces = true,
-        .honors_aperture = true,
-        .dma_mask = DMA_BIT_MASK(40),
-        .vbios_min_version = 0x86062d00,
-        .hardcode_sw_threshold = true,
-        .ina3221_dcb_index = 0,
-        .ina3221_i2c_address = 0x84,
-        .ina3221_i2c_port = 0x2,
-    },
-    { /* DEVICE=0x1c37 */
-        /* ptimer src frequency in hz */
-        .ptimer_src_freq = 31250000,
-        .probe = nvgpu_pci_tegra_probe,
-        .remove = nvgpu_pci_tegra_remove,
-        /* power management configuration */
-        .railgate_delay_init = 500,
-        .can_railgate_init = false,
-        .can_elpg_init = true,
-        .enable_elpg = true,
-        .enable_elcg = false,
-        .enable_slcg = true,
-        .enable_blcg = true,
-        .enable_mscg = true,
-        .can_slcg = true,
-        .can_blcg = true,
-        .can_elcg = true,
-        .disable_aspm = true,
-        /* power management callbacks */
-        .is_railgated = nvgpu_pci_tegra_is_railgated,
-        .clk_round_rate = nvgpu_pci_clk_round_rate,
-        .ch_wdt_timeout_ms = 7000,
-        .unify_address_spaces = true,
-        .honors_aperture = true,
-        .dma_mask = DMA_BIT_MASK(40),
-        .vbios_min_version = 0x86063000,
-        .hardcode_sw_threshold = true,
-        .ina3221_dcb_index = 0,
-        .ina3221_i2c_address = 0x84,
-        .ina3221_i2c_port = 0x2,
-    },
-    { /* DEVICE=0x1c75 */
-        /* ptimer src frequency in hz */
-        .ptimer_src_freq = 31250000,
-        .probe = nvgpu_pci_tegra_probe,
-        .remove = nvgpu_pci_tegra_remove,
-        /* power management configuration */
-        .railgate_delay_init = 500,
-        .can_railgate_init = false,
-        .can_elpg_init = true,
-        .enable_elpg = true,
-        .enable_elcg = false,
-        .enable_slcg = true,
-        .enable_blcg = true,
-        .enable_mscg = true,
-        .can_slcg = true,
-        .can_blcg = true,
-        .can_elcg = true,
-        .disable_aspm = true,
-        /* power management callbacks */
-        .is_railgated = nvgpu_pci_tegra_is_railgated,
-        .clk_round_rate = nvgpu_pci_clk_round_rate,
-        .ch_wdt_timeout_ms = 7000,
-        .unify_address_spaces = true,
-        .honors_aperture = true,
-        .dma_mask = DMA_BIT_MASK(40),
-        .vbios_min_version = 0x86065300,
-        .hardcode_sw_threshold = false,
-        .ina3221_dcb_index = 1,
-        .ina3221_i2c_address = 0x80,
-        .ina3221_i2c_port = 0x1,
-    },
     { /* DEVICE=PG503 SKU 201 */
         /* ptimer src frequency in hz */
         .ptimer_src_freq = 31250000,
@@ -440,70 +292,46 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 static struct pci_device_id nvgpu_pci_table[] = {
     {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1c35),
+        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1db1),
         .class = PCI_BASE_CLASS_DISPLAY << 16,
         .class_mask = 0xff << 16,
         .driver_data = 0,
     },
     {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1c36),
+        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1db0),
         .class = PCI_BASE_CLASS_DISPLAY << 16,
         .class_mask = 0xff << 16,
         .driver_data = 1,
     },
     {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1c37),
+        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1dbe),
         .class = PCI_BASE_CLASS_DISPLAY << 16,
         .class_mask = 0xff << 16,
         .driver_data = 2,
     },
     {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1c75),
+        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1df1),
         .class = PCI_BASE_CLASS_DISPLAY << 16,
         .class_mask = 0xff << 16,
         .driver_data = 3,
     },
     {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1db1),
+        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1e3f),
         .class = PCI_BASE_CLASS_DISPLAY << 16,
         .class_mask = 0xff << 16,
         .driver_data = 4,
     },
     {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1db0),
+        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1eba),
         .class = PCI_BASE_CLASS_DISPLAY << 16,
         .class_mask = 0xff << 16,
         .driver_data = 5,
     },
-    {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1dbe),
-        .class = PCI_BASE_CLASS_DISPLAY << 16,
-        .class_mask = 0xff << 16,
-        .driver_data = 6,
-    },
-    {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1df1),
-        .class = PCI_BASE_CLASS_DISPLAY << 16,
-        .class_mask = 0xff << 16,
-        .driver_data = 7,
-    },
-    {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1e3f),
-        .class = PCI_BASE_CLASS_DISPLAY << 16,
-        .class_mask = 0xff << 16,
-        .driver_data = 8,
-    },
-    {
-        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1eba),
-        .class = PCI_BASE_CLASS_DISPLAY << 16,
-        .class_mask = 0xff << 16,
-        .driver_data = 9,
-    },
     {
         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x1efa),
         .class = PCI_BASE_CLASS_DISPLAY << 16,
         .class_mask = 0xff << 16,
-        .driver_data = 9,
+        .driver_data = 5,
     },
     {}
 };