gpu: nvgpu: common/pmu: fix compile errors from new compile flags

This prepares for adding the CFLAGS below:
    -Werror -Wall -Wextra \
    -Wmissing-braces -Wpointer-arith -Wundef \
    -Wconversion -Wsign-conversion \
    -Wformat-security \
    -Wmissing-declarations -Wredundant-decls -Wimplicit-fallthrough
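
The fixes this requires are mechanical and fall into two patterns. As a
rough sketch (the functions below are illustrative, not taken from this
change), they look like:

    /* -Wextra enables -Wunused-parameter: parameters kept for a common
     * interface are now explicitly consumed with a (void) cast. */
    static u32 example_cmd_line_size(struct nvgpu_pmu *pmu)
    {
            (void)pmu;
            return (u32)sizeof(struct pmu_cmdline_args_v4);
    }

    /* -Wconversion/-Wsign-conversion: arithmetic on u8/u16 promotes to
     * int, so narrowing back to the small type must be an explicit cast
     * of the whole expression instead of an implicit assignment. */
    static u8 example_sum(u8 a, u8 b)
    {
            return (u8)(a + b);
    }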

Jira GVSCI-11640

Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Change-Id: Ide3ab484924bd5be976a9f335b55b136575ce428
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2555055
Reviewed-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-by: Aparna Das <aparnad@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Richard Zhao <rizhao@nvidia.com>
Committer: mobile promotions <svcmobile_promotions@nvidia.com>
Date:      2021-07-06 21:26:47 -07:00
Commit:    851666b632 (parent a3ed73a57c)

45 changed files with 313 additions and 98 deletions

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -70,6 +70,7 @@ void nvgpu_pmu_allocator_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 void nvgpu_pmu_allocator_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
         struct flcn_mem_desc_v0 *fb)
 {
+        (void)g;
         fb->address.lo = u64_lo32(mem->gpu_va);
         fb->address.hi = u64_hi32(mem->gpu_va);
         fb->params = ((u32)mem->size & 0xFFFFFFU);

(file path not shown)

@@ -22,6 +22,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/boardobjgrp.h>
+#include <nvgpu/string.h>
 #include <nvgpu/pmu/cmd.h>
 #include <nvgpu/pmu/super_surface.h>
 #include <nvgpu/pmu/allocator.h>
@@ -31,6 +32,8 @@
 static int check_boardobjgrp_param(struct gk20a *g,
         struct boardobjgrp *pboardobjgrp)
 {
+        (void)g;
+
         if (pboardobjgrp == NULL) {
                 return -EINVAL;
         }
@@ -137,7 +140,7 @@ static struct pmu_board_obj *obj_get_next_final(
         /* Search from next element unless first object was requested */
         index = (*currentindex != CTRL_BOARDOBJ_IDX_INVALID) ?
-                (*currentindex + 1U) : 0U;
+                (u8)(*currentindex + 1U) : 0U;
 
         /* For the cases below in which we have to return NULL */
         *currentindex = CTRL_BOARDOBJ_IDX_INVALID;
@@ -177,6 +180,9 @@ static int pmu_data_inst_get_stub(struct gk20a *g,
         struct nv_pmu_boardobjgrp *boardobjgrppmu,
         struct nv_pmu_boardobj **pmu_obj, u8 idx)
 {
+        (void)boardobjgrppmu;
+        (void)pmu_obj;
+        (void)idx;
         nvgpu_log_info(g, " ");
         return -EINVAL;
 }
@@ -186,6 +192,9 @@ static int pmu_status_inst_get_stub(struct gk20a *g,
         void *pboardobjgrppmu,
         struct nv_pmu_boardobj_query **obj_pmu_status, u8 idx)
 {
+        (void)pboardobjgrppmu;
+        (void)obj_pmu_status;
+        (void)idx;
         nvgpu_log_info(g, " ");
         return -EINVAL;
 }
@@ -302,6 +311,9 @@ static int is_pmu_cmd_id_valid(struct gk20a *g,
 {
         int err = 0;
 
+        (void)g;
+        (void)cmd;
+
         if (pboardobjgrp->pmu.rpc_func_id ==
                 BOARDOBJGRP_GRP_RPC_FUNC_ID_INVALID) {
                 err = -EINVAL;
@@ -608,6 +620,9 @@ int nvgpu_boardobjgrp_pmucmd_construct_impl(struct gk20a *g, struct boardobjgrp
         *pboardobjgrp, struct boardobjgrp_pmu_cmd *cmd, u8 id, u8 msgid,
         u16 hdrsize, u16 entrysize, u32 fbsize, u32 ss_offset, u8 rpc_func_id)
 {
+        (void)id;
+        (void)msgid;
+
         nvgpu_log_fn(g, " ");
 
         /* Copy the parameters into the CMD*/
@@ -624,6 +639,8 @@ int nvgpu_boardobjgrp_pmu_hdr_data_init_super(struct gk20a *g, struct boardobjgr
         *pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu,
         struct boardobjgrpmask *mask)
 {
+        (void)mask;
+
         nvgpu_log_info(g, " ");
 
         if (pboardobjgrp == NULL) {

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -80,7 +80,7 @@ int nvgpu_boardobjgrpmask_init(struct boardobjgrpmask *mask, u8 bitsize,
         }
         mask->bitcount = bitsize;
-        mask->maskdatacount = CTRL_BOARDOBJGRP_MASK_DATA_SIZE(bitsize);
+        mask->maskdatacount = (u8)CTRL_BOARDOBJGRP_MASK_DATA_SIZE(bitsize);
         mask->lastmaskfilter = U32(bitsize) %
                 CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE;
@@ -145,7 +145,7 @@ u8 nvgpu_boardobjgrpmask_bit_set_count(struct boardobjgrpmask *mask)
                 u32 m = mask->data[index];
 
                 NUMSETBITS_32(m);
-                result += (u8)m;
+                result = (u8)(result + m);
         }
 
         return result;
@@ -165,8 +165,8 @@ u8 nvgpu_boardobjgrpmask_bit_idx_highest(struct boardobjgrpmask *mask)
                 if (m != 0U) {
                         HIGHESTBITIDX_32(m);
-                        result = (u8)m + index *
-                                CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE;
+                        result = (u8)(m + index *
+                                CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE);
                         break;
                 }
         }
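
Note on the two mask hunks above: a compound assignment hides an
implicit conversion, which is why "result += (u8)m" becomes
"result = (u8)(result + m)". A standalone illustration (the values are
made up, not from the patch):

    u8 result = 0U;
    u32 m = 0xffU;

    /* result += (u8)m;  would still warn under -Wconversion: the sum
     * promotes to a wider type and the store back into the u8 is an
     * implicit narrowing. */
    result = (u8)(result + m);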

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -64,6 +64,8 @@ u32 nvgpu_pmu_clk_mon_init_domains(struct gk20a *g)
 {
         u32 domain_mask;
 
+        (void)g;
+
         domain_mask = (CTRL_CLK_DOMAIN_MCLK |
                         CTRL_CLK_DOMAIN_XBARCLK |
                         CTRL_CLK_DOMAIN_SYSCLK |

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -606,7 +606,7 @@ static int devinit_get_clocks_table_35(struct gk20a *g,
         }
         status = boardobjgrp_objinsert(&pclkdomainobjs->super.super,
                 (struct pmu_board_obj *)(void *)
-                pclkdomain_dev, index);
+                pclkdomain_dev, (u8)index);
         if (status != 0) {
                 nvgpu_err(g,
                         "unable to insert clock domain boardobj for %d", index);
@@ -1233,6 +1233,8 @@ static int clkdomainclkproglink_fixed(struct gk20a *g,
         struct nvgpu_clk_pmupstate *pclk,
         struct nvgpu_clk_domain *pdomain)
 {
+        (void)pclk;
+        (void)pdomain;
         nvgpu_log_info(g, " ");
         return 0;
 }
@@ -1650,7 +1652,7 @@ int nvgpu_pmu_clk_domain_get_from_index(struct gk20a *g, u32 *domain, u32 index)
         struct nvgpu_clk_domain *clk_domain;
 
         clk_domain = (struct nvgpu_clk_domain *) BOARDOBJGRP_OBJ_GET_BY_IDX(
-                &g->pmu->clk_pmu->clk_domainobjs->super.super, index);
+                &g->pmu->clk_pmu->clk_domainobjs->super.super, (u8)index);
         if (clk_domain == NULL) {
                 return -EINVAL;
         }
@@ -1730,6 +1732,9 @@ int nvgpu_pmu_clk_domain_freq_to_volt(struct gk20a *g, u8 clkdomain_idx,
         struct clk_vf_point *pclk_vf_point;
         u8 index;
 
+        (void)clkdomain_idx;
+        (void)railidx;
+
         nvgpu_log_info(g, " ");
         pclk_vf_points = g->pmu->clk_pmu->clk_vf_pointobjs;
         pboardobjgrp = &pclk_vf_points->super.super;

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -125,6 +125,8 @@ static int _clk_fll_devgrp_pmustatus_instget(struct gk20a *g,
                 (struct nv_pmu_clk_clk_fll_device_boardobj_grp_get_status *)
                 pboardobjgrppmu;
 
+        (void)g;
+
         /*check whether pmuboardobjgrp has a valid boardobj in index*/
         if (((u32)BIT(idx) &
                 pgrp_get_status->hdr.data.super.obj_mask.super.data[0]) == 0U) {
@@ -400,7 +402,7 @@ static int devinit_get_fll_device_table(struct gk20a *g,
                 pfll_dev = construct_fll_device(g, (void *)&fll_dev_data);
 
                 status = boardobjgrp_objinsert(&pfllobjs->super.super,
-                                (struct pmu_board_obj *)pfll_dev, index);
+                                (struct pmu_board_obj *)pfll_dev, (u8)index);
                 fll_tbl_entry_ptr += fll_desc_table_header.entry_size;
         }
@@ -430,6 +432,9 @@ static int lutbroadcastslaveregister(struct gk20a *g,
         struct clk_avfs_fll_objs *pfllobjs, struct fll_device *pfll,
         struct fll_device *pfll_slave)
 {
+        (void)g;
+        (void)pfllobjs;
+
         if (pfll->clk_domain != pfll_slave->clk_domain) {
                 return -EINVAL;
         }

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -437,7 +437,7 @@ static int devinit_get_clk_prog_table_35(struct gk20a *g,
         }
         status = boardobjgrp_objinsert(&pclkprogobjs->super.super,
-                (struct pmu_board_obj *)(void *)pprog, i);
+                (struct pmu_board_obj *)(void *)pprog, (u8)i);
         if (status != 0) {
                 nvgpu_err(g, "error adding clk_prog boardobj %d", i);
                 status = -EINVAL;
@@ -646,6 +646,8 @@ static int _clk_prog_1x_master_rail_construct_vf_point(struct gk20a *g,
         struct clk_vf_point *p_vf_point;
         int status;
 
+        (void)p1xmaster;
+
         nvgpu_log_info(g, " ");
 
         p_vf_point = nvgpu_construct_clk_vf_point(g, (void *)p_vf_point_tmp);
@@ -1010,6 +1012,8 @@ static int vfflatten_prog_1x_master(struct gk20a *g,
         u8 vf_point_idx;
         u8 vf_rail_idx;
 
+        (void)clk_domain_idx;
+
         nvgpu_log_info(g, " ");
 
         (void) memset(&vf_point_data, 0x0, sizeof(vf_point_data));
@@ -1041,7 +1045,7 @@
                 freq_step_size_mhz = source_pll->freq_step_size_mhz;
                 step_count = (freq_step_size_mhz == 0U) ? 0U :
                         (u8)(p1xmaster->super.freq_max_mhz -
-                        *pfreqmaxlastmhz - 1U) /
+                        *pfreqmaxlastmhz - 1) /
                         freq_step_size_mhz;
 
         /* Intentional fall-through.*/
@@ -1050,9 +1054,9 @@
                         CTRL_CLK_CLK_VF_POINT_TYPE_35_FREQ;
                 do {
                         vf_point_data.vf_point.pair.freq_mhz =
-                                p1xmaster->super.freq_max_mhz -
-                                U16(step_count) *
-                                U16(freq_step_size_mhz);
+                                (u16)(p1xmaster->super.freq_max_mhz -
+                                U16(step_count) *
+                                U16(freq_step_size_mhz));
 
                         status = _clk_prog_1x_master_rail_construct_vf_point(g, pclk,
                                 p1xmaster, p_vf_rail,
@@ -1162,7 +1166,7 @@ static int vflookup_prog_1x_master(struct gk20a *g,
                         if (i == slaveentrycount) {
                                 return -EINVAL;
                         }
-                        clkmhz = (clkmhz * 100U)/pslaveents->ratio;
+                        clkmhz = (u16)((clkmhz * 100U)/pslaveents->ratio);
                 } else {
                         /* only support ratio for now */
                         return -EINVAL;
@@ -1239,7 +1243,7 @@ static int vflookup_prog_1x_master(struct gk20a *g,
                         if (i == slaveentrycount) {
                                 return -EINVAL;
                         }
-                        clkmhz = (clkmhz * pslaveents->ratio)/100U;
+                        clkmhz = (u16)((clkmhz * pslaveents->ratio)/100);
                 } else {
                         /* only support ratio for now */
                         return -EINVAL;
@@ -1266,6 +1270,8 @@ static int getfpoints_prog_1x_master(struct gk20a *g,
         u8 j;
         u32 fpointscount = 0;
 
+        (void)g;
+
         if (pfpointscount == NULL) {
                 return -EINVAL;
         }
@@ -1352,7 +1358,7 @@ static int getslaveclk_prog_1x_master(struct gk20a *g,
                 if (i == slaveentrycount) {
                         return -EINVAL;
                 }
-                *pclkmhz = (masterclkmhz * pslaveents->ratio)/100U;
+                *pclkmhz = (u16)((masterclkmhz * pslaveents->ratio)/100);
         } else {
                 /* only support ratio for now */
                 return -EINVAL;
@@ -1374,9 +1380,9 @@ static int getslaveclk_prog_1x_master(struct gk20a *g,
                 if (i == slaveentrycount) {
                         return -EINVAL;
                 }
-                *pclkmhz = (masterclkmhz * pslaveents->ratio)/100U;
+                *pclkmhz = (u16)((masterclkmhz * pslaveents->ratio)/100);
                 /* Floor/Quantize all the slave clocks to the multiple of step size*/
-                *pclkmhz = (*pclkmhz / FREQ_STEP_SIZE_MHZ) * FREQ_STEP_SIZE_MHZ;
+                *pclkmhz = (u16)((*pclkmhz / FREQ_STEP_SIZE_MHZ) * FREQ_STEP_SIZE_MHZ);
                 *ratio = pslaveents->ratio;
         } else {
                 /* only support ratio for now */

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -112,6 +112,8 @@ static int _clk_vf_points_pmustatus_instget(struct gk20a *g,
                 (struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status *)
                 pboardobjgrppmu;
 
+        (void)g;
+
         /*check whether pmuboardobjgrp has a valid boardobj in index*/
         if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS) {
                 return -EINVAL;

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -145,6 +145,8 @@ static int _clk_vin_devgrp_pmustatus_instget(struct gk20a *g,
                 (struct nv_pmu_clk_clk_vin_device_boardobj_grp_get_status *)
                 pboardobjgrppmu;
 
+        (void)g;
+
         /*check whether pmuboardobjgrp has a valid boardobj in index*/
         if (((u32)BIT(idx) &
                 pgrp_get_status->hdr.data.super.obj_mask.super.data[0]) == 0U) {
@@ -326,7 +328,7 @@ static int devinit_get_vin_device_table(struct gk20a *g,
                 pvin_dev = construct_vin_device(g, (void *)&vin_device_data);
 
                 status = boardobjgrp_objinsert(&pvinobjs->super.super,
-                                (struct pmu_board_obj *)pvin_dev, index);
+                                (struct pmu_board_obj *)pvin_dev, (u8)index);
                 vin_tbl_entry_ptr += vin_desc_table_header.entry_size;
         }

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -84,6 +84,9 @@ void nvgpu_pmu_fw_state_change(struct gk20a *g, struct nvgpu_pmu *pmu,
 u32 nvgpu_pmu_get_fw_state(struct gk20a *g, struct nvgpu_pmu *pmu)
 {
         u32 state = pmu->fw->state;
+
+        (void)g;
+
         nvgpu_smp_rmb();
 
         return state;
@@ -92,6 +95,7 @@ u32 nvgpu_pmu_get_fw_state(struct gk20a *g, struct nvgpu_pmu *pmu)
 void nvgpu_pmu_set_fw_ready(struct gk20a *g, struct nvgpu_pmu *pmu,
         bool status)
 {
+        (void)g;
         nvgpu_smp_wmb();
         pmu->fw->ready = status;
 }
@@ -99,6 +103,9 @@ void nvgpu_pmu_set_fw_ready(struct gk20a *g, struct nvgpu_pmu *pmu,
 bool nvgpu_pmu_get_fw_ready(struct gk20a *g, struct nvgpu_pmu *pmu)
 {
         bool state = pmu->fw->ready;
+
+        (void)g;
+
         nvgpu_smp_rmb();
 
         return state;
@@ -194,18 +201,21 @@ static void pmu_fw_release(struct gk20a *g, struct pmu_rtos_fw *rtos_fw)
 struct nvgpu_firmware *nvgpu_pmu_fw_sig_desc(struct gk20a *g,
         struct nvgpu_pmu *pmu)
 {
+        (void)g;
         return pmu->fw->fw_sig;
 }
 
 struct nvgpu_firmware *nvgpu_pmu_fw_desc_desc(struct gk20a *g,
         struct nvgpu_pmu *pmu)
 {
+        (void)g;
         return pmu->fw->fw_desc;
 }
 
 struct nvgpu_firmware *nvgpu_pmu_fw_image_desc(struct gk20a *g,
         struct nvgpu_pmu *pmu)
 {
+        (void)g;
         return pmu->fw->fw_image;
 }
@@ -339,6 +349,8 @@ exit:
 void nvgpu_pmu_fw_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
         struct pmu_rtos_fw *rtos_fw)
 {
+        (void)pmu;
+
         nvgpu_log_fn(g, " ");
 
         if (rtos_fw == NULL) {

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -52,6 +52,7 @@
 /* PMU version specific functions */
 static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu)
 {
+        (void)pmu;
         return (u32)sizeof(struct pmu_perfmon_counter_v2);
 }
@@ -95,6 +96,7 @@ static void pmu_set_cmd_line_args_trace_dma_base_v4(struct nvgpu_pmu *pmu)
 static u32 pmu_cmd_line_size_v4(struct nvgpu_pmu *pmu)
 {
+        (void)pmu;
         return (u32)sizeof(struct pmu_cmdline_args_v4);
 }
@@ -120,16 +122,19 @@ static void pmu_set_cmd_line_args_trace_dma_idx_v4(
 static u32 pmu_cmd_line_size_v6(struct nvgpu_pmu *pmu)
 {
+        (void)pmu;
         return (u32)sizeof(struct pmu_cmdline_args_v6);
 }
 
 static u32 pmu_cmd_line_size_v7(struct nvgpu_pmu *pmu)
 {
+        (void)pmu;
         return (u32)sizeof(struct pmu_cmdline_args_v7);
 }
 
 static void pmu_set_cmd_line_args_cpu_freq_v5(struct nvgpu_pmu *pmu, u32 freq)
 {
+        (void)freq;
         pmu->fw->args_v5.cpu_freq_hz = 204000000;
 }
 
 static void pmu_set_cmd_line_args_secure_mode_v5(struct nvgpu_pmu *pmu, u8 val)
@@ -140,6 +145,8 @@ static void pmu_set_cmd_line_args_secure_mode_v5(struct nvgpu_pmu *pmu, u8 val)
 static void pmu_set_cmd_line_args_trace_size_v5(
         struct nvgpu_pmu *pmu, u32 size)
 {
+        (void)pmu;
+        (void)size;
         /* set by surface describe */
 }
@@ -176,11 +183,14 @@ static void config_cmd_line_args_super_surface_v7(struct nvgpu_pmu *pmu)
 static void pmu_set_cmd_line_args_trace_dma_idx_v5(
         struct nvgpu_pmu *pmu, u32 idx)
 {
+        (void)pmu;
+        (void)idx;
         /* set by surface describe */
 }
 
 static u32 pmu_cmd_line_size_v3(struct nvgpu_pmu *pmu)
 {
+        (void)pmu;
         return (u32)sizeof(struct pmu_cmdline_args_v3);
 }
@@ -228,16 +238,19 @@ static void *pmu_get_cmd_line_args_ptr_v5(struct nvgpu_pmu *pmu)
 static u32 pmu_get_allocation_size_v3(struct nvgpu_pmu *pmu)
 {
+        (void)pmu;
         return (u32)sizeof(struct pmu_allocation_v3);
 }
 
 static u32 pmu_get_allocation_size_v2(struct nvgpu_pmu *pmu)
 {
+        (void)pmu;
         return (u32)sizeof(struct pmu_allocation_v2);
 }
 
 static u32 pmu_get_allocation_size_v1(struct nvgpu_pmu *pmu)
 {
+        (void)pmu;
         return (u32)sizeof(struct pmu_allocation_v1);
 }
@@ -247,6 +260,7 @@ static void pmu_set_allocation_ptr_v3(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v3 **pmu_a_ptr =
                 (struct pmu_allocation_v3 **)pmu_alloc_ptr;
 
+        (void)pmu;
         *pmu_a_ptr = (struct pmu_allocation_v3 *)assign_ptr;
 }
@@ -256,6 +270,7 @@ static void pmu_set_allocation_ptr_v2(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v2 **pmu_a_ptr =
                 (struct pmu_allocation_v2 **)pmu_alloc_ptr;
 
+        (void)pmu;
         *pmu_a_ptr = (struct pmu_allocation_v2 *)assign_ptr;
 }
@@ -265,6 +280,7 @@ static void pmu_set_allocation_ptr_v1(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v1 **pmu_a_ptr =
                 (struct pmu_allocation_v1 **)pmu_alloc_ptr;
 
+        (void)pmu;
         *pmu_a_ptr = (struct pmu_allocation_v1 *)assign_ptr;
 }
@@ -274,6 +290,7 @@ static void pmu_allocation_set_dmem_size_v3(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v3 *pmu_a_ptr =
                 (struct pmu_allocation_v3 *)pmu_alloc_ptr;
 
+        (void)pmu;
         pmu_a_ptr->alloc.dmem.size = size;
 }
@@ -283,6 +300,7 @@ static void pmu_allocation_set_dmem_size_v2(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v2 *pmu_a_ptr =
                 (struct pmu_allocation_v2 *)pmu_alloc_ptr;
 
+        (void)pmu;
         pmu_a_ptr->alloc.dmem.size = size;
 }
@@ -292,6 +310,7 @@ static void pmu_allocation_set_dmem_size_v1(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v1 *pmu_a_ptr =
                 (struct pmu_allocation_v1 *)pmu_alloc_ptr;
 
+        (void)pmu;
         pmu_a_ptr->alloc.dmem.size = size;
 }
@@ -301,6 +320,7 @@ static u16 pmu_allocation_get_dmem_size_v3(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v3 *pmu_a_ptr =
                 (struct pmu_allocation_v3 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return pmu_a_ptr->alloc.dmem.size;
 }
@@ -310,6 +330,7 @@ static u16 pmu_allocation_get_dmem_size_v2(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v2 *pmu_a_ptr =
                 (struct pmu_allocation_v2 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return pmu_a_ptr->alloc.dmem.size;
 }
@@ -319,6 +340,7 @@ static u16 pmu_allocation_get_dmem_size_v1(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v1 *pmu_a_ptr =
                 (struct pmu_allocation_v1 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return pmu_a_ptr->alloc.dmem.size;
 }
@@ -328,6 +350,7 @@ static u32 pmu_allocation_get_dmem_offset_v3(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v3 *pmu_a_ptr =
                 (struct pmu_allocation_v3 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return pmu_a_ptr->alloc.dmem.offset;
 }
@@ -337,6 +360,7 @@ static u32 pmu_allocation_get_dmem_offset_v2(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v2 *pmu_a_ptr =
                 (struct pmu_allocation_v2 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return pmu_a_ptr->alloc.dmem.offset;
 }
@@ -346,6 +370,7 @@ static u32 pmu_allocation_get_dmem_offset_v1(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v1 *pmu_a_ptr =
                 (struct pmu_allocation_v1 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return pmu_a_ptr->alloc.dmem.offset;
 }
@@ -355,6 +380,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v3 *pmu_a_ptr =
                 (struct pmu_allocation_v3 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return &pmu_a_ptr->alloc.dmem.offset;
 }
@@ -364,6 +390,7 @@ static void *pmu_allocation_get_fb_addr_v3(
         struct pmu_allocation_v3 *pmu_a_ptr =
                 (struct pmu_allocation_v3 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return (void *)&pmu_a_ptr->alloc.fb;
 }
@@ -373,6 +400,7 @@ static u32 pmu_allocation_get_fb_size_v3(
         struct pmu_allocation_v3 *pmu_a_ptr =
                 (struct pmu_allocation_v3 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return (u32)sizeof(pmu_a_ptr->alloc.fb);
 }
@@ -382,6 +410,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v2 *pmu_a_ptr =
                 (struct pmu_allocation_v2 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return &pmu_a_ptr->alloc.dmem.offset;
 }
@@ -391,6 +420,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v1 *pmu_a_ptr =
                 (struct pmu_allocation_v1 *)pmu_alloc_ptr;
 
+        (void)pmu;
         return &pmu_a_ptr->alloc.dmem.offset;
 }
@@ -400,6 +430,7 @@ static void pmu_allocation_set_dmem_offset_v3(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v3 *pmu_a_ptr =
                 (struct pmu_allocation_v3 *)pmu_alloc_ptr;
 
+        (void)pmu;
         pmu_a_ptr->alloc.dmem.offset = offset;
 }
@@ -409,6 +440,7 @@ static void pmu_allocation_set_dmem_offset_v2(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v2 *pmu_a_ptr =
                 (struct pmu_allocation_v2 *)pmu_alloc_ptr;
 
+        (void)pmu;
         pmu_a_ptr->alloc.dmem.offset = offset;
 }
@@ -418,6 +450,7 @@ static void pmu_allocation_set_dmem_offset_v1(struct nvgpu_pmu *pmu,
         struct pmu_allocation_v1 *pmu_a_ptr =
                 (struct pmu_allocation_v1 *)pmu_alloc_ptr;
 
+        (void)pmu;
         pmu_a_ptr->alloc.dmem.offset = offset;
 }

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -202,7 +202,7 @@ static int pmu_payload_allocate(struct gk20a *g, struct pmu_sequence *seq,
                 nvgpu_pmu_seq_set_fbq_out_offset(seq, buffer_size);
                 /* Save target address in FBQ work buffer. */
                 alloc->dmem_offset = buffer_size;
-                buffer_size += alloc->dmem_size;
+                buffer_size = (u16)(buffer_size + alloc->dmem_size);
                 nvgpu_pmu_seq_set_buffer_size(seq, buffer_size);
         } else {
                 tmp = nvgpu_alloc(&pmu->dmem, alloc->dmem_size);
@@ -235,8 +235,8 @@ static int pmu_cmd_payload_setup_rpc(struct gk20a *g, struct pmu_cmd *cmd,
         (void) memset(&alloc, 0, sizeof(struct falcon_payload_alloc));
 
-        alloc.dmem_size = payload->rpc.size_rpc +
-                payload->rpc.size_scratch;
+        alloc.dmem_size = (u16)(payload->rpc.size_rpc +
+                payload->rpc.size_scratch);
 
         err = pmu_payload_allocate(g, seq, &alloc);
         if (err != 0) {

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -550,7 +550,7 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
         nvgpu_pmu_dbg(g, "ctrl_flags = 0x%08x, seq_id = 0x%08x",
                 msg.hdr.ctrl_flags, msg.hdr.seq_id);
 
-        msg.hdr.ctrl_flags &= ~PMU_CMD_FLAGS_PMU_MASK;
+        msg.hdr.ctrl_flags &= (u8)(~PMU_CMD_FLAGS_PMU_MASK);
 
         if ((msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT) ||
                 (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_RPC_EVENT)) {
@@ -617,6 +617,8 @@ void nvgpu_pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
         struct rpc_handler_payload *rpc_payload =
                 (struct rpc_handler_payload *)param;
 
+        (void)status;
+
         if (nvgpu_can_busy(g) == 0) {
                 return;
         }

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 #include <nvgpu/kmem.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/pmu.h>
+#include <nvgpu/string.h>
 
 struct nvgpu_pmu;
@@ -34,6 +35,8 @@ void nvgpu_pmu_sequences_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
 {
         u32 i;
 
+        (void)pmu;
+
         nvgpu_log_fn(g, " ");
 
         (void) memset(sequences->seq, 0,
@@ -52,6 +55,8 @@ int nvgpu_pmu_sequences_init(struct gk20a *g, struct nvgpu_pmu *pmu,
         int err = 0;
         struct pmu_sequences *sequences;
 
+        (void)pmu;
+
         nvgpu_log_fn(g, " ");
 
         if (*sequences_p != NULL) {
@@ -85,6 +90,8 @@ exit:
 void nvgpu_pmu_sequences_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
         struct pmu_sequences *sequences)
 {
+        (void)pmu;
+
         nvgpu_log_fn(g, " ");
 
         if (sequences == NULL) {
@@ -145,6 +152,8 @@ void nvgpu_pmu_seq_release(struct gk20a *g,
         struct pmu_sequences *sequences,
         struct pmu_sequence *seq)
 {
+        (void)g;
+
         seq->state = PMU_SEQ_STATE_FREE;
         seq->callback = NULL;
         seq->cb_params = NULL;
@@ -258,6 +267,6 @@ void nvgpu_pmu_seq_callback(struct gk20a *g, struct pmu_sequence *seq,
         struct pmu_msg *msg, int err)
 {
         if (seq->callback != NULL) {
-                seq->callback(g, msg, seq->cb_params, err);
+                seq->callback(g, msg, seq->cb_params, (u32)err);
         }
 }
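
The "(u32)err" cast in the last hunk is the -Wsign-conversion case: the
sequence callback takes its status argument as u32 while err is a plain
int error code, so the sign change has to be spelled out. A minimal
sketch (the callback type and helper here are assumed from the call
site above, not copied from the source):

    typedef void (*pmu_callback)(struct gk20a *g, struct pmu_msg *msg,
            void *cb_params, u32 status);

    static void run_seq_callback(pmu_callback cb, struct gk20a *g,
            struct pmu_msg *msg, void *cb_params, int err)
    {
            /* cb(g, msg, cb_params, err);  would warn: an int may
             * change sign when implicitly converted to u32. */
            cb(g, msg, cb_params, (u32)err);
    }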

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -24,6 +24,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/pmu/lpwr.h>
 #include <nvgpu/bug.h>
+#include <nvgpu/string.h>
 #include <nvgpu/pmu/cmd.h>
 #include <nvgpu/pmu/pmu_pstate.h>

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -41,6 +41,7 @@
 static bool is_lsfm_supported(struct gk20a *g,
         struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm)
 {
+        (void)pmu;
         /*
          * Low secure falcon manager is a secure iGPU functionality to support
          * Lazy bootstrap feature. Enabling lsfm will allow nvgpu to send cmds

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -24,6 +24,7 @@
 #include <nvgpu/pmu.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/bug.h>
+#include <nvgpu/string.h>
 #include <nvgpu/pmu/cmd.h>
 #include <nvgpu/pmu/lsfm.h>
 #include <nvgpu/pmu/fw.h>
@@ -36,6 +37,9 @@ static void lsfm_handle_acr_init_wpr_region_msg(struct gk20a *g,
 {
         struct nvgpu_pmu *pmu = g->pmu;
 
+        (void)param;
+        (void)status;
+
         nvgpu_log_fn(g, " ");
 
         nvgpu_pmu_dbg(g, "reply PMU_ACR_CMD_ID_INIT_WPR_REGION");
@@ -76,6 +80,9 @@ void gm20b_pmu_lsfm_handle_bootstrap_falcon_msg(struct gk20a *g,
 {
         struct nvgpu_pmu *pmu = g->pmu;
 
+        (void)param;
+        (void)status;
+
         nvgpu_log_fn(g, " ");
 
         nvgpu_pmu_dbg(g, "reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
@@ -179,7 +186,7 @@ int gm20b_pmu_lsfm_pmu_cmd_line_args_copy(struct gk20a *g,
         /* Copying pmu cmdline args */
         pmu->fw->ops.set_cmd_line_args_cpu_freq(pmu,
-                g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
+                (u32)g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
         pmu->fw->ops.set_cmd_line_args_secure_mode(pmu, 1U);
         pmu->fw->ops.set_cmd_line_args_trace_size(
                 pmu, PMU_RTOS_TRACE_BUFSIZE);

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -24,6 +24,7 @@
 #include <nvgpu/pmu.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/bug.h>
+#include <nvgpu/string.h>
 #include <nvgpu/pmu/cmd.h>
 #include <nvgpu/pmu/lsfm.h>

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/pmu/cmd.h>
 #include <nvgpu/bug.h>
+#include <nvgpu/string.h>
 #include <nvgpu/pmu/cmd.h>
 #include <nvgpu/pmu/lsfm.h>
 #include <nvgpu/pmu/fw.h>

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/boardobjgrp_e32.h>
+#include <nvgpu/string.h>
 #include <nvgpu/pmu/clk/clk.h>
 #include <nvgpu/pmu/perf.h>
 #include <nvgpu/pmu/cmd.h>
@@ -134,7 +135,7 @@ static void build_change_seq_boot (struct gk20a *g)
         /* Assume everything is P0 - Need to find the index for P0 */
         script_last->buf.change.data.pstate_index =
-                perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);
+                (u32)perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);
 
         nvgpu_mem_wr_n(g, nvgpu_pmu_super_surface_mem(g,
                 pmu, pmu->super_surface),
@@ -213,7 +214,7 @@ int perf_change_seq_pmu_setup(struct gk20a *g)
         /* Assume everything is P0 - Need to find the index for P0 */
         perf_change_seq_pmu->script_last.buf.change.data.pstate_index =
-                perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);;
+                (u32)perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);
 
         nvgpu_mem_wr_n(g, nvgpu_pmu_super_surface_mem(g,
                 pmu, pmu->super_surface),
@@ -254,7 +255,7 @@ int nvgpu_pmu_perf_changeseq_set_clks(struct gk20a *g,
                 vf_point, &change_input.clk);
 
         change_input.pstate_index =
-                perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);
+                (u32)perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);
         change_input.flags = (u32)CTRL_PERF_CHANGE_SEQ_CHANGE_FORCE;
         change_input.vf_points_cache_counter = 0xFFFFFFFFU;
@@ -299,7 +300,7 @@ int nvgpu_pmu_perf_changeseq_set_clks(struct gk20a *g,
                 sizeof(struct nv_pmu_rpc_perf_change_seq_queue_change));
         rpc.change = change_input;
         rpc.change.pstate_index =
-                perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);
+                (u32)perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);
         change_seq_pmu->change_state = 0U;
         change_seq_pmu->start_time = nvgpu_current_time_us();
         PMU_RPC_EXECUTE_CPB(status, pmu, PERF,

(file path not shown)

@@ -1,6 +1,6 @@
 /*
  *
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -24,6 +24,7 @@
 #include <nvgpu/pmu.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/string.h>
 #include <nvgpu/pmu/clk/clk.h>
 #include <nvgpu/clk_arb.h>
 #include <nvgpu/pmu/perf.h>

(file path not shown)

@@ -1,7 +1,7 @@
 /*
  * general p state infrastructure
  *
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -28,6 +28,7 @@
 #include <nvgpu/boardobjgrp.h>
 #include <nvgpu/boardobjgrp_e32.h>
 #include <nvgpu/boardobjgrp_e255.h>
+#include <nvgpu/string.h>
 #include <nvgpu/pmu/boardobjgrp_classes.h>
 #include <nvgpu/pmu/clk/clk.h>
 #include <nvgpu/pmu/perf.h>
@@ -85,22 +86,22 @@ static int pstate_init_pmudata(struct gk20a *g,
         for (clkidx = 0; clkidx < pstate->clklist.num_info; clkidx++) {
                 pstate_pmu_data->clkEntries[clkidx].max.baseFreqKhz =
-                        pstate->clklist.clksetinfo[clkidx].max_mhz*1000;
+                        (u32)(pstate->clklist.clksetinfo[clkidx].max_mhz*1000);
                 pstate_pmu_data->clkEntries[clkidx].max.freqKz =
-                        pstate->clklist.clksetinfo[clkidx].max_mhz*1000;
+                        (u32)(pstate->clklist.clksetinfo[clkidx].max_mhz*1000);
                 pstate_pmu_data->clkEntries[clkidx].max.origFreqKhz =
-                        pstate->clklist.clksetinfo[clkidx].max_mhz*1000;
+                        (u32)(pstate->clklist.clksetinfo[clkidx].max_mhz*1000);
                 pstate_pmu_data->clkEntries[clkidx].max.porFreqKhz =
-                        pstate->clklist.clksetinfo[clkidx].max_mhz*1000;
+                        (u32)(pstate->clklist.clksetinfo[clkidx].max_mhz*1000);
                 pstate_pmu_data->clkEntries[clkidx].min.baseFreqKhz =
-                        pstate->clklist.clksetinfo[clkidx].min_mhz*1000;
+                        (u32)(pstate->clklist.clksetinfo[clkidx].min_mhz*1000);
                 pstate_pmu_data->clkEntries[clkidx].min.freqKz =
-                        pstate->clklist.clksetinfo[clkidx].min_mhz*1000;
+                        (u32)(pstate->clklist.clksetinfo[clkidx].min_mhz*1000);
                 pstate_pmu_data->clkEntries[clkidx].min.origFreqKhz =
-                        pstate->clklist.clksetinfo[clkidx].min_mhz*1000;
+                        (u32)(pstate->clklist.clksetinfo[clkidx].min_mhz*1000);
                 pstate_pmu_data->clkEntries[clkidx].min.porFreqKhz =
-                        pstate->clklist.clksetinfo[clkidx].min_mhz*1000;
+                        (u32)(pstate->clklist.clksetinfo[clkidx].min_mhz*1000);
 
                 pstate_pmu_data->clkEntries[clkidx].nom.baseFreqKhz =
                         pstate->clklist.clksetinfo[clkidx].nominal_mhz*1000;
@@ -347,7 +348,7 @@ static int perf_pstate_pmudatainit(struct gk20a *g,
         pset->numClkDomains = pprogs->num_clk_domains;
         pset->boot_pstate_idx =
-                perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);
+                (u8)perf_pstate_get_table_entry_idx(g, CTRL_PERF_PSTATE_P0);
 
 done:
         return status;
@@ -361,6 +362,8 @@ static int perf_pstate_pmudata_instget(struct gk20a *g,
                 (struct nv_pmu_perf_pstate_boardobj_grp_set *)
                 (void *)pmuboardobjgrp;
 
+        (void)g;
+
         /* check whether pmuboardobjgrp has a valid boardobj in index */
         if (idx >= CTRL_BOARDOBJGRP_E32_MAX_OBJECTS) {
                 return -EINVAL;

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -41,7 +41,7 @@ static int vfe_equ_node_depending_mask_combine(struct gk20a *g,
         struct boardobjgrp *pboardobjgrp, u8 equ_idx,
         struct boardobjgrpmask *pmask_dst)
 {
-        int status;
+        int status = 0;
         struct vfe_equ *tmp_vfe_equ;
 
         while (equ_idx != CTRL_BOARDOBJ_IDX_INVALID) {
@@ -145,6 +145,7 @@ static int vfe_equ_build_depending_mask_quad(struct gk20a *g,
         struct boardobjgrp *pboardobjgrp,
         struct vfe_equ *pvfe_equ)
 {
+        (void)pboardobjgrp;
         return vfe_equ_build_depending_mask_super(g, pvfe_equ);
 }
@@ -176,7 +177,7 @@ static int vfe_equ_build_depending_mask_equ_scalar(struct gk20a *g,
 static int vfe_equ_dependency_mask_build(struct gk20a *g,
         struct vfe_equs *pvfe_equs, struct vfe_vars *pvfe_vars)
 {
-        int status;
+        int status = 0;
         struct vfe_equ *tmp_vfe_equ;
         struct vfe_var *tmp_vfe_var;
         u8 index_1, index_2;
@@ -839,7 +840,7 @@ static int devinit_get_vfe_equ_table(struct gk20a *g,
         }
 
         status = boardobjgrp_objinsert(&pvfeequobjs->super.super,
-                        (struct pmu_board_obj *)pequ, index);
+                        (struct pmu_board_obj *)pequ, (u8)index);
         if (status != 0) {
                 nvgpu_err(g, "error adding vfe_equ boardobj %d", index);
                 status = -EINVAL;

(file path not shown)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -84,6 +84,8 @@ static int vfe_vars_pmustatus_instget(struct gk20a *g, void *pboardobjgrppmu,
                 (struct nv_pmu_perf_vfe_var_boardobj_grp_get_status *)
                 pboardobjgrppmu;
 
+        (void)g;
+
         if (((u32)BIT(idx) &
                 pgrp_get_status->hdr.data.super.obj_mask.super.data[0]) == 0U) {
                 return -EINVAL;
@@ -129,7 +131,7 @@ static int vfe_var_get_s_param_value(struct gk20a *g,
 static int vfe_var_dependency_mask_build(struct gk20a *g,
         struct vfe_vars *pvfe_vars)
 {
-        int status;
+        int status = 0;
         u8 index_1 = 0, index_2 = 0;
         struct vfe_var *tmp_vfe_var_1 = NULL, *tmp_vfe_var_2 = NULL;
         struct pmu_board_obj *obj_tmp_1 = NULL, *obj_tmp_2 = NULL;
@@ -331,6 +333,9 @@ static int vfe_var_build_depending_mask_null(struct gk20a *g,
         struct boardobjgrp *pboardobjgrp,
         struct vfe_var *pvfe_var)
 {
+        (void)g;
+        (void)pboardobjgrp;
+        (void)pvfe_var;
         /* Individual vfe_var members should over_ride this with their */
         /* respective function types */
         return -EINVAL;
@@ -640,6 +645,8 @@ static int vfe_var_build_depending_mask_single(struct gk20a *g,
         struct boardobjgrp *pboardobjgrp,
         struct vfe_var *pvfe_var)
 {
+        (void)g;
+        (void)pboardobjgrp;
         return nvgpu_boardobjgrpmask_bit_set(
                 &pvfe_var->mask_depending_vars.super,
                 pvfe_var->super.idx);
@@ -1266,7 +1273,7 @@ static int devinit_get_vfe_var_table(struct gk20a *g,
         }
 
         status = boardobjgrp_objinsert(&pvfevarobjs->super.super,
-                        (struct pmu_board_obj *)pvar, index);
+                        (struct pmu_board_obj *)pvar, (u8)index);
         if (status != 0) {
                 nvgpu_err(g, "error adding vfe_var boardobj %d", index);
                 status = -EINVAL;

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -33,6 +33,7 @@
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
#include <nvgpu/pmu/pmuif/nvgpu_cmdif.h> #include <nvgpu/pmu/pmuif/nvgpu_cmdif.h>
#include <nvgpu/kmem.h> #include <nvgpu/kmem.h>
#include <nvgpu/string.h>
#include "pmu_perfmon_sw_gm20b.h" #include "pmu_perfmon_sw_gm20b.h"
#include "pmu_perfmon_sw_gv11b.h" #include "pmu_perfmon_sw_gv11b.h"
@@ -122,6 +123,8 @@ int nvgpu_pmu_initialize_perfmon(struct gk20a *g, struct nvgpu_pmu *pmu,
int err = 0; int err = 0;
u32 ver = g->params.gpu_arch + g->params.gpu_impl; u32 ver = g->params.gpu_arch + g->params.gpu_impl;
(void)pmu;
if (*perfmon_ptr != NULL) { if (*perfmon_ptr != NULL) {
/* Not to allocate a new buffer after railgating /* Not to allocate a new buffer after railgating
is done. Use the same memory for pmu_perfmon is done. Use the same memory for pmu_perfmon
@@ -705,27 +708,28 @@ u32 nvgpu_pmu_perfmon_get_load_avg(struct nvgpu_pmu *pmu)
int nvgpu_pmu_perfmon_initialization(struct gk20a *g, int nvgpu_pmu_perfmon_initialization(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon) struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon)
{ {
(void)g;
return perfmon->init_perfmon(pmu); return perfmon->init_perfmon(pmu);
} }
int nvgpu_pmu_perfmon_start_sample(struct gk20a *g, int nvgpu_pmu_perfmon_start_sample(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon) struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon)
{ {
(void)g;
return perfmon->start_sampling(pmu); return perfmon->start_sampling(pmu);
} }
int nvgpu_pmu_perfmon_stop_sample(struct gk20a *g, int nvgpu_pmu_perfmon_stop_sample(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon) struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon)
{ {
(void)g;
return perfmon->stop_sampling(pmu); return perfmon->stop_sampling(pmu);
} }
int nvgpu_pmu_perfmon_get_sample(struct gk20a *g, int nvgpu_pmu_perfmon_get_sample(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon) struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon)
{ {
(void)g;
return perfmon->get_samples_rpc(pmu); return perfmon->get_samples_rpc(pmu);
} }
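
The (void)g; lines here, and throughout this change, are the portable idiom for -Wunused-parameter, which -Wextra enables: these wrappers keep g in their signatures so every perfmon entry point has the same shape, but only forward pmu to the underlying function pointer. A self-contained sketch under assumed names (the ops layout below is illustrative, not the nvgpu one):

	#include <stdio.h>

	struct gpu;				/* opaque device handle */
	struct pmu { int (*init_perfmon)(struct pmu *p); };

	static int init_perfmon_impl(struct pmu *p)
	{
		(void)p;			/* nothing needed from p in this stub */
		puts("perfmon init");
		return 0;
	}

	/* Keeps the wider (g, pmu) shape for API uniformity; g is unused. */
	static int perfmon_initialization(struct gpu *g, struct pmu *pmu)
	{
		(void)g;
		return pmu->init_perfmon(pmu);
	}

	int main(void)
	{
		struct pmu p = { .init_perfmon = init_perfmon_impl };
		return perfmon_initialization(NULL, &p);
	}

The cast-to-void is a no-op at runtime; it only marks the parameter as deliberately unused, which is why it can be dropped in freely without any behavior change.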


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -27,17 +27,21 @@
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
#include <nvgpu/pmu/pmu_pg.h> #include <nvgpu/pmu/pmu_pg.h>
#include <nvgpu/engines.h> #include <nvgpu/engines.h>
#include <nvgpu/string.h>
#include "pg_sw_gm20b.h" #include "pg_sw_gm20b.h"
#include "pmu_pg.h" #include "pmu_pg.h"
u32 gm20b_pmu_pg_engines_list(struct gk20a *g) u32 gm20b_pmu_pg_engines_list(struct gk20a *g)
{ {
(void)g;
return BIT32(PMU_PG_ELPG_ENGINE_ID_GRAPHICS); return BIT32(PMU_PG_ELPG_ENGINE_ID_GRAPHICS);
} }
u32 gm20b_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id) u32 gm20b_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
{ {
(void)g;
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
return NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING; return NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING;
} }
@@ -49,6 +53,8 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 status) void *param, u32 status)
{ {
struct nvgpu_pmu *pmu = param; struct nvgpu_pmu *pmu = param;
(void)msg;
(void)status;
nvgpu_pmu_dbg(g, "reply ZBC_TABLE_UPDATE"); nvgpu_pmu_dbg(g, "reply ZBC_TABLE_UPDATE");
pmu->pg->zbc_save_done = true; pmu->pg->zbc_save_done = true;
} }
@@ -342,6 +348,8 @@ int gm20b_pmu_pg_init_send(struct gk20a *g, struct nvgpu_pmu *pmu,
void nvgpu_gm20b_pg_sw_init(struct gk20a *g, void nvgpu_gm20b_pg_sw_init(struct gk20a *g,
struct nvgpu_pmu_pg *pg) struct nvgpu_pmu_pg *pg)
{ {
(void)g;
pg->elpg_statistics = gm20b_pmu_elpg_statistics; pg->elpg_statistics = gm20b_pmu_elpg_statistics;
pg->init_param = NULL; pg->init_param = NULL;
pg->supported_engines_list = gm20b_pmu_pg_engines_list; pg->supported_engines_list = gm20b_pmu_pg_engines_list;
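
The added #include <nvgpu/string.h> lines (here and in several files below) presumably make the declarations of the string/memory helpers each file calls directly visible, rather than inheriting them through another header: with -Werror, a call without an in-scope prototype and an external definition without a prior declaration (-Wmissing-declarations) both break the build. A generic two-file illustration, with hypothetical names:

	/* util.h -- declares the helper */
	#ifndef UTIL_H
	#define UTIL_H
	void log_banner(void);
	#endif

	/* util.c -- without this include, -Wmissing-declarations flags
	 * the external definition below as lacking a prior declaration. */
	#include "util.h"
	#include <stdio.h>

	void log_banner(void)
	{
		puts("nvgpu");
	}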


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
#include <nvgpu/pmu/cmd.h> #include <nvgpu/pmu/cmd.h>
#include <nvgpu/clk_arb.h> #include <nvgpu/clk_arb.h>
#include <nvgpu/string.h>
#include <nvgpu/pmu/pmu_pg.h> #include <nvgpu/pmu/pmu_pg.h>
#include "pg_sw_gp106.h" #include "pg_sw_gp106.h"
@@ -33,6 +34,8 @@
static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg, static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 status) void *param, u32 status)
{ {
(void)param;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (status != 0U) { if (status != 0U) {
@@ -125,12 +128,15 @@ int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
u32 gp106_pmu_pg_engines_list(struct gk20a *g) u32 gp106_pmu_pg_engines_list(struct gk20a *g)
{ {
(void)g;
return BIT32(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) | return BIT32(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) |
BIT32(PMU_PG_ELPG_ENGINE_ID_MS); BIT32(PMU_PG_ELPG_ENGINE_ID_MS);
} }
u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id) u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
{ {
(void)g;
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
return NVGPU_PMU_GR_FEATURE_MASK_RPPG; return NVGPU_PMU_GR_FEATURE_MASK_RPPG;
} }
@@ -144,5 +150,7 @@ u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id) bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
{ {
(void)g;
(void)feature_id;
return false; return false;
} }


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -24,6 +24,7 @@
#include <nvgpu/log.h> #include <nvgpu/log.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
#include <nvgpu/string.h>
#include <nvgpu/pmu/cmd.h> #include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/pmu_pg.h> #include <nvgpu/pmu/pmu_pg.h>
@@ -33,6 +34,8 @@
static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 status) void *param, u32 status)
{ {
(void)param;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (status != 0U) { if (status != 0U) {
@@ -106,6 +109,8 @@ int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
void nvgpu_gp10b_pg_sw_init(struct gk20a *g, void nvgpu_gp10b_pg_sw_init(struct gk20a *g,
struct nvgpu_pmu_pg *pg) struct nvgpu_pmu_pg *pg)
{ {
(void)g;
pg->elpg_statistics = gp10b_pmu_elpg_statistics; pg->elpg_statistics = gp10b_pmu_elpg_statistics;
pg->init_param = gp10b_pg_gr_init; pg->init_param = gp10b_pg_gr_init;
pg->supported_engines_list = gm20b_pmu_pg_engines_list; pg->supported_engines_list = gm20b_pmu_pg_engines_list;


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
#include <nvgpu/pmu.h> #include <nvgpu/pmu.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
#include <nvgpu/string.h>
#include <nvgpu/pmu/cmd.h> #include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/pmu_pg.h> #include <nvgpu/pmu/pmu_pg.h>
@@ -33,6 +34,8 @@
static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg, static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 status) void *param, u32 status)
{ {
(void)param;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (status != 0U) { if (status != 0U) {
@@ -47,6 +50,8 @@ static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg, static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 status) void *param, u32 status)
{ {
(void)param;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (status != 0U) { if (status != 0U) {
@@ -132,6 +137,8 @@ int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
void nvgpu_gv11b_pg_sw_init(struct gk20a *g, void nvgpu_gv11b_pg_sw_init(struct gk20a *g,
struct nvgpu_pmu_pg *pg) struct nvgpu_pmu_pg *pg)
{ {
(void)g;
pg->elpg_statistics = gp106_pmu_elpg_statistics; pg->elpg_statistics = gp106_pmu_elpg_statistics;
pg->init_param = gv11b_pg_gr_init; pg->init_param = gv11b_pg_gr_init;
pg->supported_engines_list = gm20b_pmu_pg_engines_list; pg->supported_engines_list = gm20b_pmu_pg_engines_list;


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -75,6 +75,8 @@ static void ap_callback_init_and_enable_ctrl(
struct gk20a *g, struct pmu_msg *msg, struct gk20a *g, struct pmu_msg *msg,
void *param, u32 status) void *param, u32 status)
{ {
(void)param;
WARN_ON(msg == NULL); WARN_ON(msg == NULL);
if (status == 0U) { if (status == 0U) {
@@ -101,6 +103,8 @@ int nvgpu_pmu_ap_send_command(struct gk20a *g,
pmu_callback p_callback = NULL; pmu_callback p_callback = NULL;
u64 tmp; u64 tmp;
(void)b_block;
(void) memset(&cmd, 0, sizeof(struct pmu_cmd)); (void) memset(&cmd, 0, sizeof(struct pmu_cmd));
/* Copy common members */ /* Copy common members */


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -679,6 +679,8 @@ static int pmu_pg_init_powergating(struct gk20a *g, struct nvgpu_pmu *pmu,
u32 pg_engine_id_list = 0; u32 pg_engine_id_list = 0;
int err = 0; int err = 0;
(void)pg;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (pmu->pg->supported_engines_list != NULL) { if (pmu->pg->supported_engines_list != NULL) {
@@ -747,6 +749,8 @@ static int pmu_pg_init_bind_fecs(struct gk20a *g, struct nvgpu_pmu *pmu,
int err = 0; int err = 0;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
(void)pg;
nvgpu_pmu_dbg(g, nvgpu_pmu_dbg(g,
"cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS"); "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_LOADING_PG_BUF, false); nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_LOADING_PG_BUF, false);
@@ -767,6 +771,8 @@ static int pmu_pg_setup_hw_load_zbc(struct gk20a *g, struct nvgpu_pmu *pmu,
{ {
int err = 0; int err = 0;
(void)pg;
nvgpu_pmu_dbg(g, nvgpu_pmu_dbg(g,
"cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC"); "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_LOADING_ZBC, false); nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_LOADING_ZBC, false);
@@ -935,6 +941,8 @@ static int pmu_pg_init_seq_buf(struct gk20a *g, struct nvgpu_pmu *pmu,
int err; int err;
u8 *ptr; u8 *ptr;
(void)pmu;
err = nvgpu_dma_alloc_map_sys(vm, PMU_PG_SEQ_BUF_SIZE, err = nvgpu_dma_alloc_map_sys(vm, PMU_PG_SEQ_BUF_SIZE,
&pg->seq_buf); &pg->seq_buf);
if (err != 0) { if (err != 0) {
@@ -1038,6 +1046,8 @@ int nvgpu_pmu_pg_init(struct gk20a *g, struct nvgpu_pmu *pmu,
int err = 0; int err = 0;
u32 ver = g->params.gpu_arch + g->params.gpu_impl; u32 ver = g->params.gpu_arch + g->params.gpu_impl;
(void)pmu;
if (!g->support_ls_pmu || !g->can_elpg) { if (!g->support_ls_pmu || !g->can_elpg) {
return 0; return 0;
} }
@@ -1106,6 +1116,8 @@ void nvgpu_pmu_pg_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
struct mm_gk20a *mm = &g->mm; struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = mm->pmu.vm; struct vm_gk20a *vm = mm->pmu.vm;
(void)pmu;
if (!is_pg_supported(g, pg)) { if (!is_pg_supported(g, pg)) {
return; return;
} }


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -44,6 +44,8 @@ static void pmgr_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
struct pmgr_pmucmdhandler_params *phandlerparams = struct pmgr_pmucmdhandler_params *phandlerparams =
(struct pmgr_pmucmdhandler_params *)param; (struct pmgr_pmucmdhandler_params *)param;
(void)status;
if ((msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_SET_OBJECT) && if ((msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_SET_OBJECT) &&
(msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_QUERY) && (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_QUERY) &&
(msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_LOAD)) { (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_LOAD)) {
@@ -459,7 +461,7 @@ exit:
static int pmgr_pmu_load_blocking(struct gk20a *g) static int pmgr_pmu_load_blocking(struct gk20a *g)
{ {
struct pmu_cmd cmd = { {0} }; struct pmu_cmd cmd = { };
struct nv_pmu_pmgr_cmd_load *pcmd; struct nv_pmu_pmgr_cmd_load *pcmd;
int status; int status;
struct pmgr_pmucmdhandler_params handlerparams = {0}; struct pmgr_pmucmdhandler_params handlerparams = {0};
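
On the { {0} } -> { } change for cmd: the doubled braces hard-code the assumption that the first member of struct pmu_cmd is itself an aggregate, and -Wmissing-braces complains whenever the written brace nesting does not line up with the type's actual layout. An empty initializer zero-fills the object without encoding any layout (a GNU C extension that C23 later standardized), which is presumably why handlerparams = {0} could stay as-is while cmd changed. A sketch:

	#include <stdio.h>

	struct hdr { unsigned unit; unsigned size; };
	struct cmd {
		struct hdr h;		/* first member is an aggregate */
		unsigned char payload[8];
	};

	int main(void)
	{
		/* struct cmd c = {0};      may trip -Wmissing-braces (clang) */
		/* struct cmd c = { {0} };  must track the layout of 'h'      */
		struct cmd c = { };	/* zero-init, layout-agnostic */

		printf("%u %u %u\n", c.h.unit, c.h.size, (unsigned)c.payload[0]);
		return 0;
	}

The same substitution appears again for pwm_entry in the voltage-device table code further down.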


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -100,6 +100,8 @@ static struct pmu_board_obj *construct_pwr_device(struct gk20a *g,
struct pwr_device_ina3221 *pwrdev; struct pwr_device_ina3221 *pwrdev;
struct pwr_device_ina3221 *ina3221 = (struct pwr_device_ina3221*)pargs; struct pwr_device_ina3221 *ina3221 = (struct pwr_device_ina3221*)pargs;
(void)type;
pwrdev = nvgpu_kzalloc(g, pargs_size); pwrdev = nvgpu_kzalloc(g, pargs_size);
if (pwrdev == NULL) { if (pwrdev == NULL) {
return NULL; return NULL;
@@ -281,7 +283,7 @@ static int devinit_get_pwr_device_table(struct gk20a *g,
} }
status = boardobjgrp_objinsert(&ppwrdeviceobjs->super.super, status = boardobjgrp_objinsert(&ppwrdeviceobjs->super.super,
obj_tmp, obj_index); obj_tmp, (u8)obj_index);
if (status != 0) { if (status != 0) {
nvgpu_err(g, nvgpu_err(g,


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -153,6 +153,8 @@ static struct pmu_board_obj *construct_pwr_topology(struct gk20a *g,
struct pwr_channel_sensor *pwrchannel; struct pwr_channel_sensor *pwrchannel;
struct pwr_channel_sensor *sensor = (struct pwr_channel_sensor*)pargs; struct pwr_channel_sensor *sensor = (struct pwr_channel_sensor*)pargs;
(void)type;
pwrchannel = nvgpu_kzalloc(g, pargs_size); pwrchannel = nvgpu_kzalloc(g, pargs_size);
if (pwrchannel == NULL) { if (pwrchannel == NULL) {
return NULL; return NULL;
@@ -298,7 +300,7 @@ static int devinit_get_pwr_topology_table(struct gk20a *g,
} }
status = boardobjgrp_objinsert(&ppwrmonitorobjs->pwr_channels.super, status = boardobjgrp_objinsert(&ppwrmonitorobjs->pwr_channels.super,
obj_tmp, obj_index); obj_tmp, (u8)obj_index);
if (status != 0) { if (status != 0) {
nvgpu_err(g, nvgpu_err(g,


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -411,7 +411,7 @@ static int _pwr_policy_construct_WAR_SW_Threshold_policy(struct gk20a *g,
} }
status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super, status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
obj_tmp, obj_index); obj_tmp, (u8)obj_index);
if (status != 0) { if (status != 0) {
nvgpu_err(g, nvgpu_err(g,
@@ -688,7 +688,7 @@ static int devinit_get_pwr_policy_table(struct gk20a *g,
} }
status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super, status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
obj_tmp, obj_index); obj_tmp, (u8)obj_index);
if (status != 0) { if (status != 0) {
nvgpu_err(g, nvgpu_err(g,


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -32,6 +32,8 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
{ {
u32 i = 0, j = (u32)strlen(strings); u32 i = 0, j = (u32)strlen(strings);
(void)g;
for (; i < j; i++) { for (; i < j; i++) {
if (strings[i] == '%') { if (strings[i] == '%') {
if (strings[i + 1U] == 'x' || strings[i + 1U] == 'X') { if (strings[i + 1U] == 'x' || strings[i + 1U] == 'X') {


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -88,6 +88,8 @@ void nvgpu_pmu_mutex_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
{ {
u32 i; u32 i;
(void)pmu;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
for (i = 0; i < mutexes->cnt; i++) { for (i = 0; i < mutexes->cnt; i++) {
@@ -102,6 +104,8 @@ int nvgpu_pmu_init_mutexe(struct gk20a *g, struct nvgpu_pmu *pmu,
struct pmu_mutexes *mutexes; struct pmu_mutexes *mutexes;
int err = 0; int err = 0;
(void)pmu;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (*mutexes_p != NULL) { if (*mutexes_p != NULL) {
@@ -136,6 +140,8 @@ exit:
void nvgpu_pmu_mutexe_deinit(struct gk20a *g, struct nvgpu_pmu *pmu, void nvgpu_pmu_mutexe_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
struct pmu_mutexes *mutexes) struct pmu_mutexes *mutexes)
{ {
(void)pmu;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (mutexes == NULL) { if (mutexes == NULL) {


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -249,7 +249,7 @@ void nvgpu_pmu_rtos_cmdline_args_init(struct gk20a *g, struct nvgpu_pmu *pmu)
pmu, GK20A_PMU_DMAIDX_VIRT); pmu, GK20A_PMU_DMAIDX_VIRT);
pmu->fw->ops.set_cmd_line_args_cpu_freq(pmu, pmu->fw->ops.set_cmd_line_args_cpu_freq(pmu,
g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK)); (u32)g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
if (pmu->fw->ops.config_cmd_line_args_super_surface != NULL) { if (pmu->fw->ops.config_cmd_line_args_super_surface != NULL) {
pmu->fw->ops.config_cmd_line_args_super_surface(pmu); pmu->fw->ops.config_cmd_line_args_super_surface(pmu);
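
The (u32) on the get_rate() result is -Wconversion/-Wsign-conversion again, this time on a return value: the clock getter returns a wider (and possibly differently signed) type than the u32 frequency field the PMU command line carries, so the narrowing has to be spelled out once the caller accepts it. A sketch with assumed types (the real nvgpu signature may differ):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t u32;

	/* assumed: rate getters commonly return unsigned long Hz */
	static unsigned long clk_get_rate_hz(void)
	{
		return 204000000UL;
	}

	struct fw_args { u32 cpu_freq_hz; };

	static void set_cmd_line_args_cpu_freq(struct fw_args *a)
	{
		/* unsigned long -> u32 narrows on LP64 targets; the explicit
		 * cast records that the value is known to fit. */
		a->cpu_freq_hz = (u32)clk_get_rate_hz();
	}

	int main(void)
	{
		struct fw_args a;
		set_cmd_line_args_cpu_freq(&a);
		printf("%u\n", a.cpu_freq_hz);
		return 0;
	}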


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
#include <nvgpu/pmu.h> #include <nvgpu/pmu.h>
#include <nvgpu/dma.h> #include <nvgpu/dma.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/string.h>
#include <nvgpu/pmu/super_surface.h> #include <nvgpu/pmu/super_surface.h>
#include "super_surface_priv.h" #include "super_surface_priv.h"
@@ -71,6 +72,8 @@ int nvgpu_pmu_super_surface_buf_alloc(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nvgpu_mem *nvgpu_pmu_super_surface_mem(struct gk20a *g, struct nvgpu_mem *nvgpu_pmu_super_surface_mem(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_super_surface *ss) struct nvgpu_pmu *pmu, struct nvgpu_pmu_super_surface *ss)
{ {
(void)g;
(void)pmu;
return &ss->super_surface_buf; return &ss->super_surface_buf;
} }
@@ -90,6 +93,8 @@ int nvgpu_pmu_ss_create_ssmd_lookup_table(struct gk20a *g,
u32 idx = 0U; u32 idx = 0U;
int err = 0; int err = 0;
(void)pmu;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (ss == NULL) { if (ss == NULL) {
@@ -154,30 +159,37 @@ int nvgpu_pmu_ss_create_ssmd_lookup_table(struct gk20a *g,
u32 nvgpu_pmu_get_ss_member_set_offset(struct gk20a *g, u32 nvgpu_pmu_get_ss_member_set_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id) struct nvgpu_pmu *pmu, u32 member_id)
{ {
(void)g;
return pmu->super_surface->ssmd_set[member_id].offset; return pmu->super_surface->ssmd_set[member_id].offset;
} }
u32 nvgpu_pmu_get_ss_member_set_size(struct gk20a *g, u32 nvgpu_pmu_get_ss_member_set_size(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id) struct nvgpu_pmu *pmu, u32 member_id)
{ {
(void)g;
return pmu->super_surface->ssmd_set[member_id].size; return pmu->super_surface->ssmd_set[member_id].size;
} }
u32 nvgpu_pmu_get_ss_member_get_status_offset(struct gk20a *g, u32 nvgpu_pmu_get_ss_member_get_status_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id) struct nvgpu_pmu *pmu, u32 member_id)
{ {
(void)g;
return pmu->super_surface->ssmd_get_status[member_id].offset; return pmu->super_surface->ssmd_get_status[member_id].offset;
} }
u32 nvgpu_pmu_get_ss_member_get_status_size(struct gk20a *g, u32 nvgpu_pmu_get_ss_member_get_status_size(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id) struct nvgpu_pmu *pmu, u32 member_id)
{ {
(void)g;
return pmu->super_surface->ssmd_get_status[member_id].size; return pmu->super_surface->ssmd_get_status[member_id].size;
} }
u32 nvgpu_pmu_get_ss_cmd_fbq_offset(struct gk20a *g, u32 nvgpu_pmu_get_ss_cmd_fbq_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_super_surface *ss, u32 id) struct nvgpu_pmu *pmu, struct nvgpu_pmu_super_surface *ss, u32 id)
{ {
(void)g;
(void)pmu;
(void)ss;
return (u32)offsetof(struct super_surface, return (u32)offsetof(struct super_surface,
fbq.cmd_queues.queue[id]); fbq.cmd_queues.queue[id]);
} }
@@ -185,6 +197,9 @@ u32 nvgpu_pmu_get_ss_cmd_fbq_offset(struct gk20a *g,
u32 nvgpu_pmu_get_ss_msg_fbq_offset(struct gk20a *g, u32 nvgpu_pmu_get_ss_msg_fbq_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_super_surface *ss) struct nvgpu_pmu *pmu, struct nvgpu_pmu_super_surface *ss)
{ {
(void)g;
(void)pmu;
(void)ss;
return (u32)offsetof(struct super_surface, return (u32)offsetof(struct super_surface,
fbq.msg_queue); fbq.msg_queue);
} }
@@ -192,6 +207,9 @@ u32 nvgpu_pmu_get_ss_msg_fbq_offset(struct gk20a *g,
u32 nvgpu_pmu_get_ss_msg_fbq_element_offset(struct gk20a *g, u32 nvgpu_pmu_get_ss_msg_fbq_element_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_super_surface *ss, u32 idx) struct nvgpu_pmu *pmu, struct nvgpu_pmu_super_surface *ss, u32 idx)
{ {
(void)g;
(void)pmu;
(void)ss;
return (u32)offsetof(struct super_surface, return (u32)offsetof(struct super_surface,
fbq.msg_queue.element[idx]); fbq.msg_queue.element[idx]);
} }
@@ -212,6 +230,8 @@ void nvgpu_pmu_ss_fbq_flush(struct gk20a *g, struct nvgpu_pmu *pmu)
void nvgpu_pmu_super_surface_deinit(struct gk20a *g, struct nvgpu_pmu *pmu, void nvgpu_pmu_super_surface_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nvgpu_pmu_super_surface *ss) struct nvgpu_pmu_super_surface *ss)
{ {
(void)pmu;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (ss == NULL) { if (ss == NULL) {
@@ -228,6 +248,8 @@ void nvgpu_pmu_super_surface_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
int nvgpu_pmu_super_surface_init(struct gk20a *g, struct nvgpu_pmu *pmu, int nvgpu_pmu_super_surface_init(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nvgpu_pmu_super_surface **super_surface) struct nvgpu_pmu_super_surface **super_surface)
{ {
(void)pmu;
if (*super_surface != NULL) { if (*super_surface != NULL) {
/* skip alloc/reinit for unrailgate sequence */ /* skip alloc/reinit for unrailgate sequence */
return 0; return 0;


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -78,6 +78,8 @@ static struct pmu_board_obj *construct_channel_device(struct gk20a *g,
u16 scale_shift = BIT16(8); u16 scale_shift = BIT16(8);
struct therm_channel_device *therm_device = (struct therm_channel_device*)pargs; struct therm_channel_device *therm_device = (struct therm_channel_device*)pargs;
(void)type;
pchannel_device = nvgpu_kzalloc(g, pargs_size); pchannel_device = nvgpu_kzalloc(g, pargs_size);
if (pchannel_device == NULL) { if (pchannel_device == NULL) {
return NULL; return NULL;
@@ -141,6 +143,8 @@ static int therm_channel_pmustatus_instget(struct gk20a *g,
(struct nv_pmu_therm_therm_channel_boardobj_grp_get_status *) (struct nv_pmu_therm_therm_channel_boardobj_grp_get_status *)
(void *)pboardobjgrppmu; (void *)pboardobjgrppmu;
(void)g;
/*check whether pmuboardobjgrp has a valid boardobj in index*/ /*check whether pmuboardobjgrp has a valid boardobj in index*/
if (((u32)BIT(idx) & if (((u32)BIT(idx) &
pmu_status->hdr.data.super.obj_mask.super.data[0]) == 0U) { pmu_status->hdr.data.super.obj_mask.super.data[0]) == 0U) {
@@ -232,7 +236,7 @@ static int devinit_get_therm_channel_table(struct gk20a *g,
} }
status = boardobjgrp_objinsert(&pthermchannelobjs->super.super, status = boardobjgrp_objinsert(&pthermchannelobjs->super.super,
obj_tmp, obj_index); obj_tmp, (u8)obj_index);
if (status != 0) { if (status != 0) {
nvgpu_err(g, nvgpu_err(g,


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -199,7 +199,7 @@ static int devinit_get_therm_device_table(struct gk20a *g,
} }
status = boardobjgrp_objinsert(&pthermdeviceobjs->super.super, status = boardobjgrp_objinsert(&pthermdeviceobjs->super.super,
obj_tmp, obj_index); obj_tmp, (u8)obj_index);
if (status != 0) { if (status != 0) {
nvgpu_err(g, nvgpu_err(g,


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -29,6 +29,7 @@
static void therm_unit_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu, static void therm_unit_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nv_pmu_rpc_header *rpc) struct nv_pmu_rpc_header *rpc)
{ {
(void)pmu;
switch (rpc->function) { switch (rpc->function) {
case NV_PMU_RPC_ID_THERM_BOARD_OBJ_GRP_CMD: case NV_PMU_RPC_ID_THERM_BOARD_OBJ_GRP_CMD:
nvgpu_pmu_dbg(g, nvgpu_pmu_dbg(g,
@@ -70,6 +71,8 @@ int nvgpu_pmu_therm_pmu_setup(struct gk20a *g, struct nvgpu_pmu *pmu)
{ {
int status; int status;
(void)pmu;
status = therm_device_pmu_setup(g); status = therm_device_pmu_setup(g);
if (status != 0) { if (status != 0) {
nvgpu_err(g, "Therm device pmu setup failed - 0x%x", status); nvgpu_err(g, "Therm device pmu setup failed - 0x%x", status);


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -22,6 +22,7 @@
#include <nvgpu/pmu/volt.h> #include <nvgpu/pmu/volt.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/string.h>
#include <nvgpu/pmu/cmd.h> #include <nvgpu/pmu/cmd.h>
#include "volt.h" #include "volt.h"


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -217,7 +217,7 @@ static int volt_get_voltage_device_table_1x_psv(struct gk20a *g,
u8 ext_dev_idx; u8 ext_dev_idx;
u8 steps; u8 steps;
u8 volt_domain = 0; u8 volt_domain = 0;
struct voltage_device_pwm_entry pwm_entry = { { 0 } }; struct voltage_device_pwm_entry pwm_entry = { };
ptmp_dev = nvgpu_kzalloc(g, sizeof(struct voltage_device_pwm)); ptmp_dev = nvgpu_kzalloc(g, sizeof(struct voltage_device_pwm));
if (ptmp_dev == NULL) { if (ptmp_dev == NULL) {


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -308,6 +308,7 @@ static int volt_rail_devgrp_pmustatus_instget(struct gk20a *g,
(struct nv_pmu_volt_volt_rail_boardobj_grp_get_status *) (struct nv_pmu_volt_volt_rail_boardobj_grp_get_status *)
pboardobjgrppmu; pboardobjgrppmu;
(void)g;
/*check whether pmuboardobjgrp has a valid boardobj in index*/ /*check whether pmuboardobjgrp has a valid boardobj in index*/
if (((u32)BIT(idx) & if (((u32)BIT(idx) &
pgrp_get_status->hdr.data.super.obj_mask.super.data[0]) == 0U) { pgrp_get_status->hdr.data.super.obj_mask.super.data[0]) == 0U) {
@@ -479,6 +480,7 @@ int volt_rail_pmu_setup(struct gk20a *g)
u8 volt_rail_vbios_volt_domain_convert_to_internal(struct gk20a *g, u8 volt_rail_vbios_volt_domain_convert_to_internal(struct gk20a *g,
u8 vbios_volt_domain) u8 vbios_volt_domain)
{ {
(void)vbios_volt_domain;
if (g->pmu->volt->volt_metadata->volt_rail_metadata.volt_domain_hal == if (g->pmu->volt->volt_metadata->volt_rail_metadata.volt_domain_hal ==
CTRL_VOLT_DOMAIN_HAL_GP10X_SINGLE_RAIL) { CTRL_VOLT_DOMAIN_HAL_GP10X_SINGLE_RAIL) {
return CTRL_VOLT_DOMAIN_LOGIC; return CTRL_VOLT_DOMAIN_LOGIC;
@@ -534,6 +536,7 @@ exit:
u8 nvgpu_pmu_volt_rail_volt_domain_convert_to_idx(struct gk20a *g, u8 volt_domain) u8 nvgpu_pmu_volt_rail_volt_domain_convert_to_idx(struct gk20a *g, u8 volt_domain)
{ {
(void)volt_domain;
if (g->pmu->volt->volt_metadata->volt_rail_metadata.volt_domain_hal == if (g->pmu->volt->volt_metadata->volt_rail_metadata.volt_domain_hal ==
CTRL_VOLT_DOMAIN_HAL_GP10X_SINGLE_RAIL) { CTRL_VOLT_DOMAIN_HAL_GP10X_SINGLE_RAIL) {
return 0U; return 0U;