gpu: nvgpu: rename secure ops to safe ops

Rename secure_ops.h to safe_ops.h and change the unsigned
type operations from nvgpu_secure_* to nvgpu_safe_*.

NVGPU-3432

Change-Id: I395896405ee2e4269ced88f251b097c5043cdeef
Signed-off-by: Nitin Kumbhar <nkumbhar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2122571
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Nitin Kumbhar
Date: 2019-05-21 10:38:48 +05:30
Committed by: mobile promotions
Commit: 1bf55ec715 (parent a46eca3483)
7 changed files with 33 additions and 33 deletions
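The rename is purely mechanical: every call site keeps its arguments and semantics and only swaps the nvgpu_secure_ prefix for nvgpu_safe_, e.g. (illustrative line mirroring the nvgpu_gr_gpc_offset() hunk below):

-	u32 gpc_offset = nvgpu_secure_mult_u32(gpc_stride, gpc);
+	u32 gpc_offset = nvgpu_safe_mult_u32(gpc_stride, gpc);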


@@ -14,7 +14,7 @@ nvgpu:
   owner: Alex W
   sources: [ include/nvgpu/gk20a.h,
              include/nvgpu/nvgpu_common.h,
-             include/nvgpu/secure_ops.h ]
+             include/nvgpu/safe_ops.h ]
   bios:
     safe: yes


@@ -21,7 +21,7 @@
  */
 #include <nvgpu/gk20a.h>
-#include <nvgpu/secure_ops.h>
+#include <nvgpu/safe_ops.h>
 #include <nvgpu/gr/global_ctx.h>
 #include <nvgpu/gr/ctx.h>
 #include <nvgpu/vm.h>
@@ -579,7 +579,7 @@ int nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
 	if (g->ops.gr.ctxsw_prog.set_pmu_options_boost_clock_frequencies !=
 			NULL) {
 		g->ops.gr.ctxsw_prog.set_pmu_options_boost_clock_frequencies(g,
-			mem, nvgpu_secure_cast_bool_to_u32(gr_ctx->boosted_ctx));
+			mem, nvgpu_safe_cast_bool_to_u32(gr_ctx->boosted_ctx));
 	}
 	nvgpu_log(g, gpu_dbg_info, "write patch count = %d",
@@ -640,10 +640,10 @@ void nvgpu_gr_ctx_patch_write(struct gk20a *g,
 {
 	if (patch) {
 		u32 patch_slot =
-			nvgpu_secure_mult_u32(gr_ctx->patch_ctx.data_count,
+			nvgpu_safe_mult_u32(gr_ctx->patch_ctx.data_count,
 				PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY);
 		u64 patch_slot_max =
-			nvgpu_secure_sub_u64(
+			nvgpu_safe_sub_u64(
 				PATCH_CTX_ENTRIES_FROM_SIZE(gr_ctx->patch_ctx.mem.size),
 				PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY);


@@ -21,7 +21,7 @@
  */
 #include <nvgpu/gk20a.h>
-#include <nvgpu/secure_ops.h>
+#include <nvgpu/safe_ops.h>
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/fs_state.h>
@@ -59,7 +59,7 @@ static void gr_load_tpc_mask(struct gk20a *g, struct nvgpu_gr_config *config)
 				pes++) {
 			pes_tpc_mask |= nvgpu_gr_config_get_pes_tpc_mask(
 						config, gpc, pes) <<
-					nvgpu_secure_mult_u32(num_tpc_per_gpc, gpc);
+					nvgpu_safe_mult_u32(num_tpc_per_gpc, gpc);
 		}
 	}
@@ -69,11 +69,11 @@ static void gr_load_tpc_mask(struct gk20a *g, struct nvgpu_gr_config *config)
 	if ((g->tpc_fs_mask_user != 0U) &&
 			(g->tpc_fs_mask_user != fuse_tpc_mask) &&
 			(fuse_tpc_mask ==
-			 nvgpu_secure_sub_u32(BIT32(max_tpc_count), U32(1)))) {
+			 nvgpu_safe_sub_u32(BIT32(max_tpc_count), U32(1)))) {
 		val = g->tpc_fs_mask_user;
-		val &= nvgpu_secure_sub_u32(BIT32(max_tpc_count), U32(1));
+		val &= nvgpu_safe_sub_u32(BIT32(max_tpc_count), U32(1));
 		/* skip tpc to disable the other tpc cause channel timeout */
-		val = nvgpu_secure_sub_u32(BIT32(hweight32(val)), U32(1));
+		val = nvgpu_safe_sub_u32(BIT32(hweight32(val)), U32(1));
 		pes_tpc_mask = val;
 	}
 	g->ops.gr.init.tpc_mask(g, 0, pes_tpc_mask);
@@ -131,9 +131,9 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config)
 	if ((g->tpc_fs_mask_user != 0U) &&
 			(fuse_tpc_mask ==
-			 nvgpu_secure_sub_u32(BIT32(max_tpc_cnt), U32(1)))) {
+			 nvgpu_safe_sub_u32(BIT32(max_tpc_cnt), U32(1)))) {
 		u32 val = g->tpc_fs_mask_user;
-		val &= nvgpu_secure_sub_u32(BIT32(max_tpc_cnt), U32(1));
+		val &= nvgpu_safe_sub_u32(BIT32(max_tpc_cnt), U32(1));
 		tpc_cnt = (u32)hweight32(val);
 	}
 	g->ops.gr.init.cwd_gpcs_tpcs_num(g, gpc_cnt, tpc_cnt);


@@ -24,7 +24,7 @@
 #include <nvgpu/io.h>
 #include <nvgpu/unit.h>
 #include <nvgpu/errno.h>
-#include <nvgpu/secure_ops.h>
+#include <nvgpu/safe_ops.h>
 #include <nvgpu/gr/gr.h>
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/gr_intr.h>
@@ -115,7 +115,7 @@ u32 nvgpu_gr_get_no_of_sm(struct gk20a *g)
 u32 nvgpu_gr_gpc_offset(struct gk20a *g, u32 gpc)
 {
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
-	u32 gpc_offset = nvgpu_secure_mult_u32(gpc_stride , gpc);
+	u32 gpc_offset = nvgpu_safe_mult_u32(gpc_stride , gpc);
 	return gpc_offset;
 }
@@ -124,7 +124,7 @@ u32 nvgpu_gr_tpc_offset(struct gk20a *g, u32 tpc)
 {
 	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
 					GPU_LIT_TPC_IN_GPC_STRIDE);
-	u32 tpc_offset = nvgpu_secure_mult_u32(tpc_in_gpc_stride, tpc);
+	u32 tpc_offset = nvgpu_safe_mult_u32(tpc_in_gpc_stride, tpc);
 	return tpc_offset;
 }


@@ -34,7 +34,7 @@
 #include <nvgpu/gr/gr_falcon.h>
 #include <nvgpu/gr/fs_state.h>
 #include <nvgpu/power_features/cg.h>
-#include <nvgpu/secure_ops.h>
+#include <nvgpu/safe_ops.h>
 #include "obj_ctx_priv.h"
@@ -298,7 +298,7 @@ int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 	/* global pagepool buffer */
 	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_PAGEPOOL_VA);
-	size = nvgpu_secure_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size(
+	size = nvgpu_safe_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size(
 			global_ctx_buffer, NVGPU_GR_GLOBAL_CTX_PAGEPOOL));
 	g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size, patch,
@@ -306,7 +306,7 @@ int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 	/* global bundle cb */
 	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_CIRCULAR_VA);
-	size = nvgpu_secure_cast_u64_to_u32(
+	size = nvgpu_safe_cast_u64_to_u32(
 			g->ops.gr.init.get_bundle_cb_default_size(g));
 	g->ops.gr.init.commit_global_bundle_cb(g, gr_ctx, addr, size, patch);
@@ -593,7 +593,7 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
 	nvgpu_gr_ctx_set_size(gr_ctx_desc,
 		NVGPU_GR_CTX_PATCH_CTX,
-		nvgpu_secure_mult_u32(
+		nvgpu_safe_mult_u32(
 			g->ops.gr.init.get_patch_slots(g, config),
 			PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY));


@@ -23,7 +23,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/utils.h>
 #include <nvgpu/nvgpu_mem.h>
-#include <nvgpu/secure_ops.h>
+#include <nvgpu/safe_ops.h>
 #include "ctxsw_prog_gm20b.h"
@@ -185,7 +185,7 @@ void gm20b_ctxsw_prog_set_pc_sampling(struct gk20a *g,
 	data &= ~ctxsw_prog_main_image_pm_pc_sampling_m();
 	data |= ctxsw_prog_main_image_pm_pc_sampling_f(
-		nvgpu_secure_cast_bool_to_u32(enable));
+		nvgpu_safe_cast_bool_to_u32(enable));
 	nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_o(), data);
 }
@@ -282,7 +282,7 @@ u32 gm20b_ctxsw_prog_hw_get_ts_tag_invalid_timestamp(void)
 u32 gm20b_ctxsw_prog_hw_get_ts_tag(u64 ts)
 {
 	return ctxsw_prog_record_timestamp_timestamp_hi_tag_v(
-		nvgpu_secure_cast_u64_to_u32(ts >> 32));
+		nvgpu_safe_cast_u64_to_u32(ts >> 32));
 }
 u64 gm20b_ctxsw_prog_hw_record_ts_timestamp(u64 ts)


@@ -20,10 +20,10 @@
  * DEALINGS IN THE SOFTWARE.
  */
-#ifndef NVGPU_SECURE_OPS_H
-#define NVGPU_SECURE_OPS_H
+#ifndef NVGPU_SAFE_OPS_H
+#define NVGPU_SAFE_OPS_H
-static inline u32 nvgpu_secure_add_u32(u32 ui_a, u32 ui_b)
+static inline u32 nvgpu_safe_add_u32(u32 ui_a, u32 ui_b)
 {
 	if (UINT_MAX - ui_a < ui_b) {
 		BUG();
@@ -32,7 +32,7 @@ static inline u32 nvgpu_secure_add_u32(u32 ui_a, u32 ui_b)
 	}
 }
-static inline u64 nvgpu_secure_add_u64(u64 ul_a, u64 ul_b)
+static inline u64 nvgpu_safe_add_u64(u64 ul_a, u64 ul_b)
 {
 	if (ULONG_MAX - ul_a < ul_b) {
 		BUG();
@@ -41,7 +41,7 @@ static inline u64 nvgpu_secure_add_u64(u64 ul_a, u64 ul_b)
 	}
 }
-static inline u32 nvgpu_secure_sub_u32(u32 ui_a, u32 ui_b)
+static inline u32 nvgpu_safe_sub_u32(u32 ui_a, u32 ui_b)
 {
 	if (ui_a < ui_b) {
 		BUG();
@@ -50,7 +50,7 @@ static inline u32 nvgpu_secure_sub_u32(u32 ui_a, u32 ui_b)
 	}
 }
-static inline u64 nvgpu_secure_sub_u64(u64 ul_a, u64 ul_b)
+static inline u64 nvgpu_safe_sub_u64(u64 ul_a, u64 ul_b)
 {
 	if (ul_a < ul_b) {
 		BUG();
@@ -59,7 +59,7 @@ static inline u64 nvgpu_secure_sub_u64(u64 ul_a, u64 ul_b)
 	}
 }
-static inline u32 nvgpu_secure_mult_u32(u32 ui_a, u32 ui_b)
+static inline u32 nvgpu_safe_mult_u32(u32 ui_a, u32 ui_b)
 {
 	if (ui_a == 0 || ui_b == 0) {
 		return 0U;
@@ -70,7 +70,7 @@ static inline u32 nvgpu_secure_mult_u32(u32 ui_a, u32 ui_b)
 	}
 }
-static inline u64 nvgpu_secure_mult_u64(u64 ul_a, u64 ul_b)
+static inline u64 nvgpu_safe_mult_u64(u64 ul_a, u64 ul_b)
 {
 	if (ul_a == 0 || ul_b == 0) {
 		return 0UL;
@@ -81,7 +81,7 @@ static inline u64 nvgpu_secure_mult_u64(u64 ul_a, u64 ul_b)
 	}
 }
-static inline u32 nvgpu_secure_cast_u64_to_u32(u64 ul_a)
+static inline u32 nvgpu_safe_cast_u64_to_u32(u64 ul_a)
 {
 	if (ul_a > UINT_MAX) {
 		BUG();
@@ -90,9 +90,9 @@ static inline u32 nvgpu_secure_cast_u64_to_u32(u64 ul_a)
 	}
 }
-static inline u32 nvgpu_secure_cast_bool_to_u32(bool bl_a)
+static inline u32 nvgpu_safe_cast_bool_to_u32(bool bl_a)
 {
 	return bl_a == true ? 1U : 0U;
 }
-#endif /* NVGPU_SECURE_OPS_H */
+#endif /* NVGPU_SAFE_OPS_H */
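For readers unfamiliar with these helpers: each nvgpu_safe_* function traps with BUG() on overflow, underflow, or a truncating cast instead of wrapping silently. A minimal usage sketch follows; the patch_slots_remaining() function and its parameters are hypothetical illustrations, only the nvgpu_safe_* calls come from the header above.

#include <nvgpu/safe_ops.h>

/* Hypothetical caller, for illustration only; not part of this change. */
static u32 patch_slots_remaining(u32 slots_used, u32 slots_per_entry,
				 u64 max_slots)
{
	/* BUG()s on u32 overflow instead of wrapping around */
	u32 next = nvgpu_safe_add_u32(slots_used, slots_per_entry);

	/* BUG()s on u64 underflow, i.e. if max_slots < next */
	u64 left = nvgpu_safe_sub_u64(max_slots, (u64)next);

	/* BUG()s if the result does not fit in 32 bits */
	return nvgpu_safe_cast_u64_to_u32(left);
}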