diff --git a/arch/nvgpu-common.yaml b/arch/nvgpu-common.yaml index 26331a743..5ba1576b6 100644 --- a/arch/nvgpu-common.yaml +++ b/arch/nvgpu-common.yaml @@ -14,7 +14,7 @@ nvgpu: owner: Alex W sources: [ include/nvgpu/gk20a.h, include/nvgpu/nvgpu_common.h, - include/nvgpu/secure_ops.h ] + include/nvgpu/safe_ops.h ] bios: safe: yes diff --git a/drivers/gpu/nvgpu/common/gr/ctx.c b/drivers/gpu/nvgpu/common/gr/ctx.c index e87e6b401..633b28d4c 100644 --- a/drivers/gpu/nvgpu/common/gr/ctx.c +++ b/drivers/gpu/nvgpu/common/gr/ctx.c @@ -21,7 +21,7 @@ */ #include -#include <nvgpu/secure_ops.h> +#include <nvgpu/safe_ops.h> #include #include #include @@ -579,7 +579,7 @@ int nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g, if (g->ops.gr.ctxsw_prog.set_pmu_options_boost_clock_frequencies != NULL) { g->ops.gr.ctxsw_prog.set_pmu_options_boost_clock_frequencies(g, - mem, nvgpu_secure_cast_bool_to_u32(gr_ctx->boosted_ctx)); + mem, nvgpu_safe_cast_bool_to_u32(gr_ctx->boosted_ctx)); } nvgpu_log(g, gpu_dbg_info, "write patch count = %d", @@ -640,10 +640,10 @@ void nvgpu_gr_ctx_patch_write(struct gk20a *g, { if (patch) { u32 patch_slot = - nvgpu_secure_mult_u32(gr_ctx->patch_ctx.data_count, + nvgpu_safe_mult_u32(gr_ctx->patch_ctx.data_count, PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY); u64 patch_slot_max = - nvgpu_secure_sub_u64( + nvgpu_safe_sub_u64( PATCH_CTX_ENTRIES_FROM_SIZE(gr_ctx->patch_ctx.mem.size), PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY); diff --git a/drivers/gpu/nvgpu/common/gr/fs_state.c b/drivers/gpu/nvgpu/common/gr/fs_state.c index 151125c61..f43f5aaad 100644 --- a/drivers/gpu/nvgpu/common/gr/fs_state.c +++ b/drivers/gpu/nvgpu/common/gr/fs_state.c @@ -21,7 +21,7 @@ */ #include -#include <nvgpu/secure_ops.h> +#include <nvgpu/safe_ops.h> #include #include @@ -59,7 +59,7 @@ static void gr_load_tpc_mask(struct gk20a *g, struct nvgpu_gr_config *config) pes++) { pes_tpc_mask |= nvgpu_gr_config_get_pes_tpc_mask( config, gpc, pes) << - nvgpu_secure_mult_u32(num_tpc_per_gpc, gpc); + nvgpu_safe_mult_u32(num_tpc_per_gpc, gpc); } } @@ -69,11 +69,11 @@ static void 
gr_load_tpc_mask(struct gk20a *g, struct nvgpu_gr_config *config) if ((g->tpc_fs_mask_user != 0U) && (g->tpc_fs_mask_user != fuse_tpc_mask) && (fuse_tpc_mask == - nvgpu_secure_sub_u32(BIT32(max_tpc_count), U32(1)))) { + nvgpu_safe_sub_u32(BIT32(max_tpc_count), U32(1)))) { val = g->tpc_fs_mask_user; - val &= nvgpu_secure_sub_u32(BIT32(max_tpc_count), U32(1)); + val &= nvgpu_safe_sub_u32(BIT32(max_tpc_count), U32(1)); /* skip tpc to disable the other tpc cause channel timeout */ - val = nvgpu_secure_sub_u32(BIT32(hweight32(val)), U32(1)); + val = nvgpu_safe_sub_u32(BIT32(hweight32(val)), U32(1)); pes_tpc_mask = val; } g->ops.gr.init.tpc_mask(g, 0, pes_tpc_mask); @@ -131,9 +131,9 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config) if ((g->tpc_fs_mask_user != 0U) && (fuse_tpc_mask == - nvgpu_secure_sub_u32(BIT32(max_tpc_cnt), U32(1)))) { + nvgpu_safe_sub_u32(BIT32(max_tpc_cnt), U32(1)))) { u32 val = g->tpc_fs_mask_user; - val &= nvgpu_secure_sub_u32(BIT32(max_tpc_cnt), U32(1)); + val &= nvgpu_safe_sub_u32(BIT32(max_tpc_cnt), U32(1)); tpc_cnt = (u32)hweight32(val); } g->ops.gr.init.cwd_gpcs_tpcs_num(g, gpc_cnt, tpc_cnt); diff --git a/drivers/gpu/nvgpu/common/gr/gr.c b/drivers/gpu/nvgpu/common/gr/gr.c index 87aa69c7f..5c7fd585f 100644 --- a/drivers/gpu/nvgpu/common/gr/gr.c +++ b/drivers/gpu/nvgpu/common/gr/gr.c @@ -24,7 +24,7 @@ #include #include #include -#include <nvgpu/secure_ops.h> +#include <nvgpu/safe_ops.h> #include #include #include @@ -115,7 +115,7 @@ u32 nvgpu_gr_get_no_of_sm(struct gk20a *g) u32 nvgpu_gr_gpc_offset(struct gk20a *g, u32 gpc) { u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); - u32 gpc_offset = nvgpu_secure_mult_u32(gpc_stride , gpc); + u32 gpc_offset = nvgpu_safe_mult_u32(gpc_stride , gpc); return gpc_offset; } @@ -124,7 +124,7 @@ u32 nvgpu_gr_tpc_offset(struct gk20a *g, u32 tpc) { u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); - u32 tpc_offset = nvgpu_secure_mult_u32(tpc_in_gpc_stride, tpc); + u32 tpc_offset = 
nvgpu_safe_mult_u32(tpc_in_gpc_stride, tpc); return tpc_offset; } diff --git a/drivers/gpu/nvgpu/common/gr/obj_ctx.c b/drivers/gpu/nvgpu/common/gr/obj_ctx.c index 611717aa9..8f83d4b03 100644 --- a/drivers/gpu/nvgpu/common/gr/obj_ctx.c +++ b/drivers/gpu/nvgpu/common/gr/obj_ctx.c @@ -34,7 +34,7 @@ #include #include #include -#include <nvgpu/secure_ops.h> +#include <nvgpu/safe_ops.h> #include "obj_ctx_priv.h" @@ -298,7 +298,7 @@ int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g, /* global pagepool buffer */ addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_PAGEPOOL_VA); - size = nvgpu_secure_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size( + size = nvgpu_safe_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size( global_ctx_buffer, NVGPU_GR_GLOBAL_CTX_PAGEPOOL)); g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size, patch, @@ -306,7 +306,7 @@ int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g, /* global bundle cb */ addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_CIRCULAR_VA); - size = nvgpu_secure_cast_u64_to_u32( + size = nvgpu_safe_cast_u64_to_u32( g->ops.gr.init.get_bundle_cb_default_size(g)); g->ops.gr.init.commit_global_bundle_cb(g, gr_ctx, addr, size, patch); @@ -593,7 +593,7 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g, nvgpu_gr_ctx_set_size(gr_ctx_desc, NVGPU_GR_CTX_PATCH_CTX, - nvgpu_secure_mult_u32( + nvgpu_safe_mult_u32( g->ops.gr.init.get_patch_slots(g, config), PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY)); diff --git a/drivers/gpu/nvgpu/hal/gr/ctxsw_prog/ctxsw_prog_gm20b.c b/drivers/gpu/nvgpu/hal/gr/ctxsw_prog/ctxsw_prog_gm20b.c index 95e76cb13..cc09f375e 100644 --- a/drivers/gpu/nvgpu/hal/gr/ctxsw_prog/ctxsw_prog_gm20b.c +++ b/drivers/gpu/nvgpu/hal/gr/ctxsw_prog/ctxsw_prog_gm20b.c @@ -23,7 +23,7 @@ #include #include #include -#include <nvgpu/secure_ops.h> +#include <nvgpu/safe_ops.h> #include "ctxsw_prog_gm20b.h" @@ -185,7 +185,7 @@ void gm20b_ctxsw_prog_set_pc_sampling(struct gk20a *g, data &= ~ctxsw_prog_main_image_pm_pc_sampling_m(); data |= ctxsw_prog_main_image_pm_pc_sampling_f( - 
nvgpu_secure_cast_bool_to_u32(enable)); + nvgpu_safe_cast_bool_to_u32(enable)); nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_o(), data); } @@ -282,7 +282,7 @@ u32 gm20b_ctxsw_prog_hw_get_ts_tag_invalid_timestamp(void) u32 gm20b_ctxsw_prog_hw_get_ts_tag(u64 ts) { return ctxsw_prog_record_timestamp_timestamp_hi_tag_v( - nvgpu_secure_cast_u64_to_u32(ts >> 32)); + nvgpu_safe_cast_u64_to_u32(ts >> 32)); } u64 gm20b_ctxsw_prog_hw_record_ts_timestamp(u64 ts) diff --git a/drivers/gpu/nvgpu/include/nvgpu/secure_ops.h b/drivers/gpu/nvgpu/include/nvgpu/safe_ops.h similarity index 76% rename from drivers/gpu/nvgpu/include/nvgpu/secure_ops.h rename to drivers/gpu/nvgpu/include/nvgpu/safe_ops.h index eb7e92c98..2946a5d75 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/secure_ops.h +++ b/drivers/gpu/nvgpu/include/nvgpu/safe_ops.h @@ -20,10 +20,10 @@ * DEALINGS IN THE SOFTWARE. */ -#ifndef NVGPU_SECURE_OPS_H -#define NVGPU_SECURE_OPS_H +#ifndef NVGPU_SAFE_OPS_H +#define NVGPU_SAFE_OPS_H -static inline u32 nvgpu_secure_add_u32(u32 ui_a, u32 ui_b) +static inline u32 nvgpu_safe_add_u32(u32 ui_a, u32 ui_b) { if (UINT_MAX - ui_a < ui_b) { BUG(); @@ -32,7 +32,7 @@ static inline u32 nvgpu_secure_add_u32(u32 ui_a, u32 ui_b) } } -static inline u64 nvgpu_secure_add_u64(u64 ul_a, u64 ul_b) +static inline u64 nvgpu_safe_add_u64(u64 ul_a, u64 ul_b) { if (ULONG_MAX - ul_a < ul_b) { BUG(); @@ -41,7 +41,7 @@ static inline u64 nvgpu_secure_add_u64(u64 ul_a, u64 ul_b) } } -static inline u32 nvgpu_secure_sub_u32(u32 ui_a, u32 ui_b) +static inline u32 nvgpu_safe_sub_u32(u32 ui_a, u32 ui_b) { if (ui_a < ui_b) { BUG(); @@ -50,7 +50,7 @@ static inline u32 nvgpu_secure_sub_u32(u32 ui_a, u32 ui_b) } } -static inline u64 nvgpu_secure_sub_u64(u64 ul_a, u64 ul_b) +static inline u64 nvgpu_safe_sub_u64(u64 ul_a, u64 ul_b) { if (ul_a < ul_b) { BUG(); @@ -59,7 +59,7 @@ static inline u64 nvgpu_secure_sub_u64(u64 ul_a, u64 ul_b) } } -static inline u32 nvgpu_secure_mult_u32(u32 ui_a, u32 ui_b) +static inline u32 
nvgpu_safe_mult_u32(u32 ui_a, u32 ui_b) { if (ui_a == 0 || ui_b == 0) { return 0U; @@ -70,7 +70,7 @@ static inline u32 nvgpu_secure_mult_u32(u32 ui_a, u32 ui_b) } } -static inline u64 nvgpu_secure_mult_u64(u64 ul_a, u64 ul_b) +static inline u64 nvgpu_safe_mult_u64(u64 ul_a, u64 ul_b) { if (ul_a == 0 || ul_b == 0) { return 0UL; @@ -81,7 +81,7 @@ static inline u64 nvgpu_secure_mult_u64(u64 ul_a, u64 ul_b) } } -static inline u32 nvgpu_secure_cast_u64_to_u32(u64 ul_a) +static inline u32 nvgpu_safe_cast_u64_to_u32(u64 ul_a) { if (ul_a > UINT_MAX) { BUG(); @@ -90,9 +90,9 @@ static inline u32 nvgpu_secure_cast_u64_to_u32(u64 ul_a) } } -static inline u32 nvgpu_secure_cast_bool_to_u32(bool bl_a) +static inline u32 nvgpu_safe_cast_bool_to_u32(bool bl_a) { return bl_a == true ? 1U : 0U; } -#endif /* NVGPU_SECURE_OPS_H */ +#endif /* NVGPU_SAFE_OPS_H */