diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c b/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c
index 92dab45be..9b9d8142a 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c
@@ -683,6 +683,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
 	u32 sub_wpr_header;
 #endif
 	u32 wpr_offset;
+	u32 flcn_cnt;
 
 	/*
 	 * Start with an array of WPR headers at the base of the WPR.
@@ -690,8 +691,9 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
 	 * read of this array and cache it internally so it's OK to pack these.
 	 * Also, we add 1 to the falcon count to indicate the end of the array.
 	 */
+	flcn_cnt = U32(plsfm->managed_flcn_cnt);
 	wpr_offset = nvgpu_safe_mult_u32(U32(sizeof(struct lsf_wpr_header)),
-			nvgpu_safe_add_u32(U32(plsfm->managed_flcn_cnt), U32(1)));
+			nvgpu_safe_add_u32(flcn_cnt, U32(1)));
 
 #ifdef CONFIG_NVGPU_DGPU
 	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
@@ -919,14 +921,13 @@ static int lsfm_init_wpr_contents(struct gk20a *g,
 {
 	struct lsfm_managed_ucode_img *pnode = plsfm->ucode_img_list;
 	struct lsf_wpr_header last_wpr_hdr;
-	u32 i;
+	u32 i = 0;
 	u64 tmp;
 	int err = 0;
 
 	/* The WPR array is at the base of the WPR */
 	pnode = plsfm->ucode_img_list;
 	(void) memset(&last_wpr_hdr, MEMSET_VALUE, sizeof(struct lsf_wpr_header));
-	i = 0;
 
 #ifdef CONFIG_NVGPU_DGPU
 	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct.h b/drivers/gpu/nvgpu/common/acr/acr_blob_construct.h
index 29c5e4254..c6beddcac 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct.h
+++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -51,7 +51,7 @@ struct ls_falcon_ucode_desc {
 	u32 app_resident_data_size;
 	u32 nb_imem_overlays;
 	u32 nb_dmem_overlays;
-	struct {u32 start; u32 size; } load_ovl[64];
+	struct {u32 start; u32 size; } load_ovl[UCODE_NB_MAX_DATE_LENGTH];
 	u32 compressed;
 };
 
diff --git a/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c b/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c
index 5e8faacc9..5aa0a3e09 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -126,6 +126,7 @@ static void acr_ucode_patch_sig(struct gk20a *g,
 	struct nvgpu_acr *acr = g->acr;
 #endif
 	unsigned int i, j, *p_sig;
+	const u32 dmem_word_size = 4U;
 
 	nvgpu_acr_dbg(g, " ");
 	if (!g->ops.pmu.is_debug_mode_enabled(g)) {
@@ -143,11 +144,13 @@ static void acr_ucode_patch_sig(struct gk20a *g,
 #endif
 
 	/* Patching logic:*/
-	sig_size = sig_size / 4U;
-	for (i = 0U; i < (sizeof(*p_patch_loc)>>2U); i++) {
+	sig_size = sig_size / dmem_word_size;
+	for (i = 0U; i < (sizeof(*p_patch_loc) / dmem_word_size); i++) {
 		for (j = 0U; j < sig_size; j++) {
-			p_img[nvgpu_safe_add_u32((p_patch_loc[i]>>2U), j)] =
-				p_sig[nvgpu_safe_add_u32((p_patch_ind[i]<<2U), j)];
+			p_img[nvgpu_safe_add_u32(
+				(p_patch_loc[i] / dmem_word_size), j)] =
+				p_sig[nvgpu_safe_add_u32(
+				(p_patch_ind[i] * dmem_word_size), j)];
 		}
 	}
 }
diff --git a/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c b/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c
index 18e6f7a59..0e258c3ac 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -35,8 +35,9 @@
 #include "acr_bootstrap.h"
 #include "acr_sw_gv11b.h"
 
-#define RECOVERY_UCODE_BLOB_SIZE (0U)
-#define WPR_OFFSET (0U)
+#define RECOVERY_UCODE_BLOB_SIZE	(0U)
+#define WPR_OFFSET			(0U)
+#define ACR_REGIONS			(1U)
 
 static int gv11b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
 {
@@ -61,6 +62,7 @@ static int gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 	struct flcn_acr_desc *acr_dmem_desc;
 	u32 *acr_ucode_header = NULL;
 	u32 *acr_ucode_data = NULL;
+	const u32 acr_desc_offset = 2U;
 
 	nvgpu_log_fn(g, " ");
 #ifdef CONFIG_NVGPU_NON_FUSA
@@ -81,7 +83,7 @@ static int gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 
 	/* Patch WPR info to ucode */
 	acr_dmem_desc = (struct flcn_acr_desc *)(void *)
-		&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
+		&(((u8 *)acr_ucode_data)[acr_ucode_header[acr_desc_offset]]);
 
 	acr_desc->acr_dmem_desc = acr_dmem_desc;
 
@@ -90,7 +92,7 @@ static int gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 
 	nvgpu_assert(g->acr->ucode_blob.size <= U32_MAX);
 	acr_dmem_desc->nonwpr_ucode_blob_size = (u32)g->acr->ucode_blob.size;
-	acr_dmem_desc->regions.no_regions = 1U;
+	acr_dmem_desc->regions.no_regions = ACR_REGIONS;
 	acr_dmem_desc->wpr_offset = WPR_OFFSET;
 }
 
diff --git a/drivers/gpu/nvgpu/common/acr/nvgpu_acr_interface.h b/drivers/gpu/nvgpu/common/acr/nvgpu_acr_interface.h
index a5f622814..47e3d26d7 100644
--- a/drivers/gpu/nvgpu/common/acr/nvgpu_acr_interface.h
+++ b/drivers/gpu/nvgpu/common/acr/nvgpu_acr_interface.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -355,6 +355,7 @@ struct lsf_lsb_header {
 	u32 flags;
 };
 
+#define FLCN_SIG_SIZE (4U)
 /** @} */
 
 /**
@@ -369,12 +370,12 @@ struct lsf_lsb_header {
  */
 struct flcn_bl_dmem_desc {
 	/** Should be always first element */
-	u32 reserved[4];
+	u32 reserved[FLCN_SIG_SIZE];
 	/**
 	 * Signature should follow reserved 16B signature for secure code.
 	 * 0s if no secure code
 	 */
-	u32 signature[4];
+	u32 signature[FLCN_SIG_SIZE];
 	/**
 	 * Type of memory-aperture DMA index used by the bootloader
 	 * while loading code/data.
@@ -513,6 +514,8 @@ struct flcn_acr_regions {
 	struct flcn_acr_region_prop region_props[NVGPU_FLCN_ACR_MAX_REGIONS];
 };
 
+#define DMEM_WORD_SIZE 4U
+#define DUMMY_SPACE_SIZE 4U
 /**
  * The descriptor used by ACR HS ucode to figure out the
  * WPR & non-WPR blob details.
@@ -528,10 +531,10 @@ struct flcn_acr_desc {
 	 * NOTE: This has to be the first member always.
	 */
 	union {
-		u32 reserved_dmem[(LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE/4)];
+		u32 reserved_dmem[(LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE/DMEM_WORD_SIZE)];
 	} ucode_reserved_space;
 	/** Signature of ACR ucode. */
-	u32 signatures[4];
+	u32 signatures[FLCN_SIG_SIZE];
 	/**
 	 * WPR Region ID holding the WPR header and its details
 	 *
@@ -564,7 +567,7 @@ struct flcn_acr_desc {
 	 */
 	u64 nonwpr_ucode_blob_start;
 	/** dummy space, not used by iGPU */
-	u32 dummy[4];
+	u32 dummy[DUMMY_SPACE_SIZE];
 };
 
 struct flcn2_acr_desc {
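
Reviewer note (not part of the patch): in lsf_gen_wpr_requirements() the WPR image begins with an array of lsf_wpr_header entries, one per managed falcon plus one terminator entry, which is why the new flcn_cnt is incremented by 1 before the multiply. A minimal standalone sketch of that sizing step, using an illustrative header layout rather than the real struct lsf_wpr_header:

#include <stdint.h>

/* Illustrative stand-in; the real layout is struct lsf_wpr_header in nvgpu. */
struct wpr_header_example {
	uint32_t falcon_id;
	uint32_t lsb_offset;
	uint32_t bootstrap_owner;
	uint32_t lazy_bootstrap;
	uint32_t status;
};

/*
 * Size of the WPR header array placed at the base of the WPR:
 * one entry per managed falcon plus one terminator entry, so the
 * consumer can walk the array without a separate count.
 */
static uint32_t wpr_header_array_size(uint32_t managed_flcn_cnt)
{
	return (uint32_t)sizeof(struct wpr_header_example) *
	       (managed_flcn_cnt + 1U);
}

With two managed falcons, for example, the first LSB header would be laid out 3 * sizeof(struct wpr_header_example) bytes into the WPR in this sketch.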
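
Reviewer note (not part of the patch): in acr_ucode_patch_sig() the patch-location table holds byte offsets into the ucode image, while p_img and p_sig are u32 arrays, so byte offsets are divided by the 4-byte DMEM word size before indexing and the signature index is scaled up by the same factor, exactly as the old shifts did. A hedged sketch of the same word-addressed copy; patch_sig_example and its parameter names are invented for illustration, not nvgpu API:

#include <stdint.h>
#include <stddef.h>

#define DMEM_WORD_SIZE_EX 4U	/* bytes per 32-bit DMEM word */

/*
 * Copy sig_size_bytes of signature into the ucode image at every patch
 * location. patch_loc[] holds byte offsets into img[], so they are
 * converted to u32 indices; patch_ind[] is scaled by 4 to pick the
 * starting word of the selected signature, mirroring the patch's
 * arithmetic.
 */
static void patch_sig_example(uint32_t *img, const uint32_t *sig,
			      const uint32_t *patch_loc,
			      const uint32_t *patch_ind,
			      size_t num_loc, uint32_t sig_size_bytes)
{
	uint32_t words = sig_size_bytes / DMEM_WORD_SIZE_EX;
	size_t i;
	uint32_t j;

	for (i = 0; i < num_loc; i++) {
		for (j = 0U; j < words; j++) {
			img[patch_loc[i] / DMEM_WORD_SIZE_EX + j] =
				sig[patch_ind[i] * DMEM_WORD_SIZE_EX + j];
		}
	}
}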
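
Reviewer note (not part of the patch): the nvgpu_acr_interface.h change only names existing magic numbers (FLCN_SIG_SIZE, DMEM_WORD_SIZE, DUMMY_SPACE_SIZE), so every array keeps its old element count and the descriptor layouts shared with the ACR HS ucode must stay byte-identical. A small self-contained check in that spirit, using local stand-in macros and a stand-in struct because the real header is not included here:

#include <stdint.h>

/* Local stand-ins for the values the patch introduces in the header. */
#define FLCN_SIG_SIZE_EX	(4U)
#define DMEM_WORD_SIZE_EX	4U
#define DUMMY_SPACE_SIZE_EX	4U

/*
 * Prefix of flcn_bl_dmem_desc as described by its comments:
 * 16 reserved bytes followed by a 16-byte signature.
 */
struct bl_dmem_desc_prefix_example {
	uint32_t reserved[FLCN_SIG_SIZE_EX];
	uint32_t signature[FLCN_SIG_SIZE_EX];
};

/* The rename must not change any sizes. */
_Static_assert(FLCN_SIG_SIZE_EX == 4U,
	       "signature arrays keep 4 words");
_Static_assert(DMEM_WORD_SIZE_EX == 4U && DUMMY_SPACE_SIZE_EX == 4U,
	       "word size and dummy space keep their original values");
_Static_assert(sizeof(struct bl_dmem_desc_prefix_example) == 32,
	       "reserved + signature prefix stays 32 bytes");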