Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 09:12:24 +03:00)
gpu: nvgpu: fix MISRA violations in Posix unit
Fix violations of MISRA rule 5.4 in Posix unit.

JIRA NVGPU-6534

Change-Id: I9471e5fca913ca8cc19403998fdbe5450fb49879
Signed-off-by: ajesh <akv@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2488184
(cherry picked from commit f9bc21ca8d96e9c531a1b0077cfe1e78502e7ee5)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2491855
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
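What the change does, in brief: every use of the nvgpu POSIX ALIGN() macro is renamed to NVGPU_ALIGN(). MISRA C:2012 rule 5.4 requires macro identifiers to be distinct, and the generic name ALIGN is also defined by the Linux kernel headers, which is presumably why the Linux build now simply maps NVGPU_ALIGN back onto the kernel macro (see the new "#define NVGPU_ALIGN ALIGN" line in the diff below). Nothing functional changes; only the macro name and copyright years are touched. The sketch below illustrates the rounding behaviour with a simplified power-of-two definition; it is not the exact nvgpu macro, which (as the diff shows) uses __builtin_choose_expr to require unsigned operands.

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for the renamed macro (assumption: the real nvgpu
 * definition differs and type-checks its operands via __builtin_choose_expr).
 * Rounds x up to the next multiple of a, where a is a power of two.
 */
#define NVGPU_ALIGN(x, a)	(((x) + ((a) - 1U)) & ~((a) - 1U))

int main(void)
{
	uint32_t boot_size = 1000U;

	/* A caller that previously wrote ALIGN(boot_size, 256U) now writes: */
	printf("%u\n", NVGPU_ALIGN(boot_size, 256U));	/* prints 1024 */
	return 0;
}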
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -203,21 +203,21 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
  }

  p_img->desc->bootloader_start_offset = fecs->boot.offset;
- p_img->desc->bootloader_size = ALIGN(fecs->boot.size,
+ p_img->desc->bootloader_size = NVGPU_ALIGN(fecs->boot.size,
  LSF_DATA_SIZE_ALIGNMENT);
  p_img->desc->bootloader_imem_offset = fecs->boot_imem_offset;
  p_img->desc->bootloader_entry_point = fecs->boot_entry;

- tmp_size = nvgpu_safe_add_u32(ALIGN(fecs->boot.size,
+ tmp_size = nvgpu_safe_add_u32(NVGPU_ALIGN(fecs->boot.size,
  LSF_DATA_SIZE_ALIGNMENT),
- ALIGN(fecs->code.size,
+ NVGPU_ALIGN(fecs->code.size,
  LSF_DATA_SIZE_ALIGNMENT));
  p_img->desc->image_size = nvgpu_safe_add_u32(tmp_size,
- ALIGN(fecs->data.size,
+ NVGPU_ALIGN(fecs->data.size,
  LSF_DATA_SIZE_ALIGNMENT));
- p_img->desc->app_size = nvgpu_safe_add_u32(ALIGN(fecs->code.size,
+ p_img->desc->app_size = nvgpu_safe_add_u32(NVGPU_ALIGN(fecs->code.size,
  LSF_DATA_SIZE_ALIGNMENT),
- ALIGN(fecs->data.size,
+ NVGPU_ALIGN(fecs->data.size,
  LSF_DATA_SIZE_ALIGNMENT));
  p_img->desc->app_start_offset = fecs->code.offset;
  p_img->desc->app_imem_offset = APP_IMEM_OFFSET;

@@ -312,42 +312,42 @@ int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
  }

  p_img->desc->bootloader_start_offset = BL_START_OFFSET;
- p_img->desc->bootloader_size = ALIGN(gpccs->boot.size,
+ p_img->desc->bootloader_size = NVGPU_ALIGN(gpccs->boot.size,
  LSF_DATA_SIZE_ALIGNMENT);
  p_img->desc->bootloader_imem_offset = gpccs->boot_imem_offset;
  p_img->desc->bootloader_entry_point = gpccs->boot_entry;

- tmp_size = nvgpu_safe_add_u32(ALIGN(gpccs->boot.size,
+ tmp_size = nvgpu_safe_add_u32(NVGPU_ALIGN(gpccs->boot.size,
  LSF_DATA_SIZE_ALIGNMENT),
- ALIGN(gpccs->code.size,
+ NVGPU_ALIGN(gpccs->code.size,
  LSF_DATA_SIZE_ALIGNMENT));

  p_img->desc->image_size = nvgpu_safe_add_u32(tmp_size,
- ALIGN(gpccs->data.size,
+ NVGPU_ALIGN(gpccs->data.size,
  LSF_DATA_SIZE_ALIGNMENT));
  p_img->desc->app_size =
- nvgpu_safe_add_u32(ALIGN(gpccs->code.size,
+ nvgpu_safe_add_u32(NVGPU_ALIGN(gpccs->code.size,
  LSF_DATA_SIZE_ALIGNMENT),
- ALIGN(gpccs->data.size,
+ NVGPU_ALIGN(gpccs->data.size,
  LSF_DATA_SIZE_ALIGNMENT));
  p_img->desc->app_start_offset = p_img->desc->bootloader_size;
  p_img->desc->app_imem_offset = APP_IMEM_OFFSET;
  p_img->desc->app_imem_entry = APP_IMEM_ENTRY;
  p_img->desc->app_dmem_offset = APP_DMEM_OFFSET;
  p_img->desc->app_resident_code_offset = APP_RESIDENT_CODE_OFFSET;
- p_img->desc->app_resident_code_size = ALIGN(gpccs->code.size,
+ p_img->desc->app_resident_code_size = NVGPU_ALIGN(gpccs->code.size,
  LSF_DATA_SIZE_ALIGNMENT);
  p_img->desc->app_resident_data_offset =
- nvgpu_safe_sub_u32(ALIGN(gpccs->data.offset,
+ nvgpu_safe_sub_u32(NVGPU_ALIGN(gpccs->data.offset,
  LSF_DATA_SIZE_ALIGNMENT),
- ALIGN(gpccs->code.offset,
+ NVGPU_ALIGN(gpccs->code.offset,
  LSF_DATA_SIZE_ALIGNMENT));
- p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size,
+ p_img->desc->app_resident_data_size = NVGPU_ALIGN(gpccs->data.size,
  LSF_DATA_SIZE_ALIGNMENT);
  p_img->data = (u32 *)
  (void *)((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon)
  + gpccs->boot.offset);
- p_img->data_size = ALIGN(p_img->desc->image_size,
+ p_img->data_size = NVGPU_ALIGN(p_img->desc->image_size,
  LSF_DATA_SIZE_ALIGNMENT);
  p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;

@@ -480,15 +480,15 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
  * the code following it is aligned, but the size in the image
  * desc is not, bloat it up to be on a 256 byte alignment.
  */
- pnode->lsb_header.bl_code_size = ALIGN(
+ pnode->lsb_header.bl_code_size = NVGPU_ALIGN(
  pnode->ucode_img.desc->bootloader_size,
  LSF_BL_CODE_SIZE_ALIGNMENT);
  full_app_size = nvgpu_safe_add_u32(
- ALIGN(pnode->ucode_img.desc->app_size,
+ NVGPU_ALIGN(pnode->ucode_img.desc->app_size,
  LSF_BL_CODE_SIZE_ALIGNMENT),
  pnode->lsb_header.bl_code_size);

- pnode->lsb_header.ucode_size = nvgpu_safe_add_u32(ALIGN(
+ pnode->lsb_header.ucode_size = nvgpu_safe_add_u32(NVGPU_ALIGN(
  pnode->ucode_img.desc->app_resident_data_offset,
  LSF_BL_CODE_SIZE_ALIGNMENT),
  pnode->lsb_header.bl_code_size);

@@ -715,7 +715,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
  */
  while (pnode != NULL) {
  /* Align, save off, and include an LSB header size */
- wpr_offset = ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
+ wpr_offset = NVGPU_ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
  pnode->wpr_header.lsb_offset = wpr_offset;
  wpr_offset = nvgpu_safe_add_u32(wpr_offset,
  (u32)sizeof(struct lsf_lsb_header));

@@ -724,7 +724,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
  * Align, save off, and include the original (static)ucode
  * image size
  */
- wpr_offset = ALIGN(wpr_offset, LSF_UCODE_DATA_ALIGNMENT);
+ wpr_offset = NVGPU_ALIGN(wpr_offset, LSF_UCODE_DATA_ALIGNMENT);
  pnode->lsb_header.ucode_off = wpr_offset;
  wpr_offset = nvgpu_safe_add_u32(wpr_offset,
  pnode->ucode_img.data_size);

@@ -743,13 +743,13 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
  * generic one, which is the largest it will will ever be.
  */
  /* Align (size bloat) and save off generic descriptor size*/
- pnode->lsb_header.bl_data_size = ALIGN(
+ pnode->lsb_header.bl_data_size = NVGPU_ALIGN(
  nvgpu_safe_cast_u64_to_u32(
  sizeof(pnode->bl_gen_desc)),
  LSF_BL_DATA_SIZE_ALIGNMENT);

  /*Align, save off, and include the additional BL data*/
- wpr_offset = ALIGN(wpr_offset, LSF_BL_DATA_ALIGNMENT);
+ wpr_offset = NVGPU_ALIGN(wpr_offset, LSF_BL_DATA_ALIGNMENT);
  pnode->lsb_header.bl_data_off = wpr_offset;
  wpr_offset = nvgpu_safe_add_u32(wpr_offset,
  pnode->lsb_header.bl_data_size);
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -101,14 +101,14 @@ int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
  }

  p_img->desc->bootloader_start_offset = fecs->boot.offset;
- p_img->desc->bootloader_size = ALIGN(fecs->boot.size, 256U);
+ p_img->desc->bootloader_size = NVGPU_ALIGN(fecs->boot.size, 256U);
  p_img->desc->bootloader_imem_offset = fecs->boot_imem_offset;
  p_img->desc->bootloader_entry_point = fecs->boot_entry;

- p_img->desc->image_size = ALIGN(fecs->boot.size, 256U) +
- ALIGN(fecs->code.size, 256U) + ALIGN(fecs->data.size, 256U);
- p_img->desc->app_size = ALIGN(fecs->code.size, 256U) +
- ALIGN(fecs->data.size, 256U);
+ p_img->desc->image_size = NVGPU_ALIGN(fecs->boot.size, 256U) +
+ NVGPU_ALIGN(fecs->code.size, 256U) + NVGPU_ALIGN(fecs->data.size, 256U);
+ p_img->desc->app_size = NVGPU_ALIGN(fecs->code.size, 256U) +
+ NVGPU_ALIGN(fecs->data.size, 256U);
  p_img->desc->app_start_offset = fecs->code.offset;
  p_img->desc->app_imem_offset = 0;
  p_img->desc->app_imem_entry = 0;

@@ -168,28 +168,29 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)

  p_img->desc->bootloader_start_offset =
  0;
- p_img->desc->bootloader_size = ALIGN(gpccs->boot.size, 256U);
+ p_img->desc->bootloader_size = NVGPU_ALIGN(gpccs->boot.size, 256U);
  p_img->desc->bootloader_imem_offset = gpccs->boot_imem_offset;
  p_img->desc->bootloader_entry_point = gpccs->boot_entry;

- p_img->desc->image_size = ALIGN(gpccs->boot.size, 256U) +
- ALIGN(gpccs->code.size, 256U) + ALIGN(gpccs->data.size, 256U);
- p_img->desc->app_size =
- ALIGN(gpccs->code.size, 256U) + ALIGN(gpccs->data.size, 256U);
+ p_img->desc->image_size = NVGPU_ALIGN(gpccs->boot.size, 256U) +
+ NVGPU_ALIGN(gpccs->code.size, 256U) +
+ NVGPU_ALIGN(gpccs->data.size, 256U);
+ p_img->desc->app_size = NVGPU_ALIGN(gpccs->code.size, 256U) +
+ NVGPU_ALIGN(gpccs->data.size, 256U);
  p_img->desc->app_start_offset = p_img->desc->bootloader_size;
  p_img->desc->app_imem_offset = 0;
  p_img->desc->app_imem_entry = 0;
  p_img->desc->app_dmem_offset = 0;
  p_img->desc->app_resident_code_offset = 0;
- p_img->desc->app_resident_code_size = ALIGN(gpccs->code.size, 256U);
+ p_img->desc->app_resident_code_size = NVGPU_ALIGN(gpccs->code.size, 256U);
  p_img->desc->app_resident_data_offset =
- ALIGN(gpccs->data.offset, 256U) -
- ALIGN(gpccs->code.offset, 256U);
- p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size, 256U);
+ NVGPU_ALIGN(gpccs->data.offset, 256U) -
+ NVGPU_ALIGN(gpccs->code.offset, 256U);
+ p_img->desc->app_resident_data_size = NVGPU_ALIGN(gpccs->data.size, 256U);
  p_img->data = (u32 *)
  ((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon) +
  gpccs->boot.offset);
- p_img->data_size = ALIGN(p_img->desc->image_size, 256U);
+ p_img->data_size = NVGPU_ALIGN(p_img->desc->image_size, 256U);
  p_img->lsf_desc = (struct lsf_ucode_desc_v0 *)lsf_desc;
  nvgpu_acr_dbg(g, "gpccs fw loaded\n");
  nvgpu_release_firmware(g, gpccs_sig);

@@ -226,13 +227,13 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
  * the code following it is aligned, but the size in the image
  * desc is not, bloat it up to be on a 256 byte alignment.
  */
- pnode->lsb_header.bl_code_size = ALIGN(
+ pnode->lsb_header.bl_code_size = NVGPU_ALIGN(
  pnode->ucode_img.desc->bootloader_size,
  LSF_BL_CODE_SIZE_ALIGNMENT);
- full_app_size = ALIGN(pnode->ucode_img.desc->app_size,
+ full_app_size = NVGPU_ALIGN(pnode->ucode_img.desc->app_size,
  LSF_BL_CODE_SIZE_ALIGNMENT) +
  pnode->lsb_header.bl_code_size;
- pnode->lsb_header.ucode_size = ALIGN(
+ pnode->lsb_header.ucode_size = NVGPU_ALIGN(
  pnode->ucode_img.desc->app_resident_data_offset,
  LSF_BL_CODE_SIZE_ALIGNMENT) +
  pnode->lsb_header.bl_code_size;

@@ -362,7 +363,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr_v0 *plsf
  */
  while (pnode != NULL) {
  /* Align, save off, and include an LSB header size */
- wpr_offset = ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
+ wpr_offset = NVGPU_ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
  pnode->wpr_header.lsb_offset = wpr_offset;
  wpr_offset += (u32)sizeof(struct lsf_lsb_header_v0);

@@ -370,7 +371,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr_v0 *plsf
  * Align, save off, and include the original (static)
  * ucode image size
  */
- wpr_offset = ALIGN(wpr_offset,
+ wpr_offset = NVGPU_ALIGN(wpr_offset,
  LSF_UCODE_DATA_ALIGNMENT);
  pnode->lsb_header.ucode_off = wpr_offset;
  wpr_offset += pnode->ucode_img.data_size;

@@ -389,12 +390,12 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr_v0 *plsf
  * generic one, which is the largest it will will ever be.
  */
  /* Align (size bloat) and save off generic descriptor size */
- pnode->lsb_header.bl_data_size = ALIGN(
+ pnode->lsb_header.bl_data_size = NVGPU_ALIGN(
  (u32)sizeof(pnode->bl_gen_desc),
  LSF_BL_DATA_SIZE_ALIGNMENT);

  /* Align, save off, and include the additional BL data */
- wpr_offset = ALIGN(wpr_offset,
+ wpr_offset = NVGPU_ALIGN(wpr_offset,
  LSF_BL_DATA_ALIGNMENT);
  pnode->lsb_header.bl_data_off = wpr_offset;
  wpr_offset += pnode->lsb_header.bl_data_size;
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -70,7 +70,7 @@ static bool engine_mem_queue_has_room(struct nvgpu_engine_mem_queue *queue,
  bool q_rewind = false;
  int err = 0;

- size = ALIGN(size, QUEUE_ALIGNMENT);
+ size = NVGPU_ALIGN(size, QUEUE_ALIGNMENT);

  err = mem_queue_get_head_tail(queue, &q_head, &q_tail);
  if (err != 0) {

@@ -117,7 +117,7 @@ static int engine_mem_queue_rewind(struct nvgpu_falcon *flcn,
  goto exit;
  } else {
  queue->position += nvgpu_safe_cast_u32_to_u8(
- ALIGN(U32(cmd.hdr.size), QUEUE_ALIGNMENT));
+ NVGPU_ALIGN(U32(cmd.hdr.size), QUEUE_ALIGNMENT));
  nvgpu_log_info(g, "flcn-%d queue-%d, rewinded",
  queue->flcn_id, queue->id);
  }

@@ -207,7 +207,7 @@ int nvgpu_engine_mem_queue_push(struct nvgpu_falcon *flcn,
  goto unlock_mutex;
  }

- queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+ queue->position += NVGPU_ALIGN(size, QUEUE_ALIGNMENT);

  err = queue->head(g, queue->id, queue->index,
  &queue->position, QUEUE_SET);

@@ -279,7 +279,7 @@ int nvgpu_engine_mem_queue_pop(struct nvgpu_falcon *flcn,
  goto unlock_mutex;
  }

- queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+ queue->position += NVGPU_ALIGN(size, QUEUE_ALIGNMENT);

  err = queue->tail(g, queue->id, queue->index,
  &queue->position, QUEUE_SET);
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -244,7 +244,7 @@ static void nvgpu_gr_falcon_init_ctxsw_ucode_segment(
  p_seg->offset = *offset;
  p_seg->size = size;
  ucode_offset = nvgpu_safe_add_u32(*offset, size);
- *offset = ALIGN(ucode_offset, 256U);
+ *offset = NVGPU_ALIGN(ucode_offset, 256U);
  }

  static void nvgpu_gr_falcon_init_ctxsw_ucode_segments(

@@ -252,7 +252,7 @@ static void nvgpu_gr_falcon_init_ctxsw_ucode_segments(
  struct nvgpu_ctxsw_bootloader_desc *bootdesc,
  u32 code_size, u32 data_size)
  {
- u32 boot_size = ALIGN(bootdesc->size, sizeof(u32));
+ u32 boot_size = NVGPU_ALIGN(bootdesc->size, sizeof(u32));

  segments->boot_entry = bootdesc->entry_point;
  segments->boot_imem_offset = bootdesc->imem_offset;
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -320,7 +320,7 @@ static int add_ctxsw_buffer_map_entries_gpcs(struct gk20a *g,
  }
  }

- *offset = ALIGN(*offset, 256U);
+ *offset = NVGPU_ALIGN(*offset, 256U);

  base = (g->ops.perf.get_pmmgpc_per_chiplet_offset() * gpc_num);
  if (add_ctxsw_buffer_map_entries(map,

@@ -329,7 +329,7 @@ static int add_ctxsw_buffer_map_entries_gpcs(struct gk20a *g,
  return -EINVAL;
  }

- *offset = ALIGN(*offset, 256U);
+ *offset = NVGPU_ALIGN(*offset, 256U);
  }
  return 0;
  }

@@ -446,7 +446,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,

  /* Add entries from _LIST_nv_perf_sys_control_ctx_reg*/
  if (nvgpu_netlist_get_perf_sys_control_ctxsw_regs(g)->count > 0U) {
- offset = ALIGN(offset, 256U);
+ offset = NVGPU_ALIGN(offset, 256U);

  ret = add_ctxsw_buffer_map_entries(map,
  nvgpu_netlist_get_perf_sys_control_ctxsw_regs(g),

@@ -469,7 +469,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
  goto cleanup;
  }

- offset = ALIGN(offset, 256U);
+ offset = NVGPU_ALIGN(offset, 256U);

  /* Add entries from _LIST_nv_perf_pma_control_ctx_reg*/
  ret = add_ctxsw_buffer_map_entries(map,

@@ -479,7 +479,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
  goto cleanup;
  }

- offset = ALIGN(offset, 256U);
+ offset = NVGPU_ALIGN(offset, 256U);

  /* Add entries from _LIST_nv_perf_fbp_ctx_regs */
  if (add_ctxsw_buffer_map_entries_subunits(map,

@@ -529,7 +529,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
  goto cleanup;
  }

- offset = ALIGN(offset, 256U);
+ offset = NVGPU_ALIGN(offset, 256U);

  /* Add entries from _LIST_nv_perf_fbp_control_ctx_regs */
  if (add_ctxsw_buffer_map_entries_subunits(map,

@@ -541,7 +541,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
  goto cleanup;
  }

- offset = ALIGN(offset, 256U);
+ offset = NVGPU_ALIGN(offset, 256U);

  /* Add GPC entries */
  if (add_ctxsw_buffer_map_entries_gpcs(g, map, &count, &offset,
@@ -114,7 +114,7 @@ static void balloc_compute_max_order(struct nvgpu_buddy_allocator *a)
  */
  static void balloc_allocator_align(struct nvgpu_buddy_allocator *a)
  {
- a->start = ALIGN(a->base, a->blk_size);
+ a->start = NVGPU_ALIGN(a->base, a->blk_size);
  NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 10_3), "Bug 2277532")
  NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 14_4), "Bug 2277532")
  NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 15_6), "Bug 2277532")
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -664,7 +664,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages(
  u64 pages;
  u32 i = 0;

- pages = ALIGN(len, a->page_size) >> a->page_shift;
+ pages = NVGPU_ALIGN(len, a->page_size) >> a->page_shift;

  alloc = do_nvgpu_alloc_pages(a, pages);
  if (alloc == NULL) {

@@ -839,7 +839,7 @@ static u64 nvgpu_page_palloc_fixed(struct nvgpu_allocator *na,
  u64 aligned_len, pages;
  u32 i = 0;

- aligned_len = ALIGN(len, a->page_size);
+ aligned_len = NVGPU_ALIGN(len, a->page_size);
  pages = aligned_len >> a->page_shift;

  alloc_lock(na);
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -202,7 +202,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
  * PD must have mem_offs be 0 for the invalidate code to work, so we
  * can't use the PD cache.
  */
- pdb_size = ALIGN(pd_get_size(&vm->mmu_levels[0], &attrs), NVGPU_CPU_PAGE_SIZE);
+ pdb_size = NVGPU_ALIGN(pd_get_size(&vm->mmu_levels[0], &attrs), NVGPU_CPU_PAGE_SIZE);

  err = nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
  if (err != 0) {

@@ -227,7 +227,7 @@ static u64 nvgpu_align_map_length(struct vm_gk20a *vm, u64 length,
  {
  u64 page_size = vm->gmmu_page_sizes[attrs->pgsz];

- return ALIGN(length, page_size);
+ return NVGPU_ALIGN(length, page_size);
  }

  static u32 pd_entries(const struct gk20a_mmu_level *l,

@@ -261,7 +261,7 @@ u64 nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
  }

  /* Be certain we round up to page_size if needed */
- size = ALIGN(size, page_size);
+ size = NVGPU_ALIGN(size, page_size);

  addr = nvgpu_alloc_pte(vma, size, page_size);
  if (addr == 0ULL) {

@@ -1402,7 +1402,7 @@ static int nvgpu_vm_new_mapping(struct vm_gk20a *vm,
  min_t(u64, binfo_ptr->size, align));
  }
  map_size = (map_size != 0ULL) ? map_size : binfo_ptr->size;
- map_size = ALIGN(map_size, SZ_4K);
+ map_size = NVGPU_ALIGN(map_size, SZ_4K);

  if ((map_size > binfo_ptr->size) ||
  (phys_offset > (binfo_ptr->size - map_size))) {
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -36,7 +36,7 @@ void nvgpu_pmu_allocator_dmem_init(struct gk20a *g,
  if (!nvgpu_alloc_initialized(dmem)) {
  /* Align start and end addresses */
  u32 start =
- ALIGN(U32(fw_ops->get_init_msg_sw_mngd_area_off(init)),
+ NVGPU_ALIGN(U32(fw_ops->get_init_msg_sw_mngd_area_off(init)),
  PMU_DMEM_ALLOC_ALIGNMENT);
  u32 end = (U32(fw_ops->get_init_msg_sw_mngd_area_off(init)) +
  U32(fw_ops->get_init_msg_sw_mngd_area_size(init))) &
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -389,7 +389,7 @@ static int pmu_process_init_msg_dmem(struct gk20a *g, struct nvgpu_pmu *pmu,
  goto exit;
  }

- tail += ALIGN(U32(msg->hdr.size), PMU_DMEM_ALIGNMENT);
+ tail += NVGPU_ALIGN(U32(msg->hdr.size), PMU_DMEM_ALIGNMENT);
  g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_SET);

  exit:
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -148,7 +148,7 @@ static int sec2_process_init_msg(struct nvgpu_sec2 *sec2,
  goto exit;
  }

- tail += ALIGN(U32(msg->hdr.size), PMU_DMEM_ALIGNMENT);
+ tail += NVGPU_ALIGN(U32(msg->hdr.size), PMU_DMEM_ALIGNMENT);
  g->ops.sec2.msgq_tail(g, sec2, &tail, QUEUE_SET);

  sec2_init = &msg->msg.init.sec2_init;
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -31,7 +31,7 @@ int nvgpu_sec2_dmem_allocator_init(struct gk20a *g,
  int err = 0;
  if (!nvgpu_alloc_initialized(dmem)) {
  /* Align start and end addresses */
- u32 start = ALIGN(sec2_init->nv_managed_area_offset,
+ u32 start = NVGPU_ALIGN(sec2_init->nv_managed_area_offset,
  PMU_DMEM_ALLOC_ALIGNMENT);

  u32 end = (sec2_init->nv_managed_area_offset +
@@ -1,7 +1,7 @@
  /*
  * Virtualized GPU Memory Management
  *
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -266,7 +266,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
  struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
  struct tegra_vgpu_mem_desc *mem_desc;
  u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
- u64 buffer_size = ALIGN(size, SZ_4K);
+ u64 buffer_size = NVGPU_ALIGN(size, SZ_4K);
  u64 space_to_skip = buffer_offset;
  u32 mem_desc_count = 0, i;
  void *handle = NULL;

@@ -314,7 +314,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
  sgl = sgt->sgl;

  /* Align size to page size */
- size = ALIGN(size, page_size);
+ size = NVGPU_ALIGN(size, page_size);

  while (sgl) {
  u64 phys_addr;
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -203,7 +203,7 @@ NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 11_3), "TID-415")
  nvgpu_falcon_writel(flcn, falcon_falcon_dmemd_r(port), data);
  }

- size = ALIGN(size, 4U);
+ size = NVGPU_ALIGN(size, 4U);
  data = nvgpu_falcon_readl(flcn, falcon_falcon_dmemc_r(port)) &
  addr_mask;
  if (data != (nvgpu_safe_add_u32(dst, size) & addr_mask)) {
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -82,7 +82,7 @@ int gr_tu104_get_offset_in_gpccs_segment(struct gk20a *g,
  }

  /* aligned to next 256 byte */
- offset_in_segment = ALIGN(offset_in_segment, 256U);
+ offset_in_segment = NVGPU_ALIGN(offset_in_segment, 256U);

  nvgpu_log(g, gpu_dbg_info | gpu_dbg_gpu_dbg,
  "egpc etpc offset_in_segment 0x%#08x",
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -28,7 +28,7 @@

  void gv100_gr_hwpm_map_align_regs_perf_pma(u32 *offset)
  {
- *offset = ALIGN(*offset, 256U);
+ *offset = NVGPU_ALIGN(*offset, 256U);
  }

  u32 gv100_gr_hwpm_map_get_active_fbpa_mask(struct gk20a *g)
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -231,7 +231,7 @@ u32 gp10b_gr_init_get_global_attr_cb_size(struct gk20a *g, u32 tpc_count,
  gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(),
  max_tpc)));

- size = ALIGN(size, 128U);
+ size = NVGPU_ALIGN(size, 128U);

  return size;
  }
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -359,6 +359,6 @@ u32 gp10b_gr_init_get_ctx_attrib_cb_size(struct gk20a *g, u32 betacb_size,
  gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(),
  max_tpc));

- return ALIGN(size, 128U);
+ return NVGPU_ALIGN(size, 128U);
  }
  #endif
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -703,7 +703,7 @@ u32 gv11b_gr_init_get_global_attr_cb_size(struct gk20a *g, u32 tpc_count,
  gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(),
  max_tpc)));

- size = ALIGN(size, 128U);
+ size = NVGPU_ALIGN(size, 128U);

  return size;
  }

@@ -915,7 +915,7 @@ u32 gv11b_gr_init_get_patch_slots(struct gk20a *g,
  /*
  * Align to 4K size
  */
- size = ALIGN(size, slot_size);
+ size = NVGPU_ALIGN(size, slot_size);

  /*
  * Increase the size to accommodate for additional TPC partition update
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,

@@ -23,4 +23,6 @@
  /** User memory macro. */
  #define nvgpu_user __user

+ #define NVGPU_ALIGN ALIGN
+
  #endif /* __NVGPU_UTILS_LINUX_H__ */
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -269,7 +269,7 @@
  *
  * @return Returns \a x aligned with the value mentioned in \a a.
  */
- #define ALIGN(x, a) \
+ #define NVGPU_ALIGN(x, a) \
  __builtin_choose_expr( \
  (IS_UNSIGNED_TYPE(x) && IS_UNSIGNED_TYPE(a)), \
  __builtin_choose_expr( \

@@ -288,7 +288,7 @@
  *
  * @return Returns \a x aligned with the page size value.
  */
- #define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
+ #define PAGE_ALIGN(x) NVGPU_ALIGN(x, PAGE_SIZE)

  /**
  * @brief Convert hertz to kilo hertz.
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -423,7 +423,7 @@ int test_align_macros(struct unit_module *m,

  test1 = ALIGN_TEST_VALUE;
  result = ALIGN_WITH_VALUE;
- test1 = ALIGN(test1, result);
+ test1 = NVGPU_ALIGN(test1, result);
  if (test1 & (ALIGN_WITH_VALUE - 1)) {
  unit_return_fail(m,
  "ALIGN failure %x\n", test1);