diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c b/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c
index 68e0a6066..92dab45be 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -203,21 +203,21 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
 	}
 
 	p_img->desc->bootloader_start_offset = fecs->boot.offset;
-	p_img->desc->bootloader_size = ALIGN(fecs->boot.size,
+	p_img->desc->bootloader_size = NVGPU_ALIGN(fecs->boot.size,
 					LSF_DATA_SIZE_ALIGNMENT);
 	p_img->desc->bootloader_imem_offset = fecs->boot_imem_offset;
 	p_img->desc->bootloader_entry_point = fecs->boot_entry;
-	tmp_size = nvgpu_safe_add_u32(ALIGN(fecs->boot.size,
+	tmp_size = nvgpu_safe_add_u32(NVGPU_ALIGN(fecs->boot.size,
 					LSF_DATA_SIZE_ALIGNMENT),
-					ALIGN(fecs->code.size,
+					NVGPU_ALIGN(fecs->code.size,
 					LSF_DATA_SIZE_ALIGNMENT));
 	p_img->desc->image_size = nvgpu_safe_add_u32(tmp_size,
-					ALIGN(fecs->data.size,
+					NVGPU_ALIGN(fecs->data.size,
 					LSF_DATA_SIZE_ALIGNMENT));
-	p_img->desc->app_size = nvgpu_safe_add_u32(ALIGN(fecs->code.size,
+	p_img->desc->app_size = nvgpu_safe_add_u32(NVGPU_ALIGN(fecs->code.size,
 					LSF_DATA_SIZE_ALIGNMENT),
-					ALIGN(fecs->data.size,
+					NVGPU_ALIGN(fecs->data.size,
 					LSF_DATA_SIZE_ALIGNMENT));
 	p_img->desc->app_start_offset = fecs->code.offset;
 	p_img->desc->app_imem_offset = APP_IMEM_OFFSET;
@@ -312,42 +312,42 @@ int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
 	}
 
 	p_img->desc->bootloader_start_offset = BL_START_OFFSET;
-	p_img->desc->bootloader_size = ALIGN(gpccs->boot.size,
+	p_img->desc->bootloader_size = NVGPU_ALIGN(gpccs->boot.size,
 					LSF_DATA_SIZE_ALIGNMENT);
 	p_img->desc->bootloader_imem_offset = gpccs->boot_imem_offset;
 	p_img->desc->bootloader_entry_point = gpccs->boot_entry;
-	tmp_size = nvgpu_safe_add_u32(ALIGN(gpccs->boot.size,
+	tmp_size = nvgpu_safe_add_u32(NVGPU_ALIGN(gpccs->boot.size,
 					LSF_DATA_SIZE_ALIGNMENT),
-					ALIGN(gpccs->code.size,
+					NVGPU_ALIGN(gpccs->code.size,
 					LSF_DATA_SIZE_ALIGNMENT));
 	p_img->desc->image_size = nvgpu_safe_add_u32(tmp_size,
-					ALIGN(gpccs->data.size,
+					NVGPU_ALIGN(gpccs->data.size,
 					LSF_DATA_SIZE_ALIGNMENT));
 	p_img->desc->app_size =
-		nvgpu_safe_add_u32(ALIGN(gpccs->code.size,
+		nvgpu_safe_add_u32(NVGPU_ALIGN(gpccs->code.size,
 					LSF_DATA_SIZE_ALIGNMENT),
-					ALIGN(gpccs->data.size,
+					NVGPU_ALIGN(gpccs->data.size,
 					LSF_DATA_SIZE_ALIGNMENT));
 	p_img->desc->app_start_offset = p_img->desc->bootloader_size;
 	p_img->desc->app_imem_offset = APP_IMEM_OFFSET;
 	p_img->desc->app_imem_entry = APP_IMEM_ENTRY;
 	p_img->desc->app_dmem_offset = APP_DMEM_OFFSET;
 	p_img->desc->app_resident_code_offset = APP_RESIDENT_CODE_OFFSET;
-	p_img->desc->app_resident_code_size = ALIGN(gpccs->code.size,
+	p_img->desc->app_resident_code_size = NVGPU_ALIGN(gpccs->code.size,
 					LSF_DATA_SIZE_ALIGNMENT);
 	p_img->desc->app_resident_data_offset =
-		nvgpu_safe_sub_u32(ALIGN(gpccs->data.offset,
+		nvgpu_safe_sub_u32(NVGPU_ALIGN(gpccs->data.offset,
 					LSF_DATA_SIZE_ALIGNMENT),
-					ALIGN(gpccs->code.offset,
+					NVGPU_ALIGN(gpccs->code.offset,
 					LSF_DATA_SIZE_ALIGNMENT));
-	p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size,
+	p_img->desc->app_resident_data_size = NVGPU_ALIGN(gpccs->data.size,
 					LSF_DATA_SIZE_ALIGNMENT);
 	p_img->data = (u32 *)
 		(void *)((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon) +
 		gpccs->boot.offset);
-	p_img->data_size = ALIGN(p_img->desc->image_size,
+	p_img->data_size = NVGPU_ALIGN(p_img->desc->image_size,
 					LSF_DATA_SIZE_ALIGNMENT);
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
@@ -480,15 +480,15 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 		 * the code following it is aligned, but the size in the image
 		 * desc is not, bloat it up to be on a 256 byte alignment.
 		 */
-		pnode->lsb_header.bl_code_size = ALIGN(
+		pnode->lsb_header.bl_code_size = NVGPU_ALIGN(
 			pnode->ucode_img.desc->bootloader_size,
 			LSF_BL_CODE_SIZE_ALIGNMENT);
 		full_app_size = nvgpu_safe_add_u32(
-			ALIGN(pnode->ucode_img.desc->app_size,
+			NVGPU_ALIGN(pnode->ucode_img.desc->app_size,
 				LSF_BL_CODE_SIZE_ALIGNMENT),
-				pnode->lsb_header.bl_code_size);
+			pnode->lsb_header.bl_code_size);
-		pnode->lsb_header.ucode_size = nvgpu_safe_add_u32(ALIGN(
+		pnode->lsb_header.ucode_size = nvgpu_safe_add_u32(NVGPU_ALIGN(
 			pnode->ucode_img.desc->app_resident_data_offset,
 			LSF_BL_CODE_SIZE_ALIGNMENT),
 			pnode->lsb_header.bl_code_size);
@@ -715,7 +715,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
 	 */
 	while (pnode != NULL) {
 		/* Align, save off, and include an LSB header size */
-		wpr_offset = ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
+		wpr_offset = NVGPU_ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
 		pnode->wpr_header.lsb_offset = wpr_offset;
 		wpr_offset = nvgpu_safe_add_u32(wpr_offset,
 			(u32)sizeof(struct lsf_lsb_header));
@@ -724,7 +724,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
 		 * Align, save off, and include the original (static)ucode
 		 * image size
 		 */
-		wpr_offset = ALIGN(wpr_offset, LSF_UCODE_DATA_ALIGNMENT);
+		wpr_offset = NVGPU_ALIGN(wpr_offset, LSF_UCODE_DATA_ALIGNMENT);
 		pnode->lsb_header.ucode_off = wpr_offset;
 		wpr_offset = nvgpu_safe_add_u32(wpr_offset,
 			pnode->ucode_img.data_size);
@@ -743,13 +743,13 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
 		 * generic one, which is the largest it will will ever be.
 		 */
 		/* Align (size bloat) and save off generic descriptor size*/
-		pnode->lsb_header.bl_data_size = ALIGN(
+		pnode->lsb_header.bl_data_size = NVGPU_ALIGN(
 			nvgpu_safe_cast_u64_to_u32(
 				sizeof(pnode->bl_gen_desc)),
 			LSF_BL_DATA_SIZE_ALIGNMENT);
 
 		/*Align, save off, and include the additional BL data*/
-		wpr_offset = ALIGN(wpr_offset, LSF_BL_DATA_ALIGNMENT);
+		wpr_offset = NVGPU_ALIGN(wpr_offset, LSF_BL_DATA_ALIGNMENT);
 		pnode->lsb_header.bl_data_off = wpr_offset;
 		wpr_offset = nvgpu_safe_add_u32(wpr_offset,
 			pnode->lsb_header.bl_data_size);
diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c
index b00cab87b..848d4c0ca 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -101,14 +101,14 @@ int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
 	}
 
 	p_img->desc->bootloader_start_offset = fecs->boot.offset;
-	p_img->desc->bootloader_size = ALIGN(fecs->boot.size, 256U);
+	p_img->desc->bootloader_size = NVGPU_ALIGN(fecs->boot.size, 256U);
 	p_img->desc->bootloader_imem_offset = fecs->boot_imem_offset;
 	p_img->desc->bootloader_entry_point = fecs->boot_entry;
-	p_img->desc->image_size = ALIGN(fecs->boot.size, 256U) +
-		ALIGN(fecs->code.size, 256U) + ALIGN(fecs->data.size, 256U);
-	p_img->desc->app_size = ALIGN(fecs->code.size, 256U) +
-		ALIGN(fecs->data.size, 256U);
+	p_img->desc->image_size = NVGPU_ALIGN(fecs->boot.size, 256U) +
+		NVGPU_ALIGN(fecs->code.size, 256U) + NVGPU_ALIGN(fecs->data.size, 256U);
+	p_img->desc->app_size = NVGPU_ALIGN(fecs->code.size, 256U) +
+		NVGPU_ALIGN(fecs->data.size, 256U);
 	p_img->desc->app_start_offset = fecs->code.offset;
 	p_img->desc->app_imem_offset = 0;
 	p_img->desc->app_imem_entry = 0;
@@ -168,28 +168,29 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
 
 	p_img->desc->bootloader_start_offset = 0;
-	p_img->desc->bootloader_size = ALIGN(gpccs->boot.size, 256U);
+	p_img->desc->bootloader_size = NVGPU_ALIGN(gpccs->boot.size, 256U);
 	p_img->desc->bootloader_imem_offset = gpccs->boot_imem_offset;
 	p_img->desc->bootloader_entry_point = gpccs->boot_entry;
-	p_img->desc->image_size = ALIGN(gpccs->boot.size, 256U) +
-		ALIGN(gpccs->code.size, 256U) + ALIGN(gpccs->data.size, 256U);
-	p_img->desc->app_size =
-		ALIGN(gpccs->code.size, 256U) + ALIGN(gpccs->data.size, 256U);
+	p_img->desc->image_size = NVGPU_ALIGN(gpccs->boot.size, 256U) +
+		NVGPU_ALIGN(gpccs->code.size, 256U) +
+		NVGPU_ALIGN(gpccs->data.size, 256U);
+	p_img->desc->app_size = NVGPU_ALIGN(gpccs->code.size, 256U) +
+		NVGPU_ALIGN(gpccs->data.size, 256U);
 	p_img->desc->app_start_offset = p_img->desc->bootloader_size;
 	p_img->desc->app_imem_offset = 0;
 	p_img->desc->app_imem_entry = 0;
 	p_img->desc->app_dmem_offset = 0;
 	p_img->desc->app_resident_code_offset = 0;
-	p_img->desc->app_resident_code_size = ALIGN(gpccs->code.size, 256U);
+	p_img->desc->app_resident_code_size = NVGPU_ALIGN(gpccs->code.size, 256U);
 	p_img->desc->app_resident_data_offset =
-		ALIGN(gpccs->data.offset, 256U) -
-		ALIGN(gpccs->code.offset, 256U);
-	p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size, 256U);
+		NVGPU_ALIGN(gpccs->data.offset, 256U) -
+		NVGPU_ALIGN(gpccs->code.offset, 256U);
+	p_img->desc->app_resident_data_size = NVGPU_ALIGN(gpccs->data.size, 256U);
 	p_img->data = (u32 *)
 		((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon) +
 		gpccs->boot.offset);
-	p_img->data_size = ALIGN(p_img->desc->image_size, 256U);
+	p_img->data_size = NVGPU_ALIGN(p_img->desc->image_size, 256U);
 	p_img->lsf_desc = (struct lsf_ucode_desc_v0 *)lsf_desc;
 	nvgpu_acr_dbg(g, "gpccs fw loaded\n");
 	nvgpu_release_firmware(g, gpccs_sig);
@@ -226,13 +227,13 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 		 * the code following it is aligned, but the size in the image
 		 * desc is not, bloat it up to be on a 256 byte alignment.
 		 */
-		pnode->lsb_header.bl_code_size = ALIGN(
+		pnode->lsb_header.bl_code_size = NVGPU_ALIGN(
 			pnode->ucode_img.desc->bootloader_size,
 			LSF_BL_CODE_SIZE_ALIGNMENT);
-		full_app_size = ALIGN(pnode->ucode_img.desc->app_size,
+		full_app_size = NVGPU_ALIGN(pnode->ucode_img.desc->app_size,
 			LSF_BL_CODE_SIZE_ALIGNMENT) +
 			pnode->lsb_header.bl_code_size;
-		pnode->lsb_header.ucode_size = ALIGN(
+		pnode->lsb_header.ucode_size = NVGPU_ALIGN(
 			pnode->ucode_img.desc->app_resident_data_offset,
 			LSF_BL_CODE_SIZE_ALIGNMENT) +
 			pnode->lsb_header.bl_code_size;
@@ -362,7 +363,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr_v0 *plsf
 	 */
 	while (pnode != NULL) {
 		/* Align, save off, and include an LSB header size */
-		wpr_offset = ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
+		wpr_offset = NVGPU_ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
 		pnode->wpr_header.lsb_offset = wpr_offset;
 		wpr_offset += (u32)sizeof(struct lsf_lsb_header_v0);
 
@@ -370,7 +371,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr_v0 *plsf
 		 * Align, save off, and include the original (static)
 		 * ucode image size
 		 */
-		wpr_offset = ALIGN(wpr_offset,
+		wpr_offset = NVGPU_ALIGN(wpr_offset,
 			LSF_UCODE_DATA_ALIGNMENT);
 		pnode->lsb_header.ucode_off = wpr_offset;
 		wpr_offset += pnode->ucode_img.data_size;
@@ -389,12 +390,12 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr_v0 *plsf
 		 * generic one, which is the largest it will will ever be.
 		 */
 		/* Align (size bloat) and save off generic descriptor size */
-		pnode->lsb_header.bl_data_size = ALIGN(
+		pnode->lsb_header.bl_data_size = NVGPU_ALIGN(
 			(u32)sizeof(pnode->bl_gen_desc),
 			LSF_BL_DATA_SIZE_ALIGNMENT);
 
 		/* Align, save off, and include the additional BL data */
-		wpr_offset = ALIGN(wpr_offset,
+		wpr_offset = NVGPU_ALIGN(wpr_offset,
 			LSF_BL_DATA_ALIGNMENT);
 		pnode->lsb_header.bl_data_off = wpr_offset;
 		wpr_offset += pnode->lsb_header.bl_data_size;
diff --git a/drivers/gpu/nvgpu/common/engine_queues/engine_mem_queue.c b/drivers/gpu/nvgpu/common/engine_queues/engine_mem_queue.c
index 8e00098e1..76e178221 100644
--- a/drivers/gpu/nvgpu/common/engine_queues/engine_mem_queue.c
+++ b/drivers/gpu/nvgpu/common/engine_queues/engine_mem_queue.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -70,7 +70,7 @@ static bool engine_mem_queue_has_room(struct nvgpu_engine_mem_queue *queue,
 	bool q_rewind = false;
 	int err = 0;
 
-	size = ALIGN(size, QUEUE_ALIGNMENT);
+	size = NVGPU_ALIGN(size, QUEUE_ALIGNMENT);
 
 	err = mem_queue_get_head_tail(queue, &q_head, &q_tail);
 	if (err != 0) {
@@ -117,7 +117,7 @@ static int engine_mem_queue_rewind(struct nvgpu_falcon *flcn,
 		goto exit;
 	} else {
 		queue->position += nvgpu_safe_cast_u32_to_u8(
-			ALIGN(U32(cmd.hdr.size), QUEUE_ALIGNMENT));
+			NVGPU_ALIGN(U32(cmd.hdr.size), QUEUE_ALIGNMENT));
 		nvgpu_log_info(g, "flcn-%d queue-%d, rewinded",
 			queue->flcn_id, queue->id);
 	}
@@ -207,7 +207,7 @@ int nvgpu_engine_mem_queue_push(struct nvgpu_falcon *flcn,
 		goto unlock_mutex;
 	}
 
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+	queue->position += NVGPU_ALIGN(size, QUEUE_ALIGNMENT);
 
 	err = queue->head(g, queue->id, queue->index, &queue->position,
 		QUEUE_SET);
@@ -279,7 +279,7 @@ int nvgpu_engine_mem_queue_pop(struct nvgpu_falcon *flcn,
 		goto unlock_mutex;
 	}
 
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+	queue->position += NVGPU_ALIGN(size, QUEUE_ALIGNMENT);
 
 	err = queue->tail(g, queue->id, queue->index, &queue->position,
 		QUEUE_SET);
diff --git a/drivers/gpu/nvgpu/common/gr/gr_falcon.c b/drivers/gpu/nvgpu/common/gr/gr_falcon.c
index f5c8bbf24..f3bfef690 100644
--- a/drivers/gpu/nvgpu/common/gr/gr_falcon.c
+++ b/drivers/gpu/nvgpu/common/gr/gr_falcon.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -244,7 +244,7 @@ static void nvgpu_gr_falcon_init_ctxsw_ucode_segment(
 	p_seg->offset = *offset;
 	p_seg->size = size;
 	ucode_offset = nvgpu_safe_add_u32(*offset, size);
-	*offset = ALIGN(ucode_offset, 256U);
+	*offset = NVGPU_ALIGN(ucode_offset, 256U);
 }
 
 static void nvgpu_gr_falcon_init_ctxsw_ucode_segments(
@@ -252,7 +252,7 @@ static void nvgpu_gr_falcon_init_ctxsw_ucode_segments(
 	struct nvgpu_ctxsw_bootloader_desc *bootdesc,
 	u32 code_size, u32 data_size)
 {
-	u32 boot_size = ALIGN(bootdesc->size, sizeof(u32));
+	u32 boot_size = NVGPU_ALIGN(bootdesc->size, sizeof(u32));
 
 	segments->boot_entry = bootdesc->entry_point;
 	segments->boot_imem_offset = bootdesc->imem_offset;
diff --git a/drivers/gpu/nvgpu/common/gr/hwpm_map.c b/drivers/gpu/nvgpu/common/gr/hwpm_map.c
index aeb34d4d0..8344bb101 100644
--- a/drivers/gpu/nvgpu/common/gr/hwpm_map.c
+++ b/drivers/gpu/nvgpu/common/gr/hwpm_map.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -320,7 +320,7 @@ static int add_ctxsw_buffer_map_entries_gpcs(struct gk20a *g,
 			}
 		}
 
-		*offset = ALIGN(*offset, 256U);
+		*offset = NVGPU_ALIGN(*offset, 256U);
 
 		base = (g->ops.perf.get_pmmgpc_per_chiplet_offset() * gpc_num);
 		if (add_ctxsw_buffer_map_entries(map,
@@ -329,7 +329,7 @@ static int add_ctxsw_buffer_map_entries_gpcs(struct gk20a *g,
 			return -EINVAL;
 		}
 
-		*offset = ALIGN(*offset, 256U);
+		*offset = NVGPU_ALIGN(*offset, 256U);
 	}
 	return 0;
 }
@@ -446,7 +446,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
 
 	/* Add entries from _LIST_nv_perf_sys_control_ctx_reg*/
 	if (nvgpu_netlist_get_perf_sys_control_ctxsw_regs(g)->count > 0U) {
-		offset = ALIGN(offset, 256U);
+		offset = NVGPU_ALIGN(offset, 256U);
 
 		ret = add_ctxsw_buffer_map_entries(map,
 			nvgpu_netlist_get_perf_sys_control_ctxsw_regs(g),
@@ -469,7 +469,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
 		goto cleanup;
 	}
 
-	offset = ALIGN(offset, 256U);
+	offset = NVGPU_ALIGN(offset, 256U);
 
 	/* Add entries from _LIST_nv_perf_pma_control_ctx_reg*/
 	ret = add_ctxsw_buffer_map_entries(map,
@@ -479,7 +479,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
 		goto cleanup;
 	}
 
-	offset = ALIGN(offset, 256U);
+	offset = NVGPU_ALIGN(offset, 256U);
 
 	/* Add entries from _LIST_nv_perf_fbp_ctx_regs */
 	if (add_ctxsw_buffer_map_entries_subunits(map,
@@ -529,7 +529,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
 		goto cleanup;
 	}
 
-	offset = ALIGN(offset, 256U);
+	offset = NVGPU_ALIGN(offset, 256U);
 
 	/* Add entries from _LIST_nv_perf_fbp_control_ctx_regs */
 	if (add_ctxsw_buffer_map_entries_subunits(map,
@@ -541,7 +541,7 @@ static int nvgpu_gr_hwpm_map_create(struct gk20a *g,
 		goto cleanup;
 	}
 
-	offset = ALIGN(offset, 256U);
+	offset = NVGPU_ALIGN(offset, 256U);
 
 	/* Add GPC entries */
 	if (add_ctxsw_buffer_map_entries_gpcs(g, map, &count, &offset,
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator.c
index 636dc46c1..b4211a2cd 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator.c
@@ -114,7 +114,7 @@ static void balloc_compute_max_order(struct nvgpu_buddy_allocator *a)
  */
 static void balloc_allocator_align(struct nvgpu_buddy_allocator *a)
 {
-	a->start = ALIGN(a->base, a->blk_size);
+	a->start = NVGPU_ALIGN(a->base, a->blk_size);
 NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 10_3), "Bug 2277532")
 NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 14_4), "Bug 2277532")
 NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 15_6), "Bug 2277532")
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
index 6449fc1b2..790869910 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -664,7 +664,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages(
 	u64 pages;
 	u32 i = 0;
 
-	pages = ALIGN(len, a->page_size) >> a->page_shift;
+	pages = NVGPU_ALIGN(len, a->page_size) >> a->page_shift;
 
 	alloc = do_nvgpu_alloc_pages(a, pages);
 	if (alloc == NULL) {
@@ -839,7 +839,7 @@ static u64 nvgpu_page_palloc_fixed(struct nvgpu_allocator *na,
 	u64 aligned_len, pages;
 	u32 i = 0;
 
-	aligned_len = ALIGN(len, a->page_size);
+	aligned_len = NVGPU_ALIGN(len, a->page_size);
 	pages = aligned_len >> a->page_shift;
 
 	alloc_lock(na);
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index 00ff3b078..e2963f458 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -202,7 +202,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	 * PD must have mem_offs be 0 for the invalidate code to work, so we
 	 * can't use the PD cache.
 	 */
-	pdb_size = ALIGN(pd_get_size(&vm->mmu_levels[0], &attrs), NVGPU_CPU_PAGE_SIZE);
+	pdb_size = NVGPU_ALIGN(pd_get_size(&vm->mmu_levels[0], &attrs), NVGPU_CPU_PAGE_SIZE);
 
 	err = nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
 	if (err != 0) {
@@ -227,7 +227,7 @@ static u64 nvgpu_align_map_length(struct vm_gk20a *vm, u64 length,
 {
 	u64 page_size = vm->gmmu_page_sizes[attrs->pgsz];
 
-	return ALIGN(length, page_size);
+	return NVGPU_ALIGN(length, page_size);
 }
 
 static u32 pd_entries(const struct gk20a_mmu_level *l,
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index b80566895..804323608 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -261,7 +261,7 @@ u64 nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 	}
 
 	/* Be certain we round up to page_size if needed */
-	size = ALIGN(size, page_size);
+	size = NVGPU_ALIGN(size, page_size);
 
 	addr = nvgpu_alloc_pte(vma, size, page_size);
 	if (addr == 0ULL) {
@@ -1402,7 +1402,7 @@ static int nvgpu_vm_new_mapping(struct vm_gk20a *vm,
 			min_t(u64, binfo_ptr->size, align));
 	}
 	map_size = (map_size != 0ULL) ? map_size : binfo_ptr->size;
-	map_size = ALIGN(map_size, SZ_4K);
+	map_size = NVGPU_ALIGN(map_size, SZ_4K);
 
 	if ((map_size > binfo_ptr->size) ||
 	    (phys_offset > (binfo_ptr->size - map_size))) {
diff --git a/drivers/gpu/nvgpu/common/pmu/allocator.c b/drivers/gpu/nvgpu/common/pmu/allocator.c
index e9c71d77d..ff9566ff3 100644
--- a/drivers/gpu/nvgpu/common/pmu/allocator.c
+++ b/drivers/gpu/nvgpu/common/pmu/allocator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@ void nvgpu_pmu_allocator_dmem_init(struct gk20a *g,
 	if (!nvgpu_alloc_initialized(dmem)) {
 		/* Align start and end addresses */
 		u32 start =
-			ALIGN(U32(fw_ops->get_init_msg_sw_mngd_area_off(init)),
+			NVGPU_ALIGN(U32(fw_ops->get_init_msg_sw_mngd_area_off(init)),
 			PMU_DMEM_ALLOC_ALIGNMENT);
 		u32 end = (U32(fw_ops->get_init_msg_sw_mngd_area_off(init)) +
 			U32(fw_ops->get_init_msg_sw_mngd_area_size(init))) &
diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c
index 21cd9be19..8cfd0ea1f 100644
--- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c
+++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -389,7 +389,7 @@ static int pmu_process_init_msg_dmem(struct gk20a *g, struct nvgpu_pmu *pmu,
 		goto exit;
 	}
 
-	tail += ALIGN(U32(msg->hdr.size), PMU_DMEM_ALIGNMENT);
+	tail += NVGPU_ALIGN(U32(msg->hdr.size), PMU_DMEM_ALIGNMENT);
 	g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_SET);
 
 exit:
diff --git a/drivers/gpu/nvgpu/common/sec2/ipc/sec2_msg.c b/drivers/gpu/nvgpu/common/sec2/ipc/sec2_msg.c
index 312d3f4b0..3aa1d013c 100644
--- a/drivers/gpu/nvgpu/common/sec2/ipc/sec2_msg.c
+++ b/drivers/gpu/nvgpu/common/sec2/ipc/sec2_msg.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -148,7 +148,7 @@ static int sec2_process_init_msg(struct nvgpu_sec2 *sec2,
 		goto exit;
 	}
 
-	tail += ALIGN(U32(msg->hdr.size), PMU_DMEM_ALIGNMENT);
+	tail += NVGPU_ALIGN(U32(msg->hdr.size), PMU_DMEM_ALIGNMENT);
 	g->ops.sec2.msgq_tail(g, sec2, &tail, QUEUE_SET);
 
 	sec2_init = &msg->msg.init.sec2_init;
diff --git a/drivers/gpu/nvgpu/common/sec2/sec2_allocator.c b/drivers/gpu/nvgpu/common/sec2/sec2_allocator.c
index cf5b52d1a..92c5f93b2 100644
--- a/drivers/gpu/nvgpu/common/sec2/sec2_allocator.c
+++ b/drivers/gpu/nvgpu/common/sec2/sec2_allocator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -31,7 +31,7 @@ int nvgpu_sec2_dmem_allocator_init(struct gk20a *g,
 	int err = 0;
 	if (!nvgpu_alloc_initialized(dmem)) {
 		/* Align start and end addresses */
-		u32 start = ALIGN(sec2_init->nv_managed_area_offset,
+		u32 start = NVGPU_ALIGN(sec2_init->nv_managed_area_offset,
 			PMU_DMEM_ALLOC_ALIGNMENT);
 
 		u32 end = (sec2_init->nv_managed_area_offset +
diff --git a/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c
index d07370c7f..37b1a0e7c 100644
--- a/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c
@@ -1,7 +1,7 @@
 /*
 * Virtualized GPU Memory Management
 *
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -266,7 +266,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
 	struct tegra_vgpu_mem_desc *mem_desc;
 	u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
-	u64 buffer_size = ALIGN(size, SZ_4K);
+	u64 buffer_size = NVGPU_ALIGN(size, SZ_4K);
 	u64 space_to_skip = buffer_offset;
 	u32 mem_desc_count = 0, i;
 	void *handle = NULL;
@@ -314,7 +314,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	sgl = sgt->sgl;
 
 	/* Align size to page size */
-	size = ALIGN(size, page_size);
+	size = NVGPU_ALIGN(size, page_size);
 
 	while (sgl) {
 		u64 phys_addr;
diff --git a/drivers/gpu/nvgpu/hal/falcon/falcon_gk20a_fusa.c b/drivers/gpu/nvgpu/hal/falcon/falcon_gk20a_fusa.c
index 4f628c298..59fce48c6 100644
--- a/drivers/gpu/nvgpu/hal/falcon/falcon_gk20a_fusa.c
+++ b/drivers/gpu/nvgpu/hal/falcon/falcon_gk20a_fusa.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -203,7 +203,7 @@ NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 11_3), "TID-415")
 		nvgpu_falcon_writel(flcn, falcon_falcon_dmemd_r(port), data);
 	}
 
-	size = ALIGN(size, 4U);
+	size = NVGPU_ALIGN(size, 4U);
 	data = nvgpu_falcon_readl(flcn, falcon_falcon_dmemc_r(port)) &
 		addr_mask;
 	if (data != (nvgpu_safe_add_u32(dst, size) & addr_mask)) {
diff --git a/drivers/gpu/nvgpu/hal/gr/gr/gr_tu104.c b/drivers/gpu/nvgpu/hal/gr/gr/gr_tu104.c
index 83351a466..a3592679b 100644
--- a/drivers/gpu/nvgpu/hal/gr/gr/gr_tu104.c
+++ b/drivers/gpu/nvgpu/hal/gr/gr/gr_tu104.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -82,7 +82,7 @@ int gr_tu104_get_offset_in_gpccs_segment(struct gk20a *g,
 	}
 
 	/* aligned to next 256 byte */
-	offset_in_segment = ALIGN(offset_in_segment, 256U);
+	offset_in_segment = NVGPU_ALIGN(offset_in_segment, 256U);
 
 	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gpu_dbg,
 		"egpc etpc offset_in_segment 0x%#08x",
diff --git a/drivers/gpu/nvgpu/hal/gr/hwpm_map/hwpm_map_gv100.c b/drivers/gpu/nvgpu/hal/gr/hwpm_map/hwpm_map_gv100.c
index 52024c9fb..0f237cbcc 100644
--- a/drivers/gpu/nvgpu/hal/gr/hwpm_map/hwpm_map_gv100.c
+++ b/drivers/gpu/nvgpu/hal/gr/hwpm_map/hwpm_map_gv100.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -28,7 +28,7 @@
 
 void gv100_gr_hwpm_map_align_regs_perf_pma(u32 *offset)
 {
-	*offset = ALIGN(*offset, 256U);
+	*offset = NVGPU_ALIGN(*offset, 256U);
 }
 
 u32 gv100_gr_hwpm_map_get_active_fbpa_mask(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/hal/gr/init/gr_init_gp10b.c b/drivers/gpu/nvgpu/hal/gr/init/gr_init_gp10b.c
index b1f82a8ee..c94610f30 100644
--- a/drivers/gpu/nvgpu/hal/gr/init/gr_init_gp10b.c
+++ b/drivers/gpu/nvgpu/hal/gr/init/gr_init_gp10b.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -231,7 +231,7 @@ u32 gp10b_gr_init_get_global_attr_cb_size(struct gk20a *g, u32 tpc_count,
 			gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(),
 			max_tpc)));
 
-	size = ALIGN(size, 128U);
+	size = NVGPU_ALIGN(size, 128U);
 
 	return size;
 }
diff --git a/drivers/gpu/nvgpu/hal/gr/init/gr_init_gp10b_fusa.c b/drivers/gpu/nvgpu/hal/gr/init/gr_init_gp10b_fusa.c
index c3bcb568a..1f1ad3de3 100644
--- a/drivers/gpu/nvgpu/hal/gr/init/gr_init_gp10b_fusa.c
+++ b/drivers/gpu/nvgpu/hal/gr/init/gr_init_gp10b_fusa.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -359,6 +359,6 @@ u32 gp10b_gr_init_get_ctx_attrib_cb_size(struct gk20a *g, u32 betacb_size,
 			gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(),
 			max_tpc));
 
-	return ALIGN(size, 128U);
+	return NVGPU_ALIGN(size, 128U);
 }
 #endif
diff --git a/drivers/gpu/nvgpu/hal/gr/init/gr_init_gv11b_fusa.c b/drivers/gpu/nvgpu/hal/gr/init/gr_init_gv11b_fusa.c
index 000a5e2c7..b67ff6fa7 100644
--- a/drivers/gpu/nvgpu/hal/gr/init/gr_init_gv11b_fusa.c
+++ b/drivers/gpu/nvgpu/hal/gr/init/gr_init_gv11b_fusa.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -703,7 +703,7 @@ u32 gv11b_gr_init_get_global_attr_cb_size(struct gk20a *g, u32 tpc_count,
 			gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(),
 			max_tpc)));
 
-	size = ALIGN(size, 128U);
+	size = NVGPU_ALIGN(size, 128U);
 
 	return size;
 }
@@ -915,7 +915,7 @@ u32 gv11b_gr_init_get_patch_slots(struct gk20a *g,
 	/*
 	 * Align to 4K size
 	 */
-	size = ALIGN(size, slot_size);
+	size = NVGPU_ALIGN(size, slot_size);
 
 	/*
 	 * Increase the size to accommodate for additional TPC partition update
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/utils.h b/drivers/gpu/nvgpu/include/nvgpu/linux/utils.h
index 018f1505e..a16413b52 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/linux/utils.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/utils.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -23,4 +23,6 @@
 /** User memory macro. */
 #define nvgpu_user __user
 
+#define NVGPU_ALIGN ALIGN
+
 #endif /* __NVGPU_UTILS_LINUX_H__ */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/posix/utils.h b/drivers/gpu/nvgpu/include/nvgpu/posix/utils.h
index ce173fd7f..9416077f7 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/posix/utils.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/posix/utils.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -269,7 +269,7 @@
 *
 * @return Returns \a x aligned with the value mentioned in \a a.
 */
-#define ALIGN(x, a)	\
+#define NVGPU_ALIGN(x, a)	\
 	__builtin_choose_expr(					\
 		(IS_UNSIGNED_TYPE(x) && IS_UNSIGNED_TYPE(a)),	\
 		__builtin_choose_expr(				\
@@ -288,7 +288,7 @@
 *
 * @return Returns \a x aligned with the page size value.
 */
-#define PAGE_ALIGN(x)	ALIGN(x, PAGE_SIZE)
+#define PAGE_ALIGN(x)	NVGPU_ALIGN(x, PAGE_SIZE)
 
 /**
 * @brief Convert hertz to kilo hertz.
diff --git a/userspace/units/posix/utils/posix-utils.c b/userspace/units/posix/utils/posix-utils.c
index cdf942c6c..39ef0205c 100644
--- a/userspace/units/posix/utils/posix-utils.c
+++ b/userspace/units/posix/utils/posix-utils.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -423,7 +423,7 @@ int test_align_macros(struct unit_module *m,
 
 	test1 = ALIGN_TEST_VALUE;
 	result = ALIGN_WITH_VALUE;
-	test1 = ALIGN(test1, result);
+	test1 = NVGPU_ALIGN(test1, result);
 	if (test1 & (ALIGN_WITH_VALUE - 1)) {
 		unit_return_fail(m, "ALIGN failure %x\n", test1);
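
Reviewer note (not part of the patch): the rename is mechanical. NVGPU_ALIGN
keeps the round-up semantics of the old ALIGN macro; on Linux builds the new
shim in include/nvgpu/linux/utils.h simply maps NVGPU_ALIGN to the kernel's
ALIGN, while the posix header renames its existing definition in place. As a
sanity check, here is a minimal standalone sketch of the intended behavior.
DEMO_ALIGN is a hypothetical stand-in, and it assumes the alignment is a power
of two (4U, 128U, 256U, page sizes), which is how every call site touched by
this patch uses it; the mask check mirrors the one in test_align_macros().

#include <stdio.h>

/*
 * Hypothetical stand-in for NVGPU_ALIGN: round x up to the next
 * multiple of a, valid only when a is a power of two.
 */
#define DEMO_ALIGN(x, a) (((x) + ((a) - 1U)) & ~((a) - 1U))

int main(void)
{
	unsigned int boot_size = 1000U;
	unsigned int aligned = DEMO_ALIGN(boot_size, 256U); /* 1000 -> 1024 */

	/* Same style of check as test_align_macros(): low bits must be 0. */
	if ((aligned & (256U - 1U)) != 0U) {
		printf("alignment failed: %u\n", aligned);
		return 1;
	}
	printf("aligned = %u\n", aligned);
	return 0;
}

Aligning an already-aligned value is a no-op, which is why the repeated
NVGPU_ALIGN(offset, 256U) calls in hwpm_map.c can be applied unconditionally
between map-entry groups.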