mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: fb: size of compression apis for mm
The fb APIs compression_page_size() and compression_align_mask() were
returning u32s, but the users all really need u64s. This also eliminates
MISRA Rule 10.3 violations for implicit casts to smaller size objects.

JIRA NVGPU-2954

Change-Id: I8dc2b434d9564c89c0e8a1b19c4acbe167e339c1
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2075595
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 30fd2a5dcc
commit f9c4d6b60b
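As a quick illustration of the motivation (a minimal, hypothetical sketch, not taken from the nvgpu sources; only the kernel-style u32/u64 aliases are assumed): when a getter such as compression_page_size() returns a 32-bit value while its callers do 64-bit arithmetic, every use site either needs explicit widening casts or risks an implicit narrowing assignment, which MISRA C:2012 Rule 10.3 flags. Returning u64 lets the callers compute directly at the width they need.

    /* Hypothetical sketch; not the nvgpu code. */
    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Old shape: 32-bit return value. */
    u32 page_size_u32(void)
    {
        return 128U * 1024U;            /* SZ_128K */
    }

    /* New shape: produce the value at the width the callers use. */
    u64 page_size_u64(void)
    {
        return 128ULL * 1024ULL;        /* SZ_128K */
    }

    u64 ctag_base(u32 ctag_offset, u64 buffer_offset)
    {
        /*
         * With a u32 getter, both operands below needed explicit (u64)
         * casts, and assigning a u64 result back into a u32 object would
         * be a Rule 10.3 violation (value assigned to a narrower type).
         */
        u64 granularity = page_size_u64();
        u64 ctag = (u64)ctag_offset * granularity;

        /* Alignment fixup stays pure 64-bit; no implicit narrowing. */
        ctag += buffer_offset & (granularity - 1ULL);
        return ctag;
    }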
@@ -780,11 +780,11 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 	int err = 0;
 	bool allocated = false;
-	u32 ctag_granularity = g->ops.fb.compression_page_size(g);
+	u64 ctag_granularity = g->ops.fb.compression_page_size(g);
 	struct nvgpu_gmmu_attrs attrs = {
 		.pgsz = pgsz_idx,
 		.kind_v = kind_v,
-		.ctag = (u64)ctag_offset * (u64)ctag_granularity,
+		.ctag = (u64)ctag_offset * ctag_granularity,
 		.cacheable = ((flags & NVGPU_VM_MAP_CACHEABLE) != 0U),
 		.rw_flag = rw_flag,
 		.sparse = sparse,
@@ -800,7 +800,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	 * boundaries.
 	 */
 	if (attrs.ctag != 0ULL) {
-		attrs.ctag += buffer_offset & (U64(ctag_granularity) - U64(1));
+		attrs.ctag += buffer_offset & (ctag_granularity - U64(1));
 	}
 
 	attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
@@ -292,7 +292,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 		"PTE: i=%-4u size=%-2u | "
 		"GPU %#-12llx phys %#-12llx "
 		"pgsz: %3dkb perm=%-2s kind=%#02x APT=%-6s %c%c%c%c%c "
-		"ctag=0x%08x "
+		"ctag=0x%08llx "
 		"[0x%08x, 0x%08x]",
 		vm->name,
 		pd_idx, l->entry_size,
@@ -306,7 +306,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 		attrs->priv ? 'P' : '-',
 		attrs->valid ? 'V' : '-',
 		attrs->platform_atomic ? 'A' : '-',
-		(u32)attrs->ctag / g->ops.fb.compression_page_size(g),
+		attrs->ctag / g->ops.fb.compression_page_size(g),
 		pte_w[1], pte_w[0]);
 
 	nvgpu_pd_write(g, pd, (size_t)pd_offset + (size_t)0, pte_w[0]);
@@ -1,8 +1,8 @@
 /*
  * GM20B GPC MMU
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
@@ -191,7 +191,7 @@ u32 gm20b_fb_mmu_debug_rd(struct gk20a *g)
 	return gk20a_readl(g, fb_mmu_debug_rd_r());
 }
 
-unsigned int gm20b_fb_compression_page_size(struct gk20a *g)
+u64 gm20b_fb_compression_page_size(struct gk20a *g)
 {
 	return SZ_128K;
 }
@@ -201,9 +201,9 @@ unsigned int gm20b_fb_compressible_page_size(struct gk20a *g)
 	return SZ_64K;
 }
 
-u32 gm20b_fb_compression_align_mask(struct gk20a *g)
+u64 gm20b_fb_compression_align_mask(struct gk20a *g)
 {
-	return SZ_64K - 1U;
+	return SZ_64K - 1UL;
 }
 
 void gm20b_fb_dump_vpr_info(struct gk20a *g)
@@ -1,7 +1,7 @@
 /*
  * GM20B FB
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,9 +39,9 @@ u32 gm20b_fb_mmu_ctrl(struct gk20a *g);
 u32 gm20b_fb_mmu_debug_ctrl(struct gk20a *g);
 u32 gm20b_fb_mmu_debug_wr(struct gk20a *g);
 u32 gm20b_fb_mmu_debug_rd(struct gk20a *g);
-unsigned int gm20b_fb_compression_page_size(struct gk20a *g);
+u64 gm20b_fb_compression_page_size(struct gk20a *g);
 unsigned int gm20b_fb_compressible_page_size(struct gk20a *g);
-u32 gm20b_fb_compression_align_mask(struct gk20a *g);
+u64 gm20b_fb_compression_align_mask(struct gk20a *g);
 void gm20b_fb_dump_vpr_info(struct gk20a *g);
 void gm20b_fb_dump_wpr_info(struct gk20a *g);
 void gm20b_fb_read_wpr_info(struct gk20a *g, u64 *wpr_base, u64 *wpr_size);
@@ -1,7 +1,7 @@
 /*
  * GP10B FB
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,7 +27,7 @@
 
 #include "fb_gp10b.h"
 
-unsigned int gp10b_fb_compression_page_size(struct gk20a *g)
+u64 gp10b_fb_compression_page_size(struct gk20a *g)
 {
 	return SZ_64K;
 }
@@ -1,7 +1,7 @@
 /*
  * GP10B FB
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,7 +26,7 @@
 #define NVGPU_FB_GP10B_H
 struct gk20a;
 
-unsigned int gp10b_fb_compression_page_size(struct gk20a *g);
+u64 gp10b_fb_compression_page_size(struct gk20a *g);
 unsigned int gp10b_fb_compressible_page_size(struct gk20a *g);
 
 #endif /* NVGPU_FB_GP10B_H */
@@ -719,7 +719,7 @@ struct gpu_ops {
		 * buffer, ctagline is increased when the virtual address
		 * crosses over the compression page boundary.
		 */
-		unsigned int (*compression_page_size)(struct gk20a *g);
+		u64 (*compression_page_size)(struct gk20a *g);
 
		/*
		 * Minimum page size that can be used for compressible kinds.
@@ -730,7 +730,7 @@ struct gpu_ops {
		 * Compressible kind mappings: Mask for the virtual and physical
		 * address bits that must match.
		 */
-		u32 (*compression_align_mask)(struct gk20a *g);
+		u64 (*compression_align_mask)(struct gk20a *g);
 
		void (*dump_vpr_info)(struct gk20a *g);
		void (*dump_wpr_info)(struct gk20a *g);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -51,7 +51,7 @@ int gk20a_alloc_or_get_comptags(struct gk20a *g,
						    buf->dev);
 	u32 offset;
 	int err;
-	unsigned int ctag_granularity;
+	u64 ctag_granularity;
 	u32 lines;
 
 	if (!priv)