gpu: nvgpu: fb: size of compression apis for mm

The fb APIs compression_page_size() and compression_align_mask() returned
32-bit values, but all of their callers need u64s.

This also eliminates MISRA Rule 10.3 violations caused by implicit
assignments to objects of a narrower type.
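
As a standalone sketch of why the width matters (u32/u64 typedefs stand
in for the kernel's; the values are hypothetical): with a 32-bit
granularity, the ctag multiply had to be widened by hand at every call
site, and without the casts it wraps in 32 bits.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* With a u64 granularity the multiply is 64-bit by default; the old
     * u32 return type forced (u64) casts on both operands at each call
     * site, which is the MISRA noise this change removes. */
    static u64 ctag_base(u32 ctag_offset, u64 ctag_granularity)
    {
            return (u64)ctag_offset * ctag_granularity;
    }

    int main(void)
    {
            u32 offset = 0x40000U;      /* hypothetical ctag offset */
            u32 gran32 = 128U * 1024U;  /* SZ_128K as the old u32 */

            /* u32 * u32 wraps: prints 0x0 instead of 0x800000000. */
            printf("0x%x\n", offset * gran32);
            /* 64-bit arithmetic keeps the full value. */
            printf("0x%llx\n", (unsigned long long)ctag_base(offset, gran32));
            return 0;
    }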

JIRA NVGPU-2954

Change-Id: I8dc2b434d9564c89c0e8a1b19c4acbe167e339c1
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2075595
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Philip Elcan
Date:      2019-03-18 10:33:29 -04:00
Committer: mobile promotions
Parent:    30fd2a5dcc
Commit:    f9c4d6b60b

8 changed files with 21 additions and 21 deletions


@@ -780,11 +780,11 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 	int err = 0;
 	bool allocated = false;
-	u32 ctag_granularity = g->ops.fb.compression_page_size(g);
+	u64 ctag_granularity = g->ops.fb.compression_page_size(g);
 	struct nvgpu_gmmu_attrs attrs = {
 		.pgsz = pgsz_idx,
 		.kind_v = kind_v,
-		.ctag = (u64)ctag_offset * (u64)ctag_granularity,
+		.ctag = (u64)ctag_offset * ctag_granularity,
 		.cacheable = ((flags & NVGPU_VM_MAP_CACHEABLE) != 0U),
 		.rw_flag = rw_flag,
 		.sparse = sparse,
@@ -800,7 +800,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	 * boundaries.
 	 */
 	if (attrs.ctag != 0ULL) {
-		attrs.ctag += buffer_offset & (U64(ctag_granularity) - U64(1));
+		attrs.ctag += buffer_offset & (ctag_granularity - U64(1));
 	}
 
 	attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
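
A worked example of the mask in the second hunk, with hypothetical
values (gm20b's 128 KiB granularity, a buffer offset beyond 4 GiB): now
that ctag_granularity is already u64, the U64() widening macro on it is
redundant and only the literal still needs widening.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ctag_granularity = 128U * 1024U;  /* SZ_128K */
            uint64_t buffer_offset = 0x100030000ULL;   /* > 4 GiB */

            /* Offset within one compression page: prints 0x10000. */
            printf("0x%llx\n", (unsigned long long)
                   (buffer_offset & (ctag_granularity - 1ULL)));
            return 0;
    }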


@@ -292,7 +292,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 		  "PTE: i=%-4u size=%-2u | "
 		  "GPU %#-12llx phys %#-12llx "
 		  "pgsz: %3dkb perm=%-2s kind=%#02x APT=%-6s %c%c%c%c%c "
-		  "ctag=0x%08x "
+		  "ctag=0x%08llx "
 		  "[0x%08x, 0x%08x]",
 		  vm->name,
 		  pd_idx, l->entry_size,
@@ -306,7 +306,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 		  attrs->priv ? 'P' : '-',
 		  attrs->valid ? 'V' : '-',
 		  attrs->platform_atomic ? 'A' : '-',
-		  (u32)attrs->ctag / g->ops.fb.compression_page_size(g),
+		  attrs->ctag / g->ops.fb.compression_page_size(g),
 		  pte_w[1], pte_w[0]);
 
 	nvgpu_pd_write(g, pd, (size_t)pd_offset + (size_t)0, pte_w[0]);
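
The format-string change above is forced by the type change: attrs->ctag
is now printed as a full u64, so the conversion must be "%llx"; passing
a 64-bit value to "%x" is undefined behavior, which the old code avoided
by truncating with a (u32) cast. A standalone sketch:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long ctag = 0x123456789ULL;

            /* "0x%08x" would require a truncating (u32) cast; "0x%08llx"
             * consumes all 64 bits: prints ctag=0x123456789. */
            printf("ctag=0x%08llx\n", ctag);
            return 0;
    }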


@@ -1,7 +1,7 @@
 /*
  * GM20B GPC MMU
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -191,7 +191,7 @@ u32 gm20b_fb_mmu_debug_rd(struct gk20a *g)
 	return gk20a_readl(g, fb_mmu_debug_rd_r());
 }
 
-unsigned int gm20b_fb_compression_page_size(struct gk20a *g)
+u64 gm20b_fb_compression_page_size(struct gk20a *g)
 {
 	return SZ_128K;
 }
@@ -201,9 +201,9 @@ unsigned int gm20b_fb_compressible_page_size(struct gk20a *g)
 	return SZ_64K;
 }
 
-u32 gm20b_fb_compression_align_mask(struct gk20a *g)
+u64 gm20b_fb_compression_align_mask(struct gk20a *g)
 {
-	return SZ_64K - 1U;
+	return SZ_64K - 1UL;
 }
 
 void gm20b_fb_dump_vpr_info(struct gk20a *g)
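
Per the gpu_ops comment later in this diff, the align mask selects the
virtual and physical address bits that must match for compressible
mappings. A hypothetical helper (not part of this change) showing that
use:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t u64;  /* stand-in for the kernel typedef */

    /* Hypothetical check: a compressible mapping works only if GPU VA
     * and physical address agree in the masked low bits (SZ_64K - 1). */
    static bool comptag_alignment_ok(u64 gpu_va, u64 phys_addr, u64 align_mask)
    {
            return (gpu_va & align_mask) == (phys_addr & align_mask);
    }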


@@ -1,7 +1,7 @@
 /*
  * GM20B FB
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,9 +39,9 @@ u32 gm20b_fb_mmu_ctrl(struct gk20a *g);
 u32 gm20b_fb_mmu_debug_ctrl(struct gk20a *g);
 u32 gm20b_fb_mmu_debug_wr(struct gk20a *g);
 u32 gm20b_fb_mmu_debug_rd(struct gk20a *g);
-unsigned int gm20b_fb_compression_page_size(struct gk20a *g);
+u64 gm20b_fb_compression_page_size(struct gk20a *g);
 unsigned int gm20b_fb_compressible_page_size(struct gk20a *g);
-u32 gm20b_fb_compression_align_mask(struct gk20a *g);
+u64 gm20b_fb_compression_align_mask(struct gk20a *g);
 void gm20b_fb_dump_vpr_info(struct gk20a *g);
 void gm20b_fb_dump_wpr_info(struct gk20a *g);
 void gm20b_fb_read_wpr_info(struct gk20a *g, u64 *wpr_base, u64 *wpr_size);


@@ -1,7 +1,7 @@
 /*
  * GP10B FB
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,7 +27,7 @@
 #include "fb_gp10b.h"
 
-unsigned int gp10b_fb_compression_page_size(struct gk20a *g)
+u64 gp10b_fb_compression_page_size(struct gk20a *g)
 {
 	return SZ_64K;
 }


@@ -1,7 +1,7 @@
 /*
  * GP10B FB
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,7 +26,7 @@
 #define NVGPU_FB_GP10B_H
 
 struct gk20a;
 
-unsigned int gp10b_fb_compression_page_size(struct gk20a *g);
+u64 gp10b_fb_compression_page_size(struct gk20a *g);
 unsigned int gp10b_fb_compressible_page_size(struct gk20a *g);
 
 #endif /* NVGPU_FB_GP10B_H */


@@ -719,7 +719,7 @@ struct gpu_ops {
 		 * buffer, ctagline is increased when the virtual address
 		 * crosses over the compression page boundary.
 		 */
-		unsigned int (*compression_page_size)(struct gk20a *g);
+		u64 (*compression_page_size)(struct gk20a *g);
 
 		/*
 		 * Minimum page size that can be used for compressible kinds.
@@ -730,7 +730,7 @@ struct gpu_ops {
 		 * Compressible kind mappings: Mask for the virtual and physical
 		 * address bits that must match.
 		 */
-		u32 (*compression_align_mask)(struct gk20a *g);
+		u64 (*compression_align_mask)(struct gk20a *g);
 		void (*dump_vpr_info)(struct gk20a *g);
 		void (*dump_wpr_info)(struct gk20a *g);
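
Common code reaches these operations only through the per-chip HAL
table, which is why the pointer types and every chip implementation must
change in lockstep. The call pattern, as already seen in the first hunk
of this diff:

    /* g->ops.fb is populated per chip (gm20b_fb_*, gp10b_fb_*, ...). */
    u64 ctag_granularity = g->ops.fb.compression_page_size(g);
    u64 align_mask = g->ops.fb.compression_align_mask(g);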


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -51,7 +51,7 @@ int gk20a_alloc_or_get_comptags(struct gk20a *g,
 						       buf->dev);
 	u32 offset;
 	int err;
-	unsigned int ctag_granularity;
+	u64 ctag_granularity;
 	u32 lines;
 
 	if (!priv)
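
ctag_granularity feeds the comptag-line computation in this function; a
hedged sketch of that rounding (the exact allocation code falls outside
the hunk):

    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Hypothetical: one comptag line covers ctag_granularity bytes, so
     * a buffer needs its byte size rounded up to whole lines. */
    static u32 comptag_lines(u64 buf_size, u64 ctag_granularity)
    {
            return (u32)((buf_size + ctag_granularity - 1ULL) /
                         ctag_granularity);
    }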