gpu: nvgpu: fb: size of compression APIs for mm

The fb APIs compression_page_size() and compression_align_mask() were
returning u32s, but all of their users really need u64s.

This also eliminates MISRA Rule 10.3 violations caused by implicit
conversions to objects of a smaller type.

JIRA NVGPU-2954

Change-Id: I8dc2b434d9564c89c0e8a1b19c4acbe167e339c1
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2075595
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
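
For context, a minimal sketch of the HAL signature change this commit
describes. The op names match the g->ops.fb.compression_page_size(g) call
visible in the diff below, but the struct names fb_ops_old/fb_ops_new and
the standalone typedefs are illustrative and do not reflect the actual
gpu_ops layout in nvgpu:

#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

struct gk20a;   /* opaque driver context, declared elsewhere in nvgpu */

/* Before: the fb compression ops returned 32-bit values, so callers
 * mixing them with 64-bit offsets needed explicit widening casts. */
struct fb_ops_old {
        u32 (*compression_page_size)(struct gk20a *g);
        u32 (*compression_align_mask)(struct gk20a *g);
};

/* After: the same ops return u64, so expressions such as
 * (u64)ctag_offset * ctag_granularity need no extra cast and no
 * implicit narrowing conversion (MISRA Rule 10.3) occurs. */
struct fb_ops_new {
        u64 (*compression_page_size)(struct gk20a *g);
        u64 (*compression_align_mask)(struct gk20a *g);
};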
Author:    Philip Elcan <pelcan@nvidia.com>
Date:      2019-03-18 10:33:29 -04:00
Committed: mobile promotions
Parent:    30fd2a5dcc
Commit:    f9c4d6b60b
8 changed files with 21 additions and 21 deletions

@@ -780,11 +780,11 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
         struct gk20a *g = gk20a_from_vm(vm);
         int err = 0;
         bool allocated = false;
-        u32 ctag_granularity = g->ops.fb.compression_page_size(g);
+        u64 ctag_granularity = g->ops.fb.compression_page_size(g);
         struct nvgpu_gmmu_attrs attrs = {
                 .pgsz = pgsz_idx,
                 .kind_v = kind_v,
-                .ctag = (u64)ctag_offset * (u64)ctag_granularity,
+                .ctag = (u64)ctag_offset * ctag_granularity,
                 .cacheable = ((flags & NVGPU_VM_MAP_CACHEABLE) != 0U),
                 .rw_flag = rw_flag,
                 .sparse = sparse,
@@ -800,7 +800,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
          * boundaries.
          */
         if (attrs.ctag != 0ULL) {
-                attrs.ctag += buffer_offset & (U64(ctag_granularity) - U64(1));
+                attrs.ctag += buffer_offset & (ctag_granularity - U64(1));
         }
         attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
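
And a standalone sketch of the ctag arithmetic from the two hunks above,
runnable outside the driver. The variable names mirror the diff, but the
values (including the 128 KB granularity) are made up for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
#define U64(x) ((u64)(x))

int main(void)
{
        /* Illustrative values only; in the driver these come from
         * g->ops.fb.compression_page_size(g) and the map request. */
        u64 ctag_granularity = 128ULL * 1024ULL;
        u64 ctag_offset = 3ULL;
        u64 buffer_offset = 0x21000ULL;

        /* Base comptag address, as in the attrs initializer above. */
        u64 ctag = ctag_offset * ctag_granularity;

        /* Keep the sub-granularity part of the buffer offset so mappings
         * that do not start on a compression page boundary stay aligned. */
        if (ctag != 0ULL) {
                ctag += buffer_offset & (ctag_granularity - U64(1));
        }

        printf("ctag = 0x%llx\n", (unsigned long long)ctag);
        return 0;
}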