diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index d375b13ee..30677a924 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
@@ -928,7 +928,6 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
 		u64 ctag_granularity = g->ops.fb.compression_page_size(g);
 
 		attrs.ctag = (u64)ctag_offset * ctag_granularity;
-
 		/*
 		 * We need to add the buffer_offset within compression_page_size so that
 		 * the programmed ctagline gets increased at compression_page_size
@@ -939,6 +938,12 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
 			attrs.ctag = nvgpu_safe_add_u64(attrs.ctag,
 				buffer_offset & (ctag_granularity - U64(1)));
 	}
+
+#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
+	attrs.cbc_comptagline_mode =
+		g->ops.fb.is_comptagline_mode_enabled != NULL ?
+		g->ops.fb.is_comptagline_mode_enabled(g) : true;
+#endif
 #endif
 
 	attrs.l3_alloc = ((flags & NVGPU_VM_MAP_L3_ALLOC) != 0U);
@@ -1009,7 +1014,13 @@ void nvgpu_gmmu_unmap_locked(struct vm_gk20a *vm,
 		.valid = false,
 		.aperture = APERTURE_INVALID,
 	};
-
+#ifdef CONFIG_NVGPU_COMPRESSION
+#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
+	attrs.cbc_comptagline_mode =
+		g->ops.fb.is_comptagline_mode_enabled != NULL ?
+		g->ops.fb.is_comptagline_mode_enabled(g) : true;
+#endif
+#endif
 	if (va_allocated) {
 		nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 	}
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
index ce2eb9cf0..23d150731 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
@@ -95,58 +95,62 @@ struct nvgpu_gmmu_attrs {
 	 * Min: GMMU_PAGE_SIZE_SMALL
 	 * Max: GMMU_PAGE_SIZE_KERNEL
 	 */
-	u32 pgsz;
+	u32 pgsz;
 	/**
 	 * Kind attributes for mapping.
 	 */
-	u32 kind_v;
+	u32 kind_v;
 #ifdef CONFIG_NVGPU_COMPRESSION
 	/**
 	 * Comptag line in the comptag cache.
 	 * updated every time we write a PTE.
 	 */
-	u64 ctag;
+	u64 ctag;
+	/**
+	 * True if cbc policy is comptagline_mode
+	 */
+	bool cbc_comptagline_mode;
 #endif
 	/**
 	 * Cacheability of the mapping.
 	 * Cacheable if this flag is set to true, else non-cacheable.
 	 */
-	bool cacheable;
+	bool cacheable;
 	/**
 	 * Flag from enum #gk20a_mem_rw_flag
 	 * (i.e gk20a_mem_flag_none, gk20a_mem_flag_read_only, ...).
 	 */
-	enum gk20a_mem_rw_flag rw_flag;
+	enum gk20a_mem_rw_flag rw_flag;
 	/**
 	 * True if the mapping should be sparse.
 	 */
-	bool sparse;
+	bool sparse;
 	/**
 	 * True if the mapping should be Privileged.
 	 */
-	bool priv;
+	bool priv;
 	/**
 	 * True if the PTE should be marked valid.
 	 */
-	bool valid;
+	bool valid;
 	/**
 	 * This flag variable designates where the memory actually
 	 * was allocated from. #nvgpu_aperture.
 	 * (i.e APERTURE_SYSMEM, APERTURE_VIDMEM, ...).
 	 */
-	enum nvgpu_aperture aperture;
+	enum nvgpu_aperture aperture;
 	/**
 	 * When set (i.e True) print debugging info.
 	 */
-	bool debug;
+	bool debug;
 	/**
 	 * True if l3_alloc flag is valid.
 	 */
-	bool l3_alloc;
+	bool l3_alloc;
 	/**
 	 * True if platform_atomic flag is valid.
 	 */
-	bool platform_atomic;
+	bool platform_atomic;
 };
 
 /**
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gops_fb.h b/drivers/gpu/nvgpu/include/nvgpu/gops_fb.h
index 733ab89ed..1e628b4a5 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gops_fb.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gops_fb.h
@@ -388,6 +388,12 @@ struct gops_fb {
 	void (*cbc_configure)(struct gk20a *g, struct nvgpu_cbc *cbc);
 
 	bool (*set_use_full_comp_tag_line)(struct gk20a *g);
+	/*
+	 * Check if comptagline mode is enabled.
+	 * Legacy chips support only comptagline mode.
+	 */
+	bool (*is_comptagline_mode_enabled)(struct gk20a *g);
+
 	/*
 	 * Compression tag line coverage. When mapping a compressible
 	 * buffer, ctagline is increased when the virtual address
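
Note on the new HAL usage (not part of the patch): both nvgpu_gmmu_map_locked() and nvgpu_gmmu_unmap_locked() repeat the same NULL-check-with-fallback when filling attrs.cbc_comptagline_mode, defaulting to comptagline mode on chips that do not implement the new fb op. The sketch below shows how that query could be centralized; the helper name nvgpu_fb_get_cbc_comptagline_mode() is hypothetical and only restates the fallback logic from the hunks above.

/*
 * Illustrative sketch only (assumed helper, not introduced by this patch):
 * centralizes the attrs.cbc_comptagline_mode query duplicated in the map
 * and unmap paths. Chips without the new fb HAL entry are treated as
 * legacy, i.e. comptagline mode is assumed to be enabled.
 */
#include <nvgpu/types.h>
#include <nvgpu/gk20a.h>

static inline bool nvgpu_fb_get_cbc_comptagline_mode(struct gk20a *g)
{
	if (g->ops.fb.is_comptagline_mode_enabled != NULL) {
		return g->ops.fb.is_comptagline_mode_enabled(g);
	}

	/* Legacy chips support only comptagline mode. */
	return true;
}

With such a helper, each call site would reduce to attrs.cbc_comptagline_mode = nvgpu_fb_get_cbc_comptagline_mode(g); under the same CONFIG_NVGPU_COMPRESSION / CONFIG_NVGPU_NON_FUSA / CONFIG_NVGPU_NEXT guards used in the patch.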