mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-23 01:50:07 +03:00
Rename the nvgpu DMA APIs from gk20a_gmmu_alloc* to nvgpu_dma_alloc*. This
better reflects the purpose of the APIs (to allocate DMA suitable memory)
and avoids confusion with GMMU related code.

JIRA NVGPU-12

Change-Id: I673d607db56dd6e44f02008dc7b5293209ef67bf
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325548
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
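The rename is purely mechanical at call sites. As an illustrative sketch
(the old spelling gk20a_gmmu_alloc_flags_sys is inferred from the
gk20a_gmmu_alloc* pattern named above; err and mem are placeholder
variables, not from this file):

    /* before: old gk20a_gmmu_* spelling (inferred from the pattern above) */
    err = gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
                                     compbit_backing_size, &mem);

    /* after: renamed DMA API, as used by gk20a_ltc_alloc_phys_cbc() below */
    err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
                                    compbit_backing_size, &mem);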
159 lines
4.5 KiB
C
/*
 * drivers/video/tegra/host/gk20a/ltc_common.c
 *
 * GK20A Graphics
 *
 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/delay.h>

#include <nvgpu/dma.h>

#include "gk20a.h"
#include "gr_gk20a.h"

/*
 * Sets the ZBC color for the passed index.
 */
static void gk20a_ltc_set_zbc_color_entry(struct gk20a *g,
					  struct zbc_entry *color_val,
					  u32 index)
{
	u32 i;
	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));

	for (i = 0;
	     i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++) {
		gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i),
			     color_val->color_l2[i]);
	}
	gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
}

/*
 * Sets the ZBC depth for the passed index.
 */
static void gk20a_ltc_set_zbc_depth_entry(struct gk20a *g,
					  struct zbc_entry *depth_val,
					  u32 index)
{
	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(),
		     depth_val->depth);

	gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
}

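/*
 * Allocate the compbit backing store in system memory, forcing a
 * physically contiguous allocation (NVGPU_DMA_FORCE_CONTIGUOUS).
 */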
static int gk20a_ltc_alloc_phys_cbc(struct gk20a *g,
				    size_t compbit_backing_size)
{
	struct gr_gk20a *gr = &g->gr;

	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
					 compbit_backing_size,
					 &gr->compbit_store.mem);
}

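/*
 * Allocate the compbit backing store in system memory without creating
 * a kernel CPU mapping (NVGPU_DMA_NO_KERNEL_MAPPING).
 */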
static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
				    size_t compbit_backing_size)
{
	struct gr_gk20a *gr = &g->gr;

	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
					 compbit_backing_size,
					 &gr->compbit_store.mem);
}

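/*
 * Program the compbit backing store base into the CBC registers and
 * invalidate all comptag lines. The programmed value is the store's
 * IOVA, shifted by the CBC base alignment and divided across the LTCs,
 * rounded up so that it never falls below the actual store address.
 */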
static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
{
	u32 max_size = gr->max_comptag_mem;
	u32 max_comptag_lines = max_size << 3;

	u32 compbit_base_post_divide;
	u64 compbit_base_post_multiply64;
	u64 compbit_store_iova;
	u64 compbit_base_post_divide64;
	struct gk20a_platform *platform = dev_get_drvdata(g->dev);

	if (platform->is_fmodel)
		compbit_store_iova = gk20a_mem_phys(&gr->compbit_store.mem);
	else
		compbit_store_iova = g->ops.mm.get_iova_addr(g,
				gr->compbit_store.mem.sgt->sgl, 0);

	compbit_base_post_divide64 = compbit_store_iova >>
		ltc_ltcs_ltss_cbc_base_alignment_shift_v();

	do_div(compbit_base_post_divide64, g->ltc_count);
	compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);

	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
		g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();

	if (compbit_base_post_multiply64 < compbit_store_iova)
		compbit_base_post_divide++;

	/* Bug 1477079 indicates sw adjustment on the posted divided base. */
	if (g->ops.ltc.cbc_fix_config)
		compbit_base_post_divide =
			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);

	gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
		     compbit_base_post_divide);

	gk20a_dbg(gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
		  "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
		  (u32)(compbit_store_iova >> 32),
		  (u32)(compbit_store_iova & 0xffffffff),
		  compbit_base_post_divide);

	gr->compbit_store.base_hw = compbit_base_post_divide;

	g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
			    0, max_comptag_lines - 1);
}

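/*
 * Propagate the debugfs L2 enable setting to the hardware: clear the
 * tstg bypass bit when caching is enabled, set it when caching is
 * disabled.
 */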
#ifdef CONFIG_DEBUG_FS
static void gk20a_ltc_sync_debugfs(struct gk20a *g)
{
	u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();

	nvgpu_spinlock_acquire(&g->debugfs_lock);
	if (g->mm.ltc_enabled != g->mm.ltc_enabled_debug) {
		u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());

		if (g->mm.ltc_enabled_debug)
			/* bypass disabled (normal caching ops) */
			reg &= ~reg_f;
		else
			/* bypass enabled (no caching) */
			reg |= reg_f;

		gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
		g->mm.ltc_enabled = g->mm.ltc_enabled_debug;
	}
	nvgpu_spinlock_release(&g->debugfs_lock);
}
#endif