linux-nvgpu/drivers/gpu/nvgpu/gk20a/ltc_common.c
Alex Waterman 66a2511a36 gpu: nvgpu: Begin removing variables in struct gk20a
Begin removing all of the myriad flag variables in struct gk20a,
replacing them with a single API that checks whether a given flag is
enabled or disabled. The API is as follows:

  bool nvgpu_is_enabled(struct gk20a *g, int flag);
  bool __nvgpu_set_enabled(struct gk20a *g, int flag, bool state);

These APIs allow many of the gk20a flags to be replaced by defines.
This makes flag usage consistent and saves a small amount of memory in
struct gk20a. It also makes struct gk20a easier to read, since there
is less clutter scattered throughout.
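
As a rough usage sketch (not part of this change's actual diff): a call
site that used to test a dedicated struct member, e.g.

  if (g->is_fmodel)

becomes a query against a flag define:

  if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))

with the flag latched once at probe time via something like
__nvgpu_set_enabled(g, NVGPU_IS_FMODEL, true). NVGPU_IS_FMODEL is a
real flag define (see gk20a_ltc_init_cbc() below); the is_fmodel
member here only stands in for whichever per-flag field a call site
used to read.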

JIRA NVGPU-84

Change-Id: I6525cecbe97c4e8379e5f53e29ef0b4dbd1a7fc2
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1488049
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2017-05-30 13:24:35 -07:00

156 lines
4.5 KiB
C

/*
 * drivers/video/tegra/host/gk20a/ltc_common.c
 *
 * GK20A Graphics
 *
 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <nvgpu/dma.h>
#include <nvgpu/enabled.h>

#include "gk20a.h"
#include "gr_gk20a.h"

/*
 * Sets the ZBC color for the passed index.
 */
static void gk20a_ltc_set_zbc_color_entry(struct gk20a *g,
                                          struct zbc_entry *color_val,
                                          u32 index)
{
        u32 i;
        u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;

        gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
                     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));

        for (i = 0;
             i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++) {
                gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i),
                             color_val->color_l2[i]);
        }

        /* Read back the index register so the writes above have posted. */
        gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
}

/*
 * Sets the ZBC depth for the passed index.
 */
static void gk20a_ltc_set_zbc_depth_entry(struct gk20a *g,
                                          struct zbc_entry *depth_val,
                                          u32 index)
{
        u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;

        gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
                     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));

        gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(),
                     depth_val->depth);

        gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
}

static int gk20a_ltc_alloc_phys_cbc(struct gk20a *g,
                                    size_t compbit_backing_size)
{
        struct gr_gk20a *gr = &g->gr;

        /* Back the compbit store with physically contiguous sysmem. */
        return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
                                         compbit_backing_size,
                                         &gr->compbit_store.mem);
}

static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
                                    size_t compbit_backing_size)
{
        struct gr_gk20a *gr = &g->gr;

        /* No CPU kernel mapping is needed; only the GPU touches this. */
        return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
                                         compbit_backing_size,
                                         &gr->compbit_store.mem);
}

static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
{
        u32 max_size = gr->max_comptag_mem;
        /* max_comptag_mem is in MB; one tag line covers 128KB (8 per MB). */
        u32 max_comptag_lines = max_size << 3;
        u32 compbit_base_post_divide;
        u64 compbit_base_post_multiply64;
        u64 compbit_store_iova;
        u64 compbit_base_post_divide64;

        if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
                compbit_store_iova = gk20a_mem_phys(&gr->compbit_store.mem);
        else
                compbit_store_iova = g->ops.mm.get_iova_addr(g,
                                gr->compbit_store.mem.priv.sgt->sgl, 0);

        compbit_base_post_divide64 = compbit_store_iova >>
                ltc_ltcs_ltss_cbc_base_alignment_shift_v();

        /* Divide the aligned base across the LTCs. */
        do_div(compbit_base_post_divide64, g->ltc_count);

        compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);

        /*
         * Multiplying back out can come up short if the division
         * truncated; bump the base so it never falls below the
         * compbit store's IOVA.
         */
        compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
                g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();

        if (compbit_base_post_multiply64 < compbit_store_iova)
                compbit_base_post_divide++;

        /* Bug 1477079 indicates sw adjustment on the posted divided base. */
        if (g->ops.ltc.cbc_fix_config)
                compbit_base_post_divide =
                        g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);

        gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
                     compbit_base_post_divide);

        gk20a_dbg(gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
                  "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
                  (u32)(compbit_store_iova >> 32),
                  (u32)(compbit_store_iova & 0xffffffff),
                  compbit_base_post_divide);

        gr->compbit_store.base_hw = compbit_base_post_divide;

        /* Invalidate every comptag line in the backing store. */
        g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
                            0, max_comptag_lines - 1);
}

#ifdef CONFIG_DEBUG_FS
static void gk20a_ltc_sync_debugfs(struct gk20a *g)
{
        u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();

        nvgpu_spinlock_acquire(&g->debugfs_lock);
        if (g->mm.ltc_enabled != g->mm.ltc_enabled_debug) {
                u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());

                if (g->mm.ltc_enabled_debug)
                        /* bypass disabled (normal caching ops) */
                        reg &= ~reg_f;
                else
                        /* bypass enabled (no caching) */
                        reg |= reg_f;
                gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);

                g->mm.ltc_enabled = g->mm.ltc_enabled_debug;
        }
        nvgpu_spinlock_release(&g->debugfs_lock);
}
#endif