gpu: nvgpu: Move FB size query to FB
Vidmem size query was in mm_xxx.c. It involves reading a register from
FB, so move the query to FB HAL.

JIRA NVGPU-1063

Change-Id: I30dfd2c4fdcdd6c841f85aaab7431d52473759bd
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1801425
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 83efad7adb
parent a02e1c1f0b
committed by mobile promotions
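Background for the hunks below: the query being moved reads the
fb_mmu_local_memory_range register and turns its mag, scale and ecc_mode
fields into a byte count. The standalone sketch that follows only
reproduces that arithmetic with hypothetical field values (mag = 16,
scale = 10, ecc = 1 are made-up examples, not values read from any GPU),
so the shift and the 15/16 ECC adjustment in the new gp106/gv100
functions are easier to follow.

/*
 * Illustration only, not part of the change: the arithmetic behind
 * g->ops.fb.get_vidmem_size(). Field values below are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

#define SZ_1M (1024UL * 1024UL)	/* same value as nvgpu's SZ_1M */

int main(void)
{
	unsigned int mag = 16U;		/* ..._lower_mag_v(range), assumed   */
	unsigned int scale = 10U;	/* ..._lower_scale_v(range), assumed */
	unsigned int ecc = 1U;		/* ..._ecc_mode_v(range), assumed    */

	/* Raw size is (mag << scale) MiB: 16 << 10 = 16384 MiB. */
	size_t bytes = ((size_t)mag << scale) * SZ_1M;

	/*
	 * With ECC enabled, 1/16 of vidmem is set aside, leaving 15/16:
	 * 16384 MiB / 16 * 15 = 15360 MiB usable.
	 */
	if (ecc)
		bytes = bytes / 16U * 15U;

	printf("vidmem: %zu bytes\n", bytes);
	return 0;
}

With those example values the raw size works out to 16384 MiB and the
ECC-adjusted size to 15360 MiB.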
@@ -295,7 +295,6 @@ nvgpu-y += \
 	gp10b/gp10b.o \
 	gp10b/ecc_gp10b.o \
 	gp106/hal_gp106.o \
-	gp106/mm_gp106.o \
 	gp106/flcn_gp106.o \
 	gp106/pmu_gp106.o \
 	gp106/mclk_gp106.o \

@@ -191,7 +191,6 @@ srcs := os/posix/nvgpu.c \
 	gv11b/regops_gv11b.c \
 	gv11b/ecc_gv11b.c \
 	gp106/hal_gp106.c \
-	gp106/mm_gp106.c \
 	gp106/flcn_gp106.c \
 	gp106/pmu_gp106.c \
 	gp106/mclk_gp106.c \

@@ -52,3 +52,18 @@ void gp106_fb_reset(struct gk20a *g)
 	val &= ~fb_mmu_priv_level_mask_write_violation_m();
 	gk20a_writel(g, fb_mmu_priv_level_mask_r(), val);
 }
+
+size_t gp106_fb_get_vidmem_size(struct gk20a *g)
+{
+	u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r());
+	u32 mag = fb_mmu_local_memory_range_lower_mag_v(range);
+	u32 scale = fb_mmu_local_memory_range_lower_scale_v(range);
+	u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
+	size_t bytes = ((size_t)mag << scale) * SZ_1M;
+
+	if (ecc) {
+		bytes = bytes / 16U * 15U;
+	}
+
+	return bytes;
+}

@@ -25,4 +25,6 @@
 struct gpu_ops;
 
 void gp106_fb_reset(struct gk20a *g);
+size_t gp106_fb_get_vidmem_size(struct gk20a *g);
+
 #endif

@@ -281,3 +281,18 @@ int gv100_fb_enable_nvlink(struct gk20a *g)
 
 	return 0;
 }
+
+size_t gv100_fb_get_vidmem_size(struct gk20a *g)
+{
+	u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r());
+	u32 mag = fb_mmu_local_memory_range_lower_mag_v(range);
+	u32 scale = fb_mmu_local_memory_range_lower_scale_v(range);
+	u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
+	size_t bytes = ((size_t)mag << scale) * SZ_1M;
+
+	if (ecc) {
+		bytes = bytes / 16U * 15U;
+	}
+
+	return bytes;
+}

@@ -33,4 +33,6 @@ void gv100_fb_disable_hub_intr(struct gk20a *g);
 int gv100_fb_memory_unlock(struct gk20a *g);
 int gv100_fb_init_nvlink(struct gk20a *g);
 int gv100_fb_enable_nvlink(struct gk20a *g);
+size_t gv100_fb_get_vidmem_size(struct gk20a *g);
+
 #endif

@@ -41,7 +41,7 @@ void nvgpu_vidmem_destroy(struct gk20a *g)
 {
 	struct nvgpu_timeout timeout;
 
-	if (!g->ops.mm.get_vidmem_size)
+	if (!g->ops.fb.get_vidmem_size)
 		return;
 
 	nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER);

@@ -293,8 +293,8 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
 	static struct nvgpu_alloc_carveout bootstrap_co =
 		NVGPU_CARVEOUT("bootstrap-region", 0, 0);
 
-	size = g->ops.mm.get_vidmem_size ?
-		g->ops.mm.get_vidmem_size(g) : 0;
+	size = g->ops.fb.get_vidmem_size ?
+		g->ops.fb.get_vidmem_size(g) : 0;
 	if (!size)
 		return 0;
 

@@ -584,6 +584,7 @@ struct gpu_ops {
 		void (*fault_buf_set_state_hw)(struct gk20a *g,
 			u32 index, u32 state);
 		void (*fault_buf_configure_hw)(struct gk20a *g, u32 index);
+		size_t (*get_vidmem_size)(struct gk20a *g);
 	} fb;
 	struct {
 		void (*slcg_bus_load_gating_prod)(struct gk20a *g, bool prod);

@@ -963,7 +964,6 @@ struct gpu_ops {
 			struct vm_gk20a *vm);
 		u64 (*gpu_phys_addr)(struct gk20a *g,
 			struct nvgpu_gmmu_attrs *attrs, u64 phys);
-		size_t (*get_vidmem_size)(struct gk20a *g);
 		int (*alloc_inst_block)(struct gk20a *g,
 			struct nvgpu_mem *inst_block);
 		void (*init_inst_block)(struct nvgpu_mem *inst_block,

@@ -81,7 +81,6 @@
 #include "gp106/bios_gp106.h"
 #include "gp106/fifo_gp106.h"
 #include "gp106/clk_gp106.h"
-#include "gp106/mm_gp106.h"
 #include "gp106/pmu_gp106.h"
 #include "gp106/gr_ctx_gp106.h"
 #include "gp106/gr_gp106.h"

@@ -426,6 +425,7 @@ static const struct gpu_ops gp106_ops = {
 		.set_debug_mode = gm20b_fb_set_debug_mode,
 		.tlb_invalidate = gm20b_fb_tlb_invalidate,
 		.mem_unlock = NULL,
+		.get_vidmem_size = gp106_fb_get_vidmem_size,
 	},
 	.clock_gating = {
 		.slcg_bus_load_gating_prod =

@@ -598,7 +598,6 @@ static const struct gpu_ops gp106_ops = {
 		.mmu_fault_pending = gk20a_fifo_mmu_fault_pending,
 		.init_bar2_vm = gp10b_init_bar2_vm,
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
-		.get_vidmem_size = gp106_mm_get_vidmem_size,
 		.get_kind_invalid = gm20b_get_kind_invalid,
 		.get_kind_pitch = gm20b_get_kind_pitch,
 	},

@@ -1,47 +0,0 @@
-/*
- * GP106 memory management
- *
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <nvgpu/sizes.h>
-#include <nvgpu/io.h>
-
-#include "gk20a/gk20a.h"
-#include "gp10b/mm_gp10b.h"
-#include "gp106/mm_gp106.h"
-
-#include <nvgpu/hw/gp106/hw_fb_gp106.h>
-
-size_t gp106_mm_get_vidmem_size(struct gk20a *g)
-{
-	u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r());
-	u32 mag = fb_mmu_local_memory_range_lower_mag_v(range);
-	u32 scale = fb_mmu_local_memory_range_lower_scale_v(range);
-	u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
-	size_t bytes = ((size_t)mag << scale) * SZ_1M;
-
-	if (ecc) {
-		bytes = bytes / 16 * 15;
-	}
-
-	return bytes;
-}

@@ -1,32 +0,0 @@
-/*
- * GP106 memory management
- *
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef MM_GP106_H
-#define MM_GP106_H
-
-struct gk20a;
-
-size_t gp106_mm_get_vidmem_size(struct gk20a *g);
-
-#endif

@@ -510,6 +510,7 @@ static const struct gpu_ops gv100_ops = {
 		.is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
 		.fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
 		.fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
+		.get_vidmem_size = gv100_fb_get_vidmem_size,
 	},
 	.clock_gating = {
 		.slcg_bus_load_gating_prod =

@@ -685,7 +686,6 @@ static const struct gpu_ops gv100_ops = {
 		.get_default_big_page_size = gp10b_mm_get_default_big_page_size,
 		.gpu_phys_addr = gv11b_gpu_phys_addr,
 		.get_mmu_levels = gp10b_mm_get_mmu_levels,
-		.get_vidmem_size = gv100_mm_get_vidmem_size,
 		.init_pdb = gp10b_mm_init_pdb,
 		.init_mm_setup_hw = gv11b_init_mm_setup_hw,
 		.is_bar1_supported = gv11b_mm_is_bar1_supported,

@@ -28,22 +28,6 @@
 #include "gk20a/gk20a.h"
 #include "gv100/mm_gv100.h"
 
-#include <nvgpu/hw/gv100/hw_fb_gv100.h>
-
-size_t gv100_mm_get_vidmem_size(struct gk20a *g)
-{
-	u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r());
-	u32 mag = fb_mmu_local_memory_range_lower_mag_v(range);
-	u32 scale = fb_mmu_local_memory_range_lower_scale_v(range);
-	u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
-	size_t bytes = ((size_t)mag << scale) * SZ_1M;
-
-	if (ecc)
-		bytes = bytes / 16 * 15;
-
-	return bytes;
-}
-
 u32 gv100_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op)
 {
 	switch (op) {

@@ -27,7 +27,6 @@
 
 struct gk20a;
 
-size_t gv100_mm_get_vidmem_size(struct gk20a *g);
 u32 gv100_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op);
 
 #endif