Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Implement common nvgpu_mem_rd* functions
The nvgpu_mem_rd*() functions were implemented once per OS. Each
implementation also called nvgpu_pramin_access_batched(), so a large
portion of the PRAMIN access logic lived in OS-specific code.

Make the implementation of these functions generic, move all PRAMIN
logic into the PRAMIN unit, and simplify the interface PRAMIN provides.

Change-Id: I1acb9e8d7d424325dc73314d5738cb2c9ebf7692
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1753708
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: cf2ac655fd
Commit: 6ea52c59b0
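Note: the new common implementation is not part of the hunks shown below, which only cover the Linux-specific file. As a rough sketch only, the generic nvgpu_mem_rd32() the commit message describes could look like the code beneath; nvgpu_pramin_rd_n() is a hypothetical name for the simplified PRAMIN read helper, not an identifier taken from this commit.

u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
{
	u32 data = 0;

	if (mem->aperture == APERTURE_SYSMEM) {
		u32 *ptr = mem->cpu_va;

		WARN_ON(!ptr);
		data = ptr[w];
	} else if (mem->aperture == APERTURE_VIDMEM) {
		/* Assumed helper: all PRAMIN window bookkeeping now
		 * lives inside the PRAMIN unit, so common code needs
		 * no OS-specific batch callback. */
		nvgpu_pramin_rd_n(g, mem, w * sizeof(u32), sizeof(u32),
				&data);
	} else {
		WARN_ON("Accessing unallocated nvgpu_mem");
	}

	return data;
}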
@@ -31,6 +31,18 @@ void nvgpu_writel(struct gk20a *g, u32 r, u32 v)
 	}
 }
 
+void nvgpu_writel_relaxed(struct gk20a *g, u32 r, u32 v)
+{
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+
+	if (unlikely(!l->regs)) {
+		__gk20a_warn_on_no_regs();
+		nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
+	} else {
+		writel_relaxed(v, l->regs + r);
+	}
+}
+
 u32 nvgpu_readl(struct gk20a *g, u32 r)
 {
 	u32 v = __nvgpu_readl(g, r);
@@ -48,207 +48,6 @@ static u64 __nvgpu_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
 	return ipa;
 }
 
-static void pramin_access_batch_rd_n(struct gk20a *g, u32 start, u32 words, u32 **arg)
-{
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-	u32 r = start, *dest_u32 = *arg;
-
-	if (!l->regs) {
-		__gk20a_warn_on_no_regs();
-		return;
-	}
-
-	while (words--) {
-		*dest_u32++ = gk20a_readl(g, r);
-		r += sizeof(u32);
-	}
-
-	*arg = dest_u32;
-}
-
-u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
-{
-	u32 data = 0;
-
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u32 *ptr = mem->cpu_va;
-
-		WARN_ON(!ptr);
-		data = ptr[w];
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
-#endif
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 value;
-		u32 *p = &value;
-
-		nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
-				sizeof(u32), pramin_access_batch_rd_n, &p);
-
-		data = value;
-
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-
-	return data;
-}
-
-u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
-{
-	WARN_ON(offset & 3);
-	return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
-}
-
-void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
-		u32 offset, void *dest, u32 size)
-{
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u8 *src = (u8 *)mem->cpu_va + offset;
-
-		WARN_ON(!mem->cpu_va);
-		memcpy(dest, src, size);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		if (size)
-			nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
-					src, *dest, size);
-#endif
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 *dest_u32 = dest;
-
-		nvgpu_pramin_access_batched(g, mem, offset, size,
-				pramin_access_batch_rd_n, &dest_u32);
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-}
-
-static void pramin_access_batch_wr_n(struct gk20a *g, u32 start, u32 words, u32 **arg)
-{
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-	u32 r = start, *src_u32 = *arg;
-
-	if (!l->regs) {
-		__gk20a_warn_on_no_regs();
-		return;
-	}
-
-	while (words--) {
-		writel_relaxed(*src_u32++, l->regs + r);
-		r += sizeof(u32);
-	}
-
-	*arg = src_u32;
-}
-
-void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
-{
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u32 *ptr = mem->cpu_va;
-
-		WARN_ON(!ptr);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
-#endif
-		ptr[w] = data;
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 value = data;
-		u32 *p = &value;
-
-		nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
-				sizeof(u32), pramin_access_batch_wr_n, &p);
-		if (!mem->skip_wmb)
-			wmb();
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-}
-
-void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
-{
-	WARN_ON(offset & 3);
-	nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
-}
-
-void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
-		void *src, u32 size)
-{
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u8 *dest = (u8 *)mem->cpu_va + offset;
-
-		WARN_ON(!mem->cpu_va);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		if (size)
-			nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
-					dest, *src, size);
-#endif
-		memcpy(dest, src, size);
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 *src_u32 = src;
-
-		nvgpu_pramin_access_batched(g, mem, offset, size,
-				pramin_access_batch_wr_n, &src_u32);
-		if (!mem->skip_wmb)
-			wmb();
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-}
-
-static void pramin_access_batch_set(struct gk20a *g, u32 start, u32 words, u32 **arg)
-{
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-	u32 r = start, repeat = **arg;
-
-	if (!l->regs) {
-		__gk20a_warn_on_no_regs();
-		return;
-	}
-
-	while (words--) {
-		writel_relaxed(repeat, l->regs + r);
-		r += sizeof(u32);
-	}
-}
-
-void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
-		u32 c, u32 size)
-{
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-	WARN_ON(c & ~0xff);
-
-	c &= 0xff;
-
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u8 *dest = (u8 *)mem->cpu_va + offset;
-
-		WARN_ON(!mem->cpu_va);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		if (size)
-			nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x [times %d]",
-					dest, c, size);
-#endif
-		memset(dest, c, size);
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
-		u32 *p = &repeat_value;
-
-		nvgpu_pramin_access_batched(g, mem, offset, size,
-				pramin_access_batch_set, &p);
-		if (!mem->skip_wmb)
-			wmb();
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-}
-
 /*
  * Obtain a SYSMEM address from a Linux SGL. This should eventually go away
  * and/or become private to this file once all bad usages of Linux SGLs are
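For illustration of the PRAMIN side of the move, a minimal sketch follows; the helper name nvgpu_pramin_rd_n() and the internal callback are assumptions, since the commit's new PRAMIN code is not shown above. The point is that the batch callback each OS used to duplicate now lives next to nvgpu_pramin_access_batched() itself, using the OS-agnostic nvgpu_readl() from this commit instead of a Linux-only register read.

/* Hypothetical internal batch callback, private to the PRAMIN unit. */
static void pramin_rd_batch(struct gk20a *g, u32 start, u32 words, u32 **arg)
{
	u32 r = start, *dest_u32 = *arg;

	while (words--) {
		*dest_u32++ = nvgpu_readl(g, r);
		r += sizeof(u32);
	}

	*arg = dest_u32;
}

/* Hypothetical simplified interface: callers pass a buffer, not a callback. */
void nvgpu_pramin_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
		u32 start, u32 size, void *dest)
{
	u32 *dest_u32 = dest;

	nvgpu_pramin_access_batched(g, mem, start, size,
			pramin_rd_batch, &dest_u32);
}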