mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-25 11:04:51 +03:00
gpu: nvgpu: Implement common nvgpu_mem_rd* functions
nvgpu_mem_rd*() functions were implemented per OS. They also used nvgpu_pramin_access_batched() and implemented a big portion of logic for using PRAMIN in OS specific code. Make the implementation for the functions generic. Move all PRAMIN logic to PRAMIN and simplify the interface provided by PRAMIN. Change-Id: I1acb9e8d7d424325dc73314d5738cb2c9ebf7692 Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1753708 Reviewed-by: Konsta Holtta <kholtta@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
cf2ac655fd
commit
6ea52c59b0
@@ -27,12 +27,19 @@
|
||||
|
||||
#include "gk20a/gk20a.h"
|
||||
|
||||
/*
|
||||
* This typedef is for functions that get called during the access_batched()
|
||||
* operation.
|
||||
*/
|
||||
typedef void (*pramin_access_batch_fn)(struct gk20a *g, u32 start, u32 words,
|
||||
u32 **arg);
|
||||
|
||||
/*
|
||||
* The PRAMIN range is 1 MB, must change base addr if a buffer crosses that.
|
||||
* This same loop is used for read/write/memset. Offset and size in bytes.
|
||||
* One call to "loop" is done per range, with "arg" supplied.
|
||||
*/
|
||||
void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
|
||||
static void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
|
||||
u32 offset, u32 size, pramin_access_batch_fn loop, u32 **arg)
|
||||
{
|
||||
struct nvgpu_page_alloc *alloc = NULL;
|
||||
@@ -87,6 +94,69 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * PRAMIN batch callback for reads: copy @words 32-bit values out of the
 * aperture, starting at register offset @start, into the buffer *@arg
 * points at. On return *@arg is advanced just past the last word stored,
 * so consecutive ranges from access_batched() land back to back.
 */
static void nvgpu_pramin_access_batch_rd_n(struct gk20a *g,
		u32 start, u32 words, u32 **arg)
{
	u32 *out = *arg;
	u32 i;

	for (i = 0; i < words; i++)
		out[i] = nvgpu_readl(g, start + i * sizeof(u32));

	*arg = out + words;
}
|
||||
|
||||
/*
 * Read @size bytes of @mem, starting at byte offset @start, into @dest
 * through the PRAMIN window. @start and @size are expected to be
 * u32-aligned; batching across the 1 MB window is handled by
 * nvgpu_pramin_access_batched().
 */
void nvgpu_pramin_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
		u32 start, u32 size, void *dest)
{
	u32 *dest_u32 = dest;

	/*
	 * Fix: the original had "return nvgpu_pramin_access_batched(...)".
	 * Returning an expression from a void function is an ISO C
	 * constraint violation (C99 6.8.6.4); call it as a statement.
	 */
	nvgpu_pramin_access_batched(g, mem, start, size,
			nvgpu_pramin_access_batch_rd_n, &dest_u32);
}
|
||||
|
||||
/*
 * PRAMIN batch callback for writes: push @words 32-bit values from the
 * buffer *@arg points at into the aperture, starting at register offset
 * @start, using relaxed (non-ordered) writes. On return *@arg is advanced
 * past the last word consumed so the next range continues from there.
 */
static void nvgpu_pramin_access_batch_wr_n(struct gk20a *g,
		u32 start, u32 words, u32 **arg)
{
	u32 *in = *arg;
	u32 i;

	for (i = 0; i < words; i++)
		nvgpu_writel_relaxed(g, start + i * sizeof(u32), in[i]);

	*arg = in + words;
}
|
||||
|
||||
/*
 * Write @size bytes from @src into @mem, starting at byte offset @start,
 * through the PRAMIN window. @start and @size are expected to be
 * u32-aligned; batching across the 1 MB window is handled by
 * nvgpu_pramin_access_batched().
 */
void nvgpu_pramin_wr_n(struct gk20a *g, struct nvgpu_mem *mem,
		u32 start, u32 size, void *src)
{
	u32 *src_u32 = src;

	/*
	 * Fix: the original had "return nvgpu_pramin_access_batched(...)".
	 * Returning an expression from a void function is an ISO C
	 * constraint violation (C99 6.8.6.4); call it as a statement.
	 */
	nvgpu_pramin_access_batched(g, mem, start, size,
			nvgpu_pramin_access_batch_wr_n, &src_u32);
}
|
||||
|
||||
/*
 * PRAMIN batch callback for memset: store the single fill word **@arg
 * into @words consecutive 32-bit slots of the aperture, beginning at
 * register offset @start. Unlike the rd/wr callbacks, *@arg is not
 * advanced — the same fill value is reused for every range.
 */
static void nvgpu_pramin_access_batch_set(struct gk20a *g,
		u32 start, u32 words, u32 **arg)
{
	u32 fill = **arg;
	u32 i;

	for (i = 0; i < words; i++)
		nvgpu_writel_relaxed(g, start + i * sizeof(u32), fill);
}
|
||||
|
||||
/*
 * Fill @size bytes of @mem, starting at byte offset @start, with the
 * 32-bit pattern @w via the PRAMIN window. @start and @size are expected
 * to be u32-aligned; batching across the 1 MB window is handled by
 * nvgpu_pramin_access_batched().
 */
void nvgpu_pramin_memset(struct gk20a *g, struct nvgpu_mem *mem,
		u32 start, u32 size, u32 w)
{
	/* batch interface takes u32**, so hand it a pointer to @w */
	u32 *p = &w;

	/*
	 * Fix: the original had "return nvgpu_pramin_access_batched(...)".
	 * Returning an expression from a void function is an ISO C
	 * constraint violation (C99 6.8.6.4); call it as a statement.
	 */
	nvgpu_pramin_access_batched(g, mem, start, size,
			nvgpu_pramin_access_batch_set, &p);
}
|
||||
void nvgpu_init_pramin(struct mm_gk20a *mm)
|
||||
{
|
||||
mm->pramin_window = 0;
|
||||
|
||||
Reference in New Issue
Block a user