Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: rename mem_desc to nvgpu_mem
Renaming was done with the following command:
$ find -type f | \
    xargs sed -i 's/struct mem_desc/struct nvgpu_mem/g'
Also rename mem_desc.[ch] to nvgpu_mem.[ch].
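The file rename itself would have been along these lines (a sketch only;
the drivers/gpu/nvgpu prefix is inferred from the Makefile and include
paths touched below, not recorded on this page):

$ git mv drivers/gpu/nvgpu/common/linux/mem_desc.c \
      drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
$ git mv drivers/gpu/nvgpu/include/nvgpu/mem_desc.h \
      drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h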
JIRA NVGPU-12
Change-Id: I69395758c22a56aa01e3dffbcded70a729bf559a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325547
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit c9665079d7
parent b69020bff5
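The rename is purely mechanical; no behavior changes. For context, a minimal
sketch of the renamed accessors in use (the nvgpu_mem_begin/rd32/wr32/end
signatures are taken from the nvgpu_mem.c hunks below; the wrapper function
itself is hypothetical):

	/* Hypothetical helper: set bit 0 of word 0 in a buffer through the
	 * renamed struct nvgpu_mem accessors. */
	static int example_set_bit0(struct gk20a *g, struct nvgpu_mem *mem)
	{
		u32 v;
		int err;

		err = nvgpu_mem_begin(g, mem);	/* map for CPU access */
		if (err)
			return err;

		v = nvgpu_mem_rd32(g, mem, 0);		/* read word 0 */
		nvgpu_mem_wr32(g, mem, 0, v | 0x1);	/* write it back */

		nvgpu_mem_end(g, mem);			/* release mapping */
		return 0;
	}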
@@ -31,7 +31,7 @@ nvgpu-y := \
 	common/linux/ioctl_channel.o \
 	common/linux/ioctl_tsg.o \
 	common/linux/log.o \
-	common/linux/mem_desc.o \
+	common/linux/nvgpu_mem.o \
 	common/mm/nvgpu_allocator.o \
 	common/mm/bitmap_allocator.o \
 	common/mm/buddy_allocator.o \
@@ -165,7 +165,7 @@ u32 boardobjgrp_pmucmd_pmuinithandle_impl(struct gk20a *g,
 		struct boardobjgrp_pmu_cmd *pcmd)
 {
 	u32 status = 0;
-	struct mem_desc *sysmem_desc = &pcmd->surf.sysmem_desc;
+	struct nvgpu_mem *sysmem_desc = &pcmd->surf.sysmem_desc;

 	gk20a_dbg_info("");

@@ -14,7 +14,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

-#include <nvgpu/mem_desc.h>
+#include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/page_allocator.h>

 #include "gk20a/gk20a.h"
@@ -36,14 +36,14 @@ u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
 	return 0;
 }

-u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem,
+u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 sysmem_mask, u32 vidmem_mask)
 {
 	return __nvgpu_aperture_mask(g, mem->aperture,
 			sysmem_mask, vidmem_mask);
 }

-int nvgpu_mem_begin(struct gk20a *g, struct mem_desc *mem)
+int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	void *cpu_va;

@@ -66,7 +66,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct mem_desc *mem)
 	return 0;
 }

-void nvgpu_mem_end(struct gk20a *g, struct mem_desc *mem)
+void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
 		return;
@@ -75,7 +75,7 @@ void nvgpu_mem_end(struct gk20a *g, struct mem_desc *mem)
 	mem->cpu_va = NULL;
 }

-u32 nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
+u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 {
 	u32 data = 0;

@@ -97,19 +97,19 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
 		data = value;

 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}

 	return data;
 }

-u32 nvgpu_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset)
+u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
 {
 	WARN_ON(offset & 3);
 	return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
 }

-void nvgpu_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
+void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 offset, void *dest, u32 size)
 {
 	WARN_ON(offset & 3);
@@ -131,11 +131,11 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
 		nvgpu_pramin_access_batched(g, mem, offset, size,
 				pramin_access_batch_rd_n, &dest_u32);
 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}
 }

-void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
+void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 {
 	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
 		u32 *ptr = mem->cpu_va;
@@ -154,17 +154,17 @@ void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
 		if (!mem->skip_wmb)
 			wmb();
 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}
 }

-void nvgpu_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data)
+void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
 {
 	WARN_ON(offset & 3);
 	nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
 }

-void nvgpu_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
+void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		void *src, u32 size)
 {
 	WARN_ON(offset & 3);
@@ -188,11 +188,11 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
 		if (!mem->skip_wmb)
 			wmb();
 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}
 }

-void nvgpu_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
+void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		u32 c, u32 size)
 {
 	WARN_ON(offset & 3);
@@ -220,6 +220,6 @@ void nvgpu_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
 		if (!mem->skip_wmb)
 			wmb();
 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}
 }
@@ -80,7 +80,7 @@ void pramin_access_batch_set(struct gk20a *g, u32 start, u32 words, u32 **arg)
  * This same loop is used for read/write/memset. Offset and size in bytes.
  * One call to "loop" is done per range, with "arg" supplied.
  */
-void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
+void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 offset, u32 size, pramin_access_batch_fn loop, u32 **arg)
 {
 	struct nvgpu_page_alloc *alloc = NULL;

@@ -130,7 +130,7 @@ int gk20a_read_ptimer(struct gk20a *g, u64 *value)
 	return -EBUSY;
 }

-static int gk20a_bus_bar1_bind(struct gk20a *g, struct mem_desc *bar1_inst)
+static int gk20a_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
 {
 	u64 iova = gk20a_mm_inst_block_addr(g, bar1_inst);
 	u32 ptr_v = (u32)(iova >> bar1_instance_block_shift_gk20a());

@@ -18,7 +18,7 @@

 struct gk20a;
 struct gpu_ops;
-struct mem_desc;
+struct nvgpu_mem;

 void gk20a_init_bus(struct gpu_ops *gops);

@@ -52,7 +52,7 @@ static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
 	unsigned int i;

 	for (i = 0; i < cde_ctx->num_bufs; i++) {
-		struct mem_desc *mem = cde_ctx->mem + i;
+		struct nvgpu_mem *mem = cde_ctx->mem + i;
 		gk20a_gmmu_unmap_free(cde_ctx->vm, mem);
 	}

@@ -226,7 +226,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 		const struct firmware *img,
 		struct gk20a_cde_hdr_buf *buf)
 {
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 	int err;

 	/* check that the file can hold the buf */
@@ -312,8 +312,8 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
 		const struct firmware *img,
 		struct gk20a_cde_hdr_replace *replace)
 {
-	struct mem_desc *source_mem;
-	struct mem_desc *target_mem;
+	struct nvgpu_mem *source_mem;
+	struct nvgpu_mem *target_mem;
 	u32 *target_mem_ptr;
 	u64 vaddr;
 	int err;
@@ -362,7 +362,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
 static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
 {
 	struct gk20a *g = cde_ctx->g;
-	struct mem_desc *target_mem;
+	struct nvgpu_mem *target_mem;
 	u32 *target_mem_ptr;
 	u64 new_data;
 	int user_id = 0, err;
@@ -451,7 +451,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
 		const struct firmware *img,
 		struct gk20a_cde_hdr_param *param)
 {
-	struct mem_desc *target_mem;
+	struct nvgpu_mem *target_mem;

 	if (param->target_buf >= cde_ctx->num_bufs) {
 		gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
@@ -545,7 +545,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,

 	gpfifo_elem = *gpfifo;
 	for (i = 0; i < num_elems; i++, cmd_elem++, gpfifo_elem++) {
-		struct mem_desc *target_mem;
+		struct nvgpu_mem *target_mem;

 		/* validate the current entry */
 		if (cmd_elem->target_buf >= cde_ctx->num_bufs) {
@@ -222,7 +222,7 @@ struct gk20a_cde_ctx {
 	struct vm_gk20a *vm;

 	/* buf converter configuration */
-	struct mem_desc mem[MAX_CDE_BUFS];
+	struct nvgpu_mem mem[MAX_CDE_BUFS];
 	unsigned int num_bufs;

 	/* buffer patching params (where should patching be done) */
@@ -110,7 +110,7 @@ struct gk20a_gpu_ctx {
 	struct vm_gk20a *vm;

 	/* cmd buf mem_desc */
-	struct mem_desc cmd_buf_mem;
+	struct nvgpu_mem cmd_buf_mem;

 	struct nvgpu_list_node list;

@@ -2117,7 +2117,7 @@ static void gk20a_submit_append_priv_cmdbuf(struct channel_gk20a *c,
 		struct priv_cmd_entry *cmd)
 {
 	struct gk20a *g = c->g;
-	struct mem_desc *gpfifo_mem = &c->gpfifo.mem;
+	struct nvgpu_mem *gpfifo_mem = &c->gpfifo.mem;
 	struct nvgpu_gpfifo x = {
 		.entry0 = u64_lo32(cmd->gva),
 		.entry1 = u64_hi32(cmd->gva) |
@@ -2148,7 +2148,7 @@ static int gk20a_submit_append_gpfifo(struct channel_gk20a *c,
 	u32 len = num_entries * sizeof(struct nvgpu_gpfifo);
 	u32 start = c->gpfifo.put * sizeof(struct nvgpu_gpfifo);
 	u32 end = start + len; /* exclusive */
-	struct mem_desc *gpfifo_mem = &c->gpfifo.mem;
+	struct nvgpu_mem *gpfifo_mem = &c->gpfifo.mem;
 	struct nvgpu_gpfifo *cpu_src;
 	int err;

@@ -193,7 +193,7 @@ struct channel_gk20a {

 	struct channel_ctx_gk20a ch_ctx;

-	struct mem_desc inst_block;
+	struct nvgpu_mem inst_block;

 	u64 userd_iova;
 	u64 userd_gpu_va;
@@ -114,7 +114,7 @@ gk20a_cs_snapshot_client_from_list(struct nvgpu_list_node *node)
 struct gk20a_cs_snapshot {
 	unsigned long perfmon_ids[PM_BITMAP_SIZE];
 	struct nvgpu_list_node clients;
-	struct mem_desc hw_memdesc;
+	struct nvgpu_mem hw_memdesc;
 	/* pointer to allocated cpu_va memory where GPU place data */
 	struct gk20a_cs_snapshot_fifo_entry *hw_snapshot;
 	struct gk20a_cs_snapshot_fifo_entry *hw_end;

@@ -95,7 +95,7 @@ static void gk20a_fb_set_debug_mode(struct gk20a *g, bool enable)
 	gk20a_writel(g, fb_mmu_debug_ctrl_r(), reg_val);
 }

-void gk20a_fb_tlb_invalidate(struct gk20a *g, struct mem_desc *pdb)
+void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 {
 	struct nvgpu_timeout timeout;
 	u32 addr_lo;

@@ -15,11 +15,11 @@
 #define FB_GK20A_H

 struct gk20a;
-struct mem_desc;
+struct nvgpu_mem;

 void gk20a_init_fb(struct gpu_ops *gops);
 void fb_gk20a_reset(struct gk20a *g);
 void gk20a_fb_init_hw(struct gk20a *g);
-void gk20a_fb_tlb_invalidate(struct gk20a *g, struct mem_desc *pdb);
+void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb);

 #endif
@@ -60,7 +60,7 @@ struct gk20a_fecs_trace_hash_ent {

 struct gk20a_fecs_trace {

-	struct mem_desc trace_buf;
+	struct nvgpu_mem trace_buf;
 	DECLARE_HASHTABLE(pid_hash_table, GK20A_FECS_TRACE_HASH_BITS);
 	struct nvgpu_mutex hash_lock;
 	struct nvgpu_mutex poll_lock;
@@ -620,7 +620,7 @@ static int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	phys_addr_t pa;
 	struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
-	struct mem_desc *mem = &ch_ctx->gr_ctx->mem;
+	struct nvgpu_mem *mem = &ch_ctx->gr_ctx->mem;
 	u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(ch);
 	pid_t pid;
 	u32 aperture;

@@ -3963,7 +3963,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
 		u32 flags)
 {
 	struct gk20a *g = c->g;
-	struct mem_desc *mem = &c->inst_block;
+	struct nvgpu_mem *mem = &c->inst_block;

 	gk20a_dbg_fn("");

@@ -4097,7 +4097,7 @@ int gk20a_fifo_set_priority(struct channel_gk20a *ch, u32 priority)
 void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
 {
 	struct gk20a *g = c->g;
-	struct mem_desc *mem = &c->inst_block;
+	struct nvgpu_mem *mem = &c->inst_block;

 	gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->hw_chid);

@@ -4109,7 +4109,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
 int gk20a_fifo_setup_userd(struct channel_gk20a *c)
 {
 	struct gk20a *g = c->g;
-	struct mem_desc *mem = &g->fifo.userd;
+	struct nvgpu_mem *mem = &g->fifo.userd;
 	u32 offset = c->hw_chid * g->fifo.userd_entry_size / sizeof(u32);

 	gk20a_dbg_fn("");

@@ -63,7 +63,7 @@ struct fifo_runlist_info_gk20a {
 	unsigned long *active_channels;
 	unsigned long *active_tsgs;
 	/* Each engine has its own SW and HW runlist buffer.*/
-	struct mem_desc mem[MAX_RUNLIST_BUFFERS];
+	struct nvgpu_mem mem[MAX_RUNLIST_BUFFERS];
 	u32 cur_buffer;
 	u32 total_entries;
 	u32 pbdma_bitmask; /* pbdmas supported for this runlist*/
@@ -166,7 +166,7 @@ struct fifo_gk20a {
 		struct nvgpu_mutex lock;
 	} profile;
 #endif
-	struct mem_desc userd;
+	struct nvgpu_mem userd;
 	u32 userd_entry_size;

 	unsigned int used_channels;

@@ -258,7 +258,7 @@ struct gpu_ops {
 				struct gr_ctx_desc *gr_ctx);
 		void (*update_ctxsw_preemption_mode)(struct gk20a *g,
 				struct channel_ctx_gk20a *ch_ctx,
-				struct mem_desc *mem);
+				struct nvgpu_mem *mem);
 		int (*update_smpc_ctxsw_mode)(struct gk20a *g,
 				struct channel_gk20a *c,
 				bool enable);
@@ -279,7 +279,7 @@ struct gpu_ops {
 				u32 expect_delay);
 		void (*init_cyclestats)(struct gk20a *g);
 		void (*enable_cde_in_fecs)(struct gk20a *g,
-				struct mem_desc *mem);
+				struct nvgpu_mem *mem);
 		int (*set_sm_debug_mode)(struct gk20a *g, struct channel_gk20a *ch,
 				u64 sms, bool enable);
 		void (*bpt_reg_info)(struct gk20a *g,
@@ -329,7 +329,7 @@ struct gpu_ops {
 				u32 compute_preempt_mode);
 		int (*set_boosted_ctx)(struct channel_gk20a *ch, bool boost);
 		void (*update_boosted_ctx)(struct gk20a *g,
-				struct mem_desc *mem,
+				struct nvgpu_mem *mem,
 				struct gr_ctx_desc *gr_ctx);
 		int (*fuse_override)(struct gk20a *g);
 		void (*init_sm_id_table)(struct gk20a *g);
@@ -344,11 +344,11 @@ struct gpu_ops {
 		int (*commit_global_timeslice)(struct gk20a *g,
 				struct channel_gk20a *c, bool patch);
 		int (*commit_inst)(struct channel_gk20a *c, u64 gpu_va);
-		void (*restore_context_header)(struct gk20a *g, struct mem_desc *ctxheader);
+		void (*restore_context_header)(struct gk20a *g, struct nvgpu_mem *ctxheader);
 		void (*write_zcull_ptr)(struct gk20a *g,
-				struct mem_desc *mem, u64 gpu_va);
+				struct nvgpu_mem *mem, u64 gpu_va);
 		void (*write_pm_ptr)(struct gk20a *g,
-				struct mem_desc *mem, u64 gpu_va);
+				struct nvgpu_mem *mem, u64 gpu_va);
 		void (*init_elcg_mode)(struct gk20a *g, u32 mode, u32 engine);
 		void (*load_tpc_mask)(struct gk20a *g);
 		int (*inval_icache)(struct gk20a *g, struct channel_gk20a *ch);
@@ -372,7 +372,7 @@ struct gpu_ops {
 		int (*vpr_info_fetch)(struct gk20a *g);
 		bool (*is_debug_mode_enabled)(struct gk20a *g);
 		void (*set_debug_mode)(struct gk20a *g, bool enable);
-		void (*tlb_invalidate)(struct gk20a *g, struct mem_desc *pdb);
+		void (*tlb_invalidate)(struct gk20a *g, struct nvgpu_mem *pdb);
 		void (*hub_isr)(struct gk20a *g);
 	} fb;
 	struct {
@@ -636,7 +636,7 @@ struct gpu_ops {
 		void (*l2_flush)(struct gk20a *g, bool invalidate);
 		void (*cbc_clean)(struct gk20a *g);
 		void (*set_big_page_size)(struct gk20a *g,
-				struct mem_desc *mem, int size);
+				struct nvgpu_mem *mem, int size);
 		u32 (*get_big_page_sizes)(void);
 		u32 (*get_physical_addr_bits)(struct gk20a *g);
 		int (*init_mm_setup_hw)(struct gk20a *g);
@@ -646,19 +646,19 @@ struct gpu_ops {
 		void (*remove_bar2_vm)(struct gk20a *g);
 		const struct gk20a_mmu_level *
 			(*get_mmu_levels)(struct gk20a *g, u32 big_page_size);
-		void (*init_pdb)(struct gk20a *g, struct mem_desc *inst_block,
+		void (*init_pdb)(struct gk20a *g, struct nvgpu_mem *inst_block,
 				struct vm_gk20a *vm);
 		u64 (*get_iova_addr)(struct gk20a *g, struct scatterlist *sgl,
 				u32 flags);
 		size_t (*get_vidmem_size)(struct gk20a *g);
-		void (*init_inst_block)(struct mem_desc *inst_block,
+		void (*init_inst_block)(struct nvgpu_mem *inst_block,
 				struct vm_gk20a *vm, u32 big_page_size);
 		bool (*mmu_fault_pending)(struct gk20a *g);
 	} mm;
 	struct {
-		u32 (*enter)(struct gk20a *g, struct mem_desc *mem,
+		u32 (*enter)(struct gk20a *g, struct nvgpu_mem *mem,
 				struct page_alloc_chunk *chunk, u32 w);
-		void (*exit)(struct gk20a *g, struct mem_desc *mem,
+		void (*exit)(struct gk20a *g, struct nvgpu_mem *mem,
 				struct page_alloc_chunk *chunk);
 		u32 (*data032_r)(u32 i);
 	} pramin;
@@ -709,7 +709,7 @@ struct gpu_ops {
 		bool (*is_priv_load)(u32 falcon_id);
 		void (*get_wpr)(struct gk20a *g, struct wpr_carveout_info *inf);
 		int (*alloc_blob_space)(struct gk20a *g,
-				size_t size, struct mem_desc *mem);
+				size_t size, struct nvgpu_mem *mem);
 		int (*pmu_populate_loader_cfg)(struct gk20a *g,
 				void *lsfm, u32 *p_bl_gen_desc_size);
 		int (*flcn_populate_bl_dmem_desc)(struct gk20a *g,
@@ -820,7 +820,7 @@ struct gpu_ops {
 		void (*init_hw)(struct gk20a *g);
 		void (*isr)(struct gk20a *g);
 		int (*read_ptimer)(struct gk20a *g, u64 *value);
-		int (*bar1_bind)(struct gk20a *g, struct mem_desc *bar1_inst);
+		int (*bar1_bind)(struct gk20a *g, struct nvgpu_mem *bar1_inst);
 	} bus;

 	int (*bios_init)(struct gk20a *g);

@@ -699,7 +699,7 @@ void gr_gk20a_ctx_patch_write(struct gk20a *g,
 	}
 }

-static u32 fecs_current_ctx_data(struct gk20a *g, struct mem_desc *inst_block)
+static u32 fecs_current_ctx_data(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	u32 ptr = u64_lo32(gk20a_mm_inst_block_addr(g, inst_block)
 			>> ram_in_base_shift_v());
@@ -741,7 +741,7 @@ static int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
 }

 void gr_gk20a_write_zcull_ptr(struct gk20a *g,
-				struct mem_desc *mem, u64 gpu_va)
+				struct nvgpu_mem *mem, u64 gpu_va)
 {
 	u32 va = u64_lo32(gpu_va >> 8);

@@ -750,7 +750,7 @@ void gr_gk20a_write_zcull_ptr(struct gk20a *g,
 }

 void gr_gk20a_write_pm_ptr(struct gk20a *g,
-				struct mem_desc *mem, u64 gpu_va)
+				struct nvgpu_mem *mem, u64 gpu_va)
 {
 	u32 va = u64_lo32(gpu_va >> 8);

@@ -761,9 +761,9 @@ void gr_gk20a_write_pm_ptr(struct gk20a *g,
 static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 {
 	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
-	struct mem_desc *mem = &ch_ctx->gr_ctx->mem;
+	struct nvgpu_mem *mem = &ch_ctx->gr_ctx->mem;
 	struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
-	struct mem_desc *ctxheader = &ctx->mem;
+	struct nvgpu_mem *ctxheader = &ctx->mem;
 	int ret = 0;

 	gk20a_dbg_fn("");
@@ -1579,15 +1579,15 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 	u32 ctx_header_words;
 	u32 i;
 	u32 data;
-	struct mem_desc *gold_mem = &gr->global_ctx_buffer[GOLDEN_CTX].mem;
-	struct mem_desc *gr_mem = &ch_ctx->gr_ctx->mem;
+	struct nvgpu_mem *gold_mem = &gr->global_ctx_buffer[GOLDEN_CTX].mem;
+	struct nvgpu_mem *gr_mem = &ch_ctx->gr_ctx->mem;
 	u32 err = 0;
 	struct aiv_list_gk20a *sw_ctx_load = &g->gr.ctx_vars.sw_ctx_load;
 	struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init;
 	u32 last_method_data = 0;
 	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 	struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
-	struct mem_desc *ctxheader = &ctx->mem;
+	struct nvgpu_mem *ctxheader = &ctx->mem;

 	gk20a_dbg_fn("");

@@ -1836,7 +1836,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 		bool enable_smpc_ctxsw)
 {
 	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 	u32 data;
 	int ret;

@@ -1893,11 +1893,11 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 {
 	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
 	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
-	struct mem_desc *gr_mem;
+	struct nvgpu_mem *gr_mem;
 	u32 data;
 	u64 virt_addr;
 	struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
-	struct mem_desc *ctxheader = &ctx->mem;
+	struct nvgpu_mem *ctxheader = &ctx->mem;
 	int ret;

 	gk20a_dbg_fn("");
@@ -2018,7 +2018,7 @@ cleanup_pm_buf:
 	gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 			gk20a_mem_flag_none);
 	gk20a_gmmu_free(g, &pm_ctx->mem);
-	memset(&pm_ctx->mem, 0, sizeof(struct mem_desc));
+	memset(&pm_ctx->mem, 0, sizeof(struct nvgpu_mem));

 	gk20a_enable_channel_tsg(g, c);
 	return ret;
@@ -2035,9 +2035,9 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	u64 virt_addr = 0;
 	u32 v, data;
 	int ret = 0;
-	struct mem_desc *mem = &ch_ctx->gr_ctx->mem;
+	struct nvgpu_mem *mem = &ch_ctx->gr_ctx->mem;
 	struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
-	struct mem_desc *ctxheader = &ctx->mem;
+	struct nvgpu_mem *ctxheader = &ctx->mem;

 	gk20a_dbg_fn("");

@@ -2249,7 +2249,7 @@ static void gr_gk20a_init_ctxsw_ucode_segments(

 static int gr_gk20a_copy_ctxsw_ucode_segments(
 	struct gk20a *g,
-	struct mem_desc *dst,
+	struct nvgpu_mem *dst,
 	struct gk20a_ctxsw_ucode_segments *segments,
 	u32 *bootimage,
 	u32 *code, u32 *data)
@@ -2826,7 +2826,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
 	u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
 	struct gr_gk20a *gr = &g->gr;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 	u64 gpu_va;
 	u32 i;
 	gk20a_dbg_fn("");
@@ -5085,7 +5085,7 @@ out:
 static int gr_gk20a_init_access_map(struct gk20a *g)
 {
 	struct gr_gk20a *gr = &g->gr;
-	struct mem_desc *mem = &gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem;
+	struct nvgpu_mem *mem = &gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem;
 	u32 w, nr_pages =
 		DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size,
 			PAGE_SIZE);
@@ -6645,7 +6645,7 @@ int gr_gk20a_fecs_get_reglist_img_size(struct gk20a *g, u32 *size)
 }

 int gr_gk20a_fecs_set_reglist_bind_inst(struct gk20a *g,
-		struct mem_desc *inst_block)
+		struct nvgpu_mem *inst_block)
 {
 	u32 data = fecs_current_ctx_data(g, inst_block);

@@ -7131,7 +7131,7 @@ static void gr_gk20a_init_sm_dsm_reg_info(void)
 static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 		struct channel_ctx_gk20a *ch_ctx,
 		u32 addr, u32 data,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	u32 num_gpc = g->gr.gpc_count;
 	u32 num_tpc;
@@ -8258,7 +8258,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
 	bool gr_ctx_ready = false;
 	bool pm_ctx_ready = false;
-	struct mem_desc *current_mem = NULL;
+	struct nvgpu_mem *current_mem = NULL;
 	bool ch_is_curr_ctx, restart_gr_ctxsw = false;
 	u32 i, j, offset, v;
 	struct gr_gk20a *gr = &g->gr;

@@ -205,7 +205,7 @@ struct gr_gk20a_isr_data {

 struct gr_ctx_buffer_desc {
 	void (*destroy)(struct gk20a *, struct gr_ctx_buffer_desc *);
-	struct mem_desc mem;
+	struct nvgpu_mem mem;
 	void *priv;
 };

@@ -321,8 +321,8 @@ struct gr_gk20a {

 	struct gr_ctx_buffer_desc global_ctx_buffer[NR_GLOBAL_CTX_BUF];

-	struct mem_desc mmu_wr_mem;
-	struct mem_desc mmu_rd_mem;
+	struct nvgpu_mem mmu_wr_mem;
+	struct nvgpu_mem mmu_rd_mem;

 	u8 *map_tiles;
 	u32 map_tile_count;
@@ -385,7 +385,7 @@ struct gr_gk20a {
 void gk20a_fecs_dump_falcon_stats(struct gk20a *g);

 struct gr_ctx_desc {
-	struct mem_desc mem;
+	struct nvgpu_mem mem;

 	u32 graphics_preempt_mode;
 	u32 compute_preempt_mode;
@@ -399,7 +399,7 @@ struct gr_ctx_desc {
 };

 struct ctx_header_desc {
-	struct mem_desc mem;
+	struct nvgpu_mem mem;
 };

 struct gk20a_ctxsw_ucode_segment {
@@ -441,8 +441,8 @@ struct gk20a_ctxsw_ucode_segments {

 struct gk20a_ctxsw_ucode_info {
 	u64 *p_va;
-	struct mem_desc inst_blk_desc;
-	struct mem_desc surface_desc;
+	struct nvgpu_mem inst_blk_desc;
+	struct nvgpu_mem surface_desc;
 	struct gk20a_ctxsw_ucode_segments fecs;
 	struct gk20a_ctxsw_ucode_segments gpccs;
 };
@@ -526,7 +526,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr);
 /* pmu */
 int gr_gk20a_fecs_get_reglist_img_size(struct gk20a *g, u32 *size);
 int gr_gk20a_fecs_set_reglist_bind_inst(struct gk20a *g,
-		struct mem_desc *inst_block);
+		struct nvgpu_mem *inst_block);
 int gr_gk20a_fecs_set_reglist_virtual_addr(struct gk20a *g, u64 pmu_va);

 void gr_gk20a_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine);
@@ -717,10 +717,10 @@ void gr_gk20a_init_sm_id_table(struct gk20a *g);
 int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va);

 void gr_gk20a_write_zcull_ptr(struct gk20a *g,
-				struct mem_desc *mem, u64 gpu_va);
+				struct nvgpu_mem *mem, u64 gpu_va);

 void gr_gk20a_write_pm_ptr(struct gk20a *g,
-				struct mem_desc *mem, u64 gpu_va);
+				struct nvgpu_mem *mem, u64 gpu_va);


 static inline const char *gr_gk20a_graphics_preempt_mode_name(u32 graphics_preempt_mode)
@@ -34,7 +34,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/pramin.h>
 #include <nvgpu/list.h>
-#include <nvgpu/mem_desc.h>
+#include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/allocator.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/page_allocator.h>
@@ -169,7 +169,7 @@ struct gk20a_dmabuf_priv {

 struct gk20a_vidmem_buf {
 	struct gk20a *g;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 	struct dma_buf *dmabuf;
 	void *dmabuf_priv;
 	void (*dmabuf_priv_delete)(void *);
@@ -457,7 +457,7 @@ static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
 	return 0;
 }

-void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block)
+void gk20a_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
 {
 	struct gk20a *g = vm->mm->g;

@@ -1866,7 +1866,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 		nvgpu_mutex_release(&g->mm.vidmem.first_clear_mutex);
 	}

-	buf->mem = nvgpu_kzalloc(g, sizeof(struct mem_desc));
+	buf->mem = nvgpu_kzalloc(g, sizeof(struct nvgpu_mem));
 	if (!buf->mem)
 		goto err_kfree;

@@ -1931,7 +1931,7 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	struct gk20a_vidmem_buf *vidmem_buf;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 	int err = 0;

 	if (gk20a_dmabuf_aperture(g, dmabuf) != APERTURE_VIDMEM)
@@ -2519,13 +2519,13 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
 			aperture);
 }

-int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_flags(g, 0, size, mem);
 }

 int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	if (g->mm.vidmem_is_vidmem) {
 		/*
@@ -2549,7 +2549,7 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 	return gk20a_gmmu_alloc_flags_sys(g, flags, size, mem);
 }

-int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_flags_sys(g, 0, size, mem);
 }
@@ -2574,7 +2574,7 @@ static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
 }

 int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 	int err;
@@ -2631,7 +2631,7 @@ fail_free:
 	return err;
 }

-static void gk20a_gmmu_free_sys(struct gk20a *g, struct mem_desc *mem)
+static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);

@@ -2666,7 +2666,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct mem_desc *mem)
 }

 #if defined(CONFIG_GK20A_VIDMEM)
-static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
+static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct gk20a_fence *gk20a_fence_out = NULL;
 	struct gk20a_fence *gk20a_last_fence = NULL;
@@ -2728,14 +2728,14 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
 }
 #endif

-int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_flags_vid(g,
 			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }

 int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_flags_vid_at(g, flags, size, mem, 0);
 }
@@ -2756,7 +2756,7 @@ static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 #endif

 int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
-		size_t size, struct mem_desc *mem, dma_addr_t at)
+		size_t size, struct nvgpu_mem *mem, dma_addr_t at)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	u64 addr;
@@ -2831,7 +2831,7 @@ fail_physfree:
 #endif
 }

-static void gk20a_gmmu_free_vid(struct gk20a *g, struct mem_desc *mem)
+static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	bool was_empty;
@@ -2863,7 +2863,7 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct mem_desc *mem)
 #endif
 }

-void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem)
+void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	switch (mem->aperture) {
 	case APERTURE_SYSMEM:
@@ -2879,7 +2879,7 @@ void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem)
  * If mem is in VIDMEM, return base address in vidmem
  * else return IOVA address for SYSMEM
  */
-u64 gk20a_mem_get_base_addr(struct gk20a *g, struct mem_desc *mem,
+u64 gk20a_mem_get_base_addr(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 flags)
 {
 	struct nvgpu_page_alloc *alloc;
@@ -2900,14 +2900,14 @@ u64 gk20a_mem_get_base_addr(struct gk20a *g, struct mem_desc *mem,
 }

 #if defined(CONFIG_GK20A_VIDMEM)
-static struct mem_desc *get_pending_mem_desc(struct mm_gk20a *mm)
+static struct nvgpu_mem *get_pending_mem_desc(struct mm_gk20a *mm)
 {
-	struct mem_desc *mem = NULL;
+	struct nvgpu_mem *mem = NULL;

 	nvgpu_mutex_acquire(&mm->vidmem.clear_list_mutex);
 	if (!nvgpu_list_empty(&mm->vidmem.clear_list_head)) {
 		mem = nvgpu_list_first_entry(&mm->vidmem.clear_list_head,
-				mem_desc, clear_list_entry);
+				nvgpu_mem, clear_list_entry);
 		nvgpu_list_del(&mem->clear_list_entry);
 	}
 	nvgpu_mutex_release(&mm->vidmem.clear_list_mutex);
@@ -2920,7 +2920,7 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 	struct mm_gk20a *mm = container_of(work, struct mm_gk20a,
 			vidmem.clear_mem_worker);
 	struct gk20a *g = mm->g;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;

 	while ((mem = get_pending_mem_desc(mm)) != NULL) {
 		gk20a_gmmu_clear_vidmem_mem(g, mem);
@@ -2939,13 +2939,13 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 #endif

 int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_map_flags(vm, 0, size, mem);
 }

 int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	if (vm->mm->vidmem_is_vidmem) {
 		/*
@@ -2970,13 +2970,13 @@ int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 }

 int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_map_flags_sys(vm, 0, size, mem);
 }

 int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	int err = gk20a_gmmu_alloc_flags_sys(vm->mm->g, flags, size, mem);

@@ -2999,14 +2999,14 @@ fail_free:
 }

 int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_map_flags_vid(vm,
 			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }

 int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	int err = gk20a_gmmu_alloc_flags_vid(vm->mm->g, flags, size, mem);

@@ -3028,7 +3028,7 @@ fail_free:
 	return err;
 }

-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct mem_desc *mem)
+void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
 {
 	if (mem->gpu_va)
 		gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size, gk20a_mem_flag_none);
@@ -4583,7 +4583,7 @@ void gk20a_deinit_vm(struct vm_gk20a *vm)
 	gk20a_vm_free_entries(vm, &vm->pdb, 0);
 }

-int gk20a_alloc_inst_block(struct gk20a *g, struct mem_desc *inst_block)
+int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	struct device *dev = dev_from_gk20a(g);
 	int err;
@@ -4600,13 +4600,13 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct mem_desc *inst_block)
 	return 0;
 }

-void gk20a_free_inst_block(struct gk20a *g, struct mem_desc *inst_block)
+void gk20a_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	if (inst_block->size)
 		gk20a_gmmu_free(g, inst_block);
 }

-u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct mem_desc *inst_block)
+u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	u64 addr;
 	if (g->mm.has_physical_mode)
@@ -4622,7 +4622,7 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 	int err;
 	struct vm_gk20a *vm = &mm->bar1.vm;
 	struct gk20a *g = gk20a_from_mm(mm);
-	struct mem_desc *inst_block = &mm->bar1.inst_block;
+	struct nvgpu_mem *inst_block = &mm->bar1.inst_block;
 	u32 big_page_size = gk20a_get_platform(g->dev)->default_big_page_size;

 	mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
@@ -4653,7 +4653,7 @@ static int gk20a_init_system_vm(struct mm_gk20a *mm)
 	int err;
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	struct gk20a *g = gk20a_from_mm(mm);
-	struct mem_desc *inst_block = &mm->pmu.inst_block;
+	struct nvgpu_mem *inst_block = &mm->pmu.inst_block;
 	u32 big_page_size = gk20a_get_platform(g->dev)->default_big_page_size;
 	u32 low_hole, aperture_size;

@@ -4691,7 +4691,7 @@ static int gk20a_init_hwpm(struct mm_gk20a *mm)
 	int err;
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	struct gk20a *g = gk20a_from_mm(mm);
-	struct mem_desc *inst_block = &mm->hwpm.inst_block;
+	struct nvgpu_mem *inst_block = &mm->hwpm.inst_block;

 	err = gk20a_alloc_inst_block(g, inst_block);
 	if (err)
@@ -4727,7 +4727,7 @@ static int gk20a_init_ce_vm(struct mm_gk20a *mm)
 			false, false, "ce");
 }

-void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
+void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
 		struct vm_gk20a *vm)
 {
 	u64 pdb_addr = gk20a_mem_get_base_addr(g, &vm->pdb.mem, 0);
@@ -4747,7 +4747,7 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
 		ram_in_page_dir_base_hi_f(pdb_addr_hi));
 }

-void gk20a_init_inst_block(struct mem_desc *inst_block, struct vm_gk20a *vm,
+void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
 		u32 big_page_size)
 {
 	struct gk20a *g = gk20a_from_vm(vm);

@@ -24,7 +24,7 @@
 #include <asm/dma-iommu.h>
 #include <asm/cacheflush.h>

-#include <nvgpu/mem_desc.h>
+#include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/allocator.h>
 #include <nvgpu/list.h>
 #include <nvgpu/rbtree.h>
@@ -47,7 +47,7 @@ enum gk20a_mem_rw_flag {
 };

 struct gpfifo_desc {
-	struct mem_desc mem;
+	struct nvgpu_mem mem;
 	u32 entry_num;

 	u32 get;
@@ -61,7 +61,7 @@ struct gpfifo_desc {
 };

 struct patch_desc {
-	struct mem_desc mem;
+	struct nvgpu_mem mem;
 	u32 data_count;
 };

@@ -72,14 +72,14 @@ struct zcull_ctx_desc {
 };

 struct pm_ctx_desc {
-	struct mem_desc mem;
+	struct nvgpu_mem mem;
 	u32 pm_mode;
 };

 struct gk20a;

 struct compbit_store_desc {
-	struct mem_desc mem;
+	struct nvgpu_mem mem;

 	/* The value that is written to the hardware. This depends on
 	 * on the number of ltcs and is not an address. */
@@ -124,7 +124,7 @@ struct gk20a_comptags {

 struct gk20a_mm_entry {
 	/* backing for */
-	struct mem_desc mem;
+	struct nvgpu_mem mem;
 	u32 woffset; /* if >0, mem is a shadow copy, owned by another entry */
 	int pgsz;
 	struct gk20a_mm_entry *entries;
@@ -132,7 +132,7 @@ struct gk20a_mm_entry {
 };

 struct priv_cmd_queue {
-	struct mem_desc mem;
+	struct nvgpu_mem mem;
 	u32 size; /* num of entries in words */
 	u32 put; /* put for priv cmd queue */
 	u32 get; /* get for priv cmd queue */
@@ -140,7 +140,7 @@ struct priv_cmd_queue {

 struct priv_cmd_entry {
 	bool valid;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 	u32 off; /* offset in mem, in u32 entries */
 	u64 gva;
 	u32 get; /* start of entry in queue */
@@ -335,24 +335,24 @@ struct mm_gk20a {
 	struct {
 		u32 aperture_size;
 		struct vm_gk20a vm;
-		struct mem_desc inst_block;
+		struct nvgpu_mem inst_block;
 	} bar1;

 	struct {
 		u32 aperture_size;
 		struct vm_gk20a vm;
-		struct mem_desc inst_block;
+		struct nvgpu_mem inst_block;
 	} bar2;

 	struct {
 		u32 aperture_size;
 		struct vm_gk20a vm;
-		struct mem_desc inst_block;
+		struct nvgpu_mem inst_block;
 	} pmu;

 	struct {
 		/* using pmu vm currently */
-		struct mem_desc inst_block;
+		struct nvgpu_mem inst_block;
 	} hwpm;

 	struct {
@@ -367,7 +367,7 @@ struct mm_gk20a {
 	struct nvgpu_mutex tlb_lock;
 	struct nvgpu_mutex priv_lock;
#ifdef CONFIG_ARCH_TEGRA_18x_SOC
-	struct mem_desc bar2_desc;
+	struct nvgpu_mem bar2_desc;
#endif
 	/*
 	 * Separate function to cleanup the CE since it requires a channel to
@@ -397,7 +397,7 @@ struct mm_gk20a {
 	/* false if vidmem aperture actually points to sysmem */
 	bool vidmem_is_vidmem;

-	struct mem_desc sysmem_flush;
+	struct nvgpu_mem sysmem_flush;

 	u32 pramin_window;
 	struct nvgpu_spinlock pramin_window_lock;
@@ -475,11 +475,11 @@ struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl);
 #define bar1_instance_block_shift_gk20a() bus_bar1_block_ptr_shift_v()
 #endif

-int gk20a_alloc_inst_block(struct gk20a *g, struct mem_desc *inst_block);
-void gk20a_free_inst_block(struct gk20a *g, struct mem_desc *inst_block);
-void gk20a_init_inst_block(struct mem_desc *inst_block, struct vm_gk20a *vm,
+int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);
+void gk20a_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);
+void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
 		u32 big_page_size);
-u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct mem_desc *mem);
+u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct nvgpu_mem *mem);

 void gk20a_mm_dump_vm(struct vm_gk20a *vm,
 		u64 va_begin, u64 va_end, char *label);
@@ -499,7 +499,7 @@ void gk20a_free_sgtable(struct gk20a *g, struct sg_table **sgt);
 u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl,
 		u32 flags);
 u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova);
-u64 gk20a_mem_get_base_addr(struct gk20a *g, struct mem_desc *mem,
+u64 gk20a_mem_get_base_addr(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 flags);

 void gk20a_mm_ltc_isr(struct gk20a *g);
@@ -542,39 +542,39 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
 #define NVGPU_DMA_READ_ONLY (1 << 2)

 int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
-		struct mem_desc *mem);
+		struct nvgpu_mem *mem);
 int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
-		size_t size, struct mem_desc *mem);
+		size_t size, struct nvgpu_mem *mem);

 int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
-		struct mem_desc *mem);
+		struct nvgpu_mem *mem);
 int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
-		size_t size, struct mem_desc *mem);
+		size_t size, struct nvgpu_mem *mem);

 int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
-		struct mem_desc *mem);
+		struct nvgpu_mem *mem);
 int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
-		size_t size, struct mem_desc *mem);
+		size_t size, struct nvgpu_mem *mem);

-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct mem_desc *mem);
+void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem);

-int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem);
+int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
 int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
-		struct mem_desc *mem);
+		struct nvgpu_mem *mem);

-int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct mem_desc *mem);
+int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
 int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
-		size_t size, struct mem_desc *mem);
+		size_t size, struct nvgpu_mem *mem);

-int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct mem_desc *mem);
+int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
 int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
-		size_t size, struct mem_desc *mem);
+		size_t size, struct nvgpu_mem *mem);
 int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
-		size_t size, struct mem_desc *mem, dma_addr_t at);
+		size_t size, struct nvgpu_mem *mem, dma_addr_t at);

-void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem);
+void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem);

-static inline phys_addr_t gk20a_mem_phys(struct mem_desc *mem)
+static inline phys_addr_t gk20a_mem_phys(struct nvgpu_mem *mem)
 {
 	/* FIXME: the sgt/sgl may get null if this is accessed e.g. in an isr
 	 * during channel deletion - attempt to fix at least null derefs */
@@ -591,7 +591,7 @@ static inline phys_addr_t gk20a_mem_phys(struct mem_desc *mem)

 u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
 		u32 sysmem_mask, u32 vidmem_mask);
-u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem,
+u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 sysmem_mask, u32 vidmem_mask);

 void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry,
@@ -769,10 +769,10 @@ struct gpu_ops;
 void gk20a_init_mm(struct gpu_ops *gops);
 const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
 		u32 big_page_size);
-void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *mem,
+void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *mem,
 		struct vm_gk20a *vm);

-void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block);
+void gk20a_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block);

 int gk20a_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size);

@@ -4743,7 +4743,7 @@ clean_up:
 	return err;
 }

-void gk20a_pmu_surface_describe(struct gk20a *g, struct mem_desc *mem,
+void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
 		struct flcn_mem_desc_v0 *fb)
 {
 	fb->address.lo = u64_lo32(mem->gpu_va);
@@ -4752,7 +4752,7 @@ void gk20a_pmu_surface_describe(struct gk20a *g, struct mem_desc *mem,
 	fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24);
 }

-int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct mem_desc *mem,
+int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 size)
 {
 	struct mm_gk20a *mm = &g->mm;
@@ -4768,7 +4768,7 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct mem_desc *mem,
 	return 0;
 }

-int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct mem_desc *mem,
+int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 size)
 {
 	struct mm_gk20a *mm = &g->mm;
@@ -4784,10 +4784,10 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct mem_desc *mem,
 	return 0;
 }

-void gk20a_pmu_surface_free(struct gk20a *g, struct mem_desc *mem)
+void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	gk20a_gmmu_free(g, mem);
-	memset(mem, 0, sizeof(struct mem_desc));
+	memset(mem, 0, sizeof(struct nvgpu_mem));
 }

 int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
@@ -4860,7 +4860,8 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 			goto clean_up;

 		if (payload->in.fb_size != 0x0) {
-			seq->in_mem = nvgpu_kzalloc(g, sizeof(struct mem_desc));
+			seq->in_mem = nvgpu_kzalloc(g,
+					sizeof(struct nvgpu_mem));
 			if (!seq->in_mem) {
 				err = -ENOMEM;
 				goto clean_up;
@@ -4904,7 +4905,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,

 		if (payload->out.fb_size != 0x0) {
 			seq->out_mem = nvgpu_kzalloc(g,
-					sizeof(struct mem_desc));
+					sizeof(struct nvgpu_mem));
 			if (!seq->out_mem) {
 				err = -ENOMEM;
 				goto clean_up;

@@ -185,8 +185,8 @@ struct pmu_payload {
 };

 struct pmu_surface {
-	struct mem_desc vidmem_desc;
-	struct mem_desc sysmem_desc;
+	struct nvgpu_mem vidmem_desc;
+	struct nvgpu_mem sysmem_desc;
 	struct flcn_mem_desc_v0 params;
 };

@@ -204,14 +204,14 @@ struct pmu_sequence {
 		struct pmu_allocation_v2 in_v2;
 		struct pmu_allocation_v3 in_v3;
 	};
-	struct mem_desc *in_mem;
+	struct nvgpu_mem *in_mem;
 	union {
 		struct pmu_allocation_v0 out_v0;
 		struct pmu_allocation_v1 out_v1;
 		struct pmu_allocation_v2 out_v2;
 		struct pmu_allocation_v3 out_v3;
 	};
-	struct mem_desc *out_mem;
+	struct nvgpu_mem *out_mem;
 	u8 *out_payload;
 	pmu_callback callback;
 	void* cb_params;
@@ -314,13 +314,13 @@ struct pmu_gk20a {
 		struct pmu_ucode_desc *desc;
 		struct pmu_ucode_desc_v1 *desc_v1;
 	};
-	struct mem_desc ucode;
+	struct nvgpu_mem ucode;

-	struct mem_desc pg_buf;
+	struct nvgpu_mem pg_buf;
 	/* TBD: remove this if ZBC seq is fixed */
-	struct mem_desc seq_buf;
-	struct mem_desc trace_buf;
-	struct mem_desc wpr_buf;
+	struct nvgpu_mem seq_buf;
+	struct nvgpu_mem trace_buf;
+	struct nvgpu_mem wpr_buf;
 	bool buf_loaded;

 	struct pmu_sha1_gid gid_info;
@@ -459,12 +459,12 @@ int gk20a_pmu_reset(struct gk20a *g);
 int pmu_idle(struct pmu_gk20a *pmu);
 int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable);

-void gk20a_pmu_surface_free(struct gk20a *g, struct mem_desc *mem);
-void gk20a_pmu_surface_describe(struct gk20a *g, struct mem_desc *mem,
+void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem);
+void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
 		struct flcn_mem_desc_v0 *fb);
-int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct mem_desc *mem,
+int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 size);
-int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct mem_desc *mem,
+int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 size);

 #endif /*__PMU_GK20A_H__*/
@@ -24,7 +24,7 @@
 #include <nvgpu/hw/gk20a/hw_pram_gk20a.h>

 /* WARNING: returns pramin_window_lock taken, complement with pramin_exit() */
-static u32 gk20a_pramin_enter(struct gk20a *g, struct mem_desc *mem,
+static u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
 		struct page_alloc_chunk *chunk, u32 w)
 {
 	u64 bufbase = chunk->base;
@@ -55,7 +55,7 @@ static u32 gk20a_pramin_enter(struct gk20a *g, struct mem_desc *mem,
 	return lo;
 }

-static void gk20a_pramin_exit(struct gk20a *g, struct mem_desc *mem,
+static void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
 		struct page_alloc_chunk *chunk)
 {
 	gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, chunk);

@@ -59,14 +59,14 @@ static void lsfm_free_nonpmu_ucode_img_res(struct gk20a *g,
 	struct flcn_ucode_img *p_img);
 static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm);
 static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
-	struct mem_desc *nonwpr);
+	struct nvgpu_mem *nonwpr);
 static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm);
 static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size);
 static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
 static int gm20b_alloc_blob_space(struct gk20a *g,
-	size_t size, struct mem_desc *mem);
+	size_t size, struct nvgpu_mem *mem);
 static bool gm20b_is_priv_load(u32 falcon_id);
 static bool gm20b_is_lazy_bootstrap(u32 falcon_id);
 static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
@@ -364,7 +364,7 @@ static bool gm20b_is_priv_load(u32 falcon_id)
 }

 static int gm20b_alloc_blob_space(struct gk20a *g,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	int err;

@@ -707,7 +707,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,

 /* Initialize WPR contents */
 static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
-	struct mem_desc *ucode)
+	struct nvgpu_mem *ucode)
 {
 	struct lsfm_managed_ucode_img *pnode = plsfm->ucode_img_list;
 	u32 i;

@@ -23,7 +23,7 @@

 #include <nvgpu/hw/gm20b/hw_bus_gm20b.h>

-static int gm20b_bus_bar1_bind(struct gk20a *g, struct mem_desc *bar1_inst)
+static int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
 {
 	struct nvgpu_timeout timeout;
 	int err = 0;

@@ -852,7 +852,7 @@ static int gr_gm20b_alloc_gr_ctx(struct gk20a *g,

 static void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
 		struct channel_ctx_gk20a *ch_ctx,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	struct gr_ctx_desc *gr_ctx = ch_ctx->gr_ctx;
 	u32 cta_preempt_option =
@@ -1012,7 +1012,7 @@ static int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
 		bool enable)
 {
 	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 	u32 v;

 	gk20a_dbg_fn("");
@@ -1108,7 +1108,7 @@ static void gr_gm20b_init_cyclestats(struct gk20a *g)
 #endif
 }

-static void gr_gm20b_enable_cde_in_fecs(struct gk20a *g, struct mem_desc *mem)
+static void gr_gm20b_enable_cde_in_fecs(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	u32 cde_v;

@@ -21,7 +21,7 @@
#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>

static void gm20b_mm_set_big_page_size(struct gk20a *g,
		struct mem_desc *mem, int size)
		struct nvgpu_mem *mem, int size)
{
	u32 val;

@@ -69,7 +69,7 @@ static void lsfm_free_nonpmu_ucode_img_res(struct gk20a *g,
static int lsf_gen_wpr_requirements(struct gk20a *g,
		struct ls_flcn_mgr_v1 *plsfm);
static void lsfm_init_wpr_contents(struct gk20a *g,
		struct ls_flcn_mgr_v1 *plsfm, struct mem_desc *nonwpr);
		struct ls_flcn_mgr_v1 *plsfm, struct nvgpu_mem *nonwpr);
static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm);
static int gp106_pmu_populate_loader_cfg(struct gk20a *g,
		void *lsfm, u32 *p_bl_gen_desc_size);
@@ -98,7 +98,7 @@ static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
}

static int gp106_alloc_blob_space(struct gk20a *g,
		size_t size, struct mem_desc *mem)
		size_t size, struct nvgpu_mem *mem)
{
	struct wpr_carveout_info wpr_inf;
	int err;
@@ -685,7 +685,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,

/* Initialize WPR contents */
static void lsfm_init_wpr_contents(struct gk20a *g,
		struct ls_flcn_mgr_v1 *plsfm, struct mem_desc *ucode)
		struct ls_flcn_mgr_v1 *plsfm, struct nvgpu_mem *ucode)
{
	struct lsfm_managed_ucode_img_v2 *pnode = plsfm->ucode_img_list;
	u32 i;

@@ -27,7 +27,7 @@
#include <nvgpu/hw/gp10b/hw_top_gp10b.h>

static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
		struct mem_desc *mem)
		struct nvgpu_mem *mem)
{
	u32 val;

@@ -83,7 +83,7 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
		unsigned long acquire_timeout, u32 flags)
{
	struct gk20a *g = c->g;
	struct mem_desc *mem = &c->inst_block;
	struct nvgpu_mem *mem = &c->inst_block;

	gk20a_dbg_fn("");

@@ -832,7 +832,7 @@ static int gr_gp10b_init_ctx_state(struct gk20a *g)
}

int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
		struct mem_desc *mem)
		struct nvgpu_mem *mem)
{
	int err;

@@ -1037,7 +1037,7 @@ fail_free_gk20a_ctx:
static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
		struct gr_ctx_desc *gr_ctx)
{
	struct mem_desc *mem = &gr_ctx->mem;
	struct nvgpu_mem *mem = &gr_ctx->mem;

	if (nvgpu_mem_begin(g, mem)) {
		WARN_ON("Cannot map context");
@@ -1108,7 +1108,7 @@ static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,

static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
		struct channel_ctx_gk20a *ch_ctx,
		struct mem_desc *mem)
		struct nvgpu_mem *mem)
{
	struct gr_ctx_desc *gr_ctx = ch_ctx->gr_ctx;
	u32 gfxp_preempt_option =
@@ -2072,7 +2072,7 @@ static int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
{
	struct gr_ctx_desc *gr_ctx = ch->ch_ctx.gr_ctx;
	struct gk20a *g = ch->g;
	struct mem_desc *mem = &gr_ctx->mem;
	struct nvgpu_mem *mem = &gr_ctx->mem;
	int err = 0;

	gr_ctx->boosted_ctx = boost;
@@ -2101,7 +2101,7 @@ unmap_ctx:
	return err;
}

static void gr_gp10b_update_boosted_ctx(struct gk20a *g, struct mem_desc *mem,
static void gr_gp10b_update_boosted_ctx(struct gk20a *g, struct nvgpu_mem *mem,
		struct gr_ctx_desc *gr_ctx) {
	u32 v;

@@ -2119,7 +2119,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg;
	struct vm_gk20a *vm;
	struct mem_desc *mem = &gr_ctx->mem;
	struct nvgpu_mem *mem = &gr_ctx->mem;
	u32 class;
	int err = 0;

@@ -42,7 +42,7 @@ enum {
void gp10b_init_gr(struct gpu_ops *ops);
int gr_gp10b_init_fs_state(struct gk20a *g);
int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
		struct mem_desc *mem);
		struct nvgpu_mem *mem);
void gr_gp10b_create_sysfs(struct device *dev);

struct ecc_stat {
@@ -95,10 +95,10 @@ struct gr_t18x {
};

struct gr_ctx_desc_t18x {
	struct mem_desc preempt_ctxsw_buffer;
	struct mem_desc spill_ctxsw_buffer;
	struct mem_desc betacb_ctxsw_buffer;
	struct mem_desc pagepool_ctxsw_buffer;
	struct nvgpu_mem preempt_ctxsw_buffer;
	struct nvgpu_mem spill_ctxsw_buffer;
	struct nvgpu_mem betacb_ctxsw_buffer;
	struct nvgpu_mem pagepool_ctxsw_buffer;
	u32 ctx_id;
	bool ctx_id_valid;
	bool cilp_preempt_pending;

@@ -34,7 +34,7 @@ static u32 gp10b_mm_get_physical_addr_bits(struct gk20a *g)
static int gp10b_init_mm_setup_hw(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct mem_desc *inst_block = &mm->bar1.inst_block;
	struct nvgpu_mem *inst_block = &mm->bar1.inst_block;
	int err = 0;

	gk20a_dbg_fn("");
@@ -68,7 +68,7 @@ static int gb10b_init_bar2_vm(struct gk20a *g)
	int err;
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = &mm->bar2.vm;
	struct mem_desc *inst_block = &mm->bar2.inst_block;
	struct nvgpu_mem *inst_block = &mm->bar2.inst_block;
	u32 big_page_size = gk20a_get_platform(g->dev)->default_big_page_size;

	/* BAR2 aperture size is 32MB */
@@ -96,7 +96,7 @@ clean_up_va:
static int gb10b_init_bar2_mm_hw_setup(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct mem_desc *inst_block = &mm->bar2.inst_block;
	struct nvgpu_mem *inst_block = &mm->bar2.inst_block;
	u64 inst_pa = gk20a_mm_inst_block_addr(g, inst_block);

	gk20a_dbg_fn("");
@@ -375,7 +375,7 @@ static const struct gk20a_mmu_level *gp10b_mm_get_mmu_levels(struct gk20a *g,
	return gp10b_mm_levels;
}

static void gp10b_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
static void gp10b_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
		struct vm_gk20a *vm)
{
	u64 pdb_addr = gk20a_mem_get_base_addr(g, &vm->pdb.mem, 0);

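gp10b_mm_init_pdb() illustrates why the rename matters: the instance block is a struct nvgpu_mem like any other buffer, so the PDB pointer is written through the same nvgpu_mem_wr32() accessor regardless of which aperture the block lives in. Roughly like this sketch (an assumption, not quoted from the diff; the ram_in_page_dir_base_*() names stand in for the hw_ram header helpers):

	/* Sketch: publish the PDB address into the instance block. */
	u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
	u32 pdb_addr_hi = u64_hi32(pdb_addr);

	nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
			ram_in_page_dir_base_lo_f(pdb_addr_lo));
	nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
			ram_in_page_dir_base_hi_f(pdb_addr_hi));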
@@ -67,8 +67,8 @@ struct wpr_carveout_info {
};

struct acr_desc {
	struct mem_desc ucode_blob;
	struct mem_desc wpr_dummy;
	struct nvgpu_mem ucode_blob;
	struct nvgpu_mem wpr_dummy;
	struct bin_hdr *bl_bin_hdr;
	struct hsflcn_bl_desc *pmu_hsbl_desc;
	struct bin_hdr *hsbin_hdr;
@@ -79,9 +79,9 @@ struct acr_desc {
		struct flcn_acr_desc *acr_dmem_desc;
		struct flcn_acr_desc_v1 *acr_dmem_desc_v1;
	};
	struct mem_desc acr_ucode;
	struct nvgpu_mem acr_ucode;
	const struct firmware *hsbl_fw;
	struct mem_desc hsbl_ucode;
	struct nvgpu_mem hsbl_ucode;
	union {
		struct flcn_bl_dmem_desc bl_dmem_desc;
		struct flcn_bl_dmem_desc_v1 bl_dmem_desc_v1;

@@ -14,8 +14,8 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __NVGPU_MEM_DESC_H__
#define __NVGPU_MEM_DESC_H__
#ifndef __NVGPU_NVGPU_MEM_H__
#define __NVGPU_NVGPU_MEM_H__

#include <linux/types.h>

@@ -38,7 +38,7 @@ enum nvgpu_aperture {
	APERTURE_VIDMEM
};

struct mem_desc {
struct nvgpu_mem {
	void *cpu_va; /* sysmem only */
	struct page **pages; /* sysmem only */
	struct sg_table *sgt;
@@ -53,14 +53,15 @@ struct mem_desc {
	unsigned long flags;
};

static inline struct mem_desc *
mem_desc_from_clear_list_entry(struct nvgpu_list_node *node)
static inline struct nvgpu_mem *
nvgpu_mem_from_clear_list_entry(struct nvgpu_list_node *node)
{
	return (struct mem_desc *)
		((uintptr_t)node - offsetof(struct mem_desc, clear_list_entry));
	return (struct nvgpu_mem *)
		((uintptr_t)node - offsetof(struct nvgpu_mem,
					    clear_list_entry));
};

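The helper above is an open-coded container_of(): given a pointer to the embedded clear_list_entry node, the offsetof() arithmetic walks back to the start of the owning structure, so list membership costs no back-pointer. Typical recovery looks like this sketch (the function name is illustrative, not from the diff):

static struct nvgpu_mem *example_owner_of(struct nvgpu_list_node *node)
{
	/* node points *into* an nvgpu_mem; offsetof() walks back out. */
	return nvgpu_mem_from_clear_list_entry(node);
}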
struct mem_desc_sub {
struct nvgpu_mem_sub {
	u32 offset;
	u32 size;
};
@@ -80,32 +81,32 @@ static inline const char *nvgpu_aperture_str(enum nvgpu_aperture aperture)
 * kernel mapping for this buffer.
 */

int nvgpu_mem_begin(struct gk20a *g, struct mem_desc *mem);
int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem);
/* nop for null mem, like with free() or vunmap() */
void nvgpu_mem_end(struct gk20a *g, struct mem_desc *mem);
void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem);

/* word-indexed offset */
u32 nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w);
u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w);
/* byte offset (32b-aligned) */
u32 nvgpu_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset);
u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
/* memcpy to cpu, offset and size in bytes (32b-aligned) */
void nvgpu_mem_rd_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
		void *dest, u32 size);

/* word-indexed offset */
void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data);
void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data);
/* byte offset (32b-aligned) */
void nvgpu_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data);
void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data);
/* memcpy from cpu, offset and size in bytes (32b-aligned) */
void nvgpu_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
		void *src, u32 size);
/* size and offset in bytes (32b-aligned), filled with the constant byte c */
void nvgpu_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
		u32 c, u32 size);

u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
		u32 sysmem_mask, u32 vidmem_mask);
u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem,
u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
		u32 sysmem_mask, u32 vidmem_mask);

#endif
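Taken together, the declarations above define the access discipline for a struct nvgpu_mem: map once with nvgpu_mem_begin(), do word- or byte-granular reads and writes, then unmap with nvgpu_mem_end(). A minimal read-modify-write sketch using only the functions declared above (the function name is illustrative, not from the diff):

static int example_rmw_word0(struct gk20a *g, struct nvgpu_mem *mem)
{
	u32 val;

	if (nvgpu_mem_begin(g, mem))
		return -ENOMEM;		/* sysmem vmap failed */

	val = nvgpu_mem_rd32(g, mem, 0);	/* word index, not bytes */
	nvgpu_mem_wr32(g, mem, 0, val | 0x1);

	nvgpu_mem_end(g, mem);
	return 0;
}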
@@ -21,7 +21,7 @@

struct gk20a;
struct mm_gk20a;
struct mem_desc;
struct nvgpu_mem;

/*
 * This typedef is for functions that get called during the access_batched()
@@ -37,7 +37,7 @@ void pramin_access_batch_rd_n(struct gk20a *g, u32 start, u32 words, u32 **arg);
void pramin_access_batch_wr_n(struct gk20a *g, u32 start, u32 words, u32 **arg);
void pramin_access_batch_set(struct gk20a *g, u32 start, u32 words, u32 **arg);

void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
		u32 offset, u32 size,
		pramin_access_batch_fn loop, u32 **arg);

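The pramin_access_batch_fn contract implied by the typedef comment above: the core hands each callback one register window (start, words) plus a cursor passed by reference, so callback state survives across window moves. A conforming callback looks roughly like this sketch (modeled on the rd_n variant; it assumes gk20a_readl() as used elsewhere in the driver):

static void example_batch_rd(struct gk20a *g, u32 start, u32 words, u32 **arg)
{
	u32 *dest = *arg;
	u32 r = start;

	while (words--) {
		*dest++ = gk20a_readl(g, r);
		r += sizeof(u32);
	}

	*arg = dest;	/* advance the caller's cursor for the next window */
}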
@@ -136,7 +136,7 @@ struct nvgpu_semaphore_sea {
	 *   struct page *pages[SEMAPHORE_POOL_COUNT];
	 */

	struct mem_desc sea_mem;
	struct nvgpu_mem sea_mem;

	/*
	 * Can't use a regular allocator here since the full range of pools are

@@ -149,7 +149,7 @@ static int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
	u32 attrib_cb_size = (betacb_size + g->gr.alpha_cb_size) *
		gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
		g->gr.max_tpc_count;
	struct mem_desc *desc;
	struct nvgpu_mem *desc;

	attrib_cb_size = ALIGN(attrib_cb_size, 128);

@@ -501,7 +501,7 @@ static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
	vgpu_cache_maint(vgpu_get_handle(g), op);
}

static void vgpu_mm_tlb_invalidate(struct gk20a *g, struct mem_desc *pdb)
static void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
{
	gk20a_dbg_fn("");