Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 02:22:34 +03:00)
gpu: nvgpu: use nvgpu_flcn_copy_to_dmem()
- replace usage of pmu_copy_to_dmem() with nvgpu_flcn_copy_to_dmem()
- delete pmu_copy_to_dmem()

JIRA NVGPU-99

Change-Id: I9bb5837556e144521b181f9e15731beee08b435a
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master/r/1506577
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: fe3fc43401
Commit: 3a2eb257ee
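At a glance, the call-site change this commit makes, as a minimal sketch assembled from the hunks below; the pmu->flcn handle and both prototypes are taken from the diff itself, while the surrounding context is illustrative only:

/* Old, PMU-private helper (removed by this commit). */
void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
		u32 dst, u8 *src, u32 size, u8 port);

/* Generic falcon helper that replaces it (declared in the falcon header). */
int nvgpu_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
		u32 dst, u8 *src, u32 size, u8 port);

/* Callers switch from the PMU wrapper to the PMU's falcon handle: */
pmu_copy_to_dmem(pmu, offset, src, size, 0);              /* before */
nvgpu_flcn_copy_to_dmem(pmu->flcn, offset, src, size, 0); /* after  */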
@@ -135,7 +135,7 @@ static inline void pmu_queue_read(struct nvgpu_pmu *pmu,
 static inline void pmu_queue_write(struct nvgpu_pmu *pmu,
 		u32 offset, u8 *src, u32 size)
 {
-	pmu_copy_to_dmem(pmu, offset, src, size, 0);
+	nvgpu_flcn_copy_to_dmem(pmu->flcn, offset, src, size, 0);
 }
 
 
@@ -562,7 +562,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 				payload->in.buf, payload->in.fb_size);
 
 		} else {
-			pmu_copy_to_dmem(pmu,
+			nvgpu_flcn_copy_to_dmem(pmu->flcn,
 				(pv->pmu_allocation_get_dmem_offset(pmu, in)),
 				payload->in.buf, payload->in.size, 0);
 		}
 
@@ -232,7 +232,7 @@ static int gk20a_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
 			data = 0;
 			for (i = 0; i < bytes; i++)
 				((u8 *)&data)[i] = src[(words << 2) + i];
-			gk20a_writel(g, falcon_falcon_dmemd_r(port), data);
+			gk20a_writel(g, base_addr + falcon_falcon_dmemd_r(port), data);
 		}
 
 		size = ALIGN(size, 4);
 
@@ -104,57 +104,6 @@ static void printtrace(struct nvgpu_pmu *pmu)
 	nvgpu_kfree(g, tracebuffer);
 }
 
-void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
-		u32 dst, u8 *src, u32 size, u8 port)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	u32 i, words, bytes;
-	u32 data, addr_mask;
-	u32 *src_u32 = (u32*)src;
-
-	if (size == 0) {
-		nvgpu_err(g, "size is zero");
-		return;
-	}
-
-	if (dst & 0x3) {
-		nvgpu_err(g, "dst (0x%08x) not 4-byte aligned", dst);
-		return;
-	}
-
-	nvgpu_mutex_acquire(&pmu->pmu_copy_lock);
-
-	words = size >> 2;
-	bytes = size & 0x3;
-
-	addr_mask = pwr_falcon_dmemc_offs_m() |
-		pwr_falcon_dmemc_blk_m();
-
-	dst &= addr_mask;
-
-	gk20a_writel(g, pwr_falcon_dmemc_r(port),
-		dst | pwr_falcon_dmemc_aincw_f(1));
-
-	for (i = 0; i < words; i++)
-		gk20a_writel(g, pwr_falcon_dmemd_r(port), src_u32[i]);
-
-	if (bytes > 0) {
-		data = 0;
-		for (i = 0; i < bytes; i++)
-			((u8 *)&data)[i] = src[(words << 2) + i];
-		gk20a_writel(g, pwr_falcon_dmemd_r(port), data);
-	}
-
-	data = gk20a_readl(g, pwr_falcon_dmemc_r(port)) & addr_mask;
-	size = ALIGN(size, 4);
-	if (data != ((dst + size) & addr_mask)) {
-		nvgpu_err(g, "copy failed. bytes written %d, expected %d",
-			data - dst, size);
-	}
-	nvgpu_mutex_release(&pmu->pmu_copy_lock);
-	return;
-}
-
 void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
@@ -319,7 +268,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
 			<< GK20A_PMU_DMEM_BLKSIZE2) -
 		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
 
-	pmu_copy_to_dmem(pmu, addr_args,
+	nvgpu_flcn_copy_to_dmem(pmu->flcn, addr_args,
 			(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
 			g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
 
 
@@ -1244,7 +1244,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
 			pwr_falcon_dmemc_offs_f(0) |
 			pwr_falcon_dmemc_blk_f(0)  |
 			pwr_falcon_dmemc_aincw_f(1));
-	pmu_copy_to_dmem(pmu, 0, (u8 *)pbl_desc,
+	nvgpu_flcn_copy_to_dmem(pmu->flcn, 0, (u8 *)pbl_desc,
 		sizeof(struct flcn_bl_dmem_desc), 0);
 	/*TODO This had to be copied to bl_desc_dmem_load_off, but since
 	 * this is 0, so ok for now*/
@@ -1356,7 +1356,7 @@ static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
 	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
 			pmu, GK20A_PMU_DMAIDX_VIRT);
-	pmu_copy_to_dmem(pmu, g->acr.pmu_args,
+	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
 			(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
 			g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
 	/*disable irqs for hs falcon booting as we will poll for halt*/
 
@@ -324,7 +324,7 @@ void init_pmu_setup_hw1(struct gk20a *g)
 	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
 			pmu, GK20A_PMU_DMAIDX_VIRT);
 
-	pmu_copy_to_dmem(pmu, g->acr.pmu_args,
+	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
 			(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
 			g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
 
 
@@ -174,8 +174,8 @@ bool nvgpu_flcn_get_cpu_halted_status(struct nvgpu_falcon *flcn);
 bool nvgpu_flcn_get_idle_status(struct nvgpu_falcon *flcn);
 int nvgpu_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,
 	u32 src, u8 *dst, u32 size, u8 port);
-int nvgpu_flcn_copy_to_mem(struct nvgpu_falcon *flcn,
-	enum flcn_mem_type mem_type, u32 dst, u8 *src, u32 size, u8 port);
+int nvgpu_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
+	u32 dst, u8 *src, u32 size, u8 port);
 int nvgpu_flcn_dma_copy(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_dma_info *dma_info);
 u32 nvgpu_flcn_mailbox_read(struct nvgpu_falcon *flcn, u32 mailbox_index);