gpu: nvgpu: os-agnostic segregation of sim/sim_pci

segregated os-agnostic function from linux/sim.c and linux/sim_pci.c
to sim.c and sim_pci.c, while retaining os-specific functions.

renamed all gk20a_* api's to nvgpu_*.

renamed hw_sim_gk20a.h to nvgpu/hw_sim.h
moved hw_sim_pci.h to nvgpu/hw_sim_pci.h

JIRA VQRM-2368

Change-Id: I040a6b12b19111a0b99280245808ea2b0f344cdd
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1702425
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Antony Clince Alex
2018-04-25 21:26:21 +05:30
committed by mobile promotions
parent b144935644
commit 50d1b0c72b
14 changed files with 721 additions and 635 deletions

View File

@@ -122,7 +122,9 @@ nvgpu-y := \
boardobj/boardobjgrp.o \ boardobj/boardobjgrp.o \
boardobj/boardobjgrpmask.o \ boardobj/boardobjgrpmask.o \
boardobj/boardobjgrp_e255.o \ boardobj/boardobjgrp_e255.o \
boardobj/boardobjgrp_e32.o boardobj/boardobjgrp_e32.o \
common/sim.o \
common/sim_pci.o
nvgpu-$(CONFIG_GK20A_VIDMEM) += \ nvgpu-$(CONFIG_GK20A_VIDMEM) += \
common/mm/vidmem.o \ common/mm/vidmem.o \

View File

@@ -601,7 +601,7 @@ static int gk20a_do_unidle(void *_g)
} }
#endif #endif
static void __iomem *gk20a_ioremap_resource(struct platform_device *dev, int i, void __iomem *nvgpu_ioremap_resource(struct platform_device *dev, int i,
struct resource **out) struct resource **out)
{ {
struct resource *r = platform_get_resource(dev, IORESOURCE_MEM, i); struct resource *r = platform_get_resource(dev, IORESOURCE_MEM, i);
@@ -637,6 +637,7 @@ static irqreturn_t gk20a_intr_thread_stall(int irq, void *dev_id)
void gk20a_remove_support(struct gk20a *g) void gk20a_remove_support(struct gk20a *g)
{ {
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
struct sim_nvgpu_linux *sim_linux;
tegra_unregister_idle_unidle(gk20a_do_idle); tegra_unregister_idle_unidle(gk20a_do_idle);
@@ -659,8 +660,13 @@ void gk20a_remove_support(struct gk20a *g)
if (g->mm.remove_support) if (g->mm.remove_support)
g->mm.remove_support(&g->mm); g->mm.remove_support(&g->mm);
if (g->sim && g->sim->remove_support) if (g->sim) {
g->sim->remove_support(g->sim); sim_linux = container_of(g->sim, struct sim_nvgpu_linux, sim);
if (g->sim->remove_support)
g->sim->remove_support(g);
if (sim_linux->remove_support_linux)
sim_linux->remove_support_linux(g);
}
/* free mappings to registers, etc */ /* free mappings to registers, etc */
if (l->regs) { if (l->regs) {
@@ -679,18 +685,13 @@ void gk20a_remove_support(struct gk20a *g)
static int gk20a_init_support(struct platform_device *dev) static int gk20a_init_support(struct platform_device *dev)
{ {
int err = 0; int err = -ENOMEM;
struct gk20a *g = get_gk20a(&dev->dev); struct gk20a *g = get_gk20a(&dev->dev);
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
struct sim_gk20a_linux *sim_linux = nvgpu_kzalloc(g, sizeof(*sim_linux));
if (!sim_linux)
goto fail;
g->sim = &sim_linux->sim;
tegra_register_idle_unidle(gk20a_do_idle, gk20a_do_unidle, g); tegra_register_idle_unidle(gk20a_do_idle, gk20a_do_unidle, g);
l->regs = gk20a_ioremap_resource(dev, GK20A_BAR0_IORESOURCE_MEM, l->regs = nvgpu_ioremap_resource(dev, GK20A_BAR0_IORESOURCE_MEM,
&l->reg_mem); &l->reg_mem);
if (IS_ERR(l->regs)) { if (IS_ERR(l->regs)) {
nvgpu_err(g, "failed to remap gk20a registers"); nvgpu_err(g, "failed to remap gk20a registers");
@@ -698,7 +699,7 @@ static int gk20a_init_support(struct platform_device *dev)
goto fail; goto fail;
} }
l->bar1 = gk20a_ioremap_resource(dev, GK20A_BAR1_IORESOURCE_MEM, l->bar1 = nvgpu_ioremap_resource(dev, GK20A_BAR1_IORESOURCE_MEM,
&l->bar1_mem); &l->bar1_mem);
if (IS_ERR(l->bar1)) { if (IS_ERR(l->bar1)) {
nvgpu_err(g, "failed to remap gk20a bar1"); nvgpu_err(g, "failed to remap gk20a bar1");
@@ -706,29 +707,28 @@ static int gk20a_init_support(struct platform_device *dev)
goto fail; goto fail;
} }
if (nvgpu_platform_is_simulation(g)) { err = nvgpu_init_sim_support_linux(g, dev);
g->sim->g = g; if (err)
sim_linux->regs = gk20a_ioremap_resource(dev, goto fail;
GK20A_SIM_IORESOURCE_MEM, err = nvgpu_init_sim_support(g);
&sim_linux->reg_mem); if (err)
if (IS_ERR(sim_linux->regs)) { goto fail_sim;
nvgpu_err(g, "failed to remap gk20a sim regs");
err = PTR_ERR(sim_linux->regs);
goto fail;
}
err = gk20a_init_sim_support(g);
if (err)
goto fail;
}
nvgpu_init_usermode_support(g); nvgpu_init_usermode_support(g);
return 0; return 0;
fail_sim:
nvgpu_remove_sim_support_linux(g);
fail: fail:
nvgpu_kfree(g, sim_linux); if (l->regs) {
g->sim = NULL; iounmap(l->regs);
l->regs = NULL;
}
if (l->bar1) {
iounmap(l->bar1);
l->bar1 = NULL;
}
return err; return err;
} }
@@ -1227,18 +1227,6 @@ static int gk20a_probe(struct platform_device *dev)
return 0; return 0;
return_err: return_err:
/*
* Make sure to clean up any memory allocs made in this function -
* especially since we can be called many times due to probe deferal.
*/
if (gk20a->sim) {
struct sim_gk20a_linux *sim_linux;
sim_linux = container_of(gk20a->sim,
struct sim_gk20a_linux,
sim);
nvgpu_kfree(gk20a, sim_linux);
}
nvgpu_free_enabled_flags(gk20a); nvgpu_free_enabled_flags(gk20a);
/* /*

View File

@@ -25,7 +25,8 @@ int nvgpu_quiesce(struct gk20a *g);
int nvgpu_remove(struct device *dev, struct class *class); int nvgpu_remove(struct device *dev, struct class *class);
void nvgpu_free_irq(struct gk20a *g); void nvgpu_free_irq(struct gk20a *g);
struct device_node *nvgpu_get_node(struct gk20a *g); struct device_node *nvgpu_get_node(struct gk20a *g);
void __iomem *nvgpu_ioremap_resource(struct platform_device *dev, int i,
struct resource **out);
extern struct class nvgpu_class; extern struct class nvgpu_class;
#endif #endif

View File

@@ -498,15 +498,29 @@ static int nvgpu_pci_init_support(struct pci_dev *pdev)
goto fail; goto fail;
} }
err = nvgpu_pci_init_sim_support(g); err = nvgpu_init_sim_support_linux_pci(g);
if (err) if (err)
goto fail; goto fail;
err = nvgpu_init_sim_support_pci(g);
if (err)
goto fail_sim;
nvgpu_pci_init_usermode_support(l); nvgpu_pci_init_usermode_support(l);
return 0; return 0;
fail_sim:
nvgpu_remove_sim_support_linux_pci(g);
fail: fail:
if (l->regs) {
iounmap(l->regs);
l->regs = NULL;
}
if (l->bar1) {
iounmap(l->bar1);
l->bar1 = NULL;
}
return err; return err;
} }

View File

@@ -23,310 +23,74 @@
#include <nvgpu/bitops.h> #include <nvgpu/bitops.h>
#include <nvgpu/nvgpu_mem.h> #include <nvgpu/nvgpu_mem.h>
#include <nvgpu/dma.h> #include <nvgpu/dma.h>
#include <nvgpu/soc.h>
#include <nvgpu/hw_sim.h>
#include "gk20a/gk20a.h" #include "gk20a/gk20a.h"
#include "sim.h" #include "platform_gk20a.h"
#include "os_linux.h"
#include "module.h"
#include "sim.h" /* will be removed in next patch */
#include <nvgpu/hw/gk20a/hw_sim_gk20a.h> void sim_writel(struct sim_nvgpu *sim, u32 r, u32 v)
static inline void sim_writel(struct sim_gk20a *sim, u32 r, u32 v)
{ {
struct sim_gk20a_linux *sim_linux = struct sim_nvgpu_linux *sim_linux =
container_of(sim, struct sim_gk20a_linux, sim); container_of(sim, struct sim_nvgpu_linux, sim);
writel(v, sim_linux->regs + r); writel(v, sim_linux->regs + r);
} }
static inline u32 sim_readl(struct sim_gk20a *sim, u32 r) u32 sim_readl(struct sim_nvgpu *sim, u32 r)
{ {
struct sim_gk20a_linux *sim_linux = struct sim_nvgpu_linux *sim_linux =
container_of(sim, struct sim_gk20a_linux, sim); container_of(sim, struct sim_nvgpu_linux, sim);
return readl(sim_linux->regs + r); return readl(sim_linux->regs + r);
} }
static int gk20a_alloc_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem) void nvgpu_remove_sim_support_linux(struct gk20a *g)
{ {
int err; struct sim_nvgpu_linux *sim_linux;
err = nvgpu_dma_alloc(g, PAGE_SIZE, mem); if (!g->sim) {
nvgpu_warn(g, "sim not allocated or not in sim_mode");
if (err) return;
return err;
/*
* create a valid cpu_va mapping
*/
nvgpu_mem_begin(g, mem);
return 0;
}
static void gk20a_free_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
{
if (nvgpu_mem_is_valid(mem)) {
/*
* invalidate the cpu_va mapping
*/
nvgpu_mem_end(g, mem);
nvgpu_dma_free(g, mem);
} }
sim_linux = container_of(g->sim, struct sim_nvgpu_linux, sim);
memset(mem, 0, sizeof(*mem));
}
static void gk20a_free_sim_support(struct gk20a *g)
{
gk20a_free_sim_buffer(g, &g->sim->send_bfr);
gk20a_free_sim_buffer(g, &g->sim->recv_bfr);
gk20a_free_sim_buffer(g, &g->sim->msg_bfr);
}
static void gk20a_remove_sim_support(struct sim_gk20a *s)
{
struct gk20a *g = s->g;
struct sim_gk20a_linux *sim_linux =
container_of(g->sim, struct sim_gk20a_linux, sim);
if (sim_linux->regs)
sim_writel(s, sim_config_r(), sim_config_mode_disabled_v());
if (sim_linux->regs) { if (sim_linux->regs) {
sim_writel(g->sim, sim_config_r(), sim_config_mode_disabled_v());
iounmap(sim_linux->regs); iounmap(sim_linux->regs);
sim_linux->regs = NULL; sim_linux->regs = NULL;
} }
gk20a_free_sim_support(g);
nvgpu_kfree(g, sim_linux); nvgpu_kfree(g, sim_linux);
g->sim = NULL; g->sim = NULL;
} }
static inline u32 sim_msg_header_size(void) int nvgpu_init_sim_support_linux(struct gk20a *g,
struct platform_device *dev)
{ {
return 24;/*TBD: fix the header to gt this from NV_VGPU_MSG_HEADER*/ struct sim_nvgpu_linux *sim_linux;
} int err = -ENOMEM;
static inline u32 *sim_msg_bfr(struct gk20a *g, u32 byte_offset) if (!nvgpu_platform_is_simulation(g))
{ return 0;
u8 *cpu_va;
cpu_va = (u8 *)sim_linux->msg_bfr.cpu_va; sim_linux = nvgpu_kzalloc(g, sizeof(*sim_linux));
if (!sim_linux)
return (u32 *)(cpu_va + byte_offset);
}
static inline u32 *sim_msg_hdr(struct gk20a *g, u32 byte_offset)
{
return sim_msg_bfr(g, byte_offset); /*starts at 0*/
}
static inline u32 *sim_msg_param(struct gk20a *g, u32 byte_offset)
{
/*starts after msg header/cmn*/
return sim_msg_bfr(g, byte_offset + sim_msg_header_size());
}
static inline void sim_write_hdr(struct gk20a *g, u32 func, u32 size)
{
/*memset(g->sim->msg_bfr.kvaddr,0,min(PAGE_SIZE,size));*/
*sim_msg_hdr(g, sim_msg_signature_r()) = sim_msg_signature_valid_v();
*sim_msg_hdr(g, sim_msg_result_r()) = sim_msg_result_rpc_pending_v();
*sim_msg_hdr(g, sim_msg_spare_r()) = sim_msg_spare__init_v();
*sim_msg_hdr(g, sim_msg_function_r()) = func;
*sim_msg_hdr(g, sim_msg_length_r()) = size + sim_msg_header_size();
}
static inline u32 sim_escape_read_hdr_size(void)
{
return 12; /*TBD: fix NV_VGPU_SIM_ESCAPE_READ_HEADER*/
}
static u32 *sim_send_ring_bfr(struct gk20a *g, u32 byte_offset)
{
u8 *cpu_va;
cpu_va = (u8 *)sim_linux->send_bfr.cpu_va;
return (u32 *)(cpu_va + byte_offset);
}
static int rpc_send_message(struct gk20a *g)
{
/* calculations done in units of u32s */
u32 send_base = sim_send_put_pointer_v(g->sim->send_ring_put) * 2;
u32 dma_offset = send_base + sim_dma_r()/sizeof(u32);
u32 dma_hi_offset = send_base + sim_dma_hi_r()/sizeof(u32);
*sim_send_ring_bfr(g, dma_offset*sizeof(u32)) =
sim_dma_target_phys_pci_coherent_f() |
sim_dma_status_valid_f() |
sim_dma_size_4kb_f() |
sim_dma_addr_lo_f(nvgpu_mem_get_addr(g, &sim_linux->msg_bfr) >> PAGE_SHIFT);
*sim_send_ring_bfr(g, dma_hi_offset*sizeof(u32)) =
u64_hi32(nvgpu_mem_get_addr(g, &g->sim->msg_bfr));
*sim_msg_hdr(g, sim_msg_sequence_r()) = g->sim->sequence_base++;
g->sim->send_ring_put = (g->sim->send_ring_put + 2 * sizeof(u32)) % PAGE_SIZE;
/* Update the put pointer. This will trap into the host. */
sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);
return 0;
}
static inline u32 *sim_recv_ring_bfr(struct gk20a *g, u32 byte_offset)
{
u8 *cpu_va;
cpu_va = (u8 *)sim_linux->recv_bfr.cpu_va;
return (u32 *)(cpu_va + byte_offset);
}
static int rpc_recv_poll(struct gk20a *g)
{
u64 recv_phys_addr;
/* XXX This read is not required (?) */
/*pVGpu->recv_ring_get = VGPU_REG_RD32(pGpu, NV_VGPU_RECV_GET);*/
/* Poll the recv ring get pointer in an infinite loop*/
do {
g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
} while (g->sim->recv_ring_put == g->sim->recv_ring_get);
/* process all replies */
while (g->sim->recv_ring_put != g->sim->recv_ring_get) {
/* these are in u32 offsets*/
u32 dma_lo_offset =
sim_recv_put_pointer_v(g->sim->recv_ring_get)*2 + 0;
u32 dma_hi_offset = dma_lo_offset + 1;
u32 recv_phys_addr_lo = sim_dma_addr_lo_v(
*sim_recv_ring_bfr(g, dma_lo_offset*4));
u32 recv_phys_addr_hi = sim_dma_hi_addr_v(
*sim_recv_ring_bfr(g, dma_hi_offset*4));
recv_phys_addr = (u64)recv_phys_addr_hi << 32 |
(u64)recv_phys_addr_lo << PAGE_SHIFT;
if (recv_phys_addr !=
nvgpu_mem_get_addr(g, &g->sim->msg_bfr)) {
nvgpu_err(g, "%s Error in RPC reply",
__func__);
return -1;
}
/* Update GET pointer */
g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*sizeof(u32)) % PAGE_SIZE;
sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
}
return 0;
}
static int issue_rpc_and_wait(struct gk20a *g)
{
int err;
err = rpc_send_message(g);
if (err) {
nvgpu_err(g, "%s failed rpc_send_message",
__func__);
return err; return err;
} g->sim = &sim_linux->sim;
g->sim->g = g;
err = rpc_recv_poll(g); sim_linux->regs = nvgpu_ioremap_resource(dev,
if (err) { GK20A_SIM_IORESOURCE_MEM,
nvgpu_err(g, "%s failed rpc_recv_poll", &sim_linux->reg_mem);
__func__); if (IS_ERR(sim_linux->regs)) {
return err; nvgpu_err(g, "failed to remap gk20a sim regs");
} err = PTR_ERR(sim_linux->regs);
/* Now check if RPC really succeeded */
if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
nvgpu_err(g, "%s received failed status!",
__func__);
return -(*sim_msg_hdr(g, sim_msg_result_r()));
}
return 0;
}
static int gk20a_sim_esc_readl(struct gk20a *g, char *path, u32 index, u32 *data)
{
int err;
size_t pathlen = strlen(path);
u32 data_offset;
sim_write_hdr(g, sim_msg_function_sim_escape_read_v(),
sim_escape_read_hdr_size());
*sim_msg_param(g, 0) = index;
*sim_msg_param(g, 4) = sizeof(u32);
data_offset = roundup(0xc + pathlen + 1, sizeof(u32));
*sim_msg_param(g, 8) = data_offset;
strcpy((char *)sim_msg_param(g, 0xc), path);
err = issue_rpc_and_wait(g);
if (!err)
memcpy(data, sim_msg_param(g, data_offset), sizeof(u32));
return err;
}
int gk20a_init_sim_support(struct gk20a *g)
{
int err = 0;
u64 phys;
/* allocate sim event/msg buffers */
err = gk20a_alloc_sim_buffer(g, &g->sim->send_bfr);
err = err || gk20a_alloc_sim_buffer(g, &g->sim->recv_bfr);
err = err || gk20a_alloc_sim_buffer(g, &g->sim->msg_bfr);
if (err)
goto fail; goto fail;
/*mark send ring invalid*/ }
sim_writel(g->sim, sim_send_ring_r(), sim_send_ring_status_invalid_f()); sim_linux->remove_support_linux = nvgpu_remove_sim_support_linux;
/*read get pointer and make equal to put*/
g->sim->send_ring_put = sim_readl(g->sim, sim_send_get_r());
sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);
/*write send ring address and make it valid*/
phys = nvgpu_mem_get_addr(g, &g->sim->send_bfr);
sim_writel(g->sim, sim_send_ring_hi_r(),
sim_send_ring_hi_addr_f(u64_hi32(phys)));
sim_writel(g->sim, sim_send_ring_r(),
sim_send_ring_status_valid_f() |
sim_send_ring_target_phys_pci_coherent_f() |
sim_send_ring_size_4kb_f() |
sim_send_ring_addr_lo_f(phys >> PAGE_SHIFT));
/*repeat for recv ring (but swap put,get as roles are opposite) */
sim_writel(g->sim, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());
/*read put pointer and make equal to get*/
g->sim->recv_ring_get = sim_readl(g->sim, sim_recv_put_r());
sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
/*write send ring address and make it valid*/
phys = nvgpu_mem_get_addr(g, &g->sim->recv_bfr);
sim_writel(g->sim, sim_recv_ring_hi_r(),
sim_recv_ring_hi_addr_f(u64_hi32(phys)));
sim_writel(g->sim, sim_recv_ring_r(),
sim_recv_ring_status_valid_f() |
sim_recv_ring_target_phys_pci_coherent_f() |
sim_recv_ring_size_4kb_f() |
sim_recv_ring_addr_lo_f(phys >> PAGE_SHIFT));
g->sim->remove_support = gk20a_remove_sim_support;
g->sim->esc_readl = gk20a_sim_esc_readl;
return 0; return 0;
fail: fail:
gk20a_free_sim_support(g); nvgpu_remove_sim_support_linux(g);
return err; return err;
} }

View File

@@ -1,6 +1,6 @@
/* /*
* *
* GK20A sim support * nvgpu sim support
* *
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* *
@@ -23,12 +23,21 @@
#include <nvgpu/nvgpu_mem.h> #include <nvgpu/nvgpu_mem.h>
#include "gk20a/sim_gk20a.h" #include "gk20a/sim_gk20a.h"
struct sim_gk20a_linux { struct sim_nvgpu_linux {
struct sim_gk20a sim; struct sim_nvgpu sim;
struct resource *reg_mem; struct resource *reg_mem;
void __iomem *regs; void __iomem *regs;
void (*remove_support_linux)(struct gk20a *g);
}; };
int gk20a_init_sim_support(struct gk20a *g); void sim_writel(struct sim_nvgpu *sim, u32 r, u32 v);
u32 sim_readl(struct sim_nvgpu *sim, u32 r);
int nvgpu_init_sim_support(struct gk20a *g); /* will be moved to common in subsequent patch */
int nvgpu_alloc_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem); /* will be moved to common in subsequent patch */
void nvgpu_free_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem); /* will be moved to common in subsequent patch */
void nvgpu_free_sim_support(struct gk20a *g); /* will be moved to common in subsequent patch */
void nvgpu_remove_sim_support(struct gk20a *g); /* will be moved to common in subsequent patch */
int nvgpu_init_sim_support_linux(struct gk20a *g,
struct platform_device *dev);
void nvgpu_remove_sim_support_linux(struct gk20a *g);
#endif #endif

View File

@@ -23,254 +23,12 @@
#include <nvgpu/bitops.h> #include <nvgpu/bitops.h>
#include <nvgpu/nvgpu_mem.h> #include <nvgpu/nvgpu_mem.h>
#include <nvgpu/dma.h> #include <nvgpu/dma.h>
#include <nvgpu/hw_sim_pci.h>
#include "gk20a/gk20a.h" #include "gk20a/gk20a.h"
#include "os_linux.h" #include "os_linux.h"
#include "sim.h" #include "module.h"
#include "hw_sim_pci.h" #include "sim.h" /* will be removed in subsequent patches */
#include "sim_pci.h"/* will be removed in subsequent patches */
static inline void sim_writel(struct sim_gk20a *sim, u32 r, u32 v)
{
struct sim_gk20a_linux *sim_linux =
container_of(sim, struct sim_gk20a_linux, sim);
writel(v, sim_linux->regs + r);
}
static inline u32 sim_readl(struct sim_gk20a *sim, u32 r)
{
struct sim_gk20a_linux *sim_linux =
container_of(sim, struct sim_gk20a_linux, sim);
return readl(sim_linux->regs + r);
}
static int gk20a_alloc_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
{
int err;
err = nvgpu_dma_alloc(g, PAGE_SIZE, mem);
if (err)
return err;
/*
* create a valid cpu_va mapping
*/
nvgpu_mem_begin(g, mem);
return 0;
}
static void gk20a_free_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
{
if (nvgpu_mem_is_valid(mem)) {
/*
* invalidate the cpu_va mapping
*/
nvgpu_mem_end(g, mem);
nvgpu_dma_free(g, mem);
}
memset(mem, 0, sizeof(*mem));
}
static void gk20a_free_sim_support(struct gk20a *g)
{
gk20a_free_sim_buffer(g, &g->sim->send_bfr);
gk20a_free_sim_buffer(g, &g->sim->recv_bfr);
gk20a_free_sim_buffer(g, &g->sim->msg_bfr);
}
static void gk20a_remove_sim_support(struct sim_gk20a *s)
{
struct gk20a *g = s->g;
struct sim_gk20a_linux *sim_linux =
container_of(g->sim, struct sim_gk20a_linux, sim);
if (sim_linux->regs)
sim_writel(s, sim_config_r(), sim_config_mode_disabled_v());
if (sim_linux->regs) {
iounmap(sim_linux->regs);
sim_linux->regs = NULL;
}
gk20a_free_sim_support(g);
nvgpu_kfree(g, sim_linux);
g->sim = NULL;
}
static inline u32 sim_msg_header_size(void)
{
return 32U;
}
static inline u32 *sim_msg_bfr(struct gk20a *g, u32 byte_offset)
{
u8 *cpu_va;
cpu_va = (u8 *)sim_linux->msg_bfr.cpu_va;
return (u32 *)(cpu_va + byte_offset);
}
static inline u32 *sim_msg_hdr(struct gk20a *g, u32 byte_offset)
{
return sim_msg_bfr(g, byte_offset); /* starts at 0 */
}
static inline u32 *sim_msg_param(struct gk20a *g, u32 byte_offset)
{
/* starts after msg header/cmn */
return sim_msg_bfr(g, byte_offset + sim_msg_header_size());
}
static inline void sim_write_hdr(struct gk20a *g, u32 func, u32 size)
{
*sim_msg_hdr(g, sim_msg_header_version_r()) =
sim_msg_header_version_major_tot_v() |
sim_msg_header_version_minor_tot_v();
*sim_msg_hdr(g, sim_msg_signature_r()) = sim_msg_signature_valid_v();
*sim_msg_hdr(g, sim_msg_result_r()) = sim_msg_result_rpc_pending_v();
*sim_msg_hdr(g, sim_msg_spare_r()) = sim_msg_spare__init_v();
*sim_msg_hdr(g, sim_msg_function_r()) = func;
*sim_msg_hdr(g, sim_msg_length_r()) = size + sim_msg_header_size();
}
static inline u32 sim_escape_read_hdr_size(void)
{
return 12U;
}
static u32 *sim_send_ring_bfr(struct gk20a *g, u32 byte_offset)
{
u8 *cpu_va;
cpu_va = (u8 *)sim_linux->send_bfr.cpu_va;
return (u32 *)(cpu_va + byte_offset);
}
static int rpc_send_message(struct gk20a *g)
{
/* calculations done in units of u32s */
u32 send_base = sim_send_put_pointer_v(g->sim->send_ring_put) * 2;
u32 dma_offset = send_base + sim_dma_r()/sizeof(u32);
u32 dma_hi_offset = send_base + sim_dma_hi_r()/sizeof(u32);
*sim_send_ring_bfr(g, dma_offset*sizeof(u32)) =
sim_dma_target_phys_pci_coherent_f() |
sim_dma_status_valid_f() |
sim_dma_size_4kb_f() |
sim_dma_addr_lo_f(nvgpu_mem_get_addr(g, &sim_linux->msg_bfr) >> PAGE_SHIFT);
*sim_send_ring_bfr(g, dma_hi_offset*sizeof(u32)) =
u64_hi32(nvgpu_mem_get_addr(g, &g->sim->msg_bfr));
*sim_msg_hdr(g, sim_msg_sequence_r()) = g->sim->sequence_base++;
g->sim->send_ring_put = (g->sim->send_ring_put + 2 * sizeof(u32)) %
PAGE_SIZE;
/* Update the put pointer. This will trap into the host. */
sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);
return 0;
}
static inline u32 *sim_recv_ring_bfr(struct gk20a *g, u32 byte_offset)
{
u8 *cpu_va;
cpu_va = (u8 *)sim_linux->recv_bfr.cpu_va;
return (u32 *)(cpu_va + byte_offset);
}
static int rpc_recv_poll(struct gk20a *g)
{
u64 recv_phys_addr;
/* Poll the recv ring get pointer in an infinite loop */
do {
g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
} while (g->sim->recv_ring_put == g->sim->recv_ring_get);
/* process all replies */
while (g->sim->recv_ring_put != g->sim->recv_ring_get) {
/* these are in u32 offsets */
u32 dma_lo_offset =
sim_recv_put_pointer_v(g->sim->recv_ring_get)*2 + 0;
u32 dma_hi_offset = dma_lo_offset + 1;
u32 recv_phys_addr_lo = sim_dma_addr_lo_v(
*sim_recv_ring_bfr(g, dma_lo_offset*4));
u32 recv_phys_addr_hi = sim_dma_hi_addr_v(
*sim_recv_ring_bfr(g, dma_hi_offset*4));
recv_phys_addr = (u64)recv_phys_addr_hi << 32 |
(u64)recv_phys_addr_lo << PAGE_SHIFT;
if (recv_phys_addr !=
nvgpu_mem_get_addr(g, &g->sim->msg_bfr)) {
nvgpu_err(g, "Error in RPC reply");
return -EINVAL;
}
/* Update GET pointer */
g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*sizeof(u32))
% PAGE_SIZE;
sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
}
return 0;
}
static int issue_rpc_and_wait(struct gk20a *g)
{
int err;
err = rpc_send_message(g);
if (err) {
nvgpu_err(g, "failed rpc_send_message");
return err;
}
err = rpc_recv_poll(g);
if (err) {
nvgpu_err(g, "failed rpc_recv_poll");
return err;
}
/* Now check if RPC really succeeded */
if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
nvgpu_err(g, "received failed status!");
return -EINVAL;
}
return 0;
}
static int gk20a_sim_esc_readl(struct gk20a *g, char *path, u32 index, u32 *data)
{
int err;
size_t pathlen = strlen(path);
u32 data_offset;
sim_write_hdr(g, sim_msg_function_sim_escape_read_v(),
sim_escape_read_hdr_size());
*sim_msg_param(g, 0) = index;
*sim_msg_param(g, 4) = sizeof(u32);
data_offset = roundup(pathlen + 1, sizeof(u32));
*sim_msg_param(g, 8) = data_offset;
strcpy((char *)sim_msg_param(g, 0xc), path);
err = issue_rpc_and_wait(g);
if (!err)
memcpy(data, sim_msg_param(g, data_offset + 0xc), sizeof(u32));
return err;
}
static bool _nvgpu_pci_is_simulation(struct gk20a *g, u32 sim_base) static bool _nvgpu_pci_is_simulation(struct gk20a *g, u32 sim_base)
{ {
@@ -284,15 +42,39 @@ static bool _nvgpu_pci_is_simulation(struct gk20a *g, u32 sim_base)
return is_simulation; return is_simulation;
} }
int nvgpu_pci_init_sim_support(struct gk20a *g) void nvgpu_remove_sim_support_linux_pci(struct gk20a *g)
{ {
int err = 0; struct sim_nvgpu_linux *sim_linux;
u64 phys; bool is_simulation;
struct sim_gk20a_linux *sim_linux;
is_simulation = _nvgpu_pci_is_simulation(g, sim_r());
if (!is_simulation) {
nvgpu_warn(g, "not in sim_mode");
return;
}
if (!g->sim) {
nvgpu_warn(g, "sim_gk20a not allocated");
return;
}
sim_linux = container_of(g->sim, struct sim_nvgpu_linux, sim);
if (sim_linux->regs) {
sim_writel(g->sim, sim_config_r(), sim_config_mode_disabled_v());
sim_linux->regs = NULL;
}
nvgpu_kfree(g, sim_linux);
g->sim = NULL;
}
int nvgpu_init_sim_support_linux_pci(struct gk20a *g)
{
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
struct sim_nvgpu_linux *sim_linux;
int err = -ENOMEM;
bool is_simulation; bool is_simulation;
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
/* initialize sim aperture */
is_simulation = _nvgpu_pci_is_simulation(g, sim_r()); is_simulation = _nvgpu_pci_is_simulation(g, sim_r());
__nvgpu_set_enabled(g, NVGPU_IS_FMODEL, is_simulation); __nvgpu_set_enabled(g, NVGPU_IS_FMODEL, is_simulation);
@@ -301,57 +83,11 @@ int nvgpu_pci_init_sim_support(struct gk20a *g)
sim_linux = nvgpu_kzalloc(g, sizeof(*sim_linux)); sim_linux = nvgpu_kzalloc(g, sizeof(*sim_linux));
if (!sim_linux) if (!sim_linux)
goto fail; return err;
g->sim = &sim_linux->sim; g->sim = &sim_linux->sim;
g->sim->g = g;
sim_linux->regs = l->regs + sim_r(); sim_linux->regs = l->regs + sim_r();
sim_linux->remove_support_linux = nvgpu_remove_sim_support_linux_pci;
/* allocate sim event/msg buffers */
err = gk20a_alloc_sim_buffer(g, &g->sim->send_bfr);
err = err || gk20a_alloc_sim_buffer(g, &g->sim->recv_bfr);
err = err || gk20a_alloc_sim_buffer(g, &g->sim->msg_bfr);
if (err)
goto fail;
/* mark send ring invalid */
sim_writel(g->sim, sim_send_ring_r(), sim_send_ring_status_invalid_f());
/* read get pointer and make equal to put */
g->sim->send_ring_put = sim_readl(g->sim, sim_send_get_r());
sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);
/* write send ring address and make it valid */
phys = nvgpu_mem_get_addr(g, &g->sim->send_bfr);
sim_writel(g->sim, sim_send_ring_hi_r(),
sim_send_ring_hi_addr_f(u64_hi32(phys)));
sim_writel(g->sim, sim_send_ring_r(),
sim_send_ring_status_valid_f() |
sim_send_ring_target_phys_pci_coherent_f() |
sim_send_ring_size_4kb_f() |
sim_send_ring_addr_lo_f(phys >> PAGE_SHIFT));
/* repeat for recv ring (but swap put,get as roles are opposite) */
sim_writel(g->sim, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());
/* read put pointer and make equal to get */
g->sim->recv_ring_get = sim_readl(g->sim, sim_recv_put_r());
sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
/* write send ring address and make it valid */
phys = nvgpu_mem_get_addr(g, &g->sim->recv_bfr);
sim_writel(g->sim, sim_recv_ring_hi_r(),
sim_recv_ring_hi_addr_f(u64_hi32(phys)));
sim_writel(g->sim, sim_recv_ring_r(),
sim_recv_ring_status_valid_f() |
sim_recv_ring_target_phys_pci_coherent_f() |
sim_recv_ring_size_4kb_f() |
sim_recv_ring_addr_lo_f(phys >> PAGE_SHIFT));
g->sim->remove_support = gk20a_remove_sim_support;
g->sim->esc_readl = gk20a_sim_esc_readl;
return 0; return 0;
fail:
gk20a_free_sim_support(g);
return err;
} }

View File

@@ -23,6 +23,7 @@
#include "gk20a/sim_gk20a.h" #include "gk20a/sim_gk20a.h"
#include "sim.h" #include "sim.h"
int nvgpu_pci_init_sim_support(struct gk20a *g); int nvgpu_init_sim_support_pci(struct gk20a *g); /* this will be moved */
int nvgpu_init_sim_support_linux_pci(struct gk20a *g);
void nvgpu_remove_sim_support_linux_pci(struct gk20a *g);
#endif #endif

View File

@@ -0,0 +1,311 @@
/*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/log.h>
#include <nvgpu/bitops.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/dma.h>
#include <nvgpu/io.h>
#include <nvgpu/hw_sim.h>
#include "gk20a/gk20a.h"
#include "linux/sim.h"
int nvgpu_alloc_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
{
int err;
err = nvgpu_dma_alloc(g, PAGE_SIZE, mem);
if (err)
return err;
/*
* create a valid cpu_va mapping
*/
nvgpu_mem_begin(g, mem);
return 0;
}
void nvgpu_free_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
{
if (nvgpu_mem_is_valid(mem)) {
/*
* invalidate the cpu_va mapping
*/
nvgpu_mem_end(g, mem);
nvgpu_dma_free(g, mem);
}
memset(mem, 0, sizeof(*mem));
}
void nvgpu_free_sim_support(struct gk20a *g)
{
nvgpu_free_sim_buffer(g, &g->sim->send_bfr);
nvgpu_free_sim_buffer(g, &g->sim->recv_bfr);
nvgpu_free_sim_buffer(g, &g->sim->msg_bfr);
}
void nvgpu_remove_sim_support(struct gk20a *g)
{
if (g->sim)
nvgpu_free_sim_support(g);
}
static inline u32 sim_msg_header_size(void)
{
return 24;/*TBD: fix the header to gt this from NV_VGPU_MSG_HEADER*/
}
static inline u32 *sim_msg_bfr(struct gk20a *g, u32 byte_offset)
{
u8 *cpu_va;
cpu_va = (u8 *)g->sim->msg_bfr.cpu_va;
return (u32 *)(cpu_va + byte_offset);
}
static inline u32 *sim_msg_hdr(struct gk20a *g, u32 byte_offset)
{
return sim_msg_bfr(g, byte_offset); /*starts at 0*/
}
static inline u32 *sim_msg_param(struct gk20a *g, u32 byte_offset)
{
/*starts after msg header/cmn*/
return sim_msg_bfr(g, byte_offset + sim_msg_header_size());
}
static inline void sim_write_hdr(struct gk20a *g, u32 func, u32 size)
{
/*memset(g->sim->msg_bfr.kvaddr,0,min(PAGE_SIZE,size));*/
*sim_msg_hdr(g, sim_msg_signature_r()) = sim_msg_signature_valid_v();
*sim_msg_hdr(g, sim_msg_result_r()) = sim_msg_result_rpc_pending_v();
*sim_msg_hdr(g, sim_msg_spare_r()) = sim_msg_spare__init_v();
*sim_msg_hdr(g, sim_msg_function_r()) = func;
*sim_msg_hdr(g, sim_msg_length_r()) = size + sim_msg_header_size();
}
static inline u32 sim_escape_read_hdr_size(void)
{
return 12; /*TBD: fix NV_VGPU_SIM_ESCAPE_READ_HEADER*/
}
static u32 *sim_send_ring_bfr(struct gk20a *g, u32 byte_offset)
{
u8 *cpu_va;
cpu_va = (u8 *)g->sim->send_bfr.cpu_va;
return (u32 *)(cpu_va + byte_offset);
}
static int rpc_send_message(struct gk20a *g)
{
/* calculations done in units of u32s */
u32 send_base = sim_send_put_pointer_v(g->sim->send_ring_put) * 2;
u32 dma_offset = send_base + sim_dma_r()/sizeof(u32);
u32 dma_hi_offset = send_base + sim_dma_hi_r()/sizeof(u32);
*sim_send_ring_bfr(g, dma_offset*sizeof(u32)) =
sim_dma_target_phys_pci_coherent_f() |
sim_dma_status_valid_f() |
sim_dma_size_4kb_f() |
sim_dma_addr_lo_f(nvgpu_mem_get_addr(g, &g->sim->msg_bfr)
>> PAGE_SHIFT);
*sim_send_ring_bfr(g, dma_hi_offset*sizeof(u32)) =
u64_hi32(nvgpu_mem_get_addr(g, &g->sim->msg_bfr));
*sim_msg_hdr(g, sim_msg_sequence_r()) = g->sim->sequence_base++;
g->sim->send_ring_put = (g->sim->send_ring_put + 2 * sizeof(u32))
% PAGE_SIZE;
/* Update the put pointer. This will trap into the host. */
sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);
return 0;
}
static inline u32 *sim_recv_ring_bfr(struct gk20a *g, u32 byte_offset)
{
u8 *cpu_va;
cpu_va = (u8 *)g->sim->recv_bfr.cpu_va;
return (u32 *)(cpu_va + byte_offset);
}
static int rpc_recv_poll(struct gk20a *g)
{
u64 recv_phys_addr;
/* XXX This read is not required (?) */
/*pVGpu->recv_ring_get = VGPU_REG_RD32(pGpu, NV_VGPU_RECV_GET);*/
/* Poll the recv ring get pointer in an infinite loop*/
do {
g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
} while (g->sim->recv_ring_put == g->sim->recv_ring_get);
/* process all replies */
while (g->sim->recv_ring_put != g->sim->recv_ring_get) {
/* these are in u32 offsets*/
u32 dma_lo_offset =
sim_recv_put_pointer_v(g->sim->recv_ring_get)*2 + 0;
u32 dma_hi_offset = dma_lo_offset + 1;
u32 recv_phys_addr_lo = sim_dma_addr_lo_v(
*sim_recv_ring_bfr(g, dma_lo_offset*4));
u32 recv_phys_addr_hi = sim_dma_hi_addr_v(
*sim_recv_ring_bfr(g, dma_hi_offset*4));
recv_phys_addr = (u64)recv_phys_addr_hi << 32 |
(u64)recv_phys_addr_lo << PAGE_SHIFT;
if (recv_phys_addr !=
nvgpu_mem_get_addr(g, &g->sim->msg_bfr)) {
nvgpu_err(g, "%s Error in RPC reply",
__func__);
return -1;
}
/* Update GET pointer */
g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*sizeof(u32))
% PAGE_SIZE;
sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
}
return 0;
}
static int issue_rpc_and_wait(struct gk20a *g)
{
int err;
err = rpc_send_message(g);
if (err) {
nvgpu_err(g, "%s failed rpc_send_message",
__func__);
return err;
}
err = rpc_recv_poll(g);
if (err) {
nvgpu_err(g, "%s failed rpc_recv_poll",
__func__);
return err;
}
/* Now check if RPC really succeeded */
if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
nvgpu_err(g, "%s received failed status!",
__func__);
return -(*sim_msg_hdr(g, sim_msg_result_r()));
}
return 0;
}
/*
 * Read one u32 from the simulator via an "escape read" RPC.
 *
 * @path: NUL-terminated path string understood by the simulator
 * @index: index to read at @path
 * @data: out parameter receiving the 32-bit value
 *
 * Returns 0 on success, non-zero on RPC failure.
 */
static int nvgpu_sim_esc_readl(struct gk20a *g,
		char *path, u32 index, u32 *data)
{
	int err;
	size_t pathlen = strlen(path);
	u32 data_offset;

	sim_write_hdr(g, sim_msg_function_sim_escape_read_v(),
		      sim_escape_read_hdr_size());
	/* params: [0]=index, [4]=reply size, [8]=reply data offset */
	*sim_msg_param(g, 0) = index;
	*sim_msg_param(g, 4) = sizeof(u32);
	/* reply data follows the path string, rounded up to u32 alignment;
	 * NOTE(review): here the offset is absolute within the param area
	 * (includes the 0xc header), unlike the PCI variant — confirm
	 * against the simulator's escape-read protocol */
	data_offset = roundup(0xc + pathlen + 1, sizeof(u32));
	*sim_msg_param(g, 8) = data_offset;
	/* path string starts at param offset 0xc */
	strcpy((char *)sim_msg_param(g, 0xc), path);

	err = issue_rpc_and_wait(g);

	if (!err)
		memcpy(data, sim_msg_param(g, data_offset), sizeof(u32));
	return err;
}
/*
 * One-time setup of the RPC rings shared with the simulator.
 *
 * Allocates the send/recv/message DMA buffers and programs the send and
 * recv ring registers so both sides agree on ring location and state.
 *
 * Returns 0 on success (or when no simulator is present), a negative
 * errno on buffer allocation failure.
 */
int nvgpu_init_sim_support(struct gk20a *g)
{
	int err;
	u64 phys;

	if (!g->sim)
		return 0;

	/*
	 * allocate sim event/msg buffers; chain the calls so the
	 * allocator's real error code is propagated (the former
	 * "err = err || ..." form collapsed it to 0/1)
	 */
	err = nvgpu_alloc_sim_buffer(g, &g->sim->send_bfr);
	if (!err)
		err = nvgpu_alloc_sim_buffer(g, &g->sim->recv_bfr);
	if (!err)
		err = nvgpu_alloc_sim_buffer(g, &g->sim->msg_bfr);
	if (err)
		goto fail;

	/*mark send ring invalid*/
	sim_writel(g->sim, sim_send_ring_r(), sim_send_ring_status_invalid_f());

	/*read get pointer and make equal to put*/
	g->sim->send_ring_put = sim_readl(g->sim, sim_send_get_r());
	sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);

	/*write send ring address and make it valid*/
	phys = nvgpu_mem_get_addr(g, &g->sim->send_bfr);
	sim_writel(g->sim, sim_send_ring_hi_r(),
		   sim_send_ring_hi_addr_f(u64_hi32(phys)));
	sim_writel(g->sim, sim_send_ring_r(),
		   sim_send_ring_status_valid_f() |
		   sim_send_ring_target_phys_pci_coherent_f() |
		   sim_send_ring_size_4kb_f() |
		   sim_send_ring_addr_lo_f(phys >> PAGE_SHIFT));

	/*repeat for recv ring (but swap put,get as roles are opposite) */
	sim_writel(g->sim, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());

	/*read put pointer and make equal to get*/
	g->sim->recv_ring_get = sim_readl(g->sim, sim_recv_put_r());
	sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);

	/*write recv ring address and make it valid*/
	phys = nvgpu_mem_get_addr(g, &g->sim->recv_bfr);
	sim_writel(g->sim, sim_recv_ring_hi_r(),
		   sim_recv_ring_hi_addr_f(u64_hi32(phys)));
	sim_writel(g->sim, sim_recv_ring_r(),
		   sim_recv_ring_status_valid_f() |
		   sim_recv_ring_target_phys_pci_coherent_f() |
		   sim_recv_ring_size_4kb_f() |
		   sim_recv_ring_addr_lo_f(phys >> PAGE_SHIFT));

	g->sim->remove_support = nvgpu_remove_sim_support;
	g->sim->esc_readl = nvgpu_sim_esc_readl;
	return 0;

fail:
	nvgpu_free_sim_support(g);
	return err;
}

View File

@@ -0,0 +1,260 @@
/*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/log.h>
#include <nvgpu/bitops.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/dma.h>
#include <nvgpu/hw_sim_pci.h>
#include "gk20a/gk20a.h"
#include "linux/sim.h"
/* Fixed size in bytes of the RPC message header. */
static inline u32 sim_msg_header_size(void)
{
	return 32U;
}
/* Pointer at @byte_offset into the CPU mapping of the message buffer. */
static inline u32 *sim_msg_bfr(struct gk20a *g, u32 byte_offset)
{
	u8 *base = (u8 *)g->sim->msg_bfr.cpu_va;

	return (u32 *)(base + byte_offset);
}
/* Pointer into the RPC message header; the header lives at offset 0. */
static inline u32 *sim_msg_hdr(struct gk20a *g, u32 byte_offset)
{
	return sim_msg_bfr(g, byte_offset); /* starts at 0 */
}
/* Pointer into the RPC parameter area, which follows the fixed header. */
static inline u32 *sim_msg_param(struct gk20a *g, u32 byte_offset)
{
	u32 offset = sim_msg_header_size() + byte_offset;

	return sim_msg_bfr(g, offset);
}
/* Fill in the common header fields of an outgoing RPC message. */
static inline void sim_write_hdr(struct gk20a *g, u32 func, u32 size)
{
	/* advertise the header version this driver speaks */
	u32 version = sim_msg_header_version_major_tot_v() |
		      sim_msg_header_version_minor_tot_v();

	*sim_msg_hdr(g, sim_msg_header_version_r()) = version;
	*sim_msg_hdr(g, sim_msg_signature_r()) = sim_msg_signature_valid_v();
	*sim_msg_hdr(g, sim_msg_result_r()) = sim_msg_result_rpc_pending_v();
	*sim_msg_hdr(g, sim_msg_spare_r()) = sim_msg_spare__init_v();
	*sim_msg_hdr(g, sim_msg_function_r()) = func;
	/* total length = payload size plus the fixed header */
	*sim_msg_hdr(g, sim_msg_length_r()) = sim_msg_header_size() + size;
}
/* Size in bytes of the escape-read request header. */
static inline u32 sim_escape_read_hdr_size(void)
{
	return 12U;
}
/* Pointer at @byte_offset into the CPU mapping of the send ring. */
static u32 *sim_send_ring_bfr(struct gk20a *g, u32 byte_offset)
{
	u8 *base = (u8 *)g->sim->send_bfr.cpu_va;

	return (u32 *)(base + byte_offset);
}
static int rpc_send_message(struct gk20a *g)
{
/* calculations done in units of u32s */
u32 send_base = sim_send_put_pointer_v(g->sim->send_ring_put) * 2;
u32 dma_offset = send_base + sim_dma_r()/sizeof(u32);
u32 dma_hi_offset = send_base + sim_dma_hi_r()/sizeof(u32);
*sim_send_ring_bfr(g, dma_offset*sizeof(u32)) =
sim_dma_target_phys_pci_coherent_f() |
sim_dma_status_valid_f() |
sim_dma_size_4kb_f() |
sim_dma_addr_lo_f(nvgpu_mem_get_addr(g, &g->sim->msg_bfr)
>> PAGE_SHIFT);
*sim_send_ring_bfr(g, dma_hi_offset*sizeof(u32)) =
u64_hi32(nvgpu_mem_get_addr(g, &g->sim->msg_bfr));
*sim_msg_hdr(g, sim_msg_sequence_r()) = g->sim->sequence_base++;
g->sim->send_ring_put = (g->sim->send_ring_put + 2 * sizeof(u32)) %
PAGE_SIZE;
/* Update the put pointer. This will trap into the host. */
sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);
return 0;
}
/* Pointer at @byte_offset into the CPU mapping of the recv ring. */
static inline u32 *sim_recv_ring_bfr(struct gk20a *g, u32 byte_offset)
{
	u8 *base = (u8 *)g->sim->recv_bfr.cpu_va;

	return (u32 *)(base + byte_offset);
}
static int rpc_recv_poll(struct gk20a *g)
{
u64 recv_phys_addr;
/* Poll the recv ring get pointer in an infinite loop */
do {
g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
} while (g->sim->recv_ring_put == g->sim->recv_ring_get);
/* process all replies */
while (g->sim->recv_ring_put != g->sim->recv_ring_get) {
/* these are in u32 offsets */
u32 dma_lo_offset =
sim_recv_put_pointer_v(g->sim->recv_ring_get)*2 + 0;
u32 dma_hi_offset = dma_lo_offset + 1;
u32 recv_phys_addr_lo = sim_dma_addr_lo_v(
*sim_recv_ring_bfr(g, dma_lo_offset*4));
u32 recv_phys_addr_hi = sim_dma_hi_addr_v(
*sim_recv_ring_bfr(g, dma_hi_offset*4));
recv_phys_addr = (u64)recv_phys_addr_hi << 32 |
(u64)recv_phys_addr_lo << PAGE_SHIFT;
if (recv_phys_addr != nvgpu_mem_get_addr(g, &g->sim->msg_bfr)) {
nvgpu_err(g, "Error in RPC reply");
return -EINVAL;
}
/* Update GET pointer */
g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*sizeof(u32))
% PAGE_SIZE;
sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
g->sim->recv_ring_put = sim_readl(g->sim, sim_recv_put_r());
}
return 0;
}
/*
 * Send the prepared RPC message and block until the reply arrives.
 *
 * Returns 0 on success, a negative value if the send/receive path
 * failed or the reply header carries a non-success result code.
 */
static int issue_rpc_and_wait(struct gk20a *g)
{
	int err = rpc_send_message(g);

	if (err) {
		nvgpu_err(g, "failed rpc_send_message");
		return err;
	}

	err = rpc_recv_poll(g);
	if (err) {
		nvgpu_err(g, "failed rpc_recv_poll");
		return err;
	}

	/* Now check if RPC really succeeded */
	if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
		nvgpu_err(g, "received failed status!");
		return -EINVAL;
	}

	return 0;
}
/*
 * Read one u32 from the simulator via an "escape read" RPC.
 *
 * @path: NUL-terminated path string understood by the simulator
 * @index: index to read at @path
 * @data: out parameter receiving the 32-bit value
 *
 * Returns 0 on success, non-zero on RPC failure.
 */
static int nvgpu_sim_esc_readl(struct gk20a *g,
		char *path, u32 index, u32 *data)
{
	int err;
	size_t pathlen = strlen(path);
	u32 data_offset;

	sim_write_hdr(g, sim_msg_function_sim_escape_read_v(),
		      sim_escape_read_hdr_size());
	/* params: [0]=index, [4]=reply size, [8]=reply data offset */
	*sim_msg_param(g, 0) = index;
	*sim_msg_param(g, 4) = sizeof(u32);
	/* NOTE(review): here the offset appears relative to the path start
	 * at 0xc (the 0xc is added back when reading the reply), unlike
	 * the gk20a variant where it is absolute — confirm against the
	 * simulator's escape-read protocol */
	data_offset = roundup(pathlen + 1, sizeof(u32));
	*sim_msg_param(g, 8) = data_offset;
	/* path string starts at param offset 0xc */
	strcpy((char *)sim_msg_param(g, 0xc), path);

	err = issue_rpc_and_wait(g);

	if (!err)
		memcpy(data, sim_msg_param(g, data_offset + 0xc), sizeof(u32));
	return err;
}
/*
 * One-time setup of the RPC rings shared with the simulator (PCI path).
 *
 * Allocates the send/recv/message DMA buffers and programs the send and
 * recv ring registers so both sides agree on ring location and state.
 *
 * Returns 0 on success (or when no simulator is present), a negative
 * errno on buffer allocation failure.
 */
int nvgpu_init_sim_support_pci(struct gk20a *g)
{
	int err;
	u64 phys;

	if (!g->sim)
		return 0;

	/*
	 * allocate sim event/msg buffers; chain the calls so the
	 * allocator's real error code is propagated (the former
	 * "err = err || ..." form collapsed it to 0/1)
	 */
	err = nvgpu_alloc_sim_buffer(g, &g->sim->send_bfr);
	if (!err)
		err = nvgpu_alloc_sim_buffer(g, &g->sim->recv_bfr);
	if (!err)
		err = nvgpu_alloc_sim_buffer(g, &g->sim->msg_bfr);
	if (err)
		goto fail;

	/* mark send ring invalid */
	sim_writel(g->sim, sim_send_ring_r(), sim_send_ring_status_invalid_f());

	/* read get pointer and make equal to put */
	g->sim->send_ring_put = sim_readl(g->sim, sim_send_get_r());
	sim_writel(g->sim, sim_send_put_r(), g->sim->send_ring_put);

	/* write send ring address and make it valid */
	phys = nvgpu_mem_get_addr(g, &g->sim->send_bfr);
	sim_writel(g->sim, sim_send_ring_hi_r(),
		   sim_send_ring_hi_addr_f(u64_hi32(phys)));
	sim_writel(g->sim, sim_send_ring_r(),
		   sim_send_ring_status_valid_f() |
		   sim_send_ring_target_phys_pci_coherent_f() |
		   sim_send_ring_size_4kb_f() |
		   sim_send_ring_addr_lo_f(phys >> PAGE_SHIFT));

	/* repeat for recv ring (but swap put,get as roles are opposite) */
	sim_writel(g->sim, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());

	/* read put pointer and make equal to get */
	g->sim->recv_ring_get = sim_readl(g->sim, sim_recv_put_r());
	sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);

	/* write recv ring address and make it valid */
	phys = nvgpu_mem_get_addr(g, &g->sim->recv_bfr);
	sim_writel(g->sim, sim_recv_ring_hi_r(),
		   sim_recv_ring_hi_addr_f(u64_hi32(phys)));
	sim_writel(g->sim, sim_recv_ring_r(),
		   sim_recv_ring_status_valid_f() |
		   sim_recv_ring_target_phys_pci_coherent_f() |
		   sim_recv_ring_size_4kb_f() |
		   sim_recv_ring_addr_lo_f(phys >> PAGE_SHIFT));

	g->sim->remove_support = nvgpu_remove_sim_support;
	g->sim->esc_readl = nvgpu_sim_esc_readl;
	return 0;

fail:
	nvgpu_free_sim_support(g);
	return err;
}

View File

@@ -28,7 +28,7 @@ struct gk20a;
struct fifo_gk20a; struct fifo_gk20a;
struct channel_gk20a; struct channel_gk20a;
struct gr_gk20a; struct gr_gk20a;
struct sim_gk20a; struct sim_nvgpu;
struct gk20a_ctxsw_ucode_segments; struct gk20a_ctxsw_ucode_segments;
struct gk20a_fecs_trace; struct gk20a_fecs_trace;
struct gk20a_ctxsw_trace; struct gk20a_ctxsw_trace;
@@ -1287,7 +1287,7 @@ struct gk20a {
struct fifo_gk20a fifo; struct fifo_gk20a fifo;
struct nvgpu_nvlink_dev nvlink; struct nvgpu_nvlink_dev nvlink;
struct gr_gk20a gr; struct gr_gk20a gr;
struct sim_gk20a *sim; struct sim_nvgpu *sim;
struct mm_gk20a mm; struct mm_gk20a mm;
struct nvgpu_pmu pmu; struct nvgpu_pmu pmu;
struct acr_desc acr; struct acr_desc acr;

View File

@@ -28,7 +28,7 @@
struct gk20a; struct gk20a;
struct sim_gk20a { struct sim_nvgpu {
struct gk20a *g; struct gk20a *g;
u32 send_ring_put; u32 send_ring_put;
u32 recv_ring_get; u32 recv_ring_get;
@@ -37,7 +37,7 @@ struct sim_gk20a {
struct nvgpu_mem send_bfr; struct nvgpu_mem send_bfr;
struct nvgpu_mem recv_bfr; struct nvgpu_mem recv_bfr;
struct nvgpu_mem msg_bfr; struct nvgpu_mem msg_bfr;
void (*remove_support)(struct sim_gk20a *); void (*remove_support)(struct gk20a *);
int (*esc_readl)( int (*esc_readl)(
struct gk20a *g, char *path, u32 index, u32 *data); struct gk20a *g, char *path, u32 index, u32 *data);
}; };

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012-2017, NVIDIA Corporation. * Copyright (c) 2012-2018, NVIDIA Corporation.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -53,8 +53,8 @@
* of register <x>. * of register <x>.
*/ */
#ifndef __hw_sim_gk20a_h__ #ifndef __hw_sim_h__
#define __hw_sim_gk20a_h__ #define __hw_sim_h__
/*This file is autogenerated. Do not edit. */ /*This file is autogenerated. Do not edit. */
static inline u32 sim_send_ring_r(void) static inline u32 sim_send_ring_r(void)
@@ -2150,4 +2150,4 @@ static inline u32 sim_msg_spare__init_v(void)
return 0x00000000; return 0x00000000;
} }
#endif /* __hw_sim_gk20a_h__ */ #endif /* __hw_sim_h__ */