Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23 18:16:01 +03:00)
gpu: nvgpu: vgpu: move to nvgpu_err/info from dev_err/info
It helps code be more portable.

Jira EVLR-2364
Change-Id: I0cc1fa739d7884d3c863975f08b3b592acd34613
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649941
Reviewed-by: Aingara Paramakuru <aparamakuru@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Nirav Patel <nipatel@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 0e39ad429e
parent 26f22226ba
committed by mobile promotions
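The pattern of the change, in brief: call sites that logged through a Linux struct device pointer (dev_err()/dev_info() on dev_from_gk20a(g), dev_from_vm(vm), or a cached struct device *d) now log through the struct gk20a pointer with nvgpu_err()/nvgpu_info(), which is what makes the code portable beyond Linux. The trailing "\n" is also dropped from the format strings, since the nvgpu macros terminate the line themselves (as the diff below implies). A minimal before/after sketch of the conversion; report_alloc_failure_*() is a hypothetical wrapper for illustration, only the logging calls mirror the diff:

/* Before: Linux-specific logging through the device pointer. */
static void report_alloc_failure_old(struct gk20a *g)
{
        dev_err(dev_from_gk20a(g), "memory allocation failed\n");
}

/* After: portable logging through the gk20a pointer; no trailing "\n". */
static void report_alloc_failure_new(struct gk20a *g)
{
        nvgpu_err(g, "memory allocation failed");
}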
@@ -54,7 +54,7 @@ int vgpu_fecs_trace_init(struct gk20a *g)
 	err = of_parse_phandle_with_fixed_args(np,
 			"mempool-fecs-trace", 1, 0, &args);
 	if (err) {
-		dev_info(dev_from_gk20a(g), "does not support fecs trace\n");
+		nvgpu_info(g, "does not support fecs trace");
 		goto fail;
 	}
 	__nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true);

@@ -62,8 +62,8 @@ int vgpu_fecs_trace_init(struct gk20a *g)
 	mempool = args.args[0];
 	vcst->cookie = vgpu_ivm_mempool_reserve(mempool);
 	if (IS_ERR(vcst->cookie)) {
-		dev_info(dev_from_gk20a(g),
-			"mempool %u reserve failed\n", mempool);
+		nvgpu_info(g,
+			"mempool %u reserve failed", mempool);
 		vcst->cookie = NULL;
 		err = -EINVAL;
 		goto fail;

@@ -72,15 +72,14 @@ int vgpu_fecs_trace_init(struct gk20a *g)
 	vcst->buf = ioremap_cache(vgpu_ivm_get_ipa(vcst->cookie),
 			vgpu_ivm_get_size(vcst->cookie));
 	if (!vcst->buf) {
-		dev_info(dev_from_gk20a(g), "ioremap_cache failed\n");
+		nvgpu_info(g, "ioremap_cache failed");
 		err = -EINVAL;
 		goto fail;
 	}
 	vcst->header = vcst->buf;
 	vcst->num_entries = vcst->header->num_ents;
 	if (unlikely(vcst->header->ent_size != sizeof(*vcst->entries))) {
-		dev_err(dev_from_gk20a(g),
-			"entry size mismatch\n");
+		nvgpu_err(g, "entry size mismatch");
 		goto fail;
 	}
 	vcst->entries = vcst->buf + sizeof(*vcst->header);

@@ -147,7 +147,6 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
 				u32 gpfifo_entries,
 				unsigned long acquire_timeout, u32 flags)
 {
-	struct device __maybe_unused *d = dev_from_gk20a(ch->g);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc;
 	int err;

@@ -211,7 +210,6 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
 static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 {
 	struct fifo_runlist_info_gk20a *runlist;
-	struct device *d = dev_from_gk20a(g);
 	unsigned int runlist_id = -1;
 	u32 i;
 	u64 runlist_size;

@@ -242,7 +240,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		int err = nvgpu_dma_alloc_sys(g, runlist_size,
 					      &runlist->mem[i]);
 		if (err) {
-			dev_err(d, "memory allocation failed\n");
+			nvgpu_err(g, "memory allocation failed");
 			goto clean_up_runlist;
 		}
 	}

@@ -266,7 +264,6 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 {
 	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 	struct fifo_gk20a *f = &g->fifo;
-	struct device *d = dev_from_gk20a(g);
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 	unsigned int chid;
 	int err = 0;

@@ -287,7 +284,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 	err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * f->num_channels,
 				  &f->userd);
 	if (err) {
-		dev_err(d, "memory allocation failed\n");
+		nvgpu_err(g, "memory allocation failed");
 		goto clean_up;
 	}

@@ -296,7 +293,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 	f->userd.gpu_va = vgpu_bar1_map(g, &f->userd.priv.sgt,
 					f->userd.size);
 	if (!f->userd.gpu_va) {
-		dev_err(d, "gmmu mapping failed\n");
+		nvgpu_err(g, "gmmu mapping failed");
 		goto clean_up;
 	}
 	/* if reduced BAR1 range is specified, use offset of 0

@@ -35,12 +35,12 @@ static int gv11b_vgpu_probe(struct device *dev)

 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usermode");
 	if (!r) {
-		dev_err(dev, "failed to get usermode regs\n");
+		nvgpu_err(g, "failed to get usermode regs");
 		return -ENXIO;
 	}
 	regs = devm_ioremap_resource(dev, r);
 	if (IS_ERR(regs)) {
-		dev_err(dev, "failed to map usermode regs\n");
+		nvgpu_err(g, "failed to map usermode regs");
 		return PTR_ERR(regs);
 	}
 	l->usermode_regs = regs;

@@ -56,7 +56,7 @@ static int gv11b_vgpu_probe(struct device *dev)
 			&g->syncpt_unit_base,
 			&g->syncpt_unit_size);
 		if (ret) {
-			dev_err(dev, "Failed to get syncpt interface");
+			nvgpu_err(g, "Failed to get syncpt interface");
 			return -ENOSYS;
 		}
 		g->syncpt_size = nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(1);

@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU Memory Management
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,

@@ -98,8 +98,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 	if (va_allocated) {
 		err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 		if (err) {
-			dev_err(dev_from_vm(vm),
-				"failed to free va");
+			nvgpu_err(g, "failed to free va");
 			return;
 		}
 	}

@@ -110,8 +109,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 	p->gpu_va = vaddr;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret)
-		dev_err(dev_from_vm(vm),
-			"failed to update gmmu ptes on unmap");
+		nvgpu_err(g, "failed to update gmmu ptes on unmap");

 	/* TLB invalidate handled on server side */
 }

@@ -685,29 +685,29 @@ int vgpu_probe(struct platform_device *pdev)
 	err = platform->probe(dev);
 	if (err) {
 		if (err == -EPROBE_DEFER)
-			dev_info(dev, "platform probe failed");
+			nvgpu_info(gk20a, "platform probe failed");
 		else
-			dev_err(dev, "platform probe failed");
+			nvgpu_err(gk20a, "platform probe failed");
 		return err;
 	}

 	if (platform->late_probe) {
 		err = platform->late_probe(dev);
 		if (err) {
-			dev_err(dev, "late probe failed");
+			nvgpu_err(gk20a, "late probe failed");
 			return err;
 		}
 	}

 	err = vgpu_comm_init(gk20a);
 	if (err) {
-		dev_err(dev, "failed to init comm interface\n");
+		nvgpu_err(gk20a, "failed to init comm interface");
 		return -ENOSYS;
 	}

 	priv->virt_handle = vgpu_connect();
 	if (!priv->virt_handle) {
-		dev_err(dev, "failed to connect to server node\n");
+		nvgpu_err(gk20a, "failed to connect to server node");
 		vgpu_comm_deinit();
 		return -ENOSYS;
 	}

@@ -720,7 +720,7 @@ int vgpu_probe(struct platform_device *pdev)

 	err = vgpu_pm_init(dev);
 	if (err) {
-		dev_err(dev, "pm init failed");
+		nvgpu_err(gk20a, "pm init failed");
 		return err;
 	}

@@ -53,23 +53,18 @@ static inline struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
 	return vgpu_get_priv_data_from_dev(dev_from_gk20a(g));
 }

-static inline u64 vgpu_get_handle_from_dev(struct device *dev)
+static inline u64 vgpu_get_handle(struct gk20a *g)
 {
-	struct vgpu_priv_data *priv = vgpu_get_priv_data_from_dev(dev);
+	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

 	if (unlikely(!priv)) {
-		dev_err(dev, "invalid vgpu_priv_data in %s\n", __func__);
+		nvgpu_err(g, "invalid vgpu_priv_data in %s", __func__);
 		return INT_MAX;
 	}

 	return priv->virt_handle;
 }

-static inline u64 vgpu_get_handle(struct gk20a *g)
-{
-	return vgpu_get_handle_from_dev(dev_from_gk20a(g));
-}
-
 int vgpu_pm_prepare_poweroff(struct device *dev);
 int vgpu_pm_finalize_poweron(struct device *dev);
 int vgpu_probe(struct platform_device *dev);