gpu: nvgpu: gk20a: Use new error macro

gk20a_err() and gk20a_warn() require a struct device pointer, which
is not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take a struct gk20a pointer instead. Convert the
code to use the more portable macros.
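
A minimal sketch of the conversion pattern applied throughout this change
(the examples are lifted from the hunks below; where only a driver-private
context such as cde_ctx is in scope, the change first adds a local
struct gk20a *g):

#include <nvgpu/log.h>  /* per the hunks below, this declares nvgpu_err()/nvgpu_warn() */

/* Before: needs a Linux struct device pointer */
gk20a_err(dev_from_gk20a(g), "failed to power on gpu\n");

/* After: takes the OS-agnostic struct gk20a pointer directly */
nvgpu_err(g, "failed to power on gpu\n");

/* When only a private context is at hand, derive g from it first */
struct gk20a *g = cde_ctx->g;
nvgpu_warn(g, "cde: could not bind vm");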

JIRA NVGPU-16

Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1331694
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Terje Bergstrom authored on 2017-03-30 07:44:03 -07:00; committed by mobile promotions
parent 2be51206af
commit 3ba374a5d9
28 changed files with 608 additions and 636 deletions


@@ -28,6 +28,8 @@
#include "gk20a/gk20a.h"
#include "gk20a/fence_gk20a.h"
#include <nvgpu/log.h>
#define HZ_TO_MHZ(a) ((a > 0xF414F9CD7) ? 0xffff : (a >> 32) ? \
(u32) ((a * 0x10C8ULL) >> 32) : (u16) ((u32) a/MHZ))
#define MHZ_TO_HZ(a) ((u64)a * MHZ)
@@ -352,7 +354,7 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
struct nvgpu_gpu_mmu_debug_mode_args *args)
{
if (gk20a_busy(g)) {
gk20a_err(dev_from_gk20a(g), "failed to power on gpu\n");
nvgpu_err(g, "failed to power on gpu\n");
return -EINVAL;
}
@@ -521,7 +523,7 @@ static inline int get_timestamps_zipper(struct gk20a *g,
unsigned int i = 0;
if (gk20a_busy(g)) {
gk20a_err(dev_from_gk20a(g), "GPU not powered on\n");
nvgpu_err(g, "GPU not powered on\n");
err = -EINVAL;
goto end;
}
@@ -560,7 +562,7 @@ static int nvgpu_gpu_get_cpu_time_correlation_info(
get_cpu_timestamp = get_cpu_timestamp_timeofday;
break;
default:
gk20a_err(dev_from_gk20a(g), "invalid cpu clock source id\n");
nvgpu_err(g, "invalid cpu clock source id\n");
return -EINVAL;
}
@@ -625,7 +627,7 @@ static int nvgpu_gpu_get_engine_info(
break;
default:
gk20a_err(dev_from_gk20a(g), "Unmapped engine enum %u\n",
nvgpu_err(g, "Unmapped engine enum %u\n",
engine_enum);
continue;
}
@@ -677,7 +679,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
if (align > roundup_pow_of_two(args->in.size)) {
/* log this special case, buddy allocator detail */
gk20a_warn(dev_from_gk20a(g),
nvgpu_warn(g,
"alignment larger than buffer size rounded up to power of 2 is not supported");
return -EINVAL;
}
@@ -1510,7 +1512,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
break;
default:
dev_dbg(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
gk20a_dbg_info("unrecognized gpu ioctl cmd: 0x%x", cmd);
err = -ENOTTY;
break;
}


@@ -17,6 +17,7 @@
#include <soc/tegra/chip-id.h>
#include <nvgpu/page_allocator.h>
#include <nvgpu/log.h>
#include "gk20a.h"
@@ -126,7 +127,7 @@ int gk20a_read_ptimer(struct gk20a *g, u64 *value)
}
/* too many iterations, bail out */
gk20a_err(dev_from_gk20a(g), "failed to read ptimer");
nvgpu_err(g, "failed to read ptimer");
return -EBUSY;
}


@@ -28,6 +28,7 @@
#include <nvgpu/timers.h>
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include "gk20a.h"
#include "channel_gk20a.h"
@@ -228,19 +229,20 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
struct gk20a_cde_hdr_buf *buf)
{
struct nvgpu_mem *mem;
struct gk20a *g = cde_ctx->g;
int err;
/* check that the file can hold the buf */
if (buf->data_byte_offset != 0 &&
buf->data_byte_offset + buf->num_bytes > img->size) {
gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
nvgpu_warn(g, "cde: invalid data section. buffer idx = %d",
cde_ctx->num_bufs);
return -EINVAL;
}
/* check that we have enough buf elems available */
if (cde_ctx->num_bufs >= MAX_CDE_BUFS) {
gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
nvgpu_warn(g, "cde: invalid data section. buffer idx = %d",
cde_ctx->num_bufs);
return -ENOMEM;
}
@@ -249,7 +251,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
mem = cde_ctx->mem + cde_ctx->num_bufs;
err = nvgpu_dma_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
if (err) {
gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
nvgpu_warn(g, "cde: could not allocate device memory. buffer idx = %d",
cde_ctx->num_bufs);
return -ENOMEM;
}
@@ -267,6 +269,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target,
int type, s32 shift, u64 mask, u64 value)
{
struct gk20a *g = cde_ctx->g;
u32 *target_mem_ptr = target;
u64 *target_mem_ptr_u64 = target;
u64 current_value, new_value;
@@ -287,7 +290,7 @@ static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target,
current_value = (u64)(current_value >> 32) |
(u64)(current_value << 32);
} else {
gk20a_warn(cde_ctx->dev, "cde: unknown type. type=%d",
nvgpu_warn(g, "cde: unknown type. type=%d",
type);
return -EINVAL;
}
@@ -315,13 +318,14 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
{
struct nvgpu_mem *source_mem;
struct nvgpu_mem *target_mem;
struct gk20a *g = cde_ctx->g;
u32 *target_mem_ptr;
u64 vaddr;
int err;
if (replace->target_buf >= cde_ctx->num_bufs ||
replace->source_buf >= cde_ctx->num_bufs) {
gk20a_warn(cde_ctx->dev, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d",
nvgpu_warn(g, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d",
replace->target_buf, replace->source_buf,
cde_ctx->num_bufs);
return -EINVAL;
@@ -333,7 +337,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
if (source_mem->size < (replace->source_byte_offset + 3) ||
target_mem->size < (replace->target_byte_offset + 3)) {
gk20a_warn(cde_ctx->dev, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu",
nvgpu_warn(g, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu",
replace->target_byte_offset,
replace->source_byte_offset,
source_mem->size,
@@ -350,7 +354,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
replace->shift, replace->mask,
vaddr);
if (err) {
gk20a_warn(cde_ctx->dev, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld",
nvgpu_warn(g, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld",
err, replace->target_buf,
replace->target_byte_offset,
replace->source_buf,
@@ -438,7 +442,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
param->shift, param->mask, new_data);
if (err) {
gk20a_warn(cde_ctx->dev, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu",
nvgpu_warn(g, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu",
err, i, param->id, param->target_buf,
param->target_byte_offset, new_data);
return err;
@@ -453,9 +457,10 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
struct gk20a_cde_hdr_param *param)
{
struct nvgpu_mem *target_mem;
struct gk20a *g = cde_ctx->g;
if (param->target_buf >= cde_ctx->num_bufs) {
gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
nvgpu_warn(g, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
cde_ctx->num_params, param->target_buf,
cde_ctx->num_bufs);
return -EINVAL;
@@ -463,7 +468,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
target_mem = cde_ctx->mem + param->target_buf;
if (target_mem->size < (param->target_byte_offset + 3)) {
gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
nvgpu_warn(g, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
cde_ctx->num_params, param->target_byte_offset,
target_mem->size);
return -EINVAL;
@@ -471,14 +476,14 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
/* does this parameter fit into our parameter structure */
if (cde_ctx->num_params >= MAX_CDE_PARAMS) {
gk20a_warn(cde_ctx->dev, "cde: no room for new parameters param idx = %d",
nvgpu_warn(g, "cde: no room for new parameters param idx = %d",
cde_ctx->num_params);
return -ENOMEM;
}
/* is the given id valid? */
if (param->id >= NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS) {
gk20a_warn(cde_ctx->dev, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u",
nvgpu_warn(g, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u",
param->id, cde_ctx->num_params,
NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS);
return -EINVAL;
@@ -494,6 +499,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
const struct firmware *img,
u32 required_class)
{
struct gk20a *g = cde_ctx->g;
struct nvgpu_alloc_obj_ctx_args alloc_obj_ctx;
int err;
@@ -505,7 +511,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
err = gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx);
if (err) {
gk20a_warn(cde_ctx->dev, "cde: failed to allocate ctx. err=%d",
nvgpu_warn(g, "cde: failed to allocate ctx. err=%d",
err);
return err;
}
@@ -519,6 +525,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
struct gk20a_cde_cmd_elem *cmd_elem,
u32 num_elems)
{
struct gk20a *g = cde_ctx->g;
struct nvgpu_gpfifo **gpfifo, *gpfifo_elem;
u32 *num_entries;
unsigned int i;
@@ -531,7 +538,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
gpfifo = &cde_ctx->convert_cmd;
num_entries = &cde_ctx->convert_cmd_num_entries;
} else {
gk20a_warn(cde_ctx->dev, "cde: unknown command. op=%u",
nvgpu_warn(g, "cde: unknown command. op=%u",
op);
return -EINVAL;
}
@@ -540,7 +547,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
*gpfifo = nvgpu_kzalloc(cde_ctx->g,
sizeof(struct nvgpu_gpfifo) * num_elems);
if (!*gpfifo) {
gk20a_warn(cde_ctx->dev, "cde: could not allocate memory for gpfifo entries");
nvgpu_warn(g, "cde: could not allocate memory for gpfifo entries");
return -ENOMEM;
}
@@ -550,7 +557,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
/* validate the current entry */
if (cmd_elem->target_buf >= cde_ctx->num_bufs) {
gk20a_warn(cde_ctx->dev, "cde: target buffer is not available (target=%u, num_bufs=%u)",
nvgpu_warn(g, "cde: target buffer is not available (target=%u, num_bufs=%u)",
cmd_elem->target_buf, cde_ctx->num_bufs);
return -EINVAL;
}
@@ -558,7 +565,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
target_mem = cde_ctx->mem + cmd_elem->target_buf;
if (target_mem->size <
cmd_elem->target_byte_offset + cmd_elem->num_bytes) {
gk20a_warn(cde_ctx->dev, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)",
nvgpu_warn(g, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)",
target_mem->size,
cmd_elem->target_byte_offset,
cmd_elem->num_bytes);
@@ -582,6 +589,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
{
struct gk20a *g = cde_ctx->g;
unsigned long init_bytes = cde_ctx->init_cmd_num_entries *
sizeof(struct nvgpu_gpfifo);
unsigned long conv_bytes = cde_ctx->convert_cmd_num_entries *
@@ -592,7 +600,7 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
/* allocate buffer that has space for both */
combined_cmd = nvgpu_kzalloc(cde_ctx->g, total_bytes);
if (!combined_cmd) {
gk20a_warn(cde_ctx->dev,
nvgpu_warn(g,
"cde: could not allocate memory for gpfifo entries");
return -ENOMEM;
}
@@ -615,6 +623,7 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
const struct firmware *img)
{
struct gk20a *g = cde_ctx->g;
struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
u32 *data = (u32 *)img->data;
u32 num_of_elems;
@@ -625,7 +634,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
min_size += 2 * sizeof(u32);
if (img->size < min_size) {
gk20a_warn(cde_ctx->dev, "cde: invalid image header");
nvgpu_warn(g, "cde: invalid image header");
return -EINVAL;
}
@@ -634,7 +643,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
min_size += num_of_elems * sizeof(*elem);
if (img->size < min_size) {
gk20a_warn(cde_ctx->dev, "cde: bad image");
nvgpu_warn(g, "cde: bad image");
return -EINVAL;
}
@@ -671,7 +680,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
MAX_CDE_ARRAY_ENTRIES*sizeof(u32));
break;
default:
gk20a_warn(cde_ctx->dev, "cde: unknown header element");
nvgpu_warn(g, "cde: unknown header element");
err = -EINVAL;
}
@@ -682,13 +691,13 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
}
if (!cde_ctx->init_convert_cmd || !cde_ctx->init_cmd_num_entries) {
gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
nvgpu_warn(g, "cde: convert command not defined");
err = -EINVAL;
goto deinit_image;
}
if (!cde_ctx->convert_cmd || !cde_ctx->convert_cmd_num_entries) {
gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
nvgpu_warn(g, "cde: convert command not defined");
err = -EINVAL;
goto deinit_image;
}
@@ -708,6 +717,7 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
u32 op, struct nvgpu_fence *fence,
u32 flags, struct gk20a_fence **fence_out)
{
struct gk20a *g = cde_ctx->g;
struct nvgpu_gpfifo *gpfifo = NULL;
int num_entries = 0;
@@ -721,12 +731,12 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
gpfifo = cde_ctx->convert_cmd;
num_entries = cde_ctx->convert_cmd_num_entries;
} else {
gk20a_warn(cde_ctx->dev, "cde: unknown buffer");
nvgpu_warn(g, "cde: unknown buffer");
return -EINVAL;
}
if (gpfifo == NULL || num_entries == 0) {
gk20a_warn(cde_ctx->dev, "cde: buffer not available");
nvgpu_warn(g, "cde: buffer not available");
return -ENOSYS;
}
@@ -765,7 +775,6 @@ __releases(&cde_app->mutex)
struct gk20a_cde_ctx *cde_ctx = container_of(delay_work,
struct gk20a_cde_ctx, ctx_deleter_work);
struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
struct device *dev = cde_ctx->dev;
struct gk20a *g = cde_ctx->g;
int err;
@@ -780,7 +789,7 @@ __releases(&cde_app->mutex)
if (err) {
/* this context would find new use anyway later, so not freeing
* here does not leak anything */
gk20a_warn(dev, "cde: cannot set gk20a on, postponing"
nvgpu_warn(g, "cde: cannot set gk20a on, postponing"
" temp ctx deletion");
return;
}
@@ -848,7 +857,7 @@ __must_hold(&cde_app->mutex)
cde_ctx = gk20a_cde_allocate_context(g);
if (IS_ERR(cde_ctx)) {
gk20a_warn(g->dev, "cde: cannot allocate context: %ld",
nvgpu_warn(g, "cde: cannot allocate context: %ld",
PTR_ERR(cde_ctx));
return cde_ctx;
}
@@ -1023,7 +1032,7 @@ __releases(&cde_app->mutex)
surface = dma_buf_vmap(compbits_scatter_buf);
if (IS_ERR(surface)) {
gk20a_warn(g->dev,
nvgpu_warn(g,
"dma_buf_vmap failed");
err = -EINVAL;
goto exit_unmap_vaddr;
@@ -1035,7 +1044,7 @@ __releases(&cde_app->mutex)
surface, scatter_buffer);
sgt = gk20a_mm_pin(g->dev, compbits_scatter_buf);
if (IS_ERR(sgt)) {
gk20a_warn(g->dev,
nvgpu_warn(g,
"mm_pin failed");
err = -EINVAL;
goto exit_unmap_surface;
@@ -1083,7 +1092,7 @@ __releases(&cde_app->mutex)
int id = param->id - NUM_RESERVED_PARAMS;
if (id < 0 || id >= MAX_CDE_USER_PARAMS) {
gk20a_warn(cde_ctx->dev, "cde: unknown user parameter");
nvgpu_warn(g, "cde: unknown user parameter");
err = -EINVAL;
goto exit_unmap_surface;
}
@@ -1093,7 +1102,7 @@ __releases(&cde_app->mutex)
/* patch data */
err = gk20a_cde_patch_params(cde_ctx);
if (err) {
gk20a_warn(cde_ctx->dev, "cde: failed to patch parameters");
nvgpu_warn(g, "cde: failed to patch parameters");
goto exit_unmap_surface;
}
@@ -1160,20 +1169,19 @@ __releases(&cde_app->mutex)
if (ch->has_timedout) {
if (cde_ctx->is_temporary) {
gk20a_warn(cde_ctx->dev,
nvgpu_warn(g,
"cde: channel had timed out"
" (temporary channel)");
/* going to be deleted anyway */
} else {
gk20a_warn(cde_ctx->dev,
nvgpu_warn(g,
"cde: channel had timed out"
", reloading");
/* mark it to be deleted, replace with a new one */
nvgpu_mutex_acquire(&cde_app->mutex);
cde_ctx->is_temporary = true;
if (gk20a_cde_create_context(g)) {
gk20a_err(cde_ctx->dev,
"cde: can't replace context");
nvgpu_err(g, "cde: can't replace context");
}
nvgpu_mutex_release(&cde_app->mutex);
}
@@ -1201,7 +1209,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
img = nvgpu_request_firmware(g, "gpu2cde.bin", 0);
if (!img) {
dev_err(cde_ctx->dev, "cde: could not fetch the firmware");
nvgpu_err(g, "cde: could not fetch the firmware");
return -ENOSYS;
}
@@ -1210,7 +1218,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
-1,
false);
if (!ch) {
gk20a_warn(cde_ctx->dev, "cde: gk20a channel not available");
nvgpu_warn(g, "cde: gk20a channel not available");
err = -ENOMEM;
goto err_get_gk20a_channel;
}
@@ -1218,14 +1226,14 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
/* bind the channel to the vm */
err = __gk20a_vm_bind_channel(&g->mm.cde.vm, ch);
if (err) {
gk20a_warn(cde_ctx->dev, "cde: could not bind vm");
nvgpu_warn(g, "cde: could not bind vm");
goto err_commit_va;
}
/* allocate gpfifo (1024 should be more than enough) */
err = gk20a_channel_alloc_gpfifo(ch, 1024, 0, 0);
if (err) {
gk20a_warn(cde_ctx->dev, "cde: unable to allocate gpfifo");
nvgpu_warn(g, "cde: unable to allocate gpfifo");
goto err_alloc_gpfifo;
}
@@ -1238,7 +1246,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
gr->compbit_store.mem.aperture);
if (!vaddr) {
gk20a_warn(cde_ctx->dev, "cde: cannot map compression bit backing store");
nvgpu_warn(g, "cde: cannot map compression bit backing store");
err = -ENOMEM;
goto err_map_backingstore;
}
@@ -1251,7 +1259,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
/* initialise the firmware */
err = gk20a_init_cde_img(cde_ctx, img);
if (err) {
gk20a_warn(cde_ctx->dev, "cde: image initialisation failed");
nvgpu_warn(g, "cde: image initialisation failed");
goto err_init_cde_img;
}
@@ -1268,8 +1276,7 @@ err_alloc_gpfifo:
err_commit_va:
err_get_gk20a_channel:
release_firmware(img);
dev_err(cde_ctx->dev, "cde: couldn't initialise buffer converter: %d",
err);
nvgpu_err(g, "cde: couldn't initialise buffer converter: %d", err);
return err;
}
@@ -1413,17 +1420,17 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
g->ops.cde.get_program_numbers(g, block_height_log2,
&hprog, &vprog);
else {
gk20a_warn(g->dev, "cde: chip not supported");
nvgpu_warn(g, "cde: chip not supported");
return -ENOSYS;
}
if (hprog < 0 || vprog < 0) {
gk20a_warn(g->dev, "cde: could not determine programs");
nvgpu_warn(g, "cde: could not determine programs");
return -ENOSYS;
}
if (xtiles > 8192 / 8 || ytiles > 8192 / 8)
gk20a_warn(g->dev, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
xtiles, ytiles);
gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
@@ -1541,7 +1548,7 @@ static int gk20a_buffer_convert_gpu_to_cde(
width, height, block_height_log2,
submit_flags, fence_in, state);
} else {
dev_err(dev_from_gk20a(g), "unsupported CDE firmware version %d",
nvgpu_err(g, "unsupported CDE firmware version %d",
g->cde_app.firmware_version);
err = -EINVAL;
}
@@ -1628,13 +1635,13 @@ int gk20a_mark_compressible_write(struct gk20a *g, u32 buffer_fd,
dmabuf = dma_buf_get(buffer_fd);
if (IS_ERR(dmabuf)) {
dev_err(dev_from_gk20a(g), "invalid dmabuf");
nvgpu_err(g, "invalid dmabuf");
return -EINVAL;
}
err = gk20a_dmabuf_get_state(dmabuf, dev_from_gk20a(g), offset, &state);
if (err) {
dev_err(dev_from_gk20a(g), "could not get state from dmabuf");
nvgpu_err(g, "could not get state from dmabuf");
dma_buf_put(dmabuf);
return err;
}


@@ -31,6 +31,8 @@
#include "gk20a.h"
#include "debug_gk20a.h"
#include <nvgpu/log.h>
#include <nvgpu/hw/gk20a/hw_ce2_gk20a.h>
#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
@@ -459,7 +461,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
runlist_id,
true);
if (!ce_ctx->ch) {
gk20a_err(ce_ctx->dev, "ce: gk20a channel not available");
nvgpu_err(g, "ce: gk20a channel not available");
goto end;
}
ce_ctx->ch->wdt_enabled = false;
@@ -467,21 +469,21 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
/* bind the channel to the vm */
err = __gk20a_vm_bind_channel(&g->mm.ce.vm, ce_ctx->ch);
if (err) {
gk20a_err(ce_ctx->dev, "ce: could not bind vm");
nvgpu_err(g, "ce: could not bind vm");
goto end;
}
/* allocate gpfifo (1024 should be more than enough) */
err = gk20a_channel_alloc_gpfifo(ce_ctx->ch, 1024, 0, 0);
if (err) {
gk20a_err(ce_ctx->dev, "ce: unable to allocate gpfifo");
nvgpu_err(g, "ce: unable to allocate gpfifo");
goto end;
}
/* allocate command buffer (4096 should be more than enough) from sysmem*/
err = nvgpu_dma_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
if (err) {
gk20a_err(ce_ctx->dev,
nvgpu_err(g,
"ce: could not allocate command buffer for CE context");
goto end;
}
@@ -492,7 +494,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
if (priority != -1) {
err = gk20a_fifo_set_priority(ce_ctx->ch, priority);
if (err) {
gk20a_err(ce_ctx->dev,
nvgpu_err(g,
"ce: could not set the channel priority for CE context");
goto end;
}
@@ -502,7 +504,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
if (timeslice != -1) {
err = gk20a_fifo_set_timeslice(ce_ctx->ch, timeslice);
if (err) {
gk20a_err(ce_ctx->dev,
nvgpu_err(g,
"ce: could not set the channel timeslice value for CE context");
goto end;
}
@@ -512,7 +514,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
if (runlist_level != -1) {
err = gk20a_channel_set_runlist_interleave(ce_ctx->ch, runlist_level);
if (err) {
gk20a_err(ce_ctx->dev,
nvgpu_err(g,
"ce: could not set the runlist interleave for CE context");
goto end;
}


@@ -27,6 +27,7 @@
#include <nvgpu/timers.h>
#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include "gk20a.h"
#include "debug_gk20a.h"
@@ -301,7 +302,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
} while (!nvgpu_timeout_expired(&timeout));
if (!channel_idle) {
gk20a_err(dev_from_gk20a(ch->g), "jobs not freed for channel %d\n",
nvgpu_err(ch->g, "jobs not freed for channel %d\n",
ch->hw_chid);
return -EBUSY;
}
@@ -322,7 +323,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
int ret;
if (gk20a_is_channel_marked_as_tsg(ch)) {
gk20a_err(dev_from_gk20a(g), "invalid operation for TSG!\n");
nvgpu_err(g, "invalid operation for TSG!\n");
return -EINVAL;
}
@@ -362,7 +363,7 @@ void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error)
ch->error_notifier->info32 = error;
ch->error_notifier->status = 0xffff;
gk20a_err(dev_from_gk20a(ch->g),
nvgpu_err(ch->g,
"error notifier set to %d for ch %d", error, ch->hw_chid);
}
}
@@ -398,7 +399,7 @@ static void gk20a_wait_until_counter_is_N(
msecs_to_jiffies(5000)) > 0)
break;
gk20a_warn(dev_from_gk20a(ch->g),
nvgpu_warn(ch->g,
"%s: channel %d, still waiting, %s left: %d, waiting for: %d",
caller, ch->hw_chid, counter_name,
atomic_read(counter), wait_value);
@@ -476,7 +477,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
if (!ch->referenceable) {
nvgpu_spinlock_release(&ch->ref_obtain_lock);
gk20a_err(dev_from_gk20a(ch->g),
nvgpu_err(ch->g,
"Extra %s() called to channel %u",
__func__, ch->hw_chid);
return;
@@ -795,7 +796,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
ch = allocate_channel(f);
if (ch == NULL) {
/* TBD: we want to make this virtualizable */
gk20a_err(dev_from_gk20a(g), "out of hw chids");
nvgpu_err(g, "out of hw chids");
return NULL;
}
@@ -813,7 +814,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
if (g->ops.fifo.alloc_inst(g, ch)) {
ch->g = NULL;
free_channel(f, ch);
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"failed to open gk20a channel, out of inst mem");
return NULL;
}
@@ -873,7 +874,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
used for inserting commands before/after user submitted buffers. */
static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
{
struct device *d = dev_from_gk20a(c->g);
struct gk20a *g = c->g;
struct vm_gk20a *ch_vm = c->vm;
struct priv_cmd_queue *q = &c->priv_cmd_q;
u32 size;
@@ -901,7 +902,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
if (err) {
gk20a_err(d, "%s: memory allocation failed\n", __func__);
nvgpu_err(g, "%s: memory allocation failed\n", __func__);
goto clean_up;
}
@@ -938,7 +939,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
gk20a_dbg_fn("size %d", orig_size);
if (!e) {
gk20a_err(dev_from_gk20a(c->g),
nvgpu_err(c->g,
"ch %d: priv cmd entry is null",
c->hw_chid);
return -EINVAL;
@@ -1016,7 +1017,7 @@ static int channel_gk20a_alloc_job(struct channel_gk20a *c,
if (CIRC_SPACE(put, get, c->joblist.pre_alloc.length))
*job_out = &c->joblist.pre_alloc.jobs[put];
else {
gk20a_warn(dev_from_gk20a(c->g),
nvgpu_warn(c->g,
"out of job ringbuffer space\n");
err = -EAGAIN;
}
@@ -1231,7 +1232,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
/* an address space needs to have been bound at this point. */
if (!gk20a_channel_as_bound(c)) {
gk20a_err(d,
nvgpu_err(g,
"not bound to an address space at time of gpfifo"
" allocation.");
return -EINVAL;
@@ -1239,7 +1240,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
ch_vm = c->vm;
if (c->gpfifo.mem.size) {
gk20a_err(d, "channel %d :"
nvgpu_err(g, "channel %d :"
"gpfifo already allocated", c->hw_chid);
return -EEXIST;
}
@@ -1248,7 +1249,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
gpfifo_size * sizeof(struct nvgpu_gpfifo),
&c->gpfifo.mem);
if (err) {
gk20a_err(d, "%s: memory allocation failed\n", __func__);
nvgpu_err(g, "%s: memory allocation failed\n", __func__);
goto clean_up;
}
@@ -1334,7 +1335,7 @@ clean_up_unmap:
nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
clean_up:
memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
gk20a_err(d, "fail");
nvgpu_err(g, "fail");
return err;
}
@@ -1607,7 +1608,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
return;
}
gk20a_err(dev_from_gk20a(g), "Job on channel %d timed out",
nvgpu_err(g, "Job on channel %d timed out",
ch->hw_chid);
gk20a_debug_dump(g->dev);
@@ -1761,7 +1762,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
* other reasons than a channel added in the items list
* currently, so warn and ack the message.
*/
gk20a_warn(g->dev, "Spurious worker event!");
nvgpu_warn(g, "Spurious worker event!");
++*get;
break;
}
@@ -1820,7 +1821,7 @@ int nvgpu_channel_worker_init(struct gk20a *g)
task = kthread_run(gk20a_channel_poll_worker, g,
"nvgpu_channel_poll_%s", g->name);
if (IS_ERR(task)) {
gk20a_err(g->dev, "failed to start channel poller thread");
nvgpu_err(g, "failed to start channel poller thread");
return PTR_ERR(task);
}
g->channel_worker.poll_task = task;
@@ -1853,7 +1854,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
* one ref already, so can't fail.
*/
if (WARN_ON(!gk20a_channel_get(ch))) {
gk20a_warn(g->dev, "cannot get ch ref for worker!");
nvgpu_warn(g, "cannot get ch ref for worker!");
return;
}
@@ -1876,7 +1877,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
{
struct priv_cmd_queue *q = &c->priv_cmd_q;
struct device *d = dev_from_gk20a(c->g);
struct gk20a *g = c->g;
if (!e)
return 0;
@@ -1885,7 +1886,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
/* read the entry's valid flag before reading its contents */
rmb();
if ((q->get != e->off) && e->off != 0)
gk20a_err(d, "requests out-of-order, ch=%d\n",
nvgpu_err(g, "requests out-of-order, ch=%d\n",
c->hw_chid);
q->get = e->off + e->size;
}
@@ -2416,7 +2417,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
* So, add extra_entries in user request. Also, HW with fifo size N
* can accept only N-1 entreis and so the below condition */
if (c->gpfifo.entry_num - 1 < num_entries + extra_entries) {
gk20a_err(d, "not enough gpfifo space allocated");
nvgpu_err(g, "not enough gpfifo space allocated");
return -ENOMEM;
}
@@ -2430,7 +2431,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
/* an address space needs to have been bound at this point. */
if (!gk20a_channel_as_bound(c)) {
gk20a_err(d,
nvgpu_err(g,
"not bound to an address space at time of gpfifo"
" submission.");
return -EINVAL;
@@ -2512,7 +2513,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
/* released by job cleanup via syncpt or sema interrupt */
err = gk20a_busy(g);
if (err) {
gk20a_err(d, "failed to host gk20a to submit gpfifo, process %s",
nvgpu_err(g, "failed to host gk20a to submit gpfifo, process %s",
current->comm);
return err;
}


@@ -20,6 +20,7 @@
#include <nvgpu/semaphore.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include "channel_sync_gk20a.h"
#include "gk20a.h"
@@ -65,8 +66,7 @@ static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
int err = 0;
if (!nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev, id)) {
dev_warn(dev_from_gk20a(c->g),
"invalid wait id in gpfifo submit, elided");
nvgpu_warn(c->g, "invalid wait id in gpfifo submit, elided");
return 0;
}
@@ -75,7 +75,7 @@ static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
err = gk20a_channel_alloc_priv_cmdbuf(c, 4, wait_cmd);
if (err) {
gk20a_err(dev_from_gk20a(c->g),
nvgpu_err(c->g,
"not enough priv cmd buffer space");
return err;
}
@@ -131,7 +131,7 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
err = gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, wait_cmd);
if (err) {
gk20a_err(dev_from_gk20a(c->g),
nvgpu_err(c->g,
"not enough priv cmd buffer space");
sync_fence_put(sync_fence);
return err;
@@ -360,7 +360,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
c->hw_chid, syncpt_name);
if (!sp->id) {
nvgpu_kfree(c->g, sp);
gk20a_err(c->g->dev, "failed to get free syncpt");
nvgpu_err(c->g, "failed to get free syncpt");
return NULL;
}
@@ -501,7 +501,7 @@ static void gk20a_channel_semaphore_launcher(
fence, fence->name);
err = sync_fence_wait(fence, -1);
if (err < 0)
dev_err(g->dev, "error waiting pre-fence: %d\n", err);
nvgpu_err(g, "error waiting pre-fence: %d\n", err);
gk20a_dbg_info(
"wait completed (%d) for fence %p '%s', triggering gpu work",
@@ -594,8 +594,8 @@ static int gk20a_channel_semaphore_wait_syncpt(
{
struct gk20a_channel_semaphore *sema =
container_of(s, struct gk20a_channel_semaphore, ops);
struct device *dev = dev_from_gk20a(sema->c->g);
gk20a_err(dev, "trying to use syncpoint synchronization");
struct gk20a *g = sema->c->g;
nvgpu_err(g, "trying to use syncpoint synchronization");
return -ENODEV;
}
@@ -707,7 +707,7 @@ static int gk20a_channel_semaphore_wait_fd(
err = gk20a_channel_alloc_priv_cmdbuf(c, 8, wait_cmd);
if (err) {
gk20a_err(dev_from_gk20a(c->g),
nvgpu_err(c->g,
"not enough priv cmd buffer space");
goto clean_up_sync_fence;
}
@@ -724,7 +724,7 @@ static int gk20a_channel_semaphore_wait_fd(
w->ch = c;
w->sema = nvgpu_semaphore_alloc(c);
if (!w->sema) {
gk20a_err(dev_from_gk20a(c->g), "ran out of semaphores");
nvgpu_err(c->g, "ran out of semaphores");
err = -ENOMEM;
goto clean_up_worker;
}
@@ -779,7 +779,7 @@ clean_up_sync_fence:
sync_fence_put(sync_fence);
return err;
#else
gk20a_err(dev_from_gk20a(c->g),
nvgpu_err(c->g,
"trying to use sync fds with CONFIG_SYNC disabled");
return -ENODEV;
#endif
@@ -801,7 +801,7 @@ static int __gk20a_channel_semaphore_incr(
semaphore = nvgpu_semaphore_alloc(c);
if (!semaphore) {
gk20a_err(dev_from_gk20a(c->g),
nvgpu_err(c->g,
"ran out of semaphores");
return -ENOMEM;
}
@@ -809,7 +809,7 @@ static int __gk20a_channel_semaphore_incr(
incr_cmd_size = 10;
err = gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd);
if (err) {
gk20a_err(dev_from_gk20a(c->g),
nvgpu_err(c->g,
"not enough priv cmd buffer space");
goto clean_up_sema;
}
@@ -889,7 +889,7 @@ static int gk20a_channel_semaphore_incr_user(
#else
struct gk20a_channel_semaphore *sema =
container_of(s, struct gk20a_channel_semaphore, ops);
gk20a_err(dev_from_gk20a(sema->c->g),
nvgpu_err(sema->c->g,
"trying to use sync fds with CONFIG_SYNC disabled");
return -ENODEV;
#endif


@@ -24,6 +24,8 @@
#include "gk20a.h"
#include <nvgpu/log.h>
#include <nvgpu/hw/gk20a/hw_trim_gk20a.h>
#include <nvgpu/hw/gk20a/hw_timer_gk20a.h>
@@ -251,7 +253,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, u32 n)
gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
if (ramp_timeout <= 0) {
gk20a_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout");
nvgpu_err(g, "gpcpll dynamic ramp timeout");
return -ETIMEDOUT;
}
return 0;
@@ -439,7 +441,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g)
ref = clk_get_parent(clk_get_parent(clk->tegra_clk));
if (IS_ERR(ref)) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"failed to get GPCPLL reference clock");
err = -EINVAL;
goto fail;
@@ -449,7 +451,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g)
clk->gpc_pll.id = GK20A_GPC_PLL;
clk->gpc_pll.clk_in = ref_rate / KHZ;
if (clk->gpc_pll.clk_in == 0) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"GPCPLL reference clock is zero");
err = -EINVAL;
goto fail;
@@ -508,7 +510,7 @@ static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
/* gpc_pll.freq is changed to new value here */
if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params,
&freq, true)) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"failed to set pll target for %d", freq);
return -EINVAL;
}
@@ -536,8 +538,7 @@ static int set_pll_freq(struct gk20a *g, u32 freq, u32 old_freq)
/* Just report error but not restore PLL since dvfs could already change
voltage even when it returns error. */
if (err)
gk20a_err(dev_from_gk20a(g),
"failed to set pll to %d", freq);
nvgpu_err(g, "failed to set pll to %d", freq);
return err;
}


@@ -27,6 +27,8 @@
#include "gk20a.h"
#include "css_gr_gk20a.h"
#include <nvgpu/log.h>
#include <nvgpu/hw/gk20a/hw_perf_gk20a.h>
#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>
@@ -299,8 +301,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch)
cur->snapshot->hw_overflow_events_occured++;
}
gk20a_warn(dev_from_gk20a(g),
"cyclestats: hardware overflow detected\n");
nvgpu_warn(g, "cyclestats: hardware overflow detected");
}
/* process all items in HW buffer */
@@ -340,8 +341,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch)
dst_nxt = dst_head;
} else {
/* client not found - skipping this entry */
gk20a_warn(dev_from_gk20a(g),
"cyclestats: orphaned perfmon %u\n",
nvgpu_warn(g, "cyclestats: orphaned perfmon %u",
src->perfmon_id);
goto next_hw_fifo_entry;
}
@@ -351,8 +351,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch)
if (dst_nxt == dst_get) {
/* no data copy, no pointer updates */
dst->sw_overflow_events_occured++;
gk20a_warn(dev_from_gk20a(g),
"cyclestats: perfmon %u soft overflow\n",
nvgpu_warn(g, "cyclestats: perfmon %u soft overflow",
src->perfmon_id);
} else {
*dst_put = *src;
@@ -392,8 +391,7 @@ next_hw_fifo_entry:
/* not all entries proceed correctly. some of problems */
/* reported as overflows, some as orphaned perfmons, */
/* but it will be better notify with summary about it */
gk20a_warn(dev_from_gk20a(g),
"cyclestats: completed %u from %u entries\n",
nvgpu_warn(g, "cyclestats: completed %u from %u entries",
completed, pending);
}


@@ -32,6 +32,8 @@
#include "gk20a.h"
#include "gr_gk20a.h"
#include <nvgpu/log.h>
#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h>
#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
@@ -601,7 +603,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
write_idx = hdr->write_idx;
if (write_idx >= dev->num_ents) {
gk20a_err(dev_from_gk20a(dev->g),
nvgpu_err(dev->g,
"write_idx=%u out of range [0..%u]",
write_idx, dev->num_ents);
ret = -ENOSPC;


@@ -25,6 +25,7 @@
#include <uapi/linux/nvgpu.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include "gk20a.h"
#include "gr_gk20a.h"
@@ -229,7 +230,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
if (!ch) {
gk20a_err(dev_from_gk20a(dbg_s->g),
nvgpu_err(dbg_s->g,
"no channel bound to dbg session\n");
return -EINVAL;
}
@@ -248,7 +249,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
break;
default:
gk20a_err(dev_from_gk20a(dbg_s->g),
nvgpu_err(dbg_s->g,
"unrecognized dbg gpu events ctrl cmd: 0x%x",
args->cmd);
ret = -EINVAL;
@@ -402,7 +403,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
break;
default:
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"unrecognized dbg gpu timeout mode : 0x%x",
timeout_mode);
err = -EINVAL;
@@ -742,7 +743,7 @@ static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
write_size);
nvgpu_mutex_release(&g->dbg_sessions_lock);
if (err) {
gk20a_err(dev_from_gk20a(g), "copy_to_user failed!\n");
nvgpu_err(g, "copy_to_user failed!\n");
return err;
}
@@ -1099,7 +1100,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
break;
default:
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"unrecognized dbg gpu ioctl cmd: 0x%x",
cmd);
err = -ENOTTY;
@@ -1146,14 +1147,13 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
int err = 0, powergate_err = 0;
bool is_pg_disabled = false;
struct device *dev = dbg_s->dev;
struct gk20a *g = dbg_s->g;
struct channel_gk20a *ch;
gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
if (args->num_ops > g->gpu_characteristics.reg_ops_limit) {
gk20a_err(dev, "regops limit exceeded");
nvgpu_err(g, "regops limit exceeded");
return -EINVAL;
}
@@ -1163,25 +1163,25 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
}
if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) {
gk20a_err(dev, "reg ops work buffer not allocated");
nvgpu_err(g, "reg ops work buffer not allocated");
return -ENODEV;
}
if (!dbg_s->id) {
gk20a_err(dev, "can't call reg_ops on an unbound debugger session");
nvgpu_err(g, "can't call reg_ops on an unbound debugger session");
return -EINVAL;
}
ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
if (!dbg_s->is_profiler && !ch) {
gk20a_err(dev, "bind a channel before regops for a debugging session");
nvgpu_err(g, "bind a channel before regops for a debugging session");
return -EINVAL;
}
/* be sure that ctx info is in place */
if (!gk20a_gpu_is_virtual(dbg_s->dev) &&
!gr_context_info_available(dbg_s, &g->gr)) {
gk20a_err(dev, "gr context data not available\n");
nvgpu_err(g, "gr context data not available\n");
return -ENODEV;
}
@@ -1221,7 +1221,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
if (copy_from_user(g->dbg_regops_tmp_buf,
fragment, fragment_size)) {
dev_err(dev, "copy_from_user failed!");
nvgpu_err(g, "copy_from_user failed!");
err = -EFAULT;
break;
}
@@ -1233,7 +1233,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
if (copy_to_user(fragment, g->dbg_regops_tmp_buf,
fragment_size)) {
dev_err(dev, "copy_to_user failed!");
nvgpu_err(g, "copy_to_user failed!");
err = -EFAULT;
break;
}
@@ -1255,7 +1255,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
err = powergate_err;
if (err)
gk20a_err(dev, "dbg regops failed");
nvgpu_err(g, "dbg regops failed");
return err;
}
@@ -1350,7 +1350,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
break;
default:
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"unrecognized dbg gpu powergate mode: 0x%x",
powermode);
err = -ENOTTY;
@@ -1388,7 +1388,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
nvgpu_err(g, "failed to poweron");
return err;
}
@@ -1397,7 +1397,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
if (!ch_gk20a) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"no bound channel for smpc ctxsw mode update\n");
err = -EINVAL;
goto clean_up;
@@ -1406,7 +1406,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
if (err) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"error (%d) during smpc ctxsw mode update\n", err);
goto clean_up;
}
@@ -1434,13 +1434,13 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
* cleaned up.
*/
if (!dbg_s->has_profiler_reservation) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"session doesn't have a valid reservation");
}
err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
nvgpu_err(g, "failed to poweron");
return err;
}
@@ -1449,7 +1449,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
if (!ch_gk20a) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"no bound channel for pm ctxsw mode update\n");
err = -EINVAL;
goto clean_up;
@@ -1458,7 +1458,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a,
args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW);
if (err)
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"error (%d) during pm ctxsw mode update\n", err);
/* gk20a would require a WAR to set the core PM_ENABLE bit, not
@@ -1486,7 +1486,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
nvgpu_err(g, "failed to poweron");
return err;
}
@@ -1495,7 +1495,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
/* Suspend GPU context switching */
err = gr_gk20a_disable_ctxsw(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
nvgpu_err(g, "unable to stop gr ctxsw");
/* this should probably be ctx-fatal... */
goto clean_up;
}
@@ -1512,7 +1512,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
err = gr_gk20a_enable_ctxsw(g);
if (err)
gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
nvgpu_err(g, "unable to restart ctxsw!\n");
clean_up:
nvgpu_mutex_release(&g->dbg_sessions_lock);
@@ -1544,7 +1544,7 @@ static int nvgpu_ioctl_allocate_profiler_object(
else {
prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
if (prof_obj->ch == NULL) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"bind a channel for dbg session");
nvgpu_kfree(g, prof_obj);
err = -EINVAL;
@@ -1582,7 +1582,7 @@ static int nvgpu_ioctl_free_profiler_object(
dbg_profiler_object_data, prof_obj_entry) {
if (prof_obj->prof_handle == args->profiler_handle) {
if (prof_obj->session_id != dbg_s->id) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"invalid handle %x",
args->profiler_handle);
err = -EINVAL;
@@ -1598,7 +1598,7 @@ static int nvgpu_ioctl_free_profiler_object(
}
}
if (!obj_found) {
gk20a_err(dev_from_gk20a(g), "profiler %x not found",
nvgpu_err(g, "profiler %x not found",
args->profiler_handle);
err = -EINVAL;
}
@@ -1618,7 +1618,7 @@ static struct dbg_profiler_object_data *find_matching_prof_obj(
dbg_profiler_object_data, prof_obj_entry) {
if (prof_obj->prof_handle == profiler_handle) {
if (prof_obj->session_id != dbg_s->id) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"invalid handle %x",
profiler_handle);
return NULL;
@@ -1667,7 +1667,7 @@ static void nvgpu_release_profiler_reservation(struct dbg_session_gk20a *dbg_s,
g->profiler_reservation_count--;
if (g->profiler_reservation_count < 0)
gk20a_err(dev_from_gk20a(g), "Negative reservation count!");
nvgpu_err(g, "Negative reservation count!");
dbg_s->has_profiler_reservation = false;
prof_obj->has_reservation = false;
if (prof_obj->ch == NULL)
@@ -1684,7 +1684,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
if (g->profiler_reservation_count < 0) {
gk20a_err(dev_from_gk20a(g), "Negative reservation count!");
nvgpu_err(g, "Negative reservation count!");
return -EINVAL;
}
@@ -1694,7 +1694,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
if (!my_prof_obj) {
gk20a_err(dev_from_gk20a(g), "object not found");
nvgpu_err(g, "object not found");
err = -EINVAL;
goto exit;
}
@@ -1711,7 +1711,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
*/
if (!g->ops.dbg_session_ops.check_and_set_global_reservation(
dbg_s, my_prof_obj)) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"global reserve: have existing reservation");
err = -EBUSY;
}
@@ -1719,7 +1719,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
/* If there's a global reservation,
* we can't take a per-context one.
*/
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"per-ctxt reserve: global reservation in effect");
err = -EBUSY;
} else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) {
@@ -1732,7 +1732,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
dbg_profiler_object_data, prof_obj_entry) {
if (prof_obj->has_reservation &&
(prof_obj->ch->tsgid == my_tsgid)) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"per-ctxt reserve (tsg): already reserved");
err = -EBUSY;
goto exit;
@@ -1742,7 +1742,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
dbg_s, my_prof_obj)) {
/* Another guest OS has the global reservation */
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"per-ctxt reserve: global reservation in effect");
err = -EBUSY;
}
@@ -1756,7 +1756,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
dbg_profiler_object_data, prof_obj_entry) {
if (prof_obj->has_reservation &&
(prof_obj->ch == my_ch)) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"per-ctxt reserve (ch): already reserved");
err = -EBUSY;
goto exit;
@@ -1766,7 +1766,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
dbg_s, my_prof_obj)) {
/* Another guest OS has the global reservation */
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"per-ctxt reserve: global reservation in effect");
err = -EBUSY;
}
@@ -1791,7 +1791,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
if (!prof_obj) {
gk20a_err(dev_from_gk20a(g), "object not found");
nvgpu_err(g, "object not found");
err = -EINVAL;
goto exit;
}
@@ -1799,7 +1799,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
if (prof_obj->has_reservation)
g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj);
else {
gk20a_err(dev_from_gk20a(g), "No reservation found");
nvgpu_err(g, "No reservation found");
err = -EINVAL;
goto exit;
}
@@ -1854,7 +1854,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
nvgpu_err(g, "failed to poweron");
goto fail_unmap;
}
@@ -1895,7 +1895,7 @@ static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
nvgpu_err(g, "failed to poweron");
return err;
}


@@ -19,6 +19,7 @@
#include <nvgpu/log.h>
#include <nvgpu/kmem.h>
#include <nvgpu/semaphore.h>
#include <nvgpu/log.h>
#include "gk20a.h"
#include "debug_gk20a.h"
@@ -145,7 +146,7 @@ static int gk20a_gr_debug_show(struct seq_file *s, void *unused)
err = gk20a_busy(g);
if (err) {
gk20a_err(dev, "failed to power on gpu: %d", err);
nvgpu_err(g, "failed to power on gpu: %d", err);
return -EINVAL;
}
@@ -186,7 +187,7 @@ static int gk20a_debug_show(struct seq_file *s, void *unused)
err = gk20a_busy(g);
if (err) {
gk20a_err(g->dev, "failed to power on gpu: %d", err);
nvgpu_err(g, "failed to power on gpu: %d", err);
return -EFAULT;
}


@@ -31,6 +31,8 @@
#include "gk20a.h"
#include "gr_gk20a.h"
#include <nvgpu/log.h>
#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h>
#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
@@ -156,7 +158,7 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid
he = nvgpu_kzalloc(g, sizeof(*he));
if (unlikely(!he)) {
gk20a_warn(dev_from_gk20a(g),
nvgpu_warn(g,
"can't alloc new hash entry for context_ptr=%x pid=%d",
context_ptr, pid);
return -ENOMEM;
@@ -255,7 +257,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
"consuming record trace=%p read=%d record=%p", trace, index, r);
if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) {
gk20a_warn(dev_from_gk20a(g),
nvgpu_warn(g,
"trace=%p read=%d record=%p magic_lo=%08x magic_hi=%08x (invalid)",
trace, index, r, r->magic_lo, r->magic_hi);
return -EINVAL;
@@ -342,7 +344,7 @@ static int gk20a_fecs_trace_poll(struct gk20a *g)
nvgpu_mutex_acquire(&trace->poll_lock);
write = gk20a_fecs_trace_get_write_index(g);
if (unlikely((write < 0) || (write >= GK20A_FECS_TRACE_NUM_RECORDS))) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"failed to acquire write index, write=%d", write);
err = write;
goto done;
@@ -571,7 +573,7 @@ static int gk20a_fecs_trace_init(struct gk20a *g)
trace = nvgpu_kzalloc(g, sizeof(struct gk20a_fecs_trace));
if (!trace) {
gk20a_warn(dev_from_gk20a(g), "failed to allocate fecs_trace");
nvgpu_warn(g, "failed to allocate fecs_trace");
return -ENOMEM;
}
g->fecs_trace = trace;
@@ -586,7 +588,7 @@ static int gk20a_fecs_trace_init(struct gk20a *g)
BUG_ON(!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS));
err = gk20a_fecs_trace_alloc_ring(g);
if (err) {
gk20a_warn(dev_from_gk20a(g), "failed to allocate FECS ring");
nvgpu_warn(g, "failed to allocate FECS ring");
goto clean_hash_lock;
}
@@ -754,7 +756,7 @@ static int gk20a_fecs_trace_enable(struct gk20a *g)
task = kthread_run(gk20a_fecs_trace_periodic_polling, g, __func__);
if (unlikely(IS_ERR(task))) {
gk20a_warn(dev_from_gk20a(g),
nvgpu_warn(g,
"failed to create FECS polling task");
return PTR_ERR(task);
}


@@ -30,6 +30,7 @@
#include <nvgpu/timers.h>
#include <nvgpu/semaphore.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include "gk20a.h"
#include "debug_gk20a.h"
@@ -105,7 +106,7 @@ struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 e
}
if (!info)
gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
return info;
}
@@ -131,7 +132,7 @@ bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id)
}
if (!valid)
gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
return valid;
}
@@ -146,7 +147,7 @@ u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g)
1, ENGINE_GR_GK20A);
if (!gr_engine_cnt) {
gk20a_err(dev_from_gk20a(g), "No GR engine available on this device!\n");
nvgpu_err(g, "No GR engine available on this device!\n");
}
return gr_engine_id;
@@ -218,7 +219,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
1, ENGINE_GR_GK20A);
if (!gr_engine_cnt) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"No GR engine available on this device!");
goto end;
}
@@ -228,7 +229,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
if (engine_info) {
gr_runlist_id = engine_info->runlist_id;
} else {
gk20a_err(g->dev,
nvgpu_err(g,
"gr_engine_id is not in active list/invalid %d", gr_engine_id);
}
@@ -273,7 +274,7 @@ static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
if (engine_info) {
fault_id = engine_info->fault_id;
} else {
gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
}
return fault_id;
}
@@ -321,7 +322,6 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
{
struct gk20a *g = f->g;
struct device *d = dev_from_gk20a(g);
u32 i;
u32 max_info_entries = top_device_info__size_1_v();
u32 engine_enum = ENGINE_INVAL_GK20A;
@@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
}
if (!found_pbdma_for_runlist) {
gk20a_err(d, "busted pbdma map");
nvgpu_err(g, "busted pbdma map");
return -EINVAL;
}
}
@@ -647,7 +647,6 @@ static void fifo_engine_exception_status(struct gk20a *g,
static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
{
struct fifo_runlist_info_gk20a *runlist;
struct device *d = dev_from_gk20a(g);
unsigned int runlist_id;
u32 i;
size_t runlist_size;
@@ -689,7 +688,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
int err = nvgpu_dma_alloc_sys(g, runlist_size,
&runlist->mem[i]);
if (err) {
dev_err(d, "memory allocation failed\n");
nvgpu_err(g, "memory allocation failed\n");
goto clean_up_runlist;
}
}
@@ -888,7 +887,6 @@ static void gk20a_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
static int gk20a_init_fifo_setup_sw(struct gk20a *g)
{
struct fifo_gk20a *f = &g->fifo;
struct device *d = dev_from_gk20a(g);
unsigned int chid, i;
int err = 0;
@@ -948,7 +946,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
f->num_channels, &f->userd);
if (err) {
dev_err(d, "userd memory allocation failed\n");
nvgpu_err(g, "userd memory allocation failed\n");
goto clean_up;
}
gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
@@ -1032,7 +1030,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
smp_mb();
if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: CPU wrote 0x%x, \
nvgpu_err(g, "bar1 broken @ gk20a: CPU wrote 0x%x, \
GPU read 0x%x", *cpu_vaddr, gk20a_bar1_readl(g, bar1_vaddr));
return -EINVAL;
}
@@ -1040,14 +1038,14 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
gk20a_bar1_writel(g, bar1_vaddr, v2);
if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: GPU wrote 0x%x, \
nvgpu_err(g, "bar1 broken @ gk20a: GPU wrote 0x%x, \
CPU read 0x%x", gk20a_bar1_readl(g, bar1_vaddr), *cpu_vaddr);
return -EINVAL;
}
/* is it visible to the cpu? */
if (*cpu_vaddr != v2) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"cpu didn't see bar1 write @ %p!",
cpu_vaddr);
}
@@ -1230,7 +1228,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
}
if (engine_enum == ENGINE_INVAL_GK20A)
gk20a_err(dev_from_gk20a(g), "unsupported engine_id %d", engine_id);
nvgpu_err(g, "unsupported engine_id %d", engine_id);
if (engine_enum == ENGINE_GR_GK20A) {
if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
@@ -1242,7 +1240,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
g->ops.fecs_trace.reset(g);
/*HALT_PIPELINE method, halt GR engine*/
if (gr_gk20a_halt_pipe(g))
gk20a_err(dev_from_gk20a(g), "failed to HALT gr pipe");
nvgpu_err(g, "failed to HALT gr pipe");
/* resetting engine using mc_enable_r() is not
enough, we do full init sequence */
gk20a_gr_reset(g);
@@ -1260,16 +1258,15 @@ static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
u32 intr;
intr = gk20a_readl(g, fifo_intr_chsw_error_r());
gk20a_err(dev_from_gk20a(g), "chsw: %08x\n", intr);
nvgpu_err(g, "chsw: %08x\n", intr);
gk20a_fecs_dump_falcon_stats(g);
gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
}
static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
{
struct device *dev = dev_from_gk20a(g);
u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
gk20a_err(dev, "dropped mmu fault (0x%08x)", fault_id);
nvgpu_err(g, "dropped mmu fault (0x%08x)", fault_id);
}
bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
@@ -1381,7 +1378,7 @@ bool gk20a_fifo_error_tsg(struct gk20a *g,
void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
struct channel_gk20a *refch)
{
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"channel %d generated a mmu fault", refch->hw_chid);
gk20a_set_error_notifier(refch,
NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
@@ -1392,7 +1389,7 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
{
struct channel_gk20a *ch = NULL;
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"TSG %d generated a mmu fault", tsg->tsgid);
down_read(&tsg->ch_list_lock);
@@ -1544,7 +1541,7 @@ static bool gk20a_fifo_handle_mmu_fault(
f.engine_subid_desc,
f.client_desc,
f.fault_type_desc);
gk20a_err(dev_from_gk20a(g), "%s mmu fault on engine %d, "
nvgpu_err(g, "%s mmu fault on engine %d, "
"engine subid %d (%s), client %d (%s), "
"addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
"inst_ptr 0x%llx\n",
@@ -1558,7 +1555,7 @@ static bool gk20a_fifo_handle_mmu_fault(
if (ctxsw) {
gk20a_fecs_dump_falcon_stats(g);
gk20a_err(dev_from_gk20a(g), "gr_status_r : 0x%x",
nvgpu_err(g, "gr_status_r : 0x%x",
gk20a_readl(g, gr_status_r()));
}
@@ -1654,18 +1651,18 @@ static bool gk20a_fifo_handle_mmu_fault(
gk20a_channel_abort(ch, false);
gk20a_channel_put(ch);
} else {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"mmu error in freed channel %d",
ch->hw_chid);
}
} else if (f.inst_ptr ==
gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) {
gk20a_err(dev_from_gk20a(g), "mmu fault from bar1");
nvgpu_err(g, "mmu fault from bar1");
} else if (f.inst_ptr ==
gk20a_mm_inst_block_addr(g, &g->mm.pmu.inst_block)) {
gk20a_err(dev_from_gk20a(g), "mmu fault from pmu");
nvgpu_err(g, "mmu fault from pmu");
} else
gk20a_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault");
nvgpu_err(g, "couldn't locate channel for mmu fault");
}
/* clear interrupt */
@@ -2137,7 +2134,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
/* could not find the engine - should never happen */
if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, failed to find engine\n",
nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine\n",
sched_error);
ret = false;
goto err;
@@ -2158,7 +2155,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
}
if (ret) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"fifo sched ctxsw timeout error: "
"engine=%u, %s=%d, ms=%u",
engine_id, is_tsg ? "tsg" : "ch", id, ms);
@@ -2175,7 +2172,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
"%s=%d", ms, is_tsg ? "tsg" : "ch", id);
}
} else {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"fifo sched error : 0x%08x, engine=%u, %s=%d",
sched_error, engine_id, is_tsg ? "tsg" : "ch", id);
}
@@ -2187,7 +2184,6 @@ err:
static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
{
bool print_channel_reset_log = false;
struct device *dev = dev_from_gk20a(g);
u32 handled = 0;
gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr);
@@ -2195,13 +2191,13 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
/* pio mode is unused. this shouldn't happen, ever. */
/* should we clear it or just leave it pending? */
gk20a_err(dev, "fifo pio error!\n");
nvgpu_err(g, "fifo pio error!\n");
BUG_ON(1);
}
if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
gk20a_err(dev, "fifo bind error: 0x%08x", bind_error);
nvgpu_err(g, "fifo bind error: 0x%08x", bind_error);
print_channel_reset_log = true;
handled |= fifo_intr_0_bind_error_pending_f();
}
@@ -2233,7 +2229,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
if (print_channel_reset_log) {
unsigned int engine_id;
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"channel reset initiated from %s; intr=0x%08x",
__func__, fifo_intr);
for (engine_id = 0;
@@ -2301,8 +2297,7 @@ static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, int pbdma_id,
return false;
}
static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
struct gk20a *g,
static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g,
struct fifo_gk20a *f,
u32 pbdma_id)
{
@@ -2323,7 +2318,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
if ((f->intr.pbdma.device_fatal_0 |
f->intr.pbdma.channel_fatal_0 |
f->intr.pbdma.restartable_0) & pbdma_intr_0) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"pbdma_intr_0(%d):0x%08x PBH: %08x SHADOW: %08x M0: %08x %08x %08x %08x",
pbdma_id, pbdma_intr_0,
gk20a_readl(g, pbdma_pb_header_r(pbdma_id)),
@@ -2346,7 +2341,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
gk20a_writel(g, pbdma_acquire_r(pbdma_id), val);
if (g->timeouts_enabled) {
reset = true;
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"semaphore acquire timeout!");
}
handled |= pbdma_intr_0_acquire_pending_f();
@@ -2387,7 +2382,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
/* all intrs in _intr_1 are "host copy engine" related,
* which gk20a doesn't have. for now just make them channel fatal. */
if (pbdma_intr_1) {
dev_err(dev, "channel hce error: pbdma_intr_1(%d): 0x%08x",
nvgpu_err(g, "channel hce error: pbdma_intr_1(%d): 0x%08x",
pbdma_id, pbdma_intr_1);
reset = true;
gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
@@ -2428,7 +2423,6 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
{
struct device *dev = dev_from_gk20a(g);
struct fifo_gk20a *f = &g->fifo;
u32 clear_intr = 0, i;
u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
@@ -2438,7 +2432,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) {
gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i);
clear_intr |=
gk20a_fifo_handle_pbdma_intr(dev, g, f, i);
gk20a_fifo_handle_pbdma_intr(g, f, i);
}
}
return fifo_intr_0_pbdma_intr_pending_f();
@@ -2534,7 +2528,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
struct tsg_gk20a *tsg = &g->fifo.tsg[id];
struct channel_gk20a *ch = NULL;
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"preempt TSG %d timeout\n", id);
down_read(&tsg->ch_list_lock);
@@ -2550,7 +2544,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
} else {
struct channel_gk20a *ch = &g->fifo.channel[id];
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"preempt channel %d timeout\n", id);
if (gk20a_channel_get(ch)) {
@@ -2733,7 +2727,7 @@ int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
err = gk20a_fifo_enable_engine_activity(g,
&g->fifo.engine_info[active_engine_id]);
if (err) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"failed to enable engine %d activity\n", active_engine_id);
ret = err;
}
@@ -2806,7 +2800,7 @@ clean_up:
if (err) {
gk20a_dbg_fn("failed");
if (gk20a_fifo_enable_engine_activity(g, eng_info))
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"failed to enable gr engine activity\n");
} else {
gk20a_dbg_fn("done");
@@ -3155,7 +3149,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
if (ret == -ETIMEDOUT) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"runlist update timeout");
gk20a_fifo_runlist_reset_engines(g, runlist_id);
@@ -3167,10 +3161,10 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
* should be fine */
if (ret)
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"runlist update failed: %d", ret);
} else if (ret == -EINTR)
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"runlist update interrupted");
}
@@ -3196,7 +3190,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
/* Capture the last failure error code */
errcode = g->ops.fifo.update_runlist(g, runlist_id, hw_chid, add, wait_for_finish);
if (errcode) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"failed to update_runlist %d %d", runlist_id, errcode);
ret = errcode;
}
@@ -4051,8 +4045,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
struct gk20a *g = ch->g;
if (gk20a_is_channel_marked_as_tsg(ch)) {
gk20a_err(dev_from_gk20a(ch->g),
"invalid operation for TSG!\n");
nvgpu_err(g, "invalid operation for TSG!\n");
return -EINVAL;
}
@@ -4071,8 +4064,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
int gk20a_fifo_set_priority(struct channel_gk20a *ch, u32 priority)
{
if (gk20a_is_channel_marked_as_tsg(ch)) {
gk20a_err(dev_from_gk20a(ch->g),
"invalid operation for TSG!\n");
nvgpu_err(ch->g, "invalid operation for TSG!\n");
return -EINVAL;
}
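
Several hunks above also drop a struct device parameter or local variable that existed only to feed the old gk20a_err() calls. A minimal sketch of that flavour of cleanup, on a hypothetical helper (the names and message are illustrative, not the driver's actual function), assuming only that nvgpu_err() takes the struct gk20a pointer as shown throughout this change:

#include <nvgpu/log.h>

#include "gk20a.h"

/* Hypothetical helper: the struct device parameter existed only so that
 * gk20a_err() could be called; once errors go through nvgpu_err(), the
 * parameter and the dev_from_gk20a() lookups at the call sites disappear.
 */
static u32 example_handle_intr(struct gk20a *g, u32 unit_id, u32 intr)
{
	if (intr)
		nvgpu_err(g, "unit %u raised intr 0x%08x", unit_id, intr);

	return intr;
}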


@@ -282,7 +282,7 @@ static int gk20a_init_support(struct platform_device *dev)
g->regs = gk20a_ioremap_resource(dev, GK20A_BAR0_IORESOURCE_MEM,
&g->reg_mem);
if (IS_ERR(g->regs)) {
dev_err(dev_from_gk20a(g), "failed to remap gk20a registers\n");
nvgpu_err(g, "failed to remap gk20a registers\n");
err = PTR_ERR(g->regs);
goto fail;
}
@@ -290,7 +290,7 @@ static int gk20a_init_support(struct platform_device *dev)
g->bar1 = gk20a_ioremap_resource(dev, GK20A_BAR1_IORESOURCE_MEM,
&g->bar1_mem);
if (IS_ERR(g->bar1)) {
dev_err(dev_from_gk20a(g), "failed to remap gk20a bar1\n");
nvgpu_err(g, "failed to remap gk20a bar1\n");
err = PTR_ERR(g->bar1);
goto fail;
}
@@ -411,7 +411,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
if (platform->busy) {
err = platform->busy(dev);
if (err < 0) {
dev_err(dev, "%s: failed to poweron platform dependency\n",
nvgpu_err(g, "%s: failed to poweron platform dependency\n",
__func__);
goto done;
}
@@ -467,7 +467,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
if (g->ops.clk.init_clk_support) {
err = g->ops.clk.init_clk_support(g);
if (err) {
gk20a_err(dev, "failed to init gk20a clk");
nvgpu_err(g, "failed to init gk20a clk");
goto done;
}
}
@@ -475,7 +475,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
err = g->ops.fifo.reset_enable_hw(g);
if (err) {
gk20a_err(dev, "failed to reset gk20a fifo");
nvgpu_err(g, "failed to reset gk20a fifo");
goto done;
}
@@ -484,13 +484,13 @@ int gk20a_pm_finalize_poweron(struct device *dev)
err = gk20a_init_mm_support(g);
if (err) {
gk20a_err(dev, "failed to init gk20a mm");
nvgpu_err(g, "failed to init gk20a mm");
goto done;
}
err = gk20a_init_fifo_support(g);
if (err) {
gk20a_err(dev, "failed to init gk20a fifo");
nvgpu_err(g, "failed to init gk20a fifo");
goto done;
}
@@ -501,7 +501,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
err = gk20a_enable_gr_hw(g);
if (err) {
gk20a_err(dev, "failed to enable gr");
nvgpu_err(g, "failed to enable gr");
goto done;
}
@@ -509,7 +509,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
if (g->ops.pmu.prepare_ucode)
err = g->ops.pmu.prepare_ucode(g);
if (err) {
gk20a_err(dev, "failed to init pmu ucode");
nvgpu_err(g, "failed to init pmu ucode");
goto done;
}
}
@@ -518,7 +518,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
if (g->ops.pmupstate) {
err = gk20a_init_pstate_support(g);
if (err) {
gk20a_err(dev, "failed to init pstates");
nvgpu_err(g, "failed to init pstates");
goto done;
}
}
@@ -527,21 +527,21 @@ int gk20a_pm_finalize_poweron(struct device *dev)
if (g->ops.pmu.is_pmu_supported(g)) {
err = gk20a_init_pmu_support(g);
if (err) {
gk20a_err(dev, "failed to init gk20a pmu");
nvgpu_err(g, "failed to init gk20a pmu");
goto done;
}
}
err = gk20a_init_gr_support(g);
if (err) {
gk20a_err(dev, "failed to init gk20a gr");
nvgpu_err(g, "failed to init gk20a gr");
goto done;
}
if (g->ops.pmu.mclk_init) {
err = g->ops.pmu.mclk_init(g);
if (err) {
gk20a_err(dev, "failed to set mclk");
nvgpu_err(g, "failed to set mclk");
/* Indicate error dont goto done */
}
}
@@ -550,37 +550,37 @@ int gk20a_pm_finalize_poweron(struct device *dev)
if (g->ops.pmupstate) {
err = gk20a_init_pstate_pmu_support(g);
if (err) {
gk20a_err(dev, "failed to init pstates");
nvgpu_err(g, "failed to init pstates");
goto done;
}
}
err = nvgpu_clk_arb_init_arbiter(g);
if (err) {
gk20a_err(dev, "failed to init clk arb");
nvgpu_err(g, "failed to init clk arb");
goto done;
}
#endif
err = gk20a_init_therm_support(g);
if (err) {
gk20a_err(dev, "failed to init gk20a therm");
nvgpu_err(g, "failed to init gk20a therm");
goto done;
}
err = g->ops.chip_init_gpu_characteristics(g);
if (err) {
gk20a_err(dev, "failed to init gk20a gpu characteristics");
nvgpu_err(g, "failed to init gk20a gpu characteristics");
goto done;
}
err = gk20a_ctxsw_trace_init(g);
if (err)
gk20a_warn(dev, "could not initialize ctxsw tracing");
nvgpu_warn(g, "could not initialize ctxsw tracing");
err = gk20a_sched_ctrl_init(g);
if (err) {
gk20a_err(dev, "failed to init sched control");
nvgpu_err(g, "failed to init sched control");
goto done;
}
@@ -619,7 +619,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
speed = 1 << (fls(speed) - 1);
err = g->ops.xve.set_speed(g, speed);
if (err) {
gk20a_err(dev, "Failed to set PCIe bus speed!\n");
nvgpu_err(g, "Failed to set PCIe bus speed!\n");
goto done;
}
}
@@ -1312,7 +1312,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset)
} while (ref_cnt != target_ref_cnt && !nvgpu_timeout_expired(&timeout));
if (ref_cnt != target_ref_cnt) {
gk20a_err(dev, "failed to idle - refcount %d != 1\n",
nvgpu_err(g, "failed to idle - refcount %d != 1\n",
ref_cnt);
goto fail_drop_usage_count;
}
@@ -1344,7 +1344,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset)
if (is_railgated) {
return 0;
} else {
gk20a_err(dev, "failed to idle in timeout\n");
nvgpu_err(g, "failed to idle in timeout\n");
goto fail_timeout;
}
} else {


@@ -31,6 +31,8 @@
#include "clk_gk20a.h"
#include "gk20a_scale.h"
#include <nvgpu/log.h>
/*
* gk20a_scale_qos_notify()
*
@@ -59,8 +61,8 @@ int gk20a_scale_qos_notify(struct notifier_block *nb,
pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS) * 1000;
if (profile->qos_min_freq > profile->qos_max_freq) {
gk20a_err(g->dev,
"QoS: setting invalid limit, min_freq=%lu max_freq=%lu\n",
nvgpu_err(g,
"QoS: setting invalid limit, min_freq=%lu max_freq=%lu",
profile->qos_min_freq, profile->qos_max_freq);
profile->qos_min_freq = profile->qos_max_freq;
}


@@ -23,6 +23,7 @@
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include "gk20a.h"
#include "gr_ctx_gk20a.h"
@@ -111,7 +112,6 @@ static bool gr_gk20a_is_firmware_defined(void)
static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
{
struct device *d = dev_from_gk20a(g);
const struct firmware *netlist_fw;
struct netlist_image *netlist = NULL;
char name[MAX_NETLIST_NAME];
@@ -135,13 +135,13 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
for (; net < max; net++) {
if (g->ops.gr_ctx.get_netlist_name(g, net, name) != 0) {
gk20a_warn(d, "invalid netlist index %d", net);
nvgpu_warn(g, "invalid netlist index %d", net);
continue;
}
netlist_fw = nvgpu_request_firmware(g, name, 0);
if (!netlist_fw) {
gk20a_warn(d, "failed to load netlist %s", name);
nvgpu_warn(g, "failed to load netlist %s", name);
continue;
}
@@ -436,7 +436,7 @@ done:
gk20a_dbg_info("netlist image %s loaded", name);
return 0;
} else {
gk20a_err(d, "failed to load netlist image!!");
nvgpu_err(g, "failed to load netlist image!!");
return err;
}
}


@@ -23,6 +23,8 @@
#include "sim_gk20a.h"
#include "gr_ctx_gk20a.h"
#include <nvgpu/log.h>
int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
{
int err = 0;
@@ -239,8 +241,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib");
return 0;
fail:
gk20a_err(dev_from_gk20a(g),
"failed querying grctx info from chiplib");
nvgpu_err(g, "failed querying grctx info from chiplib");
return err;
}


File diff suppressed because it is too large.
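
The suppressed diff presumably receives the same mechanical conversion as the surrounding files. A minimal sketch of that pattern on a hypothetical call site (the function name and message are illustrative, not taken from the suppressed file), assuming nvgpu_err() takes a struct gk20a pointer as its first argument:

#include <nvgpu/log.h>

#include "gk20a.h"

/* Hypothetical call site: the old form routed errors through the Linux
 * struct device pointer; the new form needs only the struct gk20a pointer.
 */
static int example_report_failure(struct gk20a *g, int err)
{
	if (err) {
		/* before: gk20a_err(dev_from_gk20a(g), "operation failed: %d", err); */
		nvgpu_err(g, "operation failed: %d", err);
	}

	return err;
}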


@@ -23,6 +23,8 @@
#include "nvgpu_gpuid_t19x.h"
#endif
#include <nvgpu/log.h>
int gpu_init_hal(struct gk20a *g)
{
u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl;
@@ -54,7 +56,7 @@ int gpu_init_hal(struct gk20a *g)
break;
#endif
default:
gk20a_err(g->dev, "no support for %x", ver);
nvgpu_err(g, "no support for %x", ver);
return -ENODEV;
}


@@ -35,6 +35,8 @@
#include "css_gr_gk20a.h"
#include "pramin_gk20a.h"
#include <nvgpu/log.h>
#include <nvgpu/hw/gk20a/hw_proj_gk20a.h>
static struct gpu_ops gk20a_ops = {
@@ -132,7 +134,7 @@ static int gk20a_get_litter_value(struct gk20a *g, int value)
ret = 0;
break;
default:
gk20a_err(dev_from_gk20a(g), "Missing definition %d", value);
nvgpu_err(g, "Missing definition %d", value);
BUG();
break;
}


@@ -20,6 +20,7 @@
#include <trace/events/gk20a.h>
#include <nvgpu/timers.h>
#include <nvgpu/log.h>
#include "gk20a.h"
@@ -160,8 +161,7 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
} while (!nvgpu_timeout_expired(&timeout));
if (nvgpu_timeout_peek_expired(&timeout)) {
gk20a_err(dev_from_gk20a(g),
"comp tag clear timeout\n");
nvgpu_err(g, "comp tag clear timeout");
err = -EBUSY;
goto out;
}
@@ -186,7 +186,7 @@ static void gk20a_ltc_isr(struct gk20a *g)
u32 intr;
intr = gk20a_readl(g, ltc_ltc0_ltss_intr_r());
gk20a_err(dev_from_gk20a(g), "ltc: %08x\n", intr);
nvgpu_err(g, "ltc: %08x\n", intr);
gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr);
}
@@ -215,7 +215,7 @@ static int gk20a_determine_L2_size_bytes(struct gk20a *g)
ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
sets = 16;
} else {
dev_err(dev_from_gk20a(g),
nvgpu_err(g,
"Unknown constant %u for active sets",
(unsigned)active_sets_value);
sets = 0;


@@ -39,6 +39,7 @@
#include <nvgpu/allocator.h>
#include <nvgpu/semaphore.h>
#include <nvgpu/page_allocator.h>
#include <nvgpu/log.h>
#include "gk20a.h"
#include "mm_gk20a.h"
@@ -536,7 +537,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
0,
NULL);
if (err) {
gk20a_err(g->dev,
nvgpu_err(g,
"Failed to clear vidmem region 1 : %d", err);
return err;
}
@@ -555,7 +556,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
0,
&gk20a_fence_out);
if (err) {
gk20a_err(g->dev,
nvgpu_err(g,
"Failed to clear vidmem region 2 : %d", err);
return err;
}
@@ -575,7 +576,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
gk20a_fence_put(gk20a_fence_out);
if (err) {
gk20a_err(g->dev,
nvgpu_err(g,
"fence wait failed for CE execute ops");
return err;
}
@@ -591,7 +592,6 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
{
#if defined(CONFIG_GK20A_VIDMEM)
struct gk20a *g = mm->g;
struct device *d = dev_from_gk20a(g);
size_t size = g->ops.mm.get_vidmem_size ?
g->ops.mm.get_vidmem_size(g) : 0;
u64 bootstrap_base, bootstrap_size, base;
@@ -625,7 +625,7 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
default_page_size,
GPU_ALLOC_4K_VIDMEM_PAGES);
if (err) {
gk20a_err(d, "Failed to register vidmem for size %zu: %d",
nvgpu_err(g, "Failed to register vidmem for size %zu: %d",
size, err);
return err;
}
@@ -796,7 +796,7 @@ void gk20a_init_mm_ce_context(struct gk20a *g)
NULL);
if (g->mm.vidmem.ce_ctx_id == (u32)~0)
gk20a_err(g->dev,
nvgpu_err(g,
"Failed to allocate CE context for vidmem page clearing support");
}
#endif
@@ -882,7 +882,6 @@ static void unmap_gmmu_phys_pages(struct gk20a_mm_entry *entry)
static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
struct gk20a_mm_entry *entry)
{
struct device *d = dev_from_vm(vm);
struct gk20a *g = gk20a_from_vm(vm);
u32 num_pages = 1 << order;
u32 len = num_pages * PAGE_SIZE;
@@ -905,7 +904,7 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
if (err) {
gk20a_err(d, "memory allocation failed");
nvgpu_err(g, "memory allocation failed");
return -ENOMEM;
}
@@ -1209,7 +1208,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
struct vm_gk20a_mapping_batch *batch)
{
struct device *d = dev_from_vm(vm);
struct gk20a *g = vm->mm->g;
struct mapped_buffer_node *mapped_buffer;
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
@@ -1217,7 +1216,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset);
if (!mapped_buffer) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
return;
}
@@ -1240,7 +1239,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
if (mapped_buffer->user_mapped == 0) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d, "addr already unmapped from user 0x%llx", offset);
nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
return;
}
@@ -1284,7 +1283,7 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
offset = nvgpu_alloc(vma, size);
if (!offset) {
gk20a_err(dev_from_vm(vm),
nvgpu_err(vm->mm->g,
"%s oom: sz=0x%llx", vma->name, size);
return 0;
}
@@ -1405,14 +1404,13 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
{
bool kind_compressible;
struct gk20a *g = gk20a_from_vm(vm);
struct device *d = dev_from_gk20a(g);
int ctag_granularity = g->ops.fb.compression_page_size(g);
if (unlikely(bfr->kind_v == gmmu_pte_kind_invalid_v()))
bfr->kind_v = gmmu_pte_kind_pitch_v();
if (unlikely(!gk20a_kind_is_supported(bfr->kind_v))) {
gk20a_err(d, "kind 0x%x not supported", bfr->kind_v);
nvgpu_err(g, "kind 0x%x not supported", bfr->kind_v);
return -EINVAL;
}
@@ -1423,7 +1421,7 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
bfr->uc_kind_v = gk20a_get_uncompressed_kind(bfr->kind_v);
if (unlikely(bfr->uc_kind_v == gmmu_pte_kind_invalid_v())) {
/* shouldn't happen, but it is worth cross-checking */
gk20a_err(d, "comptag kind 0x%x can't be"
nvgpu_err(g, "comptag kind 0x%x can't be"
" downgraded to uncompressed kind",
bfr->kind_v);
return -EINVAL;
@@ -1432,9 +1430,6 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
/* comptags only supported for suitable kinds, 128KB pagesize */
if (kind_compressible &&
vm->gmmu_page_sizes[pgsz_idx] < g->ops.fb.compressible_page_size(g)) {
/*
gk20a_warn(d, "comptags specified"
" but pagesize being used doesn't support it");*/
/* it is safe to fall back to uncompressed as
functionality is not harmed */
bfr->kind_v = bfr->uc_kind_v;
@@ -1453,19 +1448,19 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
u64 map_offset, u64 map_size,
struct vm_reserved_va_node **pva_node)
{
struct device *dev = dev_from_vm(vm);
struct gk20a *g = vm->mm->g;
struct vm_reserved_va_node *va_node;
struct mapped_buffer_node *buffer;
u64 map_end = map_offset + map_size;
/* can wrap around with insane map_size; zero is disallowed too */
if (map_end <= map_offset) {
gk20a_warn(dev, "fixed offset mapping with invalid map_size");
nvgpu_warn(g, "fixed offset mapping with invalid map_size");
return -EINVAL;
}
if (map_offset & (vm->gmmu_page_sizes[bfr->pgsz_idx] - 1)) {
gk20a_err(dev, "map offset must be buffer page size aligned 0x%llx",
nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
map_offset);
return -EINVAL;
}
@@ -1474,13 +1469,13 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
* userspace-managed address spaces */
va_node = addr_to_reservation(vm, map_offset);
if (!va_node && !vm->userspace_managed) {
gk20a_warn(dev, "fixed offset mapping without space allocation");
nvgpu_warn(g, "fixed offset mapping without space allocation");
return -EINVAL;
}
/* Mapped area should fit inside va, if there's one */
if (va_node && map_end > va_node->vaddr_start + va_node->size) {
gk20a_warn(dev, "fixed offset mapping size overflows va node");
nvgpu_warn(g, "fixed offset mapping size overflows va node");
return -EINVAL;
}
@@ -1490,7 +1485,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
buffer = find_mapped_buffer_less_than_locked(
vm->mapped_buffers, map_offset + map_size);
if (buffer && buffer->addr + buffer->size > map_offset) {
gk20a_warn(dev, "overlapping buffer map requested");
nvgpu_warn(g, "overlapping buffer map requested");
return -EINVAL;
}
@@ -1517,7 +1512,6 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
{
int err = 0;
bool allocated = false;
struct device *d = dev_from_vm(vm);
struct gk20a *g = gk20a_from_vm(vm);
int ctag_granularity = g->ops.fb.compression_page_size(g);
u32 ctag_lines = DIV_ROUND_UP_ULL(size, ctag_granularity);
@@ -1527,7 +1521,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
map_offset = gk20a_vm_alloc_va(vm, size,
pgsz_idx);
if (!map_offset) {
gk20a_err(d, "failed to allocate va space");
nvgpu_err(g, "failed to allocate va space");
err = -ENOMEM;
goto fail_alloc;
}
@@ -1563,7 +1557,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
priv,
aperture);
if (err) {
gk20a_err(d, "failed to update ptes on map");
nvgpu_err(g, "failed to update ptes on map");
goto fail_validate;
}
@@ -1577,7 +1571,7 @@ fail_validate:
if (allocated)
gk20a_vm_free_va(vm, map_offset, size, pgsz_idx);
fail_alloc:
gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
return 0;
}
@@ -1596,8 +1590,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
if (va_allocated) {
err = gk20a_vm_free_va(vm, vaddr, size, pgsz_idx);
if (err) {
dev_err(dev_from_vm(vm),
"failed to free va");
nvgpu_err(g, "failed to free va");
return;
}
}
@@ -1614,8 +1607,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
sparse, 0,
APERTURE_INVALID); /* don't care for unmap */
if (err)
dev_err(dev_from_vm(vm),
"failed to update gmmu ptes on unmap");
nvgpu_err(g, "failed to update gmmu ptes on unmap");
/* flush l2 so any dirty lines are written out *now*.
* also as we could potentially be switching this buffer
@@ -1647,7 +1639,7 @@ static enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
} else if (WARN_ON(buf_owner == g && !g->mm.vidmem_is_vidmem)) {
/* Looks like our video memory, but this gpu doesn't support
* it. Warn about a bug and bail out */
gk20a_warn(dev_from_gk20a(g),
nvgpu_warn(g,
"dmabuf is our vidmem but we don't have local vidmem");
return APERTURE_INVALID;
} else if (buf_owner != g) {
@@ -1860,7 +1852,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
if (!g->mm.vidmem.cleared) {
err = gk20a_vidmem_clear_all(g);
if (err) {
gk20a_err(g->dev,
nvgpu_err(g,
"failed to clear whole vidmem");
goto err_kfree;
}
@@ -2037,7 +2029,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
if (user_mapped && vm->userspace_managed &&
!(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
gk20a_err(d,
nvgpu_err(g,
"%s: non-fixed-offset mapping not available on userspace managed address spaces",
__func__);
return -EFAULT;
@@ -2068,7 +2060,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
* track the difference between those two cases we have
* to fail the mapping when we run out of SMMU space.
*/
gk20a_warn(d, "oom allocating tracking buffer");
nvgpu_warn(g, "oom allocating tracking buffer");
goto clean_up;
}
@@ -2111,7 +2103,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
err = setup_buffer_kind_and_compression(vm, flags, &bfr, bfr.pgsz_idx);
if (unlikely(err)) {
gk20a_err(d, "failure setting up kind and compression");
nvgpu_err(g, "failure setting up kind and compression");
goto clean_up;
}
@@ -2204,7 +2196,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
/* TBD: check for multiple mapping of same buffer */
mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer));
if (!mapped_buffer) {
gk20a_warn(d, "oom allocating tracking buffer");
nvgpu_warn(g, "oom allocating tracking buffer");
goto clean_up;
}
mapped_buffer->dmabuf = dmabuf;
@@ -2230,7 +2222,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
err = insert_mapped_buffer(vm, mapped_buffer);
if (err) {
gk20a_err(d, "failed to insert into mapped buffer tree");
nvgpu_err(g, "failed to insert into mapped buffer tree");
goto clean_up;
}
inserted = true;
@@ -2274,7 +2266,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
u32 *flags)
{
struct mapped_buffer_node *mapped_buffer;
struct device *d = dev_from_vm(vm);
struct gk20a *g = vm->mm->g;
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
@@ -2283,7 +2275,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
if (!mapped_buffer || !mapped_buffer->user_mapped)
{
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva);
nvgpu_err(g, "%s: bad offset 0x%llx", __func__, mapping_gva);
return -EFAULT;
}
@@ -2316,19 +2308,18 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
{
struct mapped_buffer_node *mapped_buffer;
struct gk20a *g = gk20a_from_vm(vm);
struct device *d = dev_from_vm(vm);
const bool fixed_mapping =
(flags & NVGPU_AS_MAP_BUFFER_COMPBITS_FLAGS_FIXED_OFFSET) != 0;
if (vm->userspace_managed && !fixed_mapping) {
gk20a_err(d,
nvgpu_err(g,
"%s: non-fixed-offset mapping is not available on userspace managed address spaces",
__func__);
return -EFAULT;
}
if (fixed_mapping && !vm->userspace_managed) {
gk20a_err(d,
nvgpu_err(g,
"%s: fixed-offset mapping is available only on userspace managed address spaces",
__func__);
return -EFAULT;
@@ -2341,13 +2332,13 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
if (!mapped_buffer || !mapped_buffer->user_mapped) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva);
nvgpu_err(g, "%s: bad offset 0x%llx", __func__, mapping_gva);
return -EFAULT;
}
if (!mapped_buffer->ctags_mappable) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d, "%s: comptags not mappable, offset 0x%llx",
nvgpu_err(g, "%s: comptags not mappable, offset 0x%llx",
__func__, mapping_gva);
return -EFAULT;
}
@@ -2366,7 +2357,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
if (!mapped_buffer->ctag_map_win_size) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d,
nvgpu_err(g,
"%s: mapping 0x%llx does not have "
"mappable comptags",
__func__, mapping_gva);
@@ -2402,7 +2393,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
* before before the buffer is
* unmapped */
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d,
nvgpu_err(g,
"%s: comptags cannot be mapped into allocated space",
__func__);
return -EINVAL;
@@ -2429,7 +2420,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
if (!mapped_buffer->ctag_map_win_addr) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d,
nvgpu_err(g,
"%s: failed to map comptags for mapping 0x%llx",
__func__, mapping_gva);
return -ENOMEM;
@@ -2437,7 +2428,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
} else if (fixed_mapping && *compbits_win_gva &&
mapped_buffer->ctag_map_win_addr != *compbits_win_gva) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d,
nvgpu_err(g,
"%s: re-requesting comptags map into mismatching address. buffer offset 0x"
"%llx, existing comptag map at 0x%llx, requested remap 0x%llx",
__func__, mapping_gva,
@@ -2486,7 +2477,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
aperture);
nvgpu_mutex_release(&vm->update_gmmu_lock);
if (!vaddr) {
gk20a_err(dev_from_vm(vm), "failed to allocate va space");
nvgpu_err(g, "failed to allocate va space");
return 0;
}
@@ -2553,7 +2544,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
&gk20a_fence_out);
if (err) {
gk20a_err(g->dev,
nvgpu_err(g,
"Failed gk20a_ce_execute_ops[%d]", err);
return err;
}
@@ -2576,7 +2567,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
gk20a_fence_put(gk20a_last_fence);
if (err)
gk20a_err(g->dev,
nvgpu_err(g,
"fence wait failed for CE execute ops");
}
@@ -2692,7 +2683,7 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
int err = 0;
*sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
if (!(*sgt)) {
dev_err(d, "failed to allocate memory\n");
nvgpu_err(g, "failed to allocate memory\n");
err = -ENOMEM;
goto fail;
}
@@ -2700,7 +2691,7 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
cpuva, iova,
size);
if (err) {
dev_err(d, "failed to create sg table\n");
nvgpu_err(g, "failed to create sg table\n");
goto fail;
}
sg_dma_address((*sgt)->sgl) = iova;
@@ -2723,14 +2714,14 @@ int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt,
*sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
if (!(*sgt)) {
dev_err(d, "failed to allocate memory\n");
nvgpu_err(g, "failed to allocate memory\n");
err = -ENOMEM;
goto fail;
}
err = sg_alloc_table_from_pages(*sgt, pages,
DIV_ROUND_UP(size, PAGE_SIZE), 0, size, GFP_KERNEL);
if (err) {
dev_err(d, "failed to allocate sg_table\n");
nvgpu_err(g, "failed to allocate sg_table\n");
goto fail;
}
sg_dma_address((*sgt)->sgl) = iova;
@@ -3049,7 +3040,7 @@ static int update_gmmu_level_locked(struct vm_gk20a *vm,
/* get cpu access to the ptes */
err = map_gmmu_pages(g, next_pte);
if (err) {
gk20a_err(dev_from_vm(vm),
nvgpu_err(g,
"couldn't map ptes for update as=%d",
vm_aspace_id(vm));
return err;
@@ -3113,7 +3104,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
err = map_gmmu_pages(g, &vm->pdb);
if (err) {
gk20a_err(dev_from_vm(vm),
nvgpu_err(g,
"couldn't map ptes for update as=%d",
vm_aspace_id(vm));
return err;
@@ -3284,14 +3275,14 @@ void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer,
void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset)
{
struct device *d = dev_from_vm(vm);
struct gk20a *g = vm->mm->g;
struct mapped_buffer_node *mapped_buffer;
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset);
if (!mapped_buffer) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
return;
}
@@ -4195,14 +4186,13 @@ void gk20a_deinit_vm(struct vm_gk20a *vm)
int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
{
struct device *dev = dev_from_gk20a(g);
int err;
gk20a_dbg_fn("");
err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
if (err) {
gk20a_err(dev, "%s: memory allocation failed\n", __func__);
nvgpu_err(g, "%s: memory allocation failed\n", __func__);
return err;
}
@@ -4462,8 +4452,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
} while (!nvgpu_timeout_expired(&timeout));
if (nvgpu_timeout_peek_expired(&timeout))
gk20a_warn(dev_from_gk20a(g),
"l2_system_invalidate too many retries");
nvgpu_warn(g, "l2_system_invalidate too many retries");
trace_gk20a_mm_l2_invalidate_done(g->name);
}


@@ -27,6 +27,7 @@
#include <nvgpu/timers.h>
#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include "gk20a.h"
#include "gr_gk20a.h"
@@ -314,7 +315,7 @@ static void printtrace(struct pmu_gk20a *pmu)
trace = (char *)tracebuffer;
trace1 = (u32 *)tracebuffer;
gk20a_err(dev_from_gk20a(g), "Dump pmutrace");
nvgpu_err(g, "Dump pmutrace");
for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
for (j = 0; j < 0x40; j++)
if (trace1[(i / 4) + j])
@@ -335,7 +336,7 @@ static void printtrace(struct pmu_gk20a *pmu)
m += k + 2;
}
scnprintf((buf + count), 0x40, "%s", (trace+i+20+m));
gk20a_err(dev_from_gk20a(g), "%s", buf);
nvgpu_err(g, "%s", buf);
}
nvgpu_kfree(g, tracebuffer);
}
@@ -2184,8 +2185,7 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
get_pmu_sequence_out_alloc_ptr_v0;
break;
default:
gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)),
"PMU code version not supported version: %d\n",
nvgpu_err(g, "PMU code version not supported version: %d\n",
pmu->desc->app_version);
err = -EINVAL;
goto fail_pmu_seq;
@@ -2217,14 +2217,12 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu,
u32 *dst_u32 = (u32*)dst;
if (size == 0) {
gk20a_err(dev_from_gk20a(g),
"size is zero");
nvgpu_err(g, "size is zero");
return;
}
if (src & 0x3) {
gk20a_err(dev_from_gk20a(g),
"src (0x%08x) not 4-byte aligned", src);
nvgpu_err(g, "src (0x%08x) not 4-byte aligned", src);
return;
}
@@ -2263,14 +2261,12 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
u32 *src_u32 = (u32*)src;
if (size == 0) {
gk20a_err(dev_from_gk20a(g),
"size is zero");
nvgpu_err(g, "size is zero");
return;
}
if (dst & 0x3) {
gk20a_err(dev_from_gk20a(g),
"dst (0x%08x) not 4-byte aligned", dst);
nvgpu_err(g, "dst (0x%08x) not 4-byte aligned", dst);
return;
}
@@ -2300,8 +2296,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
data = gk20a_readl(g, pwr_falcon_dmemc_r(port)) & addr_mask;
size = ALIGN(size, 4);
if (data != ((dst + size) & addr_mask)) {
gk20a_err(dev_from_gk20a(g),
"copy failed. bytes written %d, expected %d",
nvgpu_err(g, "copy failed. bytes written %d, expected %d",
data - dst, size);
}
nvgpu_mutex_release(&pmu->pmu_copy_lock);
@@ -2432,7 +2427,7 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
} while (!nvgpu_timeout_expired(&timeout));
g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
nvgpu_err(g, "Falcon mem scrubbing timeout");
return -ETIMEDOUT;
} else {
@@ -2615,8 +2610,7 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu,
index = find_first_zero_bit(pmu->pmu_seq_tbl,
sizeof(pmu->pmu_seq_tbl));
if (index >= sizeof(pmu->pmu_seq_tbl)) {
gk20a_err(dev_from_gk20a(g),
"no free sequence available");
nvgpu_err(g, "no free sequence available");
nvgpu_mutex_release(&pmu->pmu_seq_lock);
return -EAGAIN;
}
@@ -2787,7 +2781,7 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token)
gk20a_readl(g, pwr_pmu_mutex_id_r()));
if (data == pwr_pmu_mutex_id_value_init_v() ||
data == pwr_pmu_mutex_id_value_not_avail_v()) {
gk20a_warn(dev_from_gk20a(g),
nvgpu_warn(g,
"fail to generate mutex token: val 0x%08x",
owner);
usleep_range(20, 40);
@@ -2844,8 +2838,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token)
gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
if (*token != owner) {
gk20a_err(dev_from_gk20a(g),
"requester 0x%08x NOT match owner 0x%08x",
nvgpu_err(g, "requester 0x%08x NOT match owner 0x%08x",
*token, owner);
return -EINVAL;
}
@@ -2953,8 +2946,7 @@ static int pmu_queue_push(struct pmu_gk20a *pmu,
gk20a_dbg_fn("");
if (!queue->opened && queue->oflag == OFLAG_WRITE){
gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)),
"queue not opened for write");
nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for write");
return -EINVAL;
}
@@ -2972,8 +2964,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu,
*bytes_read = 0;
if (!queue->opened && queue->oflag == OFLAG_READ){
gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)),
"queue not opened for read");
nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for read");
return -EINVAL;
}
@@ -2989,7 +2980,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu,
used = queue->offset + queue->size - tail;
if (size > used) {
gk20a_warn(dev_from_gk20a(gk20a_from_pmu(pmu)),
nvgpu_warn(gk20a_from_pmu(pmu),
"queue size smaller than request read");
size = used;
}
@@ -3008,8 +2999,7 @@ static void pmu_queue_rewind(struct pmu_gk20a *pmu,
gk20a_dbg_fn("");
if (!queue->opened) {
gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)),
"queue not opened");
nvgpu_err(gk20a_from_pmu(pmu), "queue not opened");
return;
}
@@ -3132,7 +3122,6 @@ static int gk20a_prepare_ucode(struct gk20a *g)
{
struct pmu_gk20a *pmu = &g->pmu;
int err = 0;
struct device *d = dev_from_gk20a(g);
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = &mm->pmu.vm;
@@ -3141,7 +3130,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
pmu->fw = nvgpu_request_firmware(g, GK20A_PMU_UCODE_IMAGE, 0);
if (!pmu->fw) {
gk20a_err(d, "failed to load pmu ucode!!");
nvgpu_err(g, "failed to load pmu ucode!!");
return err;
}
@@ -3173,7 +3162,6 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
struct pmu_gk20a *pmu = &g->pmu;
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = &mm->pmu.vm;
struct device *d = dev_from_gk20a(g);
unsigned int i;
int err = 0;
u8 *ptr;
@@ -3228,7 +3216,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
&pmu->seq_buf);
if (err) {
gk20a_err(d, "failed to allocate memory\n");
nvgpu_err(g, "failed to allocate memory\n");
goto err_free_seq;
}
@@ -3245,7 +3233,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
&pmu->trace_buf);
if (err) {
gk20a_err(d, "failed to allocate pmu trace buffer\n");
nvgpu_err(g, "failed to allocate pmu trace buffer\n");
goto err_free_seq_buf;
}
@@ -3275,7 +3263,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
gk20a_dbg_pmu("reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
if (status != 0) {
gk20a_err(dev_from_gk20a(g), "PGENG cmd aborted");
nvgpu_err(g, "PGENG cmd aborted");
/* TBD: disable ELPG */
return;
}
@@ -3283,7 +3271,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
if ((!pmu->buf_loaded) &&
(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
gk20a_err(dev_from_gk20a(g), "failed to load PGENG buffer");
nvgpu_err(g, "failed to load PGENG buffer");
else {
schedule_work(&pmu->pg_init);
}
@@ -3571,7 +3559,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
gk20a_dbg_fn("");
if (status != 0) {
gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted");
nvgpu_err(g, "ELPG cmd aborted");
/* TBD: disable ELPG */
return;
}
@@ -3615,7 +3603,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
}
break;
default:
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"unsupported ELPG message : 0x%04x", elpg_msg->msg);
}
@@ -3630,7 +3618,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
gk20a_dbg_fn("");
if (status != 0) {
gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted");
nvgpu_err(g, "ELPG cmd aborted");
/* TBD: disable ELPG */
return;
}
@@ -3769,7 +3757,7 @@ static u8 get_perfmon_id(struct pmu_gk20a *pmu)
break;
#endif
default:
gk20a_err(g->dev, "no support for %x", ver);
nvgpu_err(g, "no support for %x", ver);
BUG();
}
@@ -3837,8 +3825,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
2 * sizeof(u16));
if (!pmu->sample_buffer) {
gk20a_err(dev_from_gk20a(g),
"failed to allocate perfmon sample buffer");
nvgpu_err(g, "failed to allocate perfmon sample buffer");
return -ENOMEM;
}
@@ -3893,8 +3880,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
pmu_copy_from_dmem(pmu, tail,
(u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
if (msg->hdr.unit_id != PMU_UNIT_INIT) {
gk20a_err(dev_from_gk20a(g),
"expecting init msg");
nvgpu_err(g, "expecting init msg");
return -EINVAL;
}
@@ -3902,8 +3888,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
(u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
gk20a_err(dev_from_gk20a(g),
"expecting init msg");
nvgpu_err(g, "expecting init msg");
return -EINVAL;
}
@@ -3970,8 +3955,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
err = pmu_queue_open_read(pmu, queue);
if (err) {
gk20a_err(dev_from_gk20a(g),
"fail to open queue %d for read", queue->id);
nvgpu_err(g, "fail to open queue %d for read", queue->id);
*status = err;
return false;
}
@@ -3979,8 +3963,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
err = pmu_queue_pop(pmu, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if (err || bytes_read != PMU_MSG_HDR_SIZE) {
gk20a_err(dev_from_gk20a(g),
"fail to read msg from queue %d", queue->id);
nvgpu_err(g, "fail to read msg from queue %d", queue->id);
*status = err | -EINVAL;
goto clean_up;
}
@@ -3991,7 +3974,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
err = pmu_queue_pop(pmu, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if (err || bytes_read != PMU_MSG_HDR_SIZE) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"fail to read msg from queue %d", queue->id);
*status = err | -EINVAL;
goto clean_up;
@@ -3999,8 +3982,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
}
if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) {
gk20a_err(dev_from_gk20a(g),
"read invalid unit_id %d from queue %d",
nvgpu_err(g, "read invalid unit_id %d from queue %d",
msg->hdr.unit_id, queue->id);
*status = -EINVAL;
goto clean_up;
@@ -4011,7 +3993,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
err = pmu_queue_pop(pmu, queue, &msg->msg,
read_size, &bytes_read);
if (err || bytes_read != read_size) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"fail to read msg from queue %d", queue->id);
*status = err;
goto clean_up;
@@ -4020,8 +4002,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
err = pmu_queue_close(pmu, queue, true);
if (err) {
gk20a_err(dev_from_gk20a(g),
"fail to close queue %d", queue->id);
nvgpu_err(g, "fail to close queue %d", queue->id);
*status = err;
return false;
}
@@ -4031,8 +4012,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
clean_up:
err = pmu_queue_close(pmu, queue, false);
if (err)
gk20a_err(dev_from_gk20a(g),
"fail to close queue %d", queue->id);
nvgpu_err(g, "fail to close queue %d", queue->id);
return false;
}
@@ -4049,23 +4029,20 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
seq = &pmu->seq[msg->hdr.seq_id];
if (seq->state != PMU_SEQ_STATE_USED &&
seq->state != PMU_SEQ_STATE_CANCELLED) {
gk20a_err(dev_from_gk20a(g),
"msg for an unknown sequence %d", seq->id);
nvgpu_err(g, "msg for an unknown sequence %d", seq->id);
return -EINVAL;
}
if (msg->hdr.unit_id == PMU_UNIT_RC &&
msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) {
gk20a_err(dev_from_gk20a(g),
"unhandled cmd: seq %d", seq->id);
nvgpu_err(g, "unhandled cmd: seq %d", seq->id);
}
else if (seq->state != PMU_SEQ_STATE_CANCELLED) {
if (seq->msg) {
if (seq->msg->hdr.size >= msg->hdr.size) {
memcpy(seq->msg, msg, msg->hdr.size);
} else {
gk20a_err(dev_from_gk20a(g),
"sequence %d msg buffer too small",
nvgpu_err(g, "sequence %d msg buffer too small",
seq->id);
}
}
@@ -4158,7 +4135,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
&pmu->zbc_save_done, 1);
if (!pmu->zbc_save_done)
gk20a_err(dev_from_gk20a(g), "ZBC save timeout");
nvgpu_err(g, "ZBC save timeout");
}
static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu)
@@ -4451,118 +4428,118 @@ void pmu_dump_falcon_stats(struct pmu_gk20a *pmu)
struct gk20a *g = gk20a_from_pmu(pmu);
unsigned int i;
gk20a_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d",
nvgpu_err(g, "pwr_falcon_os_r : %d",
gk20a_readl(g, pwr_falcon_os_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_cpuctl_r : 0x%x",
nvgpu_err(g, "pwr_falcon_cpuctl_r : 0x%x",
gk20a_readl(g, pwr_falcon_cpuctl_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_idlestate_r : 0x%x",
nvgpu_err(g, "pwr_falcon_idlestate_r : 0x%x",
gk20a_readl(g, pwr_falcon_idlestate_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox0_r : 0x%x",
nvgpu_err(g, "pwr_falcon_mailbox0_r : 0x%x",
gk20a_readl(g, pwr_falcon_mailbox0_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox1_r : 0x%x",
nvgpu_err(g, "pwr_falcon_mailbox1_r : 0x%x",
gk20a_readl(g, pwr_falcon_mailbox1_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqstat_r : 0x%x",
nvgpu_err(g, "pwr_falcon_irqstat_r : 0x%x",
gk20a_readl(g, pwr_falcon_irqstat_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmode_r : 0x%x",
nvgpu_err(g, "pwr_falcon_irqmode_r : 0x%x",
gk20a_readl(g, pwr_falcon_irqmode_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmask_r : 0x%x",
nvgpu_err(g, "pwr_falcon_irqmask_r : 0x%x",
gk20a_readl(g, pwr_falcon_irqmask_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqdest_r : 0x%x",
nvgpu_err(g, "pwr_falcon_irqdest_r : 0x%x",
gk20a_readl(g, pwr_falcon_irqdest_r()));
for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++)
gk20a_err(dev_from_gk20a(g), "pwr_pmu_mailbox_r(%d) : 0x%x",
nvgpu_err(g, "pwr_pmu_mailbox_r(%d) : 0x%x",
i, gk20a_readl(g, pwr_pmu_mailbox_r(i)));
for (i = 0; i < pwr_pmu_debug__size_1_v(); i++)
gk20a_err(dev_from_gk20a(g), "pwr_pmu_debug_r(%d) : 0x%x",
nvgpu_err(g, "pwr_pmu_debug_r(%d) : 0x%x",
i, gk20a_readl(g, pwr_pmu_debug_r(i)));
for (i = 0; i < 6/*NV_PPWR_FALCON_ICD_IDX_RSTAT__SIZE_1*/; i++) {
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rstat_f() |
pwr_pmu_falcon_icd_cmd_idx_f(i));
gk20a_err(dev_from_gk20a(g), "pmu_rstat (%d) : 0x%x",
nvgpu_err(g, "pmu_rstat (%d) : 0x%x",
i, gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
}
i = gk20a_readl(g, pwr_pmu_bar0_error_status_r());
gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_error_status_r : 0x%x", i);
nvgpu_err(g, "pwr_pmu_bar0_error_status_r : 0x%x", i);
if (i != 0) {
gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_addr_r : 0x%x",
nvgpu_err(g, "pwr_pmu_bar0_addr_r : 0x%x",
gk20a_readl(g, pwr_pmu_bar0_addr_r()));
gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_data_r : 0x%x",
nvgpu_err(g, "pwr_pmu_bar0_data_r : 0x%x",
gk20a_readl(g, pwr_pmu_bar0_data_r()));
gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_timeout_r : 0x%x",
nvgpu_err(g, "pwr_pmu_bar0_timeout_r : 0x%x",
gk20a_readl(g, pwr_pmu_bar0_timeout_r()));
gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_ctl_r : 0x%x",
nvgpu_err(g, "pwr_pmu_bar0_ctl_r : 0x%x",
gk20a_readl(g, pwr_pmu_bar0_ctl_r()));
}
i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r());
gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_fecs_error_r : 0x%x", i);
nvgpu_err(g, "pwr_pmu_bar0_fecs_error_r : 0x%x", i);
i = gk20a_readl(g, pwr_falcon_exterrstat_r());
gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterrstat_r : 0x%x", i);
nvgpu_err(g, "pwr_falcon_exterrstat_r : 0x%x", i);
if (pwr_falcon_exterrstat_valid_v(i) ==
pwr_falcon_exterrstat_valid_true_v()) {
gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterraddr_r : 0x%x",
nvgpu_err(g, "pwr_falcon_exterraddr_r : 0x%x",
gk20a_readl(g, pwr_falcon_exterraddr_r()));
gk20a_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
nvgpu_err(g, "pmc_enable : 0x%x",
gk20a_readl(g, mc_enable_r()));
}
gk20a_err(dev_from_gk20a(g), "pwr_falcon_engctl_r : 0x%x",
nvgpu_err(g, "pwr_falcon_engctl_r : 0x%x",
gk20a_readl(g, pwr_falcon_engctl_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_curctx_r : 0x%x",
nvgpu_err(g, "pwr_falcon_curctx_r : 0x%x",
gk20a_readl(g, pwr_falcon_curctx_r()));
gk20a_err(dev_from_gk20a(g), "pwr_falcon_nxtctx_r : 0x%x",
nvgpu_err(g, "pwr_falcon_nxtctx_r : 0x%x",
gk20a_readl(g, pwr_falcon_nxtctx_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_IMB : 0x%x",
nvgpu_err(g, "PMU_FALCON_REG_IMB : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_DMB : 0x%x",
nvgpu_err(g, "PMU_FALCON_REG_DMB : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CSW : 0x%x",
nvgpu_err(g, "PMU_FALCON_REG_CSW : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CTX : 0x%x",
nvgpu_err(g, "PMU_FALCON_REG_CTX : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_EXCI : 0x%x",
nvgpu_err(g, "PMU_FALCON_REG_EXCI : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
for (i = 0; i < 4; i++) {
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_PC));
gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_PC : 0x%x",
nvgpu_err(g, "PMU_FALCON_REG_PC : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_SP));
gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_SP : 0x%x",
nvgpu_err(g, "PMU_FALCON_REG_SP : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
}
gk20a_err(dev_from_gk20a(g), "elpg stat: %d\n",
nvgpu_err(g, "elpg stat: %d\n",
pmu->elpg_stat);
/* PMU may crash due to FECS crash. Dump FECS status */
@@ -4600,8 +4577,7 @@ void gk20a_pmu_isr(struct gk20a *g)
}
if (intr & pwr_falcon_irqstat_halt_true_f()) {
gk20a_err(dev_from_gk20a(g),
"pmu halt intr not implemented");
nvgpu_err(g, "pmu halt intr not implemented");
pmu_dump_falcon_stats(pmu);
if (gk20a_readl(g, pwr_pmu_mailbox_r
(PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) ==
@@ -4610,7 +4586,7 @@ void gk20a_pmu_isr(struct gk20a *g)
g->ops.pmu.dump_secure_fuses(g);
}
if (intr & pwr_falcon_irqstat_exterr_true_f()) {
gk20a_err(dev_from_gk20a(g),
nvgpu_err(g,
"pmu exterr intr not implemented. Clearing interrupt.");
pmu_dump_falcon_stats(pmu);
@@ -4692,7 +4668,7 @@ static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
return true;
invalid_cmd:
gk20a_err(dev_from_gk20a(g), "invalid pmu cmd :\n"
nvgpu_err(g, "invalid pmu cmd :\n"
"queue_id=%d,\n"
"cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n"
"payload in=%p, in_size=%d, in_offset=%d,\n"
@@ -4736,8 +4712,7 @@ static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
clean_up:
if (err)
gk20a_err(dev_from_gk20a(g),
"fail to write cmd to queue %d", queue_id);
nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
else
gk20a_dbg_fn("done");
@@ -4762,7 +4737,7 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
err = nvgpu_dma_alloc_map_vid(vm, size, mem);
if (err) {
gk20a_err(g->dev, "memory allocation failed");
nvgpu_err(g, "memory allocation failed");
return -ENOMEM;
}
@@ -4778,7 +4753,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
err = nvgpu_dma_alloc_map_sys(vm, size, mem);
if (err) {
gk20a_err(g->dev, "failed to allocate memory\n");
nvgpu_err(g, "failed to allocate memory\n");
return -ENOMEM;
}
@@ -4806,14 +4781,11 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
if (!cmd)
gk20a_warn(dev_from_gk20a(g),
"%s(): PMU cmd buffer is NULL", __func__);
nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
else if (!seq_desc)
gk20a_warn(dev_from_gk20a(g),
"%s(): Seq descriptor is NULL", __func__);
nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
else
gk20a_warn(dev_from_gk20a(g),
"%s(): PMU is not ready", __func__);
nvgpu_warn(g, "%s(): PMU is not ready", __func__);
WARN_ON(1);
return -EINVAL;
@@ -5044,7 +5016,7 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
/* something is not right if we end up in following code path */
if (unlikely(pmu->elpg_refcnt > 1)) {
gk20a_warn(dev_from_gk20a(g),
nvgpu_warn(g,
"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
__func__, pmu->elpg_refcnt);
WARN_ON(1);
@@ -5102,7 +5074,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
pmu->elpg_refcnt--;
if (pmu->elpg_refcnt > 0) {
gk20a_warn(dev_from_gk20a(g),
nvgpu_warn(g,
"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
__func__, pmu->elpg_refcnt);
WARN_ON(1);
@@ -5123,8 +5095,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
&pmu->elpg_stat, PMU_ELPG_STAT_ON);
if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
gk20a_err(dev_from_gk20a(g),
"ELPG_ALLOW_ACK failed, elpg_stat=%d",
nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
pmu->elpg_stat);
pmu_dump_elpg_stats(pmu);
pmu_dump_falcon_stats(pmu);
@@ -5175,8 +5146,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
gk20a_get_gr_idle_timeout(g),
ptr, PMU_ELPG_STAT_OFF);
if (*ptr != PMU_ELPG_STAT_OFF) {
gk20a_err(dev_from_gk20a(g),
"ELPG_DISALLOW_ACK failed");
nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
pmu_dump_elpg_stats(pmu);
pmu_dump_falcon_stats(pmu);
ret = -EBUSY;


@@ -20,6 +20,8 @@
#include "gk20a.h"
#include <nvgpu/log.h>
#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>
#include <nvgpu/hw/gk20a/hw_pri_ringmaster_gk20a.h>
#include <nvgpu/hw/gk20a/hw_pri_ringstation_sys_gk20a.h>
@@ -121,6 +123,5 @@ void gk20a_priv_ring_isr(struct gk20a *g)
} while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && --retry);
if (retry <= 0)
gk20a_warn(dev_from_gk20a(g),
"priv ringmaster cmd ack too many retries");
nvgpu_warn(g, "priv ringmaster cmd ack too many retries");
}


@@ -25,7 +25,7 @@
#include "dbg_gpu_gk20a.h"
#include "regops_gk20a.h"
#include <nvgpu/log.h>
static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
{
@@ -408,7 +408,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
ops, num_ops);
if (!ok) {
dev_err(dbg_s->dev, "invalid op(s)");
nvgpu_err(g, "invalid op(s)");
err = -EINVAL;
/* each op has its own err/status */
goto clean_up;
@@ -527,7 +527,6 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s,
break;
default:
op->status |= REGOP(STATUS_UNSUPPORTED_OP);
/*gk20a_err(dbg_s->dev, "Invalid regops op %d!", op->op);*/
err = -EINVAL;
break;
}
@@ -546,7 +545,6 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s,
*/
default:
op->status |= REGOP(STATUS_INVALID_TYPE);
/*gk20a_err(dbg_s->dev, "Invalid regops type %d!", op->type);*/
err = -EINVAL;
break;
}
@@ -593,7 +591,7 @@ static bool check_whitelists(struct dbg_session_gk20a *dbg_s,
} else if (op->type == REGOP(TYPE_GR_CTX)) {
/* it's a context-relative op */
if (!ch) {
gk20a_err(dbg_s->dev, "can't perform ctx regop unless bound");
nvgpu_err(dbg_s->g, "can't perform ctx regop unless bound");
op->status = REGOP(STATUS_UNSUPPORTED_OP);
return valid;
}
@@ -637,7 +635,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
/* support only 24-bit 4-byte aligned offsets */
if (offset & 0xFF000003) {
gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset);
op->status |= REGOP(STATUS_INVALID_OFFSET);
return -EINVAL;
}
@@ -675,7 +673,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
}
if (!valid) {
gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset);
op->status |= REGOP(STATUS_INVALID_OFFSET);
return -EINVAL;
}
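
The debug-session hunks above all follow one shape: code that used to log against dbg_s->dev now reaches the GPU through the session's gk20a back-pointer. A hypothetical helper, sketched only to show the pattern (the function name is not from this change, and it assumes the driver headers that declare struct dbg_session_gk20a and nvgpu_err()):

static int check_regop_offset_aligned(struct dbg_session_gk20a *dbg_s,
				      u32 offset)
{
	/* regops accept only 24-bit, 4-byte-aligned offsets. */
	if (offset & 0xFF000003) {
		nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x", offset);
		return -EINVAL;
	}
	return 0;
}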

View File

@@ -26,6 +26,7 @@
#include <uapi/linux/nvgpu.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include "ctxsw_trace_gk20a.h"
#include "gk20a.h"
@@ -330,8 +331,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
nvgpu_mutex_acquire(&sched->status_lock);
if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
gk20a_warn(dev_from_gk20a(g),
"tsgid=%d already referenced", tsgid);
nvgpu_warn(g, "tsgid=%d already referenced", tsgid);
/* unlock status_lock as gk20a_tsg_release locks it */
nvgpu_mutex_release(&sched->status_lock);
kref_put(&tsg->refcount, gk20a_tsg_release);
@@ -363,8 +363,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
nvgpu_mutex_acquire(&sched->status_lock);
if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
nvgpu_mutex_release(&sched->status_lock);
gk20a_warn(dev_from_gk20a(g),
"tsgid=%d not previously referenced", tsgid);
nvgpu_warn(g, "tsgid=%d not previously referenced", tsgid);
return -ENXIO;
}
NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap);

View File

@@ -20,6 +20,8 @@
#include "gk20a.h"
#include <nvgpu/log.h>
#include <nvgpu/hw/gk20a/hw_sim_gk20a.h>
static inline void sim_writel(struct gk20a *g, u32 r, u32 v)
@@ -65,7 +67,7 @@ static void gk20a_remove_sim_support(struct sim_gk20a *s)
gk20a_free_sim_support(g);
}
static int alloc_and_kmap_iopage(struct device *d,
static int alloc_and_kmap_iopage(struct gk20a *g,
void **kvaddr,
u64 *phys,
struct page **page)
@@ -75,14 +77,14 @@ static int alloc_and_kmap_iopage(struct device *d,
if (!*page) {
err = -ENOMEM;
dev_err(d, "couldn't allocate io page\n");
nvgpu_err(g, "couldn't allocate io page\n");
goto fail;
}
*kvaddr = kmap(*page);
if (!*kvaddr) {
err = -ENOMEM;
dev_err(d, "couldn't kmap io page\n");
nvgpu_err(g, "couldn't kmap io page\n");
goto fail;
}
*phys = page_to_phys(*page);
@@ -105,27 +107,27 @@ int gk20a_init_sim_support(struct platform_device *pdev)
g->sim.regs = gk20a_ioremap_resource(pdev, GK20A_SIM_IORESOURCE_MEM,
&g->sim.reg_mem);
if (IS_ERR(g->sim.regs)) {
dev_err(dev, "failed to remap gk20a sim regs\n");
nvgpu_err(g, "failed to remap gk20a sim regs\n");
err = PTR_ERR(g->sim.regs);
goto fail;
}
/* allocate sim event/msg buffers */
err = alloc_and_kmap_iopage(dev, &g->sim.send_bfr.kvaddr,
err = alloc_and_kmap_iopage(g, &g->sim.send_bfr.kvaddr,
&g->sim.send_bfr.phys,
&g->sim.send_bfr.page);
err = err || alloc_and_kmap_iopage(dev, &g->sim.recv_bfr.kvaddr,
err = err || alloc_and_kmap_iopage(g, &g->sim.recv_bfr.kvaddr,
&g->sim.recv_bfr.phys,
&g->sim.recv_bfr.page);
err = err || alloc_and_kmap_iopage(dev, &g->sim.msg_bfr.kvaddr,
err = err || alloc_and_kmap_iopage(g, &g->sim.msg_bfr.kvaddr,
&g->sim.msg_bfr.phys,
&g->sim.msg_bfr.page);
if (!(g->sim.send_bfr.kvaddr && g->sim.recv_bfr.kvaddr &&
g->sim.msg_bfr.kvaddr)) {
dev_err(dev, "couldn't allocate all sim buffers\n");
nvgpu_err(g, "couldn't allocate all sim buffers\n");
goto fail;
}
@@ -275,7 +277,7 @@ static int rpc_recv_poll(struct gk20a *g)
(u64)recv_phys_addr_lo << PAGE_SHIFT;
if (recv_phys_addr != g->sim.msg_bfr.phys) {
dev_err(dev_from_gk20a(g), "%s Error in RPC reply\n",
nvgpu_err(g, "%s Error in RPC reply\n",
__func__);
return -1;
}
@@ -302,21 +304,21 @@ static int issue_rpc_and_wait(struct gk20a *g)
err = rpc_send_message(g);
if (err) {
dev_err(dev_from_gk20a(g), "%s failed rpc_send_message\n",
nvgpu_err(g, "%s failed rpc_send_message\n",
__func__);
return err;
}
err = rpc_recv_poll(g);
if (err) {
dev_err(dev_from_gk20a(g), "%s failed rpc_recv_poll\n",
nvgpu_err(g, "%s failed rpc_recv_poll\n",
__func__);
return err;
}
/* Now check if RPC really succeeded */
if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
dev_err(dev_from_gk20a(g), "%s received failed status!\n",
nvgpu_err(g, "%s received failed status!\n",
__func__);
return -(*sim_msg_hdr(g, sim_msg_result_r()));
}
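
Pieced together from the two simulator hunks above, the converted helper would look roughly as follows. This is a sketch only: the allocation flags and control flow are assumed to carry over unchanged from the pre-existing implementation, and the only functional difference is that logging now goes through g.

static int alloc_and_kmap_iopage(struct gk20a *g,
				 void **kvaddr,
				 u64 *phys,
				 struct page **page)
{
	int err = 0;

	*page = alloc_page(GFP_KERNEL);
	if (!*page) {
		err = -ENOMEM;
		nvgpu_err(g, "couldn't allocate io page\n");
		goto fail;
	}

	/* Map the page so the simulator message buffers can be accessed. */
	*kvaddr = kmap(*page);
	if (!*kvaddr) {
		err = -ENOMEM;
		nvgpu_err(g, "couldn't kmap io page\n");
		goto fail;
	}
	*phys = page_to_phys(*page);

fail:
	return err;
}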

View File

@@ -15,6 +15,7 @@
*/
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include "gk20a.h"
#include "tsg_gk20a.h"
@@ -93,7 +94,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
if (tsg->runlist_id == FIFO_INVAL_TSG_ID)
tsg->runlist_id = ch->runlist_id;
else if (tsg->runlist_id != ch->runlist_id) {
gk20a_err(dev_from_gk20a(tsg->g),
nvgpu_err(tsg->g,
"Error: TSG channel should be share same runlist ch[%d] tsg[%d]\n",
ch->runlist_id, tsg->runlist_id);
return -EINVAL;
@@ -260,8 +261,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g)
if (g->ops.fifo.tsg_open) {
err = g->ops.fifo.tsg_open(tsg);
if (err) {
gk20a_err(dev_from_gk20a(g),
"tsg %d fifo open failed %d",
nvgpu_err(g, "tsg %d fifo open failed %d",
tsg->tsgid, err);
goto clean_up;
}
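
For completeness, a sketch of the two call patterns that remain after the conversion. Both functions below are made up for illustration; get_gk20a() is assumed to be the existing gk20a.h helper that maps a Linux struct device back to its struct gk20a, for glue code that only has a device at hand.

/* Common code: already holds the gk20a instance, log against it directly. */
static void example_report_failure(struct gk20a *g, int err)
{
	if (err)
		nvgpu_err(g, "operation failed: %d", err);
}

/* Linux-specific glue: recover g from the device, then log as usual. */
static void example_report_failure_dev(struct device *dev, int err)
{
	struct gk20a *g = get_gk20a(dev);

	if (err)
		nvgpu_warn(g, "operation failed: %d", err);
}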