gpu: nvgpu: Use device instead of platform_device

Use struct device instead of struct platform_device wherever
possible. This allows adding other bus types later.

Change-Id: I1657287a68d85a542cdbdd8a00d1902c3d6e00ed
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1120466
Author: Terje Bergstrom <tbergstrom@nvidia.com>
Date:   2016-03-29 16:02:34 -07:00
Parent: 2382a8433f
Commit: e8bac374c0
38 changed files with 692 additions and 739 deletions
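
The pattern applied across the 38 files is mechanical: functions that took a
struct platform_device only to dereference its embedded struct device now take
the struct device itself. A minimal before/after sketch (illustrative names;
dev_warn() stands in for the driver's gk20a_warn() wrapper):

#include <linux/device.h>
#include <linux/platform_device.h>

/* Before: callers must hold a platform_device even though only the
 * embedded struct device is ever used. */
static void report_invalid_old(struct platform_device *pdev)
{
	dev_warn(&pdev->dev, "cde: invalid data section\n");
}

/* After: the generic handle is passed straight through, so the same
 * helper can later serve devices on other bus types unchanged. */
static void report_invalid_new(struct device *dev)
{
	dev_warn(dev, "cde: invalid data section\n");
}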

File: drivers/gpu/nvgpu/gk20a/cde_gk20a.c

@@ -1,7 +1,7 @@
/*
* Color decompression engine support
*
* Copyright (c) 2014-2015, NVIDIA Corporation. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -227,14 +227,14 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
/* check that the file can hold the buf */
if (buf->data_byte_offset != 0 &&
buf->data_byte_offset + buf->num_bytes > img->size) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid data section. buffer idx = %d",
gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
cde_ctx->num_bufs);
return -EINVAL;
}
/* check that we have enough buf elems available */
if (cde_ctx->num_bufs >= MAX_CDE_BUFS) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid data section. buffer idx = %d",
gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
cde_ctx->num_bufs);
return -ENOMEM;
}
@@ -243,7 +243,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
mem = cde_ctx->mem + cde_ctx->num_bufs;
err = gk20a_gmmu_alloc_map(cde_ctx->vm, buf->num_bytes, mem);
if (err) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: could not allocate device memory. buffer idx = %d",
gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
cde_ctx->num_bufs);
return -ENOMEM;
}
@@ -281,7 +281,7 @@ static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target,
current_value = (u64)(current_value >> 32) |
(u64)(current_value << 32);
} else {
gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown type. type=%d",
gk20a_warn(cde_ctx->dev, "cde: unknown type. type=%d",
type);
return -EINVAL;
}
@@ -315,7 +315,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
if (replace->target_buf >= cde_ctx->num_bufs ||
replace->source_buf >= cde_ctx->num_bufs) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d",
gk20a_warn(cde_ctx->dev, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d",
replace->target_buf, replace->source_buf,
cde_ctx->num_bufs);
return -EINVAL;
@@ -327,7 +327,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
if (source_mem->size < (replace->source_byte_offset + 3) ||
target_mem->size < (replace->target_byte_offset + 3)) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu",
gk20a_warn(cde_ctx->dev, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu",
replace->target_byte_offset,
replace->source_byte_offset,
source_mem->size,
@@ -344,7 +344,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
replace->shift, replace->mask,
vaddr);
if (err) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld",
gk20a_warn(cde_ctx->dev, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld",
err, replace->target_buf,
replace->target_byte_offset,
replace->source_buf,
@@ -431,7 +431,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
param->shift, param->mask, new_data);
if (err) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu",
gk20a_warn(cde_ctx->dev, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu",
err, i, param->id, param->target_buf,
param->target_byte_offset, new_data);
return err;
@@ -448,7 +448,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
struct mem_desc *target_mem;
if (param->target_buf >= cde_ctx->num_bufs) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
cde_ctx->num_params, param->target_buf,
cde_ctx->num_bufs);
return -EINVAL;
@@ -456,7 +456,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
target_mem = cde_ctx->mem + param->target_buf;
if (target_mem->size < (param->target_byte_offset + 3)) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
cde_ctx->num_params, param->target_byte_offset,
target_mem->size);
return -EINVAL;
@@ -464,14 +464,14 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
/* does this parameter fit into our parameter structure */
if (cde_ctx->num_params >= MAX_CDE_PARAMS) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: no room for new parameters param idx = %d",
gk20a_warn(cde_ctx->dev, "cde: no room for new parameters param idx = %d",
cde_ctx->num_params);
return -ENOMEM;
}
/* is the given id valid? */
if (param->id >= NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u",
gk20a_warn(cde_ctx->dev, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u",
param->id, cde_ctx->num_params,
NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS);
return -EINVAL;
@@ -498,7 +498,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
err = gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx);
if (err) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: failed to allocate ctx. err=%d",
gk20a_warn(cde_ctx->dev, "cde: failed to allocate ctx. err=%d",
err);
return err;
}
@@ -524,7 +524,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
gpfifo = &cde_ctx->convert_cmd;
num_entries = &cde_ctx->convert_cmd_num_entries;
} else {
gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown command. op=%u",
gk20a_warn(cde_ctx->dev, "cde: unknown command. op=%u",
op);
return -EINVAL;
}
@@ -533,7 +533,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
*gpfifo = kzalloc(sizeof(struct nvgpu_gpfifo) * num_elems,
GFP_KERNEL);
if (!*gpfifo) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: could not allocate memory for gpfifo entries");
gk20a_warn(cde_ctx->dev, "cde: could not allocate memory for gpfifo entries");
return -ENOMEM;
}
@@ -543,7 +543,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
/* validate the current entry */
if (cmd_elem->target_buf >= cde_ctx->num_bufs) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: target buffer is not available (target=%u, num_bufs=%u)",
gk20a_warn(cde_ctx->dev, "cde: target buffer is not available (target=%u, num_bufs=%u)",
cmd_elem->target_buf, cde_ctx->num_bufs);
return -EINVAL;
}
@@ -551,7 +551,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
target_mem = cde_ctx->mem + cmd_elem->target_buf;
if (target_mem->size <
cmd_elem->target_byte_offset + cmd_elem->num_bytes) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)",
gk20a_warn(cde_ctx->dev, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)",
target_mem->size,
cmd_elem->target_byte_offset,
cmd_elem->num_bytes);
@@ -585,7 +585,7 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
/* allocate buffer that has space for both */
combined_cmd = kzalloc(total_bytes, GFP_KERNEL);
if (!combined_cmd) {
gk20a_warn(&cde_ctx->pdev->dev,
gk20a_warn(cde_ctx->dev,
"cde: could not allocate memory for gpfifo entries");
return -ENOMEM;
}
@@ -618,7 +618,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
min_size += 2 * sizeof(u32);
if (img->size < min_size) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid image header");
gk20a_warn(cde_ctx->dev, "cde: invalid image header");
return -EINVAL;
}
@@ -627,7 +627,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
min_size += num_of_elems * sizeof(*elem);
if (img->size < min_size) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: bad image");
gk20a_warn(cde_ctx->dev, "cde: bad image");
return -EINVAL;
}
@@ -664,7 +664,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
MAX_CDE_ARRAY_ENTRIES*sizeof(u32));
break;
default:
gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown header element");
gk20a_warn(cde_ctx->dev, "cde: unknown header element");
err = -EINVAL;
}
@@ -675,13 +675,13 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
}
if (!cde_ctx->init_convert_cmd || !cde_ctx->init_cmd_num_entries) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: convert command not defined");
gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
err = -EINVAL;
goto deinit_image;
}
if (!cde_ctx->convert_cmd || !cde_ctx->convert_cmd_num_entries) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: convert command not defined");
gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
err = -EINVAL;
goto deinit_image;
}
@@ -714,12 +714,12 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
gpfifo = cde_ctx->convert_cmd;
num_entries = cde_ctx->convert_cmd_num_entries;
} else {
gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown buffer");
gk20a_warn(cde_ctx->dev, "cde: unknown buffer");
return -EINVAL;
}
if (gpfifo == NULL || num_entries == 0) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: buffer not available");
gk20a_warn(cde_ctx->dev, "cde: buffer not available");
return -ENOSYS;
}
@@ -757,7 +757,7 @@ __releases(&cde_app->mutex)
struct gk20a_cde_ctx *cde_ctx = container_of(delay_work,
struct gk20a_cde_ctx, ctx_deleter_work);
struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
struct platform_device *pdev = cde_ctx->pdev;
struct device *dev = cde_ctx->dev;
int err;
/* someone has just taken it? engine deletion started? */
@@ -767,11 +767,11 @@ __releases(&cde_app->mutex)
gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
"cde: attempting to delete temporary %p", cde_ctx);
err = gk20a_busy(pdev);
err = gk20a_busy(dev);
if (err) {
/* this context would find new use anyway later, so not freeing
* here does not leak anything */
gk20a_warn(&pdev->dev, "cde: cannot set gk20a on, postponing"
gk20a_warn(dev, "cde: cannot set gk20a on, postponing"
" temp ctx deletion");
return;
}
@@ -795,7 +795,7 @@ __releases(&cde_app->mutex)
out:
mutex_unlock(&cde_app->mutex);
gk20a_idle(pdev);
gk20a_idle(dev);
}
static struct gk20a_cde_ctx *gk20a_cde_do_get_context(struct gk20a *g)
@@ -839,7 +839,7 @@ __must_hold(&cde_app->mutex)
cde_ctx = gk20a_cde_allocate_context(g);
if (IS_ERR(cde_ctx)) {
gk20a_warn(&g->dev->dev, "cde: cannot allocate context: %ld",
gk20a_warn(g->dev, "cde: cannot allocate context: %ld",
PTR_ERR(cde_ctx));
return cde_ctx;
}
@@ -888,7 +888,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct gk20a *g)
return ERR_PTR(-ENOMEM);
cde_ctx->g = g;
cde_ctx->pdev = g->dev;
cde_ctx->dev = g->dev;
ret = gk20a_cde_load(cde_ctx);
if (ret) {
@@ -951,7 +951,7 @@ __releases(&cde_app->mutex)
/* First, map the buffer to local va */
/* ensure that the compbits buffer has drvdata */
err = gk20a_dmabuf_alloc_drvdata(compbits_scatter_buf, &g->dev->dev);
err = gk20a_dmabuf_alloc_drvdata(compbits_scatter_buf, g->dev);
if (err)
goto exit_unlock;
@@ -1007,7 +1007,7 @@ __releases(&cde_app->mutex)
surface = dma_buf_vmap(compbits_scatter_buf);
if (IS_ERR(surface)) {
gk20a_warn(&g->dev->dev,
gk20a_warn(g->dev,
"dma_buf_vmap failed");
err = -EINVAL;
goto exit_unlock;
@@ -1017,9 +1017,9 @@ __releases(&cde_app->mutex)
gk20a_dbg(gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p",
surface, scatter_buffer);
sgt = gk20a_mm_pin(&g->dev->dev, compbits_scatter_buf);
sgt = gk20a_mm_pin(g->dev, compbits_scatter_buf);
if (IS_ERR(sgt)) {
gk20a_warn(&g->dev->dev,
gk20a_warn(g->dev,
"mm_pin failed");
err = -EINVAL;
goto exit_unlock;
@@ -1029,7 +1029,7 @@ __releases(&cde_app->mutex)
scatterbuffer_size);
WARN_ON(err);
gk20a_mm_unpin(&g->dev->dev, compbits_scatter_buf,
gk20a_mm_unpin(g->dev, compbits_scatter_buf,
sgt);
if (err)
goto exit_unlock;
@@ -1041,7 +1041,7 @@ __releases(&cde_app->mutex)
}
/* store source buffer compression tags */
gk20a_get_comptags(&g->dev->dev, compbits_scatter_buf, &comptags);
gk20a_get_comptags(g->dev, compbits_scatter_buf, &comptags);
cde_ctx->surf_param_offset = comptags.offset;
cde_ctx->surf_param_lines = comptags.lines;
@@ -1067,7 +1067,7 @@ __releases(&cde_app->mutex)
int id = param->id - NUM_RESERVED_PARAMS;
if (id < 0 || id >= MAX_CDE_USER_PARAMS) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown user parameter");
gk20a_warn(cde_ctx->dev, "cde: unknown user parameter");
err = -EINVAL;
goto exit_unlock;
}
@@ -1077,7 +1077,7 @@ __releases(&cde_app->mutex)
/* patch data */
err = gk20a_cde_patch_params(cde_ctx);
if (err) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: failed to patch parameters");
gk20a_warn(cde_ctx->dev, "cde: failed to patch parameters");
goto exit_unlock;
}
@@ -1140,19 +1140,19 @@ __releases(&cde_app->mutex)
if (ch->has_timedout) {
if (cde_ctx->is_temporary) {
gk20a_warn(&cde_ctx->pdev->dev,
gk20a_warn(cde_ctx->dev,
"cde: channel had timed out"
" (temporary channel)");
/* going to be deleted anyway */
} else {
gk20a_warn(&cde_ctx->pdev->dev,
gk20a_warn(cde_ctx->dev,
"cde: channel had timed out"
", reloading");
/* mark it to be deleted, replace with a new one */
mutex_lock(&cde_app->mutex);
cde_ctx->is_temporary = true;
if (gk20a_cde_create_context(g)) {
gk20a_err(&cde_ctx->pdev->dev,
gk20a_err(cde_ctx->dev,
"cde: can't replace context");
}
mutex_unlock(&cde_app->mutex);
@@ -1181,14 +1181,14 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
img = gk20a_request_firmware(g, "gpu2cde.bin");
if (!img) {
dev_err(&cde_ctx->pdev->dev, "cde: could not fetch the firmware");
dev_err(cde_ctx->dev, "cde: could not fetch the firmware");
return -ENOSYS;
}
ch = gk20a_open_new_channel_with_cb(g, gk20a_cde_finished_ctx_cb,
cde_ctx);
if (!ch) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: gk20a channel not available");
gk20a_warn(cde_ctx->dev, "cde: gk20a channel not available");
err = -ENOMEM;
goto err_get_gk20a_channel;
}
@@ -1198,7 +1198,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
ch->vm = &g->mm.cde.vm;
err = channel_gk20a_commit_va(ch);
if (err) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: could not bind vm");
gk20a_warn(cde_ctx->dev, "cde: could not bind vm");
goto err_commit_va;
}
@@ -1206,7 +1206,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
err = gk20a_alloc_channel_gpfifo(ch,
&(struct nvgpu_alloc_gpfifo_args){1024, 0});
if (err) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: unable to allocate gpfifo");
gk20a_warn(cde_ctx->dev, "cde: unable to allocate gpfifo");
goto err_alloc_gpfifo;
}
@@ -1218,7 +1218,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
false);
if (!vaddr) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: cannot map compression bit backing store");
gk20a_warn(cde_ctx->dev, "cde: cannot map compression bit backing store");
err = -ENOMEM;
goto err_map_backingstore;
}
@@ -1231,7 +1231,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
/* initialise the firmware */
err = gk20a_init_cde_img(cde_ctx, img);
if (err) {
gk20a_warn(&cde_ctx->pdev->dev, "cde: image initialisation failed");
gk20a_warn(cde_ctx->dev, "cde: image initialisation failed");
goto err_init_cde_img;
}
@@ -1248,7 +1248,7 @@ err_alloc_gpfifo:
err_commit_va:
err_get_gk20a_channel:
release_firmware(img);
dev_err(&cde_ctx->pdev->dev, "cde: couldn't initialise buffer converter: %d",
dev_err(cde_ctx->dev, "cde: couldn't initialise buffer converter: %d",
err);
return err;
}
@@ -1386,17 +1386,17 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
g->ops.cde.get_program_numbers(g, block_height_log2,
&hprog, &vprog);
else {
gk20a_warn(&g->dev->dev, "cde: chip not supported");
gk20a_warn(g->dev, "cde: chip not supported");
return -ENOSYS;
}
if (hprog < 0 || vprog < 0) {
gk20a_warn(&g->dev->dev, "cde: could not determine programs");
gk20a_warn(g->dev, "cde: could not determine programs");
return -ENOSYS;
}
if (xtiles > 8192 / 8 || ytiles > 8192 / 8)
gk20a_warn(&g->dev->dev, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
gk20a_warn(g->dev, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
xtiles, ytiles);
gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
@@ -1645,9 +1645,9 @@ static const struct file_operations gk20a_cde_reload_fops = {
.write = gk20a_cde_reload_write,
};
void gk20a_cde_debugfs_init(struct platform_device *dev)
void gk20a_cde_debugfs_init(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct gk20a *g = get_gk20a(dev);
debugfs_create_u32("cde_parameter", S_IWUSR | S_IRUGO,

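Several hunks above also swap platform_get_drvdata() for dev_get_drvdata().
This is behavior-preserving because the platform helper is a thin wrapper over
the struct device one; a sketch of the accessor shape (struct gk20a_platform
left opaque):

#include <linux/platform_device.h>

struct gk20a_platform;	/* driver-private; opaque in this sketch */

/* platform_get_drvdata(pdev) expands to dev_get_drvdata(&pdev->dev),
 * so data stored with platform_set_drvdata() at probe time is
 * reachable through either handle. */
static struct gk20a_platform *to_gk20a_platform(struct device *dev)
{
	return dev_get_drvdata(dev);
}
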
File: drivers/gpu/nvgpu/gk20a/cde_gk20a.h

@@ -1,7 +1,7 @@
/*
* GK20A color decompression engine support
*
* Copyright (c) 2014-2015, NVIDIA Corporation. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -215,7 +215,7 @@ struct gk20a_cde_param {
struct gk20a_cde_ctx {
struct gk20a *g;
struct platform_device *pdev;
struct device *dev;
/* channel related data */
struct channel_gk20a *ch;
@@ -288,7 +288,7 @@ int gk20a_cde_convert(struct gk20a *g,
struct nvgpu_fence *fence,
u32 __flags, struct gk20a_cde_param *params,
int num_params, struct gk20a_fence **fence_out);
void gk20a_cde_debugfs_init(struct platform_device *dev);
void gk20a_cde_debugfs_init(struct device *dev);
int gk20a_prepare_compressible_read(
struct gk20a *g, u32 buffer_fd, u32 request, u64 offset,

File: drivers/gpu/nvgpu/gk20a/channel_gk20a.c

@@ -74,7 +74,9 @@ static void gk20a_channel_clean_up_jobs(struct work_struct *work);
static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
{
struct channel_gk20a *ch = NULL;
struct gk20a_platform *platform = gk20a_get_platform(f->g->dev);
struct gk20a_platform *platform;
platform = gk20a_get_platform(f->g->dev);
mutex_lock(&f->free_chs_mutex);
if (!list_empty(&f->free_chs)) {
@@ -156,7 +158,7 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
int timeslice_period,
int *__timeslice_timeout, int *__timeslice_scale)
{
struct gk20a_platform *platform = platform_get_drvdata(g->dev);
struct gk20a_platform *platform = dev_get_drvdata(g->dev);
int value = scale_ptimer(timeslice_period,
ptimer_scalingfactor10x(platform->ptimer_src_freq));
int shift = 0;
@@ -1068,7 +1070,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
if (!ch)
return 0;
trace_gk20a_channel_release(dev_name(&g->dev->dev));
trace_gk20a_channel_release(dev_name(g->dev));
err = gk20a_busy(g->dev);
if (err) {
@@ -1189,7 +1191,9 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp)
int err;
struct channel_gk20a *ch;
trace_gk20a_channel_open(dev_name(&g->dev->dev));
gk20a_dbg_fn("");
trace_gk20a_channel_open(dev_name(g->dev));
err = gk20a_busy(g->dev);
if (err) {
@@ -1235,7 +1239,7 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
fd = err;
name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
dev_name(&g->dev->dev), fd);
dev_name(g->dev), fd);
if (!name) {
err = -ENOMEM;
goto clean_up;
@@ -1562,7 +1566,7 @@ static void trace_write_pushbuffer(struct channel_gk20a *c,
*/
for (i = 0; i < words; i += 128U) {
trace_gk20a_push_cmdbuf(
c->g->dev->name,
dev_name(c->g->dev),
0,
min(words - i, 128U),
offset + i * sizeof(u32),
@@ -2051,7 +2055,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
return err;
}
trace_gk20a_channel_submit_gpfifo(c->g->dev->name,
trace_gk20a_channel_submit_gpfifo(dev_name(c->g->dev),
c->hw_chid,
num_entries,
flags,
@@ -2069,11 +2073,11 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
if (locked_path)
mutex_unlock(&c->ioctl_lock);
trace_gk20a_gpfifo_submit_wait_for_space(c->g->dev->name);
trace_gk20a_gpfifo_submit_wait_for_space(dev_name(c->g->dev));
err = wait_event_interruptible(c->submit_wq,
get_gp_free_count(c) >= num_entries + extra_entries ||
c->has_timedout);
trace_gk20a_gpfifo_submit_wait_for_space_done(c->g->dev->name);
trace_gk20a_gpfifo_submit_wait_for_space_done(dev_name(c->g->dev));
if (locked_path)
mutex_lock(&c->ioctl_lock);
@@ -2156,7 +2160,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
gpfifo_mem[c->gpfifo.put].entry0 = u64_lo32(wait_cmd->gva);
gpfifo_mem[c->gpfifo.put].entry1 = u64_hi32(wait_cmd->gva) |
pbdma_gp_entry1_length_f(wait_cmd->size);
trace_gk20a_push_cmdbuf(c->g->dev->name,
trace_gk20a_push_cmdbuf(dev_name(c->g->dev),
0, wait_cmd->size, 0, wait_cmd->ptr);
c->gpfifo.put = (c->gpfifo.put + 1) &
@@ -2244,7 +2248,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
gpfifo_mem[c->gpfifo.put].entry0 = u64_lo32(incr_cmd->gva);
gpfifo_mem[c->gpfifo.put].entry1 = u64_hi32(incr_cmd->gva) |
pbdma_gp_entry1_length_f(incr_cmd->size);
trace_gk20a_push_cmdbuf(c->g->dev->name,
trace_gk20a_push_cmdbuf(dev_name(c->g->dev),
0, incr_cmd->size, 0, incr_cmd->ptr);
c->gpfifo.put = (c->gpfifo.put + 1) &
@@ -2273,7 +2277,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
mutex_unlock(&c->submit_lock);
trace_gk20a_channel_submitted_gpfifo(c->g->dev->name,
trace_gk20a_channel_submitted_gpfifo(dev_name(c->g->dev),
c->hw_chid,
num_entries,
flags,
@@ -2357,7 +2361,7 @@ static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
ulong id, u32 offset,
u32 payload, long timeout)
{
struct platform_device *pdev = ch->g->dev;
struct device *dev = ch->g->dev;
struct dma_buf *dmabuf;
void *data;
u32 *semaphore;
@@ -2370,14 +2374,13 @@ static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
dmabuf = dma_buf_get(id);
if (IS_ERR(dmabuf)) {
gk20a_err(&pdev->dev, "invalid notifier nvmap handle 0x%lx",
id);
gk20a_err(dev, "invalid notifier nvmap handle 0x%lx", id);
return -EINVAL;
}
data = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
if (!data) {
gk20a_err(&pdev->dev, "failed to map notifier memory");
gk20a_err(dev, "failed to map notifier memory");
ret = -EINVAL;
goto cleanup_put;
}
@@ -2917,7 +2920,7 @@ long gk20a_channel_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct channel_gk20a *ch = filp->private_data;
struct platform_device *dev = ch->g->dev;
struct device *dev = ch->g->dev;
u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE];
int err = 0;
@@ -2956,7 +2959,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -2968,7 +2971,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_FREE_OBJ_CTX:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -2980,7 +2983,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -2996,7 +2999,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_WAIT:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3016,7 +3019,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_ZCULL_BIND:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3028,7 +3031,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3041,7 +3044,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_CYCLE_STATS:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3080,7 +3083,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_SET_PRIORITY:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3092,7 +3095,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_ENABLE:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3106,7 +3109,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_DISABLE:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3120,7 +3123,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_PREEMPT:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3131,7 +3134,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_FORCE_RESET:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3147,7 +3150,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3164,7 +3167,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_SET_RUNLIST_INTERLEAVE:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3176,7 +3179,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_SET_TIMESLICE:
err = gk20a_busy(dev);
if (err) {
dev_err(&dev->dev,
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
__func__, cmd);
break;
@@ -3186,7 +3189,7 @@ long gk20a_channel_ioctl(struct file *filp,
gk20a_idle(dev);
break;
default:
dev_dbg(&dev->dev, "unrecognized ioctl cmd: 0x%x", cmd);
dev_dbg(dev, "unrecognized ioctl cmd: 0x%x", cmd);
err = -ENOTTY;
break;
}
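
The tracing hunks above replace both dev_name(&g->dev->dev) and the bare
c->g->dev->name with dev_name(c->g->dev). Note these are not the same string:
pdev->name is the short name the platform bus uses for driver matching, while
dev_name() returns the unique per-instance name and works on any bus. A hedged
sketch (the example name is assumed, not taken from this commit):

#include <linux/device.h>
#include <linux/printk.h>

/* For a device-tree platform device, dev_name() yields something like
 * "57000000.gpu" (assumed example), so trace output becomes unique per
 * GPU instance rather than per driver name. */
static void trace_submit(struct device *dev, int hw_chid)
{
	pr_info("%s: gpfifo submit on channel %d\n", dev_name(dev), hw_chid);
}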

File: drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c

@@ -330,13 +330,13 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
sp->host1x_pdev = c->g->host1x_dev;
snprintf(syncpt_name, sizeof(syncpt_name),
"%s_%d", dev_name(&c->g->dev->dev), c->hw_chid);
"%s_%d", dev_name(c->g->dev), c->hw_chid);
sp->id = nvhost_get_syncpt_host_managed(sp->host1x_pdev,
c->hw_chid, syncpt_name);
if (!sp->id) {
kfree(sp);
gk20a_err(&c->g->dev->dev, "failed to get free syncpt");
gk20a_err(c->g->dev, "failed to get free syncpt");
return NULL;
}
@@ -387,7 +387,7 @@ static void gk20a_channel_semaphore_launcher(
fence, fence->name);
err = sync_fence_wait(fence, -1);
if (err < 0)
dev_err(&g->dev->dev, "error waiting pre-fence: %d\n", err);
dev_err(g->dev, "error waiting pre-fence: %d\n", err);
gk20a_dbg_info(
"wait completed (%d) for fence %p '%s', triggering gpu work",

File: drivers/gpu/nvgpu/gk20a/clk_gk20a.c

@@ -1,9 +1,7 @@
/*
* drivers/video/tegra/host/gk20a/clk_gk20a.c
*
* GK20A Clocks
*
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -831,7 +829,7 @@ DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
static int clk_gk20a_debugfs_init(struct gk20a *g)
{
struct dentry *d;
struct gk20a_platform *platform = platform_get_drvdata(g->dev);
struct gk20a_platform *platform = dev_get_drvdata(g->dev);
d = debugfs_create_file(
"rate", S_IRUGO|S_IWUSR, platform->debugfs, g, &rate_fops);

File: drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c

@@ -144,7 +144,7 @@ static int gk20a_ctrl_alloc_as(
struct gk20a *g,
struct nvgpu_alloc_as_args *args)
{
struct platform_device *dev = g->dev;
struct device *dev = g->dev;
struct gk20a_as_share *as_share;
int err;
int fd;
@@ -157,7 +157,7 @@ static int gk20a_ctrl_alloc_as(
fd = err;
name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
dev_name(&dev->dev), fd);
dev_name(dev), fd);
file = anon_inode_getfile(name, g->as.cdev.ops, NULL, O_RDWR);
kfree(name);
@@ -187,7 +187,7 @@ clean_up:
static int gk20a_ctrl_open_tsg(struct gk20a *g,
struct nvgpu_gpu_open_tsg_args *args)
{
struct platform_device *dev = g->dev;
struct device *dev = g->dev;
int err;
int fd;
struct file *file;
@@ -199,7 +199,7 @@ static int gk20a_ctrl_open_tsg(struct gk20a *g,
fd = err;
name = kasprintf(GFP_KERNEL, "nvgpu-%s-tsg%d",
dev_name(&dev->dev), fd);
dev_name(dev), fd);
file = anon_inode_getfile(name, g->tsg.cdev.ops, NULL, O_RDWR);
kfree(name);
@@ -621,7 +621,7 @@ static int nvgpu_gpu_get_cpu_time_correlation_info(
long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct platform_device *dev = filp->private_data;
struct device *dev = filp->private_data;
struct gk20a *g = get_gk20a(dev);
struct nvgpu_gpu_zcull_get_ctx_size_args *get_ctx_size_args;
struct nvgpu_gpu_zcull_get_info_args *get_info_args;

File: drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c

@@ -65,7 +65,6 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
struct dbg_session_gk20a *dbg_session;
struct gk20a *g;
struct platform_device *pdev;
struct device *dev;
int err;
@@ -76,8 +75,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
else
g = container_of(inode->i_cdev,
struct gk20a, prof.cdev);
pdev = g->dev;
dev = &pdev->dev;
dev = g->dev;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", dev_name(dev));
@@ -86,7 +84,6 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
return err;
filp->private_data = dbg_session;
dbg_session->pdev = pdev;
dbg_session->dev = dev;
dbg_session->g = g;
dbg_session->is_profiler = is_profiler;
@@ -95,7 +92,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
/* For vgpu, all power-gating features are currently disabled
* in the server. Set is_pg_disable to true to reflect this
* on the client side. */
if (gk20a_gpu_is_virtual(pdev))
if (gk20a_gpu_is_virtual(dev))
dbg_session->is_pg_disabled = true;
INIT_LIST_HEAD(&dbg_session->dbg_s_list_node);
@@ -486,7 +483,7 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
struct nvgpu_dbg_gpu_timeout_args *args)
{
int err;
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct gk20a *g = get_gk20a(dbg_s->dev);
gk20a_dbg_fn("powergate mode = %d", args->enable);
@@ -501,7 +498,7 @@ static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
struct nvgpu_dbg_gpu_timeout_args *args)
{
int status;
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct gk20a *g = get_gk20a(dbg_s->dev);
mutex_lock(&g->dbg_sessions_lock);
status = g->timeouts_enabled;
@@ -532,7 +529,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct dbg_session_gk20a *dbg_s = filp->private_data;
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct gk20a *g = get_gk20a(dbg_s->dev);
u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
int err = 0;
@@ -672,7 +669,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
bool is_pg_disabled = false;
struct device *dev = dbg_s->dev;
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct gk20a *g = get_gk20a(dbg_s->dev);
struct nvgpu_dbg_gpu_reg_op *ops;
u64 ops_size = sizeof(ops[0]) * args->num_ops;
@@ -689,7 +686,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
}
/* be sure that ctx info is in place */
if (!gk20a_gpu_is_virtual(dbg_s->pdev) &&
if (!gk20a_gpu_is_virtual(dbg_s->dev) &&
!gr_context_info_available(dbg_s, &g->gr)) {
gk20a_err(dev, "gr context data not available\n");
return -ENODEV;
@@ -757,7 +754,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
__u32 powermode)
{
int err = 0;
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct gk20a *g = get_gk20a(dbg_s->dev);
/* This function must be called with g->dbg_sessions_lock held */
@@ -786,7 +783,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
if (err)
return err;
err = gk20a_busy(dbg_s->pdev);
err = gk20a_busy(dbg_s->dev);
if (err)
return -EPERM;
@@ -838,7 +835,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
gk20a_pmu_enable_elpg(g);
gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
gk20a_idle(dbg_s->pdev);
gk20a_idle(dbg_s->dev);
gk20a_idle(g->dev);
}
@@ -862,7 +859,7 @@ static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
struct nvgpu_dbg_gpu_powergate_args *args)
{
int err;
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct gk20a *g = get_gk20a(dbg_s->dev);
gk20a_dbg_fn("%s powergate mode = %d",
dev_name(dbg_s->dev), args->mode);
@@ -876,7 +873,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
{
int err;
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct gk20a *g = get_gk20a(dbg_s->dev);
struct channel_gk20a *ch_gk20a;
gk20a_dbg_fn("%s smpc ctxsw mode = %d",
@@ -912,7 +909,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args)
{
int err;
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct gk20a *g = get_gk20a(dbg_s->dev);
struct channel_gk20a *ch_gk20a;
gk20a_dbg_fn("%s pm ctxsw mode = %d",
@@ -948,7 +945,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
struct dbg_session_gk20a *dbg_s,
struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
{
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct gk20a *g = get_gk20a(dbg_s->dev);
struct channel_gk20a *ch = dbg_s->ch;
bool ch_is_curr_ctx;
int err = 0, action = args->mode;

File: drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h

@@ -1,7 +1,7 @@
/*
* Tegra GK20A GPU Debugger Driver
*
* Copyright (c) 2013-2015, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2013-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -66,7 +66,6 @@ struct dbg_session_gk20a {
/* gpu module vagaries */
struct device *dev;
struct platform_device *pdev;
struct gk20a *g;
/* bound channel, if any */

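Dropping the pdev member here removes a duplicate handle rather than
information, since it always pointed at the container of dev. A sketch of the
reduced session struct (member names from the hunk above, other fields
omitted; assumes the device really sits on the platform bus):

#include <linux/device.h>
#include <linux/platform_device.h>

struct gk20a;	/* driver-private */

struct dbg_session_sketch {
	struct device *dev;	/* the one canonical handle */
	struct gk20a *g;
};

/* If the platform view is ever needed again, it stays recoverable: */
static struct platform_device *session_pdev(struct dbg_session_sketch *s)
{
	return to_platform_device(s->dev);
}
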
File: drivers/gpu/nvgpu/gk20a/debug_gk20a.c

@@ -1,7 +1,7 @@
/*
* drivers/video/tegra/host/t20/debug_gk20a.c
*
* Copyright (C) 2011-2015 NVIDIA Corporation. All rights reserved.
* Copyright (C) 2011-2016 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -32,7 +32,7 @@
#include "hw_pbdma_gk20a.h"
unsigned int gk20a_debug_trace_cmdbuf;
static struct platform_device *gk20a_device;
static struct device *gk20a_device;
struct ch_state {
int pid;
@@ -120,7 +120,7 @@ static void gk20a_debug_show_channel(struct gk20a *g,
syncpointb = gk20a_mem_rd32(inst_ptr, ram_fc_syncpointb_w());
gk20a_debug_output(o, "%d-%s, pid %d, refs: %d: ", hw_chid,
g->dev->name,
dev_name(g->dev),
ch_state->pid,
ch_state->refs);
gk20a_debug_output(o, "%s in use %s %s\n",
@@ -181,7 +181,7 @@ void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o)
u32 status = gk20a_readl(g, fifo_pbdma_status_r(i));
u32 chan_status = fifo_pbdma_status_chan_status_v(status);
gk20a_debug_output(o, "%s pbdma %d: ", g->dev->name, i);
gk20a_debug_output(o, "%s pbdma %d: ", dev_name(g->dev), i);
gk20a_debug_output(o,
"id: %d (%s), next_id: %d (%s) status: %s\n",
fifo_pbdma_status_id_v(status),
@@ -206,7 +206,7 @@ void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o)
u32 status = gk20a_readl(g, fifo_engine_status_r(i));
u32 ctx_status = fifo_engine_status_ctx_status_v(status);
gk20a_debug_output(o, "%s eng %d: ", g->dev->name, i);
gk20a_debug_output(o, "%s eng %d: ", dev_name(g->dev), i);
gk20a_debug_output(o,
"id: %d (%s), next_id: %d (%s), ctx: %s ",
fifo_engine_status_id_v(status),
@@ -270,67 +270,67 @@ done:
gk20a_idle(g->dev);
}
static int gk20a_gr_dump_regs(struct platform_device *pdev,
static int gk20a_gr_dump_regs(struct device *dev,
struct gk20a_debug_output *o)
{
struct gk20a_platform *platform = gk20a_get_platform(pdev);
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g;
int err;
err = gk20a_busy(g->dev);
err = gk20a_busy(dev);
if (err) {
gk20a_err(&pdev->dev, "failed to power on gpu: %d\n", err);
gk20a_err(dev, "failed to power on gpu: %d\n", err);
return -EINVAL;
}
gr_gk20a_elpg_protected_call(g, g->ops.gr.dump_gr_regs(g, o));
gk20a_idle(g->dev);
gk20a_idle(dev);
return 0;
}
int gk20a_gr_debug_dump(struct platform_device *pdev)
int gk20a_gr_debug_dump(struct device *dev)
{
struct gk20a_debug_output o = {
.fn = gk20a_debug_write_printk
};
gk20a_gr_dump_regs(pdev, &o);
gk20a_gr_dump_regs(dev, &o);
return 0;
}
static int gk20a_gr_debug_show(struct seq_file *s, void *unused)
{
struct platform_device *pdev = s->private;
struct device *dev = s->private;
struct gk20a_debug_output o = {
.fn = gk20a_debug_write_to_seqfile,
.ctx = s,
};
gk20a_gr_dump_regs(pdev, &o);
gk20a_gr_dump_regs(dev, &o);
return 0;
}
void gk20a_debug_dump(struct platform_device *pdev)
void gk20a_debug_dump(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(pdev);
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g;
struct gk20a_debug_output o = {
.fn = gk20a_debug_write_printk
};
if (platform->dump_platform_dependencies)
platform->dump_platform_dependencies(pdev);
platform->dump_platform_dependencies(dev);
/* HAL only initialized after 1st power-on */
if (g->ops.debug.show_dump)
g->ops.debug.show_dump(g, &o);
}
void gk20a_debug_dump_device(struct platform_device *pdev)
void gk20a_debug_dump_device(void *data)
{
struct gk20a_debug_output o = {
.fn = gk20a_debug_write_printk
@@ -341,15 +341,7 @@ void gk20a_debug_dump_device(struct platform_device *pdev)
if (!tegra_platform_is_silicon())
return;
/* Dump the first device if no info is provided */
if (!pdev) {
if (!gk20a_device)
return;
pdev = gk20a_device;
}
g = gk20a_get_platform(pdev)->g;
g = gk20a_from_dev(gk20a_device);
/* HAL only initialized after 1st power-on */
if (g->ops.debug.show_dump)
g->ops.debug.show_dump(g, &o);
@@ -358,14 +350,14 @@ EXPORT_SYMBOL(gk20a_debug_dump_device);
static int gk20a_debug_show(struct seq_file *s, void *unused)
{
struct platform_device *pdev = s->private;
struct device *dev = s->private;
struct gk20a_debug_output o = {
.fn = gk20a_debug_write_to_seqfile,
.ctx = s,
};
struct gk20a *g;
g = gk20a_get_platform(pdev)->g;
g = gk20a_get_platform(dev)->g;
/* HAL only initialized after 1st power-on */
if (g->ops.debug.show_dump)
g->ops.debug.show_dump(g, &o);
@@ -401,24 +393,24 @@ void gk20a_init_debug_ops(struct gpu_ops *gops)
gops->debug.show_dump = gk20a_debug_show_dump;
}
void gk20a_debug_init(struct platform_device *pdev)
void gk20a_debug_init(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
/* Store the first device */
if (!gk20a_device)
gk20a_device = pdev;
gk20a_device = dev;
platform->debugfs = debugfs_create_dir(pdev->name, NULL);
platform->debugfs = debugfs_create_dir(dev_name(dev), NULL);
if (platform->debugfs) {
platform->debugfs_alias =
debugfs_create_symlink("gpu.0", NULL, pdev->name);
debugfs_create_symlink("gpu.0", NULL, dev_name(dev));
}
debugfs_create_file("status", S_IRUGO, platform->debugfs,
pdev, &gk20a_debug_fops);
dev, &gk20a_debug_fops);
debugfs_create_file("gr_status", S_IRUGO, platform->debugfs,
pdev, &gk20a_gr_debug_fops);
dev, &gk20a_gr_debug_fops);
debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, platform->debugfs,
&gk20a_debug_trace_cmdbuf);
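
One behavioral nuance in the debug_gk20a.c hunks: gk20a_debug_dump_device()
used to take an optional platform_device and fall back to the first registered
one, whereas the new void *data form ignores its argument and always dumps the
first probed device. Reassembled from the hunks above (gk20a_from_dev()
recovers struct gk20a from the device's drvdata; the NULL guard is kept from
the old code for safety, not shown in the new hunk):

void gk20a_debug_dump_device(void *data)
{
	struct gk20a_debug_output o = {
		.fn = gk20a_debug_write_printk
	};
	struct gk20a *g;

	if (!tegra_platform_is_silicon() || !gk20a_device)
		return;

	g = gk20a_from_dev(gk20a_device);
	/* HAL only initialized after 1st power-on */
	if (g->ops.debug.show_dump)
		g->ops.debug.show_dump(g, &o);
}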

File: drivers/gpu/nvgpu/gk20a/debug_gk20a.h

@@ -1,7 +1,7 @@
/*
* GK20A Debug functionality
*
* Copyright (C) 2011-2015 NVIDIA CORPORATION. All rights reserved.
* Copyright (C) 2011-2016 NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -32,10 +32,10 @@ struct gk20a_debug_output {
void gk20a_debug_output(struct gk20a_debug_output *o,
const char *fmt, ...);
void gk20a_debug_dump(struct platform_device *pdev);
void gk20a_debug_dump(struct device *pdev);
void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o);
int gk20a_gr_debug_dump(struct platform_device *pdev);
void gk20a_debug_init(struct platform_device *pdev);
int gk20a_gr_debug_dump(struct device *pdev);
void gk20a_debug_init(struct device *dev);
void gk20a_init_debug_ops(struct gpu_ops *gops);
#endif

File: drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c

@@ -545,7 +545,7 @@ DEFINE_SIMPLE_ATTRIBUTE(gk20a_fecs_trace_debugfs_write_fops,
static void gk20a_fecs_trace_debugfs_init(struct gk20a *g)
{
struct gk20a_platform *plat = platform_get_drvdata(g->dev);
struct gk20a_platform *plat = dev_get_drvdata(g->dev);
debugfs_create_file("ctxsw_trace_read", 0600, plat->debugfs, g,
&gk20a_fecs_trace_debugfs_read_fops);
@@ -557,7 +557,7 @@ static void gk20a_fecs_trace_debugfs_init(struct gk20a *g)
static void gk20a_fecs_trace_debugfs_cleanup(struct gk20a *g)
{
struct gk20a_platform *plat = platform_get_drvdata(g->dev);
struct gk20a_platform *plat = dev_get_drvdata(g->dev);
debugfs_remove_recursive(plat->debugfs);
}

File: drivers/gpu/nvgpu/gk20a/fifo_gk20a.c

@@ -348,7 +348,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
u32 mask;
u32 timeout;
int i;
struct gk20a_platform *platform = platform_get_drvdata(g->dev);
struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
/* enable pmc pfifo */

File: drivers/gpu/nvgpu/gk20a/gk20a.c

@@ -44,10 +44,6 @@
#include <linux/sched.h>
#ifdef CONFIG_TEGRA_GK20A
#include <linux/nvhost.h>
#endif
#include "gk20a.h"
#include "debug_gk20a.h"
#include "ctrl_gk20a.h"
@@ -79,7 +75,6 @@
#define CLASS_NAME "nvidia-gpu"
/* TODO: Change to e.g. "nvidia-gpu%s" once we have symlinks in place. */
#define INTERFACE_NAME "nvhost%s-gpu"
#define GK20A_NUM_CDEVS 7
@@ -92,12 +87,11 @@ u32 gk20a_dbg_ftrace;
#define GK20A_WAIT_FOR_IDLE_MS 2000
static int gk20a_pm_finalize_poweron(struct device *dev);
static int gk20a_pm_prepare_poweroff(struct device *dev);
static inline void set_gk20a(struct platform_device *dev, struct gk20a *gk20a)
static inline void set_gk20a(struct platform_device *pdev, struct gk20a *gk20a)
{
gk20a_get_platform(dev)->g = gk20a;
gk20a_get_platform(&pdev->dev)->g = gk20a;
}
static const struct file_operations gk20a_channel_ops = {
@@ -292,38 +286,38 @@ static void __iomem *gk20a_ioremap_resource(struct platform_device *dev, int i,
}
/* TBD: strip from released */
static int gk20a_init_sim_support(struct platform_device *dev)
static int gk20a_init_sim_support(struct platform_device *pdev)
{
int err = 0;
struct device *dev = &pdev->dev;
struct gk20a *g = get_gk20a(dev);
struct device *d = &dev->dev;
u64 phys;
g->sim.g = g;
g->sim.regs = gk20a_ioremap_resource(dev, GK20A_SIM_IORESOURCE_MEM,
g->sim.regs = gk20a_ioremap_resource(pdev, GK20A_SIM_IORESOURCE_MEM,
&g->sim.reg_mem);
if (IS_ERR(g->sim.regs)) {
dev_err(d, "failed to remap gk20a sim regs\n");
dev_err(dev, "failed to remap gk20a sim regs\n");
err = PTR_ERR(g->sim.regs);
goto fail;
}
/* allocate sim event/msg buffers */
err = alloc_and_kmap_iopage(d, &g->sim.send_bfr.kvaddr,
err = alloc_and_kmap_iopage(dev, &g->sim.send_bfr.kvaddr,
&g->sim.send_bfr.phys,
&g->sim.send_bfr.page);
err = err || alloc_and_kmap_iopage(d, &g->sim.recv_bfr.kvaddr,
err = err || alloc_and_kmap_iopage(dev, &g->sim.recv_bfr.kvaddr,
&g->sim.recv_bfr.phys,
&g->sim.recv_bfr.page);
err = err || alloc_and_kmap_iopage(d, &g->sim.msg_bfr.kvaddr,
err = err || alloc_and_kmap_iopage(dev, &g->sim.msg_bfr.kvaddr,
&g->sim.msg_bfr.phys,
&g->sim.msg_bfr.page);
if (!(g->sim.send_bfr.kvaddr && g->sim.recv_bfr.kvaddr &&
g->sim.msg_bfr.kvaddr)) {
dev_err(d, "couldn't allocate all sim buffers\n");
dev_err(dev, "couldn't allocate all sim buffers\n");
goto fail;
}
@@ -566,25 +560,25 @@ void gk20a_pbus_isr(struct gk20a *g)
gk20a_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
gk20a_readl(g, mc_enable_r()));
gk20a_err(dev_from_gk20a(g), "NV_PBUS_INTR_0 : 0x%x", val);
gk20a_err(&g->dev->dev,
gk20a_err(g->dev,
"NV_PTIMER_PRI_TIMEOUT_SAVE_0: 0x%x\n",
gk20a_readl(g, timer_pri_timeout_save_0_r()));
gk20a_err(&g->dev->dev,
gk20a_err(g->dev,
"NV_PTIMER_PRI_TIMEOUT_SAVE_1: 0x%x\n",
gk20a_readl(g, timer_pri_timeout_save_1_r()));
err_code = gk20a_readl(g, timer_pri_timeout_fecs_errcode_r());
gk20a_err(&g->dev->dev,
gk20a_err(g->dev,
"NV_PTIMER_PRI_TIMEOUT_FECS_ERRCODE: 0x%x\n",
err_code);
if (err_code == 0xbadf13)
gk20a_err(&g->dev->dev,
gk20a_err(g->dev,
"NV_PGRAPH_PRI_GPC0_GPCCS_FS_GPC: 0x%x\n",
gk20a_readl(g, gr_gpc0_fs_gpc_r()));
}
if (val)
gk20a_err(&g->dev->dev,
gk20a_err(g->dev,
"Unhandled pending pbus interrupt\n");
gk20a_writel(g, bus_intr_0_r(), val);
@@ -602,7 +596,7 @@ static irqreturn_t gk20a_intr_thread_nonstall(int irq, void *dev_id)
return g->ops.mc.isr_thread_nonstall(g);
}
static void gk20a_remove_support(struct platform_device *dev)
static void gk20a_remove_support(struct device *dev)
{
struct gk20a *g = get_gk20a(dev);
@@ -642,7 +636,7 @@ static void gk20a_remove_support(struct platform_device *dev)
static int gk20a_init_support(struct platform_device *dev)
{
int err = 0;
struct gk20a *g = get_gk20a(dev);
struct gk20a *g = get_gk20a(&dev->dev);
#ifdef CONFIG_TEGRA_COMMON
tegra_register_idle_unidle(gk20a_do_idle, gk20a_do_unidle);
@@ -689,14 +683,13 @@ static int gk20a_init_support(struct platform_device *dev)
return 0;
fail:
gk20a_remove_support(dev);
gk20a_remove_support(&dev->dev);
return err;
}
static int gk20a_pm_prepare_poweroff(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
int ret = 0;
gk20a_dbg_fn("");
@@ -706,7 +699,7 @@ static int gk20a_pm_prepare_poweroff(struct device *dev)
if (!g->power_on)
goto done;
gk20a_scale_suspend(pdev);
gk20a_scale_suspend(dev);
/* cancel any pending cde work */
gk20a_cde_suspend(g);
@@ -767,11 +760,10 @@ static int gk20a_detect_chip(struct gk20a *g)
return gpu_init_hal(g);
}
static int gk20a_pm_finalize_poweron(struct device *dev)
int gk20a_pm_finalize_poweron(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct gk20a *g = get_gk20a(pdev);
struct gk20a_platform *platform = gk20a_get_platform(pdev);
struct gk20a *g = get_gk20a(dev);
struct gk20a_platform *platform = gk20a_get_platform(dev);
int err, nice_value;
gk20a_dbg_fn("");
@@ -904,7 +896,7 @@ static int gk20a_pm_finalize_poweron(struct device *dev)
gk20a_channel_resume(g);
set_user_nice(current, nice_value);
gk20a_scale_resume(pdev);
gk20a_scale_resume(dev);
trace_gk20a_finalize_poweron_done(dev_name(dev));
@@ -955,13 +947,13 @@ static struct of_device_id tegra_gk20a_of_match[] = {
};
static int gk20a_create_device(
struct platform_device *pdev, int devno, const char *cdev_name,
struct device *dev, int devno,
const char *interface_name, const char *cdev_name,
struct cdev *cdev, struct device **out,
const struct file_operations *ops)
{
struct device *dev;
struct device *subdev;
int err;
struct gk20a *g = get_gk20a(pdev);
gk20a_dbg_fn("");
@@ -970,127 +962,112 @@ static int gk20a_create_device(
err = cdev_add(cdev, devno, 1);
if (err) {
dev_err(&pdev->dev,
"failed to add %s cdev\n", cdev_name);
dev_err(dev, "failed to add %s cdev\n", cdev_name);
return err;
}
dev = device_create(g->class, NULL, devno, NULL,
(pdev->id <= 0) ? INTERFACE_NAME : INTERFACE_NAME ".%d",
cdev_name, pdev->id);
subdev = device_create(&nvgpu_class, NULL, devno, NULL,
interface_name, cdev_name);
if (IS_ERR(dev)) {
if (IS_ERR(subdev)) {
err = PTR_ERR(dev);
cdev_del(cdev);
dev_err(&pdev->dev,
"failed to create %s device for %s\n",
cdev_name, pdev->name);
dev_err(dev, "failed to create %s device for %s\n",
cdev_name, dev_name(dev));
return err;
}
*out = dev;
*out = subdev;
return 0;
}
void gk20a_user_deinit(struct platform_device *dev)
void gk20a_user_deinit(struct device *dev)
{
struct gk20a *g = get_gk20a(dev);
struct gk20a *g = gk20a_from_dev(dev);
if (g->channel.node) {
device_destroy(g->class, g->channel.cdev.dev);
device_destroy(&nvgpu_class, g->channel.cdev.dev);
cdev_del(&g->channel.cdev);
}
if (g->as.node) {
device_destroy(g->class, g->as.cdev.dev);
device_destroy(&nvgpu_class, g->as.cdev.dev);
cdev_del(&g->as.cdev);
}
if (g->ctrl.node) {
device_destroy(g->class, g->ctrl.cdev.dev);
device_destroy(&nvgpu_class, g->ctrl.cdev.dev);
cdev_del(&g->ctrl.cdev);
}
if (g->dbg.node) {
device_destroy(g->class, g->dbg.cdev.dev);
device_destroy(&nvgpu_class, g->dbg.cdev.dev);
cdev_del(&g->dbg.cdev);
}
if (g->prof.node) {
device_destroy(g->class, g->prof.cdev.dev);
device_destroy(&nvgpu_class, g->prof.cdev.dev);
cdev_del(&g->prof.cdev);
}
if (g->tsg.node) {
device_destroy(g->class, g->tsg.cdev.dev);
device_destroy(&nvgpu_class, g->tsg.cdev.dev);
cdev_del(&g->tsg.cdev);
}
if (g->ctxsw.node) {
device_destroy(g->class, g->ctxsw.cdev.dev);
device_destroy(&nvgpu_class, g->ctxsw.cdev.dev);
cdev_del(&g->ctxsw.cdev);
}
if (g->cdev_region)
unregister_chrdev_region(g->cdev_region, GK20A_NUM_CDEVS);
if (g->class)
class_destroy(g->class);
}
int gk20a_user_init(struct platform_device *dev)
int gk20a_user_init(struct device *dev, const char *interface_name)
{
int err;
dev_t devno;
struct gk20a *g = get_gk20a(dev);
struct gk20a *g = gk20a_from_dev(dev);
g->class = class_create(THIS_MODULE, CLASS_NAME);
if (IS_ERR(g->class)) {
err = PTR_ERR(g->class);
g->class = NULL;
dev_err(&dev->dev,
"failed to create " CLASS_NAME " class\n");
goto fail;
}
err = alloc_chrdev_region(&devno, 0, GK20A_NUM_CDEVS, CLASS_NAME);
err = alloc_chrdev_region(&devno, 0, GK20A_NUM_CDEVS, dev_name(dev));
if (err) {
dev_err(&dev->dev, "failed to allocate devno\n");
dev_err(dev, "failed to allocate devno\n");
goto fail;
}
g->cdev_region = devno;
err = gk20a_create_device(dev, devno++, "",
err = gk20a_create_device(dev, devno++, interface_name, "",
&g->channel.cdev, &g->channel.node,
&gk20a_channel_ops);
if (err)
goto fail;
err = gk20a_create_device(dev, devno++, "-as",
err = gk20a_create_device(dev, devno++, interface_name, "-as",
&g->as.cdev, &g->as.node,
&gk20a_as_ops);
if (err)
goto fail;
err = gk20a_create_device(dev, devno++, "-ctrl",
err = gk20a_create_device(dev, devno++, interface_name, "-ctrl",
&g->ctrl.cdev, &g->ctrl.node,
&gk20a_ctrl_ops);
if (err)
goto fail;
err = gk20a_create_device(dev, devno++, "-dbg",
err = gk20a_create_device(dev, devno++, interface_name, "-dbg",
&g->dbg.cdev, &g->dbg.node,
&gk20a_dbg_ops);
if (err)
goto fail;
err = gk20a_create_device(dev, devno++, "-prof",
err = gk20a_create_device(dev, devno++, interface_name, "-prof",
&g->prof.cdev, &g->prof.node,
&gk20a_prof_ops);
if (err)
goto fail;
err = gk20a_create_device(dev, devno++, "-tsg",
err = gk20a_create_device(dev, devno++, interface_name, "-tsg",
&g->tsg.cdev, &g->tsg.node,
&gk20a_tsg_ops);
if (err)
@@ -1190,7 +1167,7 @@ static void gk20a_pm_shutdown(struct platform_device *pdev)
#endif
/* Be ready for rail-gate after this point */
if (gk20a_gpu_is_virtual(pdev))
if (gk20a_gpu_is_virtual(&pdev->dev))
vgpu_pm_prepare_poweroff(&pdev->dev);
else
gk20a_pm_prepare_poweroff(&pdev->dev);
@@ -1205,12 +1182,12 @@ static const struct dev_pm_ops gk20a_pm_ops = {
};
#endif
static int _gk20a_pm_railgate(struct platform_device *pdev)
static int _gk20a_pm_railgate(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
int ret = 0;
if (platform->railgate)
ret = platform->railgate(pdev);
ret = platform->railgate(dev);
return ret;
}
@@ -1223,14 +1200,14 @@ static int gk20a_pm_railgate(struct generic_pm_domain *domain)
return _gk20a_pm_railgate(g->dev);
}
static int _gk20a_pm_unrailgate(struct platform_device *pdev)
static int _gk20a_pm_unrailgate(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
int ret = 0;
if (platform->unrailgate) {
mutex_lock(&platform->railgate_lock);
ret = platform->unrailgate(pdev);
ret = platform->unrailgate(dev);
mutex_unlock(&platform->railgate_lock);
}
@@ -1242,7 +1219,7 @@ static int gk20a_pm_unrailgate(struct generic_pm_domain *domain)
struct gk20a_domain_data *gk20a_domain = container_of(domain,
struct gk20a_domain_data, gpd);
struct gk20a *g = gk20a_domain->gk20a;
trace_gk20a_pm_unrailgate(dev_name(&g->dev->dev));
trace_gk20a_pm_unrailgate(dev_name(g->dev));
return _gk20a_pm_unrailgate(g->dev);
}
@@ -1273,11 +1250,14 @@ static int gk20a_pm_resume(struct device *dev)
}
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
static int gk20a_pm_initialise_domain(struct platform_device *pdev)
static int gk20a_pm_initialise_domain(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct dev_power_governor *pm_domain_gov = NULL;
struct generic_pm_domain *domain = dev_to_genpd(&pdev->dev);
struct generic_pm_domain *domain = dev_to_genpd(dev);
if (IS_ERR(domain))
return 0;
#ifdef CONFIG_PM
if (!platform->can_railgate)
@@ -1288,14 +1268,14 @@ static int gk20a_pm_initialise_domain(struct platform_device *pdev)
if (platform->railgate_delay)
pm_genpd_set_poweroff_delay(domain, platform->railgate_delay);
device_set_wakeup_capable(&pdev->dev, 0);
device_set_wakeup_capable(dev, 0);
return 0;
}
#else
static int gk20a_pm_initialise_domain(struct platform_device *pdev)
static int gk20a_pm_initialise_domain(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct dev_power_governor *pm_domain_gov = NULL;
struct generic_pm_domain *domain = NULL;
int ret = 0;
@@ -1326,8 +1306,8 @@ static int gk20a_pm_initialise_domain(struct platform_device *pdev)
domain->dev_ops.suspend = gk20a_pm_suspend;
domain->dev_ops.resume = gk20a_pm_resume;
device_set_wakeup_capable(&pdev->dev, 0);
ret = pm_genpd_add_device(domain, &pdev->dev);
device_set_wakeup_capable(dev, 0);
ret = pm_genpd_add_device(domain, dev);
if (platform->railgate_delay)
pm_genpd_set_poweroff_delay(domain, platform->railgate_delay);
@@ -1336,23 +1316,23 @@ static int gk20a_pm_initialise_domain(struct platform_device *pdev)
}
#endif
static int gk20a_pm_init(struct platform_device *dev)
static int gk20a_pm_init(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
int err = 0;
gk20a_dbg_fn("");
/* Initialise pm runtime */
if (platform->clockgate_delay) {
pm_runtime_set_autosuspend_delay(&dev->dev,
pm_runtime_set_autosuspend_delay(dev,
platform->clockgate_delay);
pm_runtime_use_autosuspend(&dev->dev);
pm_runtime_use_autosuspend(dev);
}
pm_runtime_enable(&dev->dev);
if (!pm_runtime_enabled(&dev->dev))
gk20a_pm_enable_clk(&dev->dev);
pm_runtime_enable(dev);
if (!pm_runtime_enabled(dev))
gk20a_pm_enable_clk(dev);
/* Enable runtime railgating if possible. If not,
* turn on the rail now. */
@@ -1374,7 +1354,7 @@ static int gk20a_secure_page_alloc(struct platform_device *pdev)
int err = 0;
if (platform->secure_page_alloc) {
err = platform->secure_page_alloc(pdev);
err = platform->secure_page_alloc(&pdev->dev);
if (!err)
platform->secure_alloc_ready = true;
}
@@ -1418,7 +1398,7 @@ static int gk20a_probe(struct platform_device *dev)
platform_set_drvdata(dev, platform);
if (gk20a_gpu_is_virtual(dev))
if (gk20a_gpu_is_virtual(&dev->dev))
return vgpu_probe(dev);
gk20a = kzalloc(sizeof(struct gk20a), GFP_KERNEL);
@@ -1437,7 +1417,7 @@ static int gk20a_probe(struct platform_device *dev)
#endif
set_gk20a(dev, gk20a);
gk20a->dev = dev;
gk20a->dev = &dev->dev;
gk20a->irq_stall = platform_get_irq(dev, 0);
gk20a->irq_nonstall = platform_get_irq(dev, 1);
@@ -1468,7 +1448,7 @@ static int gk20a_probe(struct platform_device *dev)
disable_irq(gk20a->irq_stall);
disable_irq(gk20a->irq_nonstall);
err = gk20a_user_init(dev);
err = gk20a_user_init(&dev->dev, INTERFACE_NAME);
if (err)
return err;
@@ -1485,10 +1465,10 @@ static int gk20a_probe(struct platform_device *dev)
platform->reset_control = NULL;
#endif
gk20a_debug_init(dev);
gk20a_debug_init(&dev->dev);
/* Initialize the platform interface. */
err = platform->probe(dev);
err = platform->probe(&dev->dev);
if (err) {
dev_err(&dev->dev, "platform probe failed");
return err;
@@ -1499,7 +1479,7 @@ static int gk20a_probe(struct platform_device *dev)
dev_err(&dev->dev,
"failed to allocate secure buffer %d\n", err);
err = gk20a_pm_init(dev);
err = gk20a_pm_init(&dev->dev);
if (err) {
dev_err(&dev->dev, "pm init failed");
return err;
@@ -1509,7 +1489,7 @@ static int gk20a_probe(struct platform_device *dev)
/* Initialise scaling */
if (IS_ENABLED(CONFIG_GK20A_DEVFREQ))
gk20a_scale_init(dev);
gk20a_scale_init(&dev->dev);
/* Set DMA parameters to allow larger sgt lists */
dev->dev.dma_parms = &gk20a->dma_parms;
@@ -1547,14 +1527,14 @@ static int gk20a_probe(struct platform_device *dev)
gk20a->pmu.aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;
if (platform->late_probe) {
err = platform->late_probe(dev);
err = platform->late_probe(&dev->dev);
if (err) {
dev_err(&dev->dev, "late probe failed");
return err;
}
}
gk20a_create_sysfs(dev);
gk20a_create_sysfs(&dev->dev);
#ifdef CONFIG_DEBUG_FS
spin_lock_init(&gk20a->debugfs_lock);
@@ -1612,10 +1592,10 @@ static int gk20a_probe(struct platform_device *dev)
&gk20a->runlist_interleave);
gr_gk20a_debugfs_init(gk20a);
gk20a_pmu_debugfs_init(dev);
gk20a_cde_debugfs_init(dev);
gk20a_pmu_debugfs_init(&dev->dev);
gk20a_cde_debugfs_init(&dev->dev);
gk20a_alloc_debugfs_init(dev);
gk20a_mm_debugfs_init(dev);
gk20a_mm_debugfs_init(&dev->dev);
#endif
gk20a_init_gr(gk20a);
@@ -1623,8 +1603,9 @@ static int gk20a_probe(struct platform_device *dev)
return 0;
}
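The probe path above shows the conversion idiom at its clearest: bus-specific lookups such as platform_get_irq() still take the platform_device, while everything device-generic (debugfs, PM, sysfs, scaling) now receives &dev->dev. A minimal sketch of that split, using hypothetical foo_* names:
#include <linux/platform_device.h>
/* Hypothetical probe illustrating the same split: only the IRQ lookup
 * needs the platform_device; the rest of the driver sees a plain
 * struct device. */
static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq = platform_get_irq(pdev, 0);	/* bus-specific */
	if (irq < 0)
		return irq;
	dev_info(dev, "probed with irq %d\n", irq);	/* bus-agnostic */
	return 0;
}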
static int __exit gk20a_remove(struct platform_device *dev)
static int __exit gk20a_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gk20a *g = get_gk20a(dev);
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a_domain_data *gk20a_gpd;
@@ -1632,7 +1613,7 @@ static int __exit gk20a_remove(struct platform_device *dev)
gk20a_dbg_fn("");
if (gk20a_gpu_is_virtual(dev))
return vgpu_remove(dev);
return vgpu_remove(pdev);
if (platform->has_cde)
gk20a_cde_destroy(g);
@@ -1650,7 +1631,7 @@ static int __exit gk20a_remove(struct platform_device *dev)
debugfs_remove_recursive(platform->debugfs);
debugfs_remove_recursive(platform->debugfs_alias);
gk20a_remove_sysfs(&dev->dev);
gk20a_remove_sysfs(dev);
if (platform->secure_buffer.destroy)
platform->secure_buffer.destroy(dev,
@@ -1660,15 +1641,15 @@ static int __exit gk20a_remove(struct platform_device *dev)
gk20a_gpd->gk20a = NULL;
kfree(gk20a_gpd);
if (pm_runtime_enabled(&dev->dev))
pm_runtime_disable(&dev->dev);
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
else
gk20a_pm_disable_clk(&dev->dev);
gk20a_pm_disable_clk(dev);
if (platform->remove)
platform->remove(dev);
set_gk20a(dev, NULL);
set_gk20a(pdev, NULL);
kfree(g);
gk20a_dbg_fn("removed");
@@ -1751,11 +1732,20 @@ static int gk20a_domain_init(struct of_device_id *matches)
#endif
struct class nvgpu_class = {
.owner = THIS_MODULE,
.name = CLASS_NAME,
};
static int __init gk20a_init(void)
{
int ret;
ret = class_register(&nvgpu_class);
if (ret)
return ret;
ret = gk20a_domain_init(tegra_gpu_domain_match);
if (ret)
return ret;
@@ -1766,50 +1756,51 @@ static int __init gk20a_init(void)
static void __exit gk20a_exit(void)
{
platform_driver_unregister(&gk20a_driver);
class_unregister(&nvgpu_class);
}
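Registering nvgpu_class in gk20a_init() makes the class available before any device probes and independent of the bus type. How gk20a_user_init() consumes it is not shown in this excerpt; a plausible sketch, with device_create() arguments that are assumptions rather than the driver's actual values:
#include <linux/device.h>
/* Illustrative only: create a node under the module-wide class for a
 * given parent device. The devt and node name are invented for this
 * sketch. */
static struct device *foo_create_node(struct device *parent, dev_t devt)
{
	return device_create(&nvgpu_class, parent, devt, NULL,
			     "nvhost-ctrl-gpu");
}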
void gk20a_busy_noresume(struct platform_device *pdev)
void gk20a_busy_noresume(struct device *dev)
{
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_get_noresume(dev);
}
int gk20a_busy(struct platform_device *pdev)
int gk20a_busy(struct device *dev)
{
int ret = 0;
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
#ifdef CONFIG_PM
struct gk20a_platform *platform = gk20a_get_platform(pdev);
struct gk20a_platform *platform = gk20a_get_platform(dev);
#endif
down_read(&g->busy_lock);
#ifdef CONFIG_PM
if (platform->busy) {
ret = platform->busy(pdev);
ret = platform->busy(dev);
if (ret < 0) {
dev_err(&pdev->dev, "%s: failed to poweron platform dependency\n",
dev_err(dev, "%s: failed to poweron platform dependency\n",
__func__);
goto fail;
}
}
ret = pm_runtime_get_sync(&pdev->dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_put_noidle(dev);
if (platform->idle)
platform->idle(pdev);
platform->idle(dev);
goto fail;
}
#else
if (!g->power_on) {
ret = gk20a_gpu_is_virtual(pdev) ?
vgpu_pm_finalize_poweron(&pdev->dev)
: gk20a_pm_finalize_poweron(&pdev->dev);
vgpu_pm_finalize_poweron(dev)
: gk20a_pm_finalize_poweron(dev);
if (ret)
goto fail;
}
#endif
gk20a_scale_notify_busy(pdev);
gk20a_scale_notify_busy(dev);
fail:
up_read(&g->busy_lock);
@@ -1817,19 +1808,19 @@ fail:
return ret < 0 ? ret : 0;
}
void gk20a_idle(struct platform_device *pdev)
void gk20a_idle(struct device *dev)
{
#ifdef CONFIG_PM
struct gk20a_platform *platform = gk20a_get_platform(pdev);
if (atomic_read(&pdev->dev.power.usage_count) == 1)
gk20a_scale_notify_idle(pdev);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_sync_autosuspend(&pdev->dev);
struct gk20a_platform *platform = gk20a_get_platform(dev);
if (atomic_read(&dev->power.usage_count) == 1)
gk20a_scale_notify_idle(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
if (platform->idle)
platform->idle(pdev);
platform->idle(dev);
#else
gk20a_scale_notify_idle(pdev);
gk20a_scale_notify_idle(dev);
#endif
}
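gk20a_busy() and gk20a_idle() are the standard runtime-PM reference pattern, now expressed directly on struct device. A reduced sketch of the same pattern in isolation (the foo_* name is hypothetical):
#include <linux/pm_runtime.h>
/* Take a runtime-PM reference around a hardware access; mirrors the
 * gk20a_busy()/gk20a_idle() structure in miniature. */
static int foo_do_work(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resume if suspended */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the refcount */
		return ret;
	}
	/* ... touch the hardware here ... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_sync_autosuspend(dev);	/* allow autosuspend */
	return 0;
}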
@@ -1882,10 +1873,10 @@ void gk20a_reset(struct gk20a *g, u32 units)
* In success, we hold these locks and return
* In failure, we release these locks and return
*/
int __gk20a_do_idle(struct platform_device *pdev, bool force_reset)
int __gk20a_do_idle(struct device *dev, bool force_reset)
{
struct gk20a *g = get_gk20a(pdev);
struct gk20a_platform *platform = dev_get_drvdata(&pdev->dev);
struct gk20a *g = get_gk20a(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
unsigned long timeout = jiffies +
msecs_to_jiffies(GK20A_WAIT_FOR_IDLE_MS);
int ref_cnt;
@@ -1898,7 +1889,7 @@ int __gk20a_do_idle(struct platform_device *pdev, bool force_reset)
mutex_lock(&platform->railgate_lock);
/* check if it is already railgated ? */
if (platform->is_railgated(pdev))
if (platform->is_railgated(dev))
return 0;
/*
@@ -1906,17 +1897,17 @@ int __gk20a_do_idle(struct platform_device *pdev, bool force_reset)
* re-acquire railgate_lock
*/
mutex_unlock(&platform->railgate_lock);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_get_sync(dev);
mutex_lock(&platform->railgate_lock);
/* check and wait until GPU is idle (with a timeout) */
do {
msleep(1);
ref_cnt = atomic_read(&pdev->dev.power.usage_count);
ref_cnt = atomic_read(&dev->power.usage_count);
} while (ref_cnt != 1 && time_before(jiffies, timeout));
if (ref_cnt != 1) {
gk20a_err(&pdev->dev, "failed to idle - refcount %d != 1\n",
gk20a_err(dev, "failed to idle - refcount %d != 1\n",
ref_cnt);
goto fail_drop_usage_count;
}
@@ -1931,7 +1922,7 @@ int __gk20a_do_idle(struct platform_device *pdev, bool force_reset)
* if GPU is now idle, we will have only one ref count,
* drop this ref which will rail gate the GPU
*/
pm_runtime_put_sync(&pdev->dev);
pm_runtime_put_sync(dev);
/* add sufficient delay to allow GPU to rail gate */
msleep(platform->railgate_delay);
@@ -1941,13 +1932,13 @@ int __gk20a_do_idle(struct platform_device *pdev, bool force_reset)
/* check in loop if GPU is railgated or not */
do {
msleep(1);
is_railgated = platform->is_railgated(pdev);
is_railgated = platform->is_railgated(dev);
} while (!is_railgated && time_before(jiffies, timeout));
if (is_railgated) {
return 0;
} else {
gk20a_err(&pdev->dev, "failed to idle in timeout\n");
gk20a_err(dev, "failed to idle in timeout\n");
goto fail_timeout;
}
} else {
@@ -1964,12 +1955,12 @@ int __gk20a_do_idle(struct platform_device *pdev, bool force_reset)
*/
/* Save the GPU state */
gk20a_pm_prepare_poweroff(&pdev->dev);
gk20a_pm_prepare_poweroff(dev);
gk20a_pm_disable_clk(&pdev->dev);
gk20a_pm_disable_clk(dev);
/* railgate GPU */
platform->railgate(pdev);
platform->railgate(dev);
udelay(10);
@@ -1978,7 +1969,7 @@ int __gk20a_do_idle(struct platform_device *pdev, bool force_reset)
}
fail_drop_usage_count:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_put_noidle(dev);
fail_timeout:
mutex_unlock(&platform->railgate_lock);
up_write(&g->busy_lock);
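The idle wait above relies on a common polling idiom: sleep in 1 ms steps until the runtime-PM usage count drops to the single reference held by this path, bounded by a jiffies deadline. In reduced form (naming is hypothetical):
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
/* Poll until we are the sole holder of a runtime-PM reference or the
 * deadline passes; returns 0 on success, -EBUSY on timeout. */
static int foo_wait_for_sole_ref(struct device *dev, unsigned int ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);
	do {
		msleep(1);
		if (atomic_read(&dev->power.usage_count) == 1)
			return 0;
	} while (time_before(jiffies, timeout));
	return -EBUSY;
}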
@@ -1997,7 +1988,7 @@ int gk20a_do_idle(void)
of_find_matching_node(NULL, tegra_gk20a_of_match);
struct platform_device *pdev = of_find_device_by_node(node);
int ret = __gk20a_do_idle(pdev, true);
int ret = __gk20a_do_idle(&pdev->dev, true);
of_node_put(node);
@@ -2007,25 +1998,25 @@ int gk20a_do_idle(void)
/**
* __gk20a_do_unidle() - unblock all the tasks blocked by __gk20a_do_idle()
*/
int __gk20a_do_unidle(struct platform_device *pdev)
int __gk20a_do_unidle(struct device *dev)
{
struct gk20a *g = get_gk20a(pdev);
struct gk20a_platform *platform = dev_get_drvdata(&pdev->dev);
struct gk20a *g = get_gk20a(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
if (g->forced_reset) {
/*
* If we did a forced-reset/railgate
* then unrailgate the GPU here first
*/
platform->unrailgate(pdev);
platform->unrailgate(dev);
gk20a_pm_enable_clk(&pdev->dev);
gk20a_pm_enable_clk(dev);
/* restore the GPU state */
gk20a_pm_finalize_poweron(&pdev->dev);
gk20a_pm_finalize_poweron(dev);
/* balance GPU usage counter */
pm_runtime_put_sync(&pdev->dev);
pm_runtime_put_sync(dev);
g->forced_reset = false;
}
@@ -2046,7 +2037,7 @@ int gk20a_do_unidle(void)
of_find_matching_node(NULL, tegra_gk20a_of_match);
struct platform_device *pdev = of_find_device_by_node(node);
int ret = __gk20a_do_unidle(pdev);
int ret = __gk20a_do_unidle(&pdev->dev);
of_node_put(node);
@@ -2057,7 +2048,7 @@ int gk20a_do_unidle(void)
int gk20a_init_gpu_characteristics(struct gk20a *g)
{
struct nvgpu_gpu_characteristics *gpu = &g->gpu_characteristics;
struct gk20a_platform *platform = platform_get_drvdata(g->dev);
struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gpu->L2_cache_size = g->ops.ltc.determine_L2_size_bytes(g);
gpu->on_board_video_memory_size = 0; /* integrated GPU */
@@ -2164,7 +2155,7 @@ do_request_firmware(struct device *dev, const char *prefix, const char *fw_name)
const struct firmware *
gk20a_request_firmware(struct gk20a *g, const char *fw_name)
{
struct device *dev = &g->dev->dev;
struct device *dev = g->dev;
const struct firmware *fw;
/* current->fs is NULL when calling from SYS_EXIT.
@@ -2177,8 +2168,10 @@ gk20a_request_firmware(struct gk20a *g, const char *fw_name)
#ifdef CONFIG_TEGRA_GK20A
/* TO BE REMOVED - Support loading from legacy SOC specific path. */
if (!fw)
fw = nvhost_client_request_firmware(g->dev, fw_name);
if (!fw) {
struct gk20a_platform *platform = gk20a_get_platform(dev);
fw = do_request_firmware(dev, platform->soc_name, fw_name);
}
#endif
if (!fw) {
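The fallback chain above tries the generic firmware path first and then a SoC-specific one derived from platform->soc_name. A hedged sketch of what a helper like do_request_firmware() might do; the path layout here is an assumption, not taken from the diff:
#include <linux/firmware.h>
#include <linux/slab.h>
/* Sketch only: build "<prefix>/<name>" and ask the firmware loader for
 * it. The exact layout used by do_request_firmware() is assumed. */
static const struct firmware *foo_request_fw(struct device *dev,
					     const char *prefix,
					     const char *name)
{
	const struct firmware *fw;
	char *path = kasprintf(GFP_KERNEL, "%s/%s", prefix, name);
	if (!path)
		return NULL;
	if (request_firmware(&fw, path, dev))
		fw = NULL;
	kfree(path);
	return fw;
}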


@@ -18,7 +18,6 @@
#ifndef GK20A_H
#define GK20A_H
struct gk20a;
struct fifo_gk20a;
struct channel_gk20a;
@@ -206,7 +205,7 @@ struct gpu_ops {
bool *post_event, struct channel_gk20a *fault_ch);
int (*handle_tex_exception)(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event);
void (*create_gr_sysfs)(struct platform_device *dev);
void (*create_gr_sysfs)(struct device *dev);
u32 (*get_lrf_tex_ltc_dram_override)(struct gk20a *g);
} gr;
const char *name;
@@ -517,7 +516,7 @@ struct gpu_ops {
};
struct gk20a {
struct platform_device *dev;
struct device *dev;
struct platform_device *host1x_dev;
struct resource *reg_mem;
@@ -602,7 +601,7 @@ struct gk20a {
*/
u64 separate_fixed_allocs;
void (*remove_support)(struct platform_device *);
void (*remove_support)(struct device *);
u64 pg_ingating_time_us;
u64 pg_ungating_time_us;
@@ -648,7 +647,6 @@ struct gk20a {
int client_refcount; /* open channels and ctrl nodes */
dev_t cdev_region;
struct class *class;
struct gpu_ops ops;
@@ -685,7 +683,7 @@ static inline unsigned long gk20a_get_gr_idle_timeout(struct gk20a *g)
g->gr_idle_timeout_default : MAX_SCHEDULE_TIMEOUT;
}
static inline struct gk20a *get_gk20a(struct platform_device *dev)
static inline struct gk20a *get_gk20a(struct device *dev)
{
return gk20a_get_platform(dev)->g;
}
@@ -880,7 +878,11 @@ static inline u32 gk20a_bar1_readl(struct gk20a *g, u32 b)
/* convenience */
static inline struct device *dev_from_gk20a(struct gk20a *g)
{
return &g->dev->dev;
return g->dev;
}
static inline struct gk20a *gk20a_from_dev(struct device *dev)
{
return ((struct gk20a_platform *)dev_get_drvdata(dev))->g;
}
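With gk20a->dev now a plain struct device pointer, dev_from_gk20a() and the new gk20a_from_dev() form an exact round trip through drvdata. A short usage sketch, assuming g is a valid struct gk20a:
/* Round trip: g -> dev -> g, relying on dev_get_drvdata() returning the
 * gk20a_platform that holds the gk20a pointer. */
struct device *dev = dev_from_gk20a(g);
WARN_ON(gk20a_from_dev(dev) != g);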
static inline struct gk20a *gk20a_from_as(struct gk20a_as *as)
{
@@ -927,14 +929,14 @@ enum {
KEPLER_CHANNEL_GPFIFO_C = 0xA26F,
};
static inline bool gk20a_gpu_is_virtual(struct platform_device *dev)
static inline bool gk20a_gpu_is_virtual(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
return platform->virtual_dev;
}
static inline int support_gk20a_pmu(struct platform_device *dev)
static inline int support_gk20a_pmu(struct device *dev)
{
if (IS_ENABLED(CONFIG_GK20A_PMU)) {
/* gPMU is not supported for vgpu */
@@ -944,23 +946,23 @@ static inline int support_gk20a_pmu(struct platform_device *dev)
return 0;
}
void gk20a_create_sysfs(struct platform_device *dev);
void gk20a_create_sysfs(struct device *dev);
void gk20a_remove_sysfs(struct device *dev);
#define GK20A_BAR0_IORESOURCE_MEM 0
#define GK20A_BAR1_IORESOURCE_MEM 1
#define GK20A_SIM_IORESOURCE_MEM 2
void gk20a_busy_noresume(struct platform_device *pdev);
int __must_check gk20a_busy(struct platform_device *pdev);
void gk20a_idle(struct platform_device *pdev);
void gk20a_busy_noresume(struct device *dev);
int __must_check gk20a_busy(struct device *dev);
void gk20a_idle(struct device *dev);
void gk20a_disable(struct gk20a *g, u32 units);
void gk20a_enable(struct gk20a *g, u32 units);
void gk20a_reset(struct gk20a *g, u32 units);
int gk20a_do_idle(void);
int gk20a_do_unidle(void);
int __gk20a_do_idle(struct platform_device *pdev, bool force_reset);
int __gk20a_do_unidle(struct platform_device *pdev);
int __gk20a_do_idle(struct device *dev, bool force_reset);
int __gk20a_do_unidle(struct device *dev);
const struct firmware *
gk20a_request_firmware(struct gk20a *g, const char *fw_name);
@@ -981,10 +983,10 @@ int gk20a_init_gpu_characteristics(struct gk20a *g);
void gk20a_pbus_isr(struct gk20a *g);
int gk20a_user_init(struct platform_device *dev);
void gk20a_user_deinit(struct platform_device *dev);
int gk20a_user_init(struct device *dev, const char *interface_name);
void gk20a_user_deinit(struct device *dev);
extern void gk20a_debug_dump_device(struct platform_device *pdev);
void gk20a_debug_dump_device(void *dev);
static inline u32 ptimer_scalingfactor10x(u32 ptimer_src_freq)
{
@@ -999,4 +1001,8 @@ static inline u32 scale_ptimer(u32 timeout , u32 scale10x)
}
u64 gk20a_read_ptimer(struct gk20a *g);
extern struct class nvgpu_class;
#define INTERFACE_NAME "nvhost%s-gpu"
#endif /* GK20A_H */


@@ -1,7 +1,7 @@
/*
* gk20a clock scaling profile
*
* Copyright (c) 2013-2015, NVIDIA Corporation. All rights reserved.
* Copyright (c) 2013-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -47,8 +47,8 @@ static int gk20a_scale_qos_notify(struct notifier_block *nb,
struct gk20a_scale_profile *profile =
container_of(nb, struct gk20a_scale_profile,
qos_notify_block);
struct gk20a_platform *platform = platform_get_drvdata(profile->pdev);
struct gk20a *g = get_gk20a(profile->pdev);
struct gk20a_platform *platform = dev_get_drvdata(profile->dev);
struct gk20a *g = get_gk20a(profile->dev);
unsigned long freq;
if (!platform->postscale)
@@ -56,7 +56,7 @@ static int gk20a_scale_qos_notify(struct notifier_block *nb,
/* get the frequency requirement. if devfreq is enabled, check if it
* has higher demand than qos */
freq = platform->clk_round_rate(profile->pdev,
freq = platform->clk_round_rate(profile->dev,
pm_qos_request(platform->qos_id));
if (g->devfreq)
freq = max(g->devfreq->previous_freq, freq);
@@ -64,7 +64,7 @@ static int gk20a_scale_qos_notify(struct notifier_block *nb,
/* Update gpu load because we may scale the emc target
* if the gpu load changed. */
gk20a_pmu_load_update(g);
platform->postscale(profile->pdev, freq);
platform->postscale(profile->dev, freq);
return NOTIFY_OK;
}
@@ -77,12 +77,12 @@ static int gk20a_scale_qos_notify(struct notifier_block *nb,
static int gk20a_scale_make_freq_table(struct gk20a_scale_profile *profile)
{
struct gk20a_platform *platform = platform_get_drvdata(profile->pdev);
struct gk20a_platform *platform = dev_get_drvdata(profile->dev);
int num_freqs, err;
unsigned long *freqs;
/* get gpu frequency table */
err = platform->get_clk_freqs(profile->pdev, &freqs,
err = platform->get_clk_freqs(profile->dev, &freqs,
&num_freqs);
if (err)
return -ENOSYS;
@@ -103,22 +103,21 @@ static int gk20a_scale_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct platform_device *pdev = to_platform_device(dev);
unsigned long rounded_rate =
platform->clk_round_rate(pdev, *freq);
platform->clk_round_rate(dev, *freq);
if (platform->clk_get_rate(pdev) == rounded_rate)
if (platform->clk_get_rate(dev) == rounded_rate)
*freq = rounded_rate;
else {
platform->clk_set_rate(pdev, rounded_rate);
*freq = platform->clk_get_rate(pdev);
platform->clk_set_rate(dev, rounded_rate);
*freq = platform->clk_get_rate(dev);
}
/* postscale will only scale emc (dram clock) if evaluating
* gk20a_tegra_get_emc_rate() produces a new or different emc
* target because the load and/or gpufreq has changed */
if (platform->postscale)
platform->postscale(pdev, rounded_rate);
platform->postscale(dev, rounded_rate);
return 0;
}
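gk20a_scale_target() follows the usual devfreq contract: the governor passes a requested frequency by pointer and the driver writes back the rate actually programmed. A reduced sketch of that contract (the foo_clk_* helpers are hypothetical stand-ins for the platform callbacks):
#include <linux/devfreq.h>
/* devfreq target callback: round the request, program the clock, and
 * report the achieved rate back through *freq. */
static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
{
	unsigned long rounded = foo_clk_round_rate(dev, *freq);
	if (foo_clk_get_rate(dev) != rounded)
		foo_clk_set_rate(dev, rounded);
	*freq = foo_clk_get_rate(dev);	/* what the hardware really runs */
	return 0;
}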
@@ -130,9 +129,9 @@ static int gk20a_scale_target(struct device *dev, unsigned long *freq,
* based on the time it was asked last time.
*/
static void update_load_estimate_gpmu(struct platform_device *pdev)
static void update_load_estimate_gpmu(struct device *dev)
{
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
struct gk20a_scale_profile *profile = g->scale_profile;
unsigned long dt;
u32 busy_time;
@@ -148,14 +147,14 @@ static void update_load_estimate_gpmu(struct platform_device *pdev)
}
/*
* gk20a_scale_suspend(pdev)
* gk20a_scale_suspend(dev)
*
* This function informs devfreq of suspend
*/
void gk20a_scale_suspend(struct platform_device *pdev)
void gk20a_scale_suspend(struct device *dev)
{
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
struct devfreq *devfreq = g->devfreq;
if (!devfreq)
@@ -165,14 +164,14 @@ void gk20a_scale_suspend(struct platform_device *pdev)
}
/*
* gk20a_scale_resume(pdev)
* gk20a_scale_resume(dev)
*
* This function informs devfreq of resume
*/
void gk20a_scale_resume(struct platform_device *pdev)
void gk20a_scale_resume(struct device *dev)
{
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
struct devfreq *devfreq = g->devfreq;
if (!devfreq)
@@ -182,15 +181,15 @@ void gk20a_scale_resume(struct platform_device *pdev)
}
/*
* gk20a_scale_notify(pdev, busy)
* gk20a_scale_notify(dev, busy)
*
* Calling this function informs that the device is idling (..or busy). This
* data is used to estimate the current load
*/
static void gk20a_scale_notify(struct platform_device *pdev, bool busy)
static void gk20a_scale_notify(struct device *dev, bool busy)
{
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
struct gk20a_scale_profile *profile = g->scale_profile;
struct devfreq *devfreq = g->devfreq;
@@ -204,15 +203,15 @@ static void gk20a_scale_notify(struct platform_device *pdev, bool busy)
mutex_unlock(&devfreq->lock);
}
void gk20a_scale_notify_idle(struct platform_device *pdev)
void gk20a_scale_notify_idle(struct device *dev)
{
gk20a_scale_notify(pdev, false);
gk20a_scale_notify(dev, false);
}
void gk20a_scale_notify_busy(struct platform_device *pdev)
void gk20a_scale_notify_busy(struct device *dev)
{
gk20a_scale_notify(pdev, true);
gk20a_scale_notify(dev, true);
}
/*
@@ -224,24 +223,23 @@ void gk20a_scale_notify_busy(struct platform_device *pdev)
static int gk20a_scale_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
{
struct gk20a *g = get_gk20a(to_platform_device(dev));
struct gk20a *g = get_gk20a(dev);
struct gk20a_scale_profile *profile = g->scale_profile;
struct platform_device *pdev = to_platform_device(dev);
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
/* update the software shadow */
gk20a_pmu_load_update(g);
/* inform edp about new constraint */
if (platform->prescale)
platform->prescale(pdev);
platform->prescale(dev);
/* Make sure there are correct values for the current frequency */
profile->dev_stat.current_frequency =
platform->clk_get_rate(profile->pdev);
platform->clk_get_rate(profile->dev);
/* Update load estimate */
update_load_estimate_gpmu(to_platform_device(dev));
update_load_estimate_gpmu(dev);
/* Copy the contents of the current device status */
*stat = profile->dev_stat;
@@ -254,12 +252,12 @@ static int gk20a_scale_get_dev_status(struct device *dev,
}
/*
* gk20a_scale_init(pdev)
* gk20a_scale_init(dev)
*/
void gk20a_scale_init(struct platform_device *pdev)
void gk20a_scale_init(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct gk20a *g = platform->g;
struct gk20a_scale_profile *profile;
int err;
@@ -269,7 +267,7 @@ void gk20a_scale_init(struct platform_device *pdev)
profile = kzalloc(sizeof(*profile), GFP_KERNEL);
profile->pdev = pdev;
profile->dev = dev;
profile->dev_stat.busy = false;
/* Create frequency table */
@@ -290,7 +288,7 @@ void gk20a_scale_init(struct platform_device *pdev)
profile->devfreq_profile.get_dev_status =
gk20a_scale_get_dev_status;
devfreq = devfreq_add_device(&pdev->dev,
devfreq = devfreq_add_device(dev,
&profile->devfreq_profile,
platform->devfreq_governor, NULL);
@@ -316,9 +314,9 @@ err_get_freqs:
kfree(profile);
}
void gk20a_scale_exit(struct platform_device *pdev)
void gk20a_scale_exit(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct gk20a *g = platform->g;
int err;
@@ -344,9 +342,9 @@ void gk20a_scale_exit(struct platform_device *pdev)
* Initialize hardware portion of the device
*/
void gk20a_scale_hw_init(struct platform_device *pdev)
void gk20a_scale_hw_init(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct gk20a_scale_profile *profile = platform->g->scale_profile;
/* make sure that scaling has been initialised */


@@ -1,7 +1,7 @@
/*
* gk20a clock scaling profile
*
* Copyright (c) 2013-2015, NVIDIA Corporation. All rights reserved.
* Copyright (c) 2013-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -21,11 +21,10 @@
#include <linux/devfreq.h>
struct platform_device;
struct clk;
struct gk20a_scale_profile {
struct platform_device *pdev;
struct device *dev;
ktime_t last_event_time;
struct devfreq_dev_profile devfreq_profile;
struct devfreq_dev_status dev_stat;
@@ -34,25 +33,25 @@ struct gk20a_scale_profile {
};
/* Initialization and de-initialization for module */
void gk20a_scale_init(struct platform_device *);
void gk20a_scale_exit(struct platform_device *);
void gk20a_scale_hw_init(struct platform_device *pdev);
void gk20a_scale_init(struct device *);
void gk20a_scale_exit(struct device *);
void gk20a_scale_hw_init(struct device *dev);
#if defined(CONFIG_GK20A_DEVFREQ)
/*
* call when performing submit to notify scaling mechanism that the module is
* in use
*/
void gk20a_scale_notify_busy(struct platform_device *);
void gk20a_scale_notify_idle(struct platform_device *);
void gk20a_scale_notify_busy(struct device *);
void gk20a_scale_notify_idle(struct device *);
void gk20a_scale_suspend(struct platform_device *);
void gk20a_scale_resume(struct platform_device *);
void gk20a_scale_suspend(struct device *);
void gk20a_scale_resume(struct device *);
#else
static inline void gk20a_scale_notify_busy(struct platform_device *pdev) {}
static inline void gk20a_scale_notify_idle(struct platform_device *pdev) {}
static inline void gk20a_scale_suspend(struct platform_device *pdev) {}
static inline void gk20a_scale_resume(struct platform_device *pdev) {}
static inline void gk20a_scale_notify_busy(struct device *dev) {}
static inline void gk20a_scale_notify_idle(struct device *dev) {}
static inline void gk20a_scale_suspend(struct device *dev) {}
static inline void gk20a_scale_resume(struct device *dev) {}
#endif
#endif


@@ -18,7 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/kernel.h>
#include <linux/fb.h>
@@ -34,11 +34,10 @@
#define ROOTRW (S_IRWXU|S_IRGRP|S_IROTH)
static ssize_t elcg_enable_store(struct device *device,
static ssize_t elcg_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
int err;
@@ -60,28 +59,26 @@ static ssize_t elcg_enable_store(struct device *device,
}
gk20a_idle(g->dev);
dev_info(device, "ELCG is %s.\n", g->elcg_enabled ? "enabled" :
dev_info(dev, "ELCG is %s.\n", g->elcg_enabled ? "enabled" :
"disabled");
return count;
}
static ssize_t elcg_enable_read(struct device *device,
static ssize_t elcg_enable_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
return sprintf(buf, "%d\n", g->elcg_enabled ? 1 : 0);
}
static DEVICE_ATTR(elcg_enable, ROOTRW, elcg_enable_read, elcg_enable_store);
static ssize_t blcg_enable_store(struct device *device,
static ssize_t blcg_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
int err;
@@ -118,17 +115,16 @@ static ssize_t blcg_enable_store(struct device *device,
g->blcg_enabled);
gk20a_idle(g->dev);
dev_info(device, "BLCG is %s.\n", g->blcg_enabled ? "enabled" :
dev_info(dev, "BLCG is %s.\n", g->blcg_enabled ? "enabled" :
"disabled");
return count;
}
static ssize_t blcg_enable_read(struct device *device,
static ssize_t blcg_enable_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
return sprintf(buf, "%d\n", g->blcg_enabled ? 1 : 0);
}
@@ -136,11 +132,10 @@ static ssize_t blcg_enable_read(struct device *device,
static DEVICE_ATTR(blcg_enable, ROOTRW, blcg_enable_read, blcg_enable_store);
static ssize_t slcg_enable_store(struct device *device,
static ssize_t slcg_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
int err;
@@ -185,17 +180,16 @@ static ssize_t slcg_enable_store(struct device *device,
g->ops.clock_gating.slcg_xbar_load_gating_prod(g, g->slcg_enabled);
gk20a_idle(g->dev);
dev_info(device, "SLCG is %s.\n", g->slcg_enabled ? "enabled" :
dev_info(dev, "SLCG is %s.\n", g->slcg_enabled ? "enabled" :
"disabled");
return count;
}
static ssize_t slcg_enable_read(struct device *device,
static ssize_t slcg_enable_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
return sprintf(buf, "%d\n", g->slcg_enabled ? 1 : 0);
}
@@ -203,8 +197,8 @@ static ssize_t slcg_enable_read(struct device *device,
static DEVICE_ATTR(slcg_enable, ROOTRW, slcg_enable_read, slcg_enable_store);
static ssize_t ptimer_scale_factor_show(struct device *dev,
struct device_attribute *attr,
char *buf)
struct device_attribute *attr,
char *buf)
{
struct gk20a_platform *platform = dev_get_drvdata(dev);
u32 src_freq_hz = platform->ptimer_src_freq;
@@ -240,8 +234,7 @@ static ssize_t railgate_enable_store(struct device *dev,
{
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct generic_pm_domain *genpd = dev_to_genpd(dev);
struct platform_device *ndev = to_platform_device(dev);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long railgate_enable = 0;
int err;
@@ -255,7 +248,7 @@ static ssize_t railgate_enable_store(struct device *dev,
/* release extra ref count: if power domains not enabled */
if ((platform->railgate) && \
!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
err = platform->railgate(ndev);
err = platform->railgate(dev);
mutex_unlock(&platform->railgate_lock);
} else if (railgate_enable == 0 && platform->can_railgate) {
mutex_lock(&platform->railgate_lock);
@@ -265,7 +258,7 @@ static ssize_t railgate_enable_store(struct device *dev,
/* take extra ref count - in case of power domains not enabled */
if ((platform->unrailgate) && \
!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
err = platform->unrailgate(ndev);
err = platform->unrailgate(dev);
mutex_unlock(&platform->railgate_lock);
}
dev_info(dev, "railgate is %s.\n", platform->can_railgate ?
@@ -279,11 +272,10 @@ static ssize_t railgate_enable_store(struct device *dev,
return count;
}
static ssize_t railgate_enable_read(struct device *device,
static ssize_t railgate_enable_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a_platform *platform = dev_get_drvdata(&ndev->dev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", platform->can_railgate ? 1 : 0);
}
@@ -297,9 +289,8 @@ static ssize_t railgate_delay_store(struct device *dev,
const char *buf, size_t count)
{
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct platform_device *ndev = to_platform_device(dev);
int railgate_delay = 0, ret = 0;
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
int err;
if (!platform->can_railgate) {
@@ -373,8 +364,7 @@ static DEVICE_ATTR(clockgate_delay, ROOTRW, clockgate_delay_show,
static ssize_t counters_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
u32 busy_cycles, total_cycles;
ssize_t res;
@@ -390,8 +380,7 @@ static ssize_t counters_show_reset(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t res = counters_show(dev, attr, buf);
struct platform_device *pdev = to_platform_device(dev);
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
gk20a_pmu_reset_load_counters(g);
@@ -403,8 +392,7 @@ static ssize_t gk20a_load_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
u32 busy_time;
ssize_t res;
int err;
@@ -427,11 +415,10 @@ static ssize_t gk20a_load_show(struct device *dev,
}
static DEVICE_ATTR(load, S_IRUGO, gk20a_load_show, NULL);
static ssize_t elpg_enable_store(struct device *device,
static ssize_t elpg_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
int err;
@@ -455,28 +442,26 @@ static ssize_t elpg_enable_store(struct device *device,
}
gk20a_idle(g->dev);
dev_info(device, "ELPG is %s.\n", g->elpg_enabled ? "enabled" :
dev_info(dev, "ELPG is %s.\n", g->elpg_enabled ? "enabled" :
"disabled");
return count;
}
static ssize_t elpg_enable_read(struct device *device,
static ssize_t elpg_enable_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
return sprintf(buf, "%d\n", g->elpg_enabled ? 1 : 0);
}
static DEVICE_ATTR(elpg_enable, ROOTRW, elpg_enable_read, elpg_enable_store);
static ssize_t aelpg_param_store(struct device *device,
static ssize_t aelpg_param_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
int status = 0;
union pmu_ap_cmd ap_cmd;
int *paramlist = (int *)g->pmu.aelpg_param;
@@ -514,11 +499,10 @@ static ssize_t aelpg_param_store(struct device *device,
return count;
}
static ssize_t aelpg_param_read(struct device *device,
static ssize_t aelpg_param_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
return sprintf(buf, "%d %d %d %d %d\n", g->pmu.aelpg_param[0],
g->pmu.aelpg_param[1], g->pmu.aelpg_param[2],
@@ -528,11 +512,10 @@ static ssize_t aelpg_param_read(struct device *device,
static DEVICE_ATTR(aelpg_param, ROOTRW,
aelpg_param_read, aelpg_param_store);
static ssize_t aelpg_enable_store(struct device *device,
static ssize_t aelpg_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
int status = 0;
union pmu_ap_cmd ap_cmd;
@@ -558,21 +541,20 @@ static ssize_t aelpg_enable_store(struct device *device,
status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
}
} else {
dev_info(device, "PMU is not ready, AELPG request failed\n");
dev_info(dev, "PMU is not ready, AELPG request failed\n");
}
gk20a_idle(g->dev);
dev_info(device, "AELPG is %s.\n", g->aelpg_enabled ? "enabled" :
dev_info(dev, "AELPG is %s.\n", g->aelpg_enabled ? "enabled" :
"disabled");
return count;
}
static ssize_t aelpg_enable_read(struct device *device,
static ssize_t aelpg_enable_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
return sprintf(buf, "%d\n", g->aelpg_enabled ? 1 : 0);
}
@@ -581,19 +563,17 @@ static DEVICE_ATTR(aelpg_enable, ROOTRW,
aelpg_enable_read, aelpg_enable_store);
static ssize_t allow_all_enable_read(struct device *device,
static ssize_t allow_all_enable_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
return sprintf(buf, "%d\n", g->allow_all ? 1 : 0);
}
static ssize_t allow_all_enable_store(struct device *device,
static ssize_t allow_all_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
int err;
@@ -610,11 +590,10 @@ static ssize_t allow_all_enable_store(struct device *device,
static DEVICE_ATTR(allow_all, ROOTRW,
allow_all_enable_read, allow_all_enable_store);
static ssize_t emc3d_ratio_store(struct device *device,
static ssize_t emc3d_ratio_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
if (kstrtoul(buf, 10, &val) < 0)
@@ -625,22 +604,20 @@ static ssize_t emc3d_ratio_store(struct device *device,
return count;
}
static ssize_t emc3d_ratio_read(struct device *device,
static ssize_t emc3d_ratio_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
return sprintf(buf, "%d\n", g->emc3d_ratio);
}
static DEVICE_ATTR(emc3d_ratio, ROOTRW, emc3d_ratio_read, emc3d_ratio_store);
static ssize_t fmax_at_vmin_safe_read(struct device *device,
static ssize_t fmax_at_vmin_safe_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long gpu_fmax_at_vmin_hz = 0;
gpu_fmax_at_vmin_hz = tegra_dvfs_get_fmax_at_vmin_safe_t(
@@ -652,11 +629,10 @@ static ssize_t fmax_at_vmin_safe_read(struct device *device,
static DEVICE_ATTR(fmax_at_vmin_safe, S_IRUGO, fmax_at_vmin_safe_read, NULL);
#ifdef CONFIG_PM
static ssize_t force_idle_store(struct device *device,
static ssize_t force_idle_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
int err = 0;
@@ -667,10 +643,10 @@ static ssize_t force_idle_store(struct device *device,
if (g->forced_idle)
return count; /* do nothing */
else {
err = __gk20a_do_idle(ndev, false);
err = __gk20a_do_idle(dev, false);
if (!err) {
g->forced_idle = 1;
dev_info(device, "gpu is idle : %d\n",
dev_info(dev, "gpu is idle : %d\n",
g->forced_idle);
}
}
@@ -678,10 +654,10 @@ static ssize_t force_idle_store(struct device *device,
if (!g->forced_idle)
return count; /* do nothing */
else {
err = __gk20a_do_unidle(ndev);
err = __gk20a_do_unidle(dev);
if (!err) {
g->forced_idle = 0;
dev_info(device, "gpu is idle : %d\n",
dev_info(dev, "gpu is idle : %d\n",
g->forced_idle);
}
}
@@ -690,11 +666,10 @@ static ssize_t force_idle_store(struct device *device,
return count;
}
static ssize_t force_idle_read(struct device *device,
static ssize_t force_idle_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
return sprintf(buf, "%d\n", g->forced_idle ? 1 : 0);
}
@@ -702,11 +677,10 @@ static ssize_t force_idle_read(struct device *device,
static DEVICE_ATTR(force_idle, ROOTRW, force_idle_read, force_idle_store);
#endif
static ssize_t tpc_fs_mask_store(struct device *device,
static ssize_t tpc_fs_mask_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
if (kstrtoul(buf, 10, &val) < 0)
@@ -730,11 +704,10 @@ static ssize_t tpc_fs_mask_store(struct device *device,
return count;
}
static ssize_t tpc_fs_mask_read(struct device *device,
static ssize_t tpc_fs_mask_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ndev = to_platform_device(device);
struct gk20a *g = get_gk20a(ndev);
struct gk20a *g = get_gk20a(dev);
struct gr_gk20a *gr = &g->gr;
u32 gpc_index;
u32 tpc_fs_mask = 0;
@@ -760,7 +733,7 @@ static DEVICE_ATTR(tpc_fs_mask, ROOTRW, tpc_fs_mask_read, tpc_fs_mask_store);
void gk20a_remove_sysfs(struct device *dev)
{
struct gk20a *g = get_gk20a(to_platform_device(dev));
struct gk20a *g = get_gk20a(dev);
device_remove_file(dev, &dev_attr_elcg_enable);
device_remove_file(dev, &dev_attr_blcg_enable);
@@ -797,50 +770,50 @@ void gk20a_remove_sysfs(struct device *dev)
}
}
void gk20a_create_sysfs(struct platform_device *dev)
void gk20a_create_sysfs(struct device *dev)
{
struct gk20a *g = get_gk20a(dev);
struct gk20a *g = gk20a_from_dev(dev);
int error = 0;
error |= device_create_file(&dev->dev, &dev_attr_elcg_enable);
error |= device_create_file(&dev->dev, &dev_attr_blcg_enable);
error |= device_create_file(&dev->dev, &dev_attr_slcg_enable);
error |= device_create_file(&dev->dev, &dev_attr_ptimer_scale_factor);
error |= device_create_file(&dev->dev, &dev_attr_elpg_enable);
error |= device_create_file(&dev->dev, &dev_attr_emc3d_ratio);
error |= device_create_file(&dev->dev, &dev_attr_fmax_at_vmin_safe);
error |= device_create_file(&dev->dev, &dev_attr_counters);
error |= device_create_file(&dev->dev, &dev_attr_counters_reset);
error |= device_create_file(&dev->dev, &dev_attr_load);
error |= device_create_file(&dev->dev, &dev_attr_railgate_delay);
error |= device_create_file(&dev->dev, &dev_attr_is_railgated);
error |= device_create_file(&dev->dev, &dev_attr_clockgate_delay);
error |= device_create_file(dev, &dev_attr_elcg_enable);
error |= device_create_file(dev, &dev_attr_blcg_enable);
error |= device_create_file(dev, &dev_attr_slcg_enable);
error |= device_create_file(dev, &dev_attr_ptimer_scale_factor);
error |= device_create_file(dev, &dev_attr_elpg_enable);
error |= device_create_file(dev, &dev_attr_emc3d_ratio);
error |= device_create_file(dev, &dev_attr_fmax_at_vmin_safe);
error |= device_create_file(dev, &dev_attr_counters);
error |= device_create_file(dev, &dev_attr_counters_reset);
error |= device_create_file(dev, &dev_attr_load);
error |= device_create_file(dev, &dev_attr_railgate_delay);
error |= device_create_file(dev, &dev_attr_is_railgated);
error |= device_create_file(dev, &dev_attr_clockgate_delay);
#ifdef CONFIG_PM
error |= device_create_file(&dev->dev, &dev_attr_force_idle);
error |= device_create_file(dev, &dev_attr_force_idle);
#if defined(CONFIG_PM_GENERIC_DOMAINS)
error |= device_create_file(&dev->dev, &dev_attr_railgate_enable);
error |= device_create_file(dev, &dev_attr_railgate_enable);
#endif
#endif
error |= device_create_file(&dev->dev, &dev_attr_aelpg_param);
error |= device_create_file(&dev->dev, &dev_attr_aelpg_enable);
error |= device_create_file(&dev->dev, &dev_attr_allow_all);
error |= device_create_file(&dev->dev, &dev_attr_tpc_fs_mask);
error |= device_create_file(dev, &dev_attr_aelpg_param);
error |= device_create_file(dev, &dev_attr_aelpg_enable);
error |= device_create_file(dev, &dev_attr_allow_all);
error |= device_create_file(dev, &dev_attr_tpc_fs_mask);
if (g->host1x_dev && (dev->dev.parent != &g->host1x_dev->dev)) {
if (g->host1x_dev && (dev->parent != &g->host1x_dev->dev)) {
error |= sysfs_create_link(&g->host1x_dev->dev.kobj,
&dev->dev.kobj,
dev_name(&dev->dev));
if (strcmp(dev_name(&dev->dev), "gpu.0")) {
struct kobject *kobj = &dev->dev.kobj;
&dev->kobj,
dev_name(dev));
if (strcmp(dev_name(dev), "gpu.0")) {
struct kobject *kobj = &dev->kobj;
struct device *parent = container_of((kobj->parent),
struct device, kobj);
error |= sysfs_create_link(&parent->kobj,
&dev->dev.kobj, "gpu.0");
&dev->kobj, "gpu.0");
}
}
if (error)
dev_err(&dev->dev, "Failed to create sysfs attributes!\n");
dev_err(dev, "Failed to create sysfs attributes!\n");
}


@@ -2439,10 +2439,10 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
{
struct gk20a_platform *platform = platform_get_drvdata(g->dev);
struct gk20a_platform *platform = dev_get_drvdata(g->dev);
struct gr_gk20a *gr = &g->gr;
int i, attr_buffer_size, err;
struct platform_device *pdev = g->dev;
struct device *dev = g->dev;
u32 cb_buffer_size = gr->bundle_cb_default_size *
gr_scc_bundle_cb_size_div_256b_byte_granularity_v();
@@ -2462,7 +2462,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
goto clean_up;
if (platform->secure_alloc)
platform->secure_alloc(pdev,
platform->secure_alloc(dev,
&gr->global_ctx_buffer[CIRCULAR_VPR],
cb_buffer_size);
@@ -2474,7 +2474,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
goto clean_up;
if (platform->secure_alloc)
platform->secure_alloc(pdev,
platform->secure_alloc(dev,
&gr->global_ctx_buffer[PAGEPOOL_VPR],
pagepool_buffer_size);
@@ -2486,12 +2486,12 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
goto clean_up;
if (platform->secure_alloc)
platform->secure_alloc(pdev,
platform->secure_alloc(dev,
&gr->global_ctx_buffer[ATTRIBUTE_VPR],
attr_buffer_size);
if (platform->secure_buffer.destroy)
platform->secure_buffer.destroy(pdev, &platform->secure_buffer);
platform->secure_buffer.destroy(dev, &platform->secure_buffer);
gk20a_dbg_info("golden_image_size : %d",
gr->ctx_vars.golden_image_size);
@@ -5016,7 +5016,7 @@ static int gr_gk20a_handle_sw_method(struct gk20a *g, u32 addr,
{
gk20a_dbg_fn("");
trace_gr_gk20a_handle_sw_method(g->dev->name);
trace_gr_gk20a_handle_sw_method(dev_name(g->dev));
if (class_num == KEPLER_COMPUTE_A) {
switch (offset << 2) {
@@ -8113,7 +8113,7 @@ static int gr_gk20a_dump_gr_status_regs(struct gk20a *g,
#ifdef CONFIG_DEBUG_FS
int gr_gk20a_debugfs_init(struct gk20a *g)
{
struct gk20a_platform *platform = platform_get_drvdata(g->dev);
struct gk20a_platform *platform = dev_get_drvdata(g->dev);
g->debugfs_gr_default_attrib_cb_size =
debugfs_create_u32("gr_default_attrib_cb_size",


@@ -1,7 +1,7 @@
/*
* NVIDIA GPU HAL interface.
*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -41,7 +41,7 @@ int gpu_init_hal(struct gk20a *g)
break;
#endif
default:
gk20a_err(&g->dev->dev, "no support for %x", ver);
gk20a_err(g->dev, "no support for %x", ver);
return -ENODEV;
}


@@ -111,7 +111,7 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
gk20a_dbg_fn("");
trace_gk20a_ltc_cbc_ctrl_start(g->dev->name, op, min, max);
trace_gk20a_ltc_cbc_ctrl_start(dev_name(g->dev), op, min, max);
if (gr->compbit_store.mem.size == 0)
return 0;
@@ -163,7 +163,7 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
}
}
out:
trace_gk20a_ltc_cbc_ctrl_done(g->dev->name);
trace_gk20a_ltc_cbc_ctrl_done(dev_name(g->dev));
mutex_unlock(&g->mm.l2_op_lock);
return err;
}
@@ -204,7 +204,7 @@ static void gk20a_mm_g_elpg_flush_locked(struct gk20a *g)
gk20a_dbg_fn("");
trace_gk20a_mm_g_elpg_flush_locked(g->dev->name);
trace_gk20a_mm_g_elpg_flush_locked(dev_name(g->dev));
/* Make sure all previous writes are committed to the L2. There's no
guarantee that writes are to DRAM. This will be a sysmembar internal
@@ -227,7 +227,7 @@ static void gk20a_mm_g_elpg_flush_locked(struct gk20a *g)
gk20a_warn(dev_from_gk20a(g),
"g_elpg_flush too many retries");
trace_gk20a_mm_g_elpg_flush_locked_done(g->dev->name);
trace_gk20a_mm_g_elpg_flush_locked_done(dev_name(g->dev));
}


@@ -1,7 +1,7 @@
/*
* GK20A memory interface
* GK20A Master Control
*
* Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -24,7 +24,7 @@ irqreturn_t mc_gk20a_isr_stall(struct gk20a *g)
{
u32 mc_intr_0;
trace_mc_gk20a_intr_stall(g->dev->name);
trace_mc_gk20a_intr_stall(dev_name(g->dev));
if (!g->power_on)
return IRQ_NONE;
@@ -42,7 +42,7 @@ irqreturn_t mc_gk20a_isr_stall(struct gk20a *g)
atomic_inc(&g->hw_irq_stall_count);
trace_mc_gk20a_intr_stall_done(g->dev->name);
trace_mc_gk20a_intr_stall_done(dev_name(g->dev));
return IRQ_WAKE_THREAD;
}
@@ -77,7 +77,7 @@ irqreturn_t mc_gk20a_intr_thread_stall(struct gk20a *g)
gk20a_dbg(gpu_dbg_intr, "interrupt thread launched");
trace_mc_gk20a_intr_thread_stall(g->dev->name);
trace_mc_gk20a_intr_thread_stall(dev_name(g->dev));
mc_intr_0 = gk20a_readl(g, mc_intr_0_r());
hw_irq_count = atomic_read(&g->hw_irq_stall_count);
@@ -111,7 +111,7 @@ irqreturn_t mc_gk20a_intr_thread_stall(struct gk20a *g)
wake_up_all(&g->sw_irq_stall_last_handled_wq);
trace_mc_gk20a_intr_thread_stall_done(g->dev->name);
trace_mc_gk20a_intr_thread_stall_done(dev_name(g->dev));
return IRQ_HANDLED;
}


@@ -3515,7 +3515,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
gk20a_busy_noresume(g->dev);
if (!g->power_on) {
pm_runtime_put_noidle(&g->dev->dev);
pm_runtime_put_noidle(g->dev);
return 0;
}
@@ -3525,7 +3525,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
guarantee that writes are to DRAM. This will be a sysmembar internal
to the L2. */
trace_gk20a_mm_fb_flush(g->dev->name);
trace_gk20a_mm_fb_flush(dev_name(g->dev));
gk20a_writel(g, flush_fb_flush_r(),
flush_fb_flush_pending_busy_f());
@@ -3552,11 +3552,11 @@ int gk20a_mm_fb_flush(struct gk20a *g)
ret = -EBUSY;
}
trace_gk20a_mm_fb_flush_done(g->dev->name);
trace_gk20a_mm_fb_flush_done(dev_name(g->dev));
mutex_unlock(&mm->l2_op_lock);
pm_runtime_put_noidle(&g->dev->dev);
pm_runtime_put_noidle(g->dev);
return ret;
}
@@ -3566,7 +3566,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
u32 data;
s32 retry = 200;
trace_gk20a_mm_l2_invalidate(g->dev->name);
trace_gk20a_mm_l2_invalidate(dev_name(g->dev));
/* Invalidate any clean lines from the L2 so subsequent reads go to
DRAM. Dirty lines are not affected by this operation. */
@@ -3592,7 +3592,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
gk20a_warn(dev_from_gk20a(g),
"l2_system_invalidate too many retries");
trace_gk20a_mm_l2_invalidate_done(g->dev->name);
trace_gk20a_mm_l2_invalidate_done(dev_name(g->dev));
}
void gk20a_mm_l2_invalidate(struct gk20a *g)
@@ -3604,7 +3604,7 @@ void gk20a_mm_l2_invalidate(struct gk20a *g)
gk20a_mm_l2_invalidate_locked(g);
mutex_unlock(&mm->l2_op_lock);
}
pm_runtime_put_noidle(&g->dev->dev);
pm_runtime_put_noidle(g->dev);
}
void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
@@ -3621,7 +3621,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
mutex_lock(&mm->l2_op_lock);
trace_gk20a_mm_l2_flush(g->dev->name);
trace_gk20a_mm_l2_flush(dev_name(g->dev));
/* Flush all dirty lines from the L2 to DRAM. Lines are left in the L2
as clean, so subsequent reads might hit in the L2. */
@@ -3646,7 +3646,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
gk20a_warn(dev_from_gk20a(g),
"l2_flush_dirty too many retries");
trace_gk20a_mm_l2_flush_done(g->dev->name);
trace_gk20a_mm_l2_flush_done(dev_name(g->dev));
if (invalidate)
gk20a_mm_l2_invalidate_locked(g);
@@ -3654,7 +3654,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
mutex_unlock(&mm->l2_op_lock);
hw_was_off:
pm_runtime_put_noidle(&g->dev->dev);
pm_runtime_put_noidle(g->dev);
}
void gk20a_mm_cbc_clean(struct gk20a *g)
@@ -3696,7 +3696,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
mutex_unlock(&mm->l2_op_lock);
hw_was_off:
pm_runtime_put_noidle(&g->dev->dev);
pm_runtime_put_noidle(g->dev);
}
int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
@@ -3746,7 +3746,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
mutex_lock(&tlb_lock);
trace_gk20a_mm_tlb_invalidate(g->dev->name);
trace_gk20a_mm_tlb_invalidate(dev_name(g->dev));
do {
data = gk20a_readl(g, fb_mmu_ctrl_r());
@@ -3783,7 +3783,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
gk20a_warn(dev_from_gk20a(g),
"mmu invalidate too many retries");
trace_gk20a_mm_tlb_invalidate_done(g->dev->name);
trace_gk20a_mm_tlb_invalidate_done(dev_name(g->dev));
out:
mutex_unlock(&tlb_lock);
@@ -3868,11 +3868,11 @@ clean_up:
return err;
}
void gk20a_mm_debugfs_init(struct platform_device *pdev)
void gk20a_mm_debugfs_init(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct dentry *gpu_root = platform->debugfs;
struct gk20a *g = gk20a_get_platform(pdev)->g;
struct gk20a *g = gk20a_get_platform(dev)->g;
debugfs_create_x64("separate_fixed_allocs", 0664, gpu_root,
&g->separate_fixed_allocs);


@@ -288,7 +288,7 @@ struct channel_gk20a;
int gk20a_init_mm_support(struct gk20a *g);
int gk20a_init_mm_setup_sw(struct gk20a *g);
int gk20a_init_mm_setup_hw(struct gk20a *g);
void gk20a_mm_debugfs_init(struct platform_device *pdev);
void gk20a_mm_debugfs_init(struct device *dev);
int gk20a_mm_fb_flush(struct gk20a *g);
void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate);


@@ -1,9 +1,7 @@
/*
* drivers/video/tegra/host/gk20a/soc/platform_gk20a.h
*
* GK20A Platform (SoC) Interface
*
* Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -18,7 +16,7 @@
#ifndef _GK20A_PLATFORM_H_
#define _GK20A_PLATFORM_H_
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/pm_domain.h>
#include <linux/dma-attrs.h>
@@ -28,7 +26,7 @@ struct gr_ctx_buffer_desc;
struct gk20a_scale_profile;
struct secure_page_buffer {
void (*destroy)(struct platform_device *, struct secure_page_buffer *);
void (*destroy)(struct device *, struct secure_page_buffer *);
size_t size;
u64 iova;
struct dma_attrs attrs;
@@ -114,28 +112,28 @@ struct gk20a_platform {
* After this function is finished, the driver will initialise
* pm runtime and genpd based on the platform configuration.
*/
int (*probe)(struct platform_device *dev);
int (*probe)(struct device *dev);
/* Second stage initialisation - called once all power management
* initialisations are done.
*/
int (*late_probe)(struct platform_device *dev);
int (*late_probe)(struct device *dev);
/* Remove device after power management has been done
*/
int (*remove)(struct platform_device *dev);
int (*remove)(struct device *dev);
/* Poweron platform dependencies */
int (*busy)(struct platform_device *dev);
int (*busy)(struct device *dev);
/* Powerdown platform dependencies */
void (*idle)(struct platform_device *dev);
void (*idle)(struct device *dev);
/* This function is called to allocate secure memory (memory that the
* CPU cannot see). The function should fill the context buffer
* descriptor (especially fields destroy, sgt, size).
*/
int (*secure_alloc)(struct platform_device *dev,
int (*secure_alloc)(struct device *dev,
struct gr_ctx_buffer_desc *desc,
size_t size);
@@ -143,7 +141,7 @@ struct gk20a_platform {
* This is also helpful to trigger secure memory resizing
* while GPU is off
*/
int (*secure_page_alloc)(struct platform_device *dev);
int (*secure_page_alloc)(struct device *dev);
struct secure_page_buffer secure_buffer;
bool secure_alloc_ready;
@@ -151,33 +149,33 @@ struct gk20a_platform {
int (*suspend)(struct device *);
/* Called to turn off the device */
int (*railgate)(struct platform_device *dev);
int (*railgate)(struct device *dev);
/* Called to turn on the device */
int (*unrailgate)(struct platform_device *dev);
int (*unrailgate)(struct device *dev);
struct mutex railgate_lock;
/* Called to check state of device */
bool (*is_railgated)(struct platform_device *dev);
bool (*is_railgated)(struct device *dev);
/* get supported frequency list */
int (*get_clk_freqs)(struct platform_device *pdev,
int (*get_clk_freqs)(struct device *pdev,
unsigned long **freqs, int *num_freqs);
/* clk related supported functions */
unsigned long (*clk_get_rate)(struct platform_device *pdev);
long (*clk_round_rate)(struct platform_device *pdev,
unsigned long (*clk_get_rate)(struct device *dev);
long (*clk_round_rate)(struct device *dev,
unsigned long rate);
int (*clk_set_rate)(struct platform_device *pdev,
int (*clk_set_rate)(struct device *dev,
unsigned long rate);
/* Postscale callback is called after frequency change */
void (*postscale)(struct platform_device *pdev,
void (*postscale)(struct device *dev,
unsigned long freq);
/* Pre callback is called before frequency change */
void (*prescale)(struct platform_device *pdev);
void (*prescale)(struct device *dev);
/* Devfreq governor name. If scaling is enabled, we request
* this governor to be used in scaling */
@@ -193,11 +191,11 @@ struct gk20a_platform {
* hw units which may interact with the gpu without direct supervision
* of the CPU.
*/
void (*dump_platform_dependencies)(struct platform_device *dev);
void (*dump_platform_dependencies)(struct device *dev);
/* Callbacks to assert/deassert GPU reset */
int (*reset_assert)(struct platform_device *pdev);
int (*reset_deassert)(struct platform_device *pdev);
int (*reset_assert)(struct device *dev);
int (*reset_deassert)(struct device *dev);
struct clk *clk_reset;
struct dvfs_rail *gpu_rail;
@@ -210,12 +208,15 @@ struct gk20a_platform {
u32 ptimer_src_freq;
bool has_cde;
/* soc name for finding firmware files */
const char *soc_name;
};
static inline struct gk20a_platform *gk20a_get_platform(
struct platform_device *dev)
struct device *dev)
{
return (struct gk20a_platform *)platform_get_drvdata(dev);
return (struct gk20a_platform *)dev_get_drvdata(dev);
}
extern struct gk20a_platform gk20a_generic_platform;
@@ -227,14 +228,14 @@ extern struct gk20a_platform vgpu_tegra_platform;
#endif
#endif
static inline bool gk20a_platform_has_syncpoints(struct platform_device *dev)
static inline bool gk20a_platform_has_syncpoints(struct device *dev)
{
struct gk20a_platform *p = gk20a_get_platform(dev);
struct gk20a_platform *p = dev_get_drvdata(dev);
return p->has_syncpoints;
}
int gk20a_tegra_busy(struct platform_device *dev);
void gk20a_tegra_idle(struct platform_device *dev);
void gk20a_tegra_debug_dump(struct platform_device *pdev);
int gk20a_tegra_busy(struct device *dev);
void gk20a_tegra_idle(struct device *dev);
void gk20a_tegra_debug_dump(struct device *pdev);
#endif

View File

@@ -31,9 +31,9 @@
* the clock information to gk20a platform data.
*/
static int gk20a_generic_get_clocks(struct platform_device *pdev)
static int gk20a_generic_get_clocks(struct device *pdev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(pdev);
platform->clk[0] = clk_get_sys("tegra_gk20a.0", "PLLG_ref");
platform->clk[1] = clk_get_sys("tegra_gk20a.0", "pwr");
@@ -65,19 +65,19 @@ err_get_clock:
return -ENODEV;
}
static int gk20a_generic_probe(struct platform_device *dev)
static int gk20a_generic_probe(struct device *dev)
{
gk20a_generic_get_clocks(dev);
return 0;
}
static int gk20a_generic_late_probe(struct platform_device *dev)
static int gk20a_generic_late_probe(struct device *dev)
{
return 0;
}
static int gk20a_generic_remove(struct platform_device *dev)
static int gk20a_generic_remove(struct device *dev)
{
return 0;
}

View File

@@ -1,6 +1,4 @@
/*
* drivers/video/tegra/host/gk20a/platform_gk20a_tegra.c
*
* GK20A Tegra Platform Interface
*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
@@ -66,7 +64,7 @@ static inline void pmc_write(u32 val, unsigned long reg)
#define MHZ_TO_HZ(x) ((x) * 1000000)
#define HZ_TO_MHZ(x) ((x) / 1000000)
static void gk20a_tegra_secure_page_destroy(struct platform_device *pdev,
static void gk20a_tegra_secure_page_destroy(struct device *dev,
struct secure_page_buffer *secure_buffer)
{
dma_free_attrs(&tegra_vpr_dev, secure_buffer->size,
@@ -74,9 +72,9 @@ static void gk20a_tegra_secure_page_destroy(struct platform_device *pdev,
secure_buffer->iova, &secure_buffer->attrs);
}
int gk20a_tegra_secure_page_alloc(struct platform_device *pdev)
int gk20a_tegra_secure_page_alloc(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
DEFINE_DMA_ATTRS(attrs);
dma_addr_t iova;
@@ -113,12 +111,11 @@ static void gk20a_tegra_secure_destroy(struct gk20a *g,
}
}
int gk20a_tegra_secure_alloc(struct platform_device *pdev,
int gk20a_tegra_secure_alloc(struct device *dev,
struct gr_ctx_buffer_desc *desc,
size_t size)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct gk20a_platform *platform = dev_get_drvdata(dev);
DEFINE_DMA_ATTRS(attrs);
dma_addr_t iova;
struct sg_table *sgt;
@@ -198,13 +195,13 @@ static unsigned long gk20a_tegra_get_emc_rate(struct gk20a *g,
* This function sets emc frequency based on current gpu frequency
*/
static void gk20a_tegra_postscale(struct platform_device *pdev,
static void gk20a_tegra_postscale(struct device *dev,
unsigned long freq)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct gk20a_scale_profile *profile = platform->g->scale_profile;
struct gk20a_emc_params *emc_params = profile->private_data;
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
struct clk *emc_clk = platform->clk[2];
enum tegra_chipid chip_id = tegra_get_chip_id();
unsigned long emc_target;
@@ -257,9 +254,9 @@ static void gk20a_tegra_postscale(struct platform_device *pdev,
* This function informs EDP about changed constraints.
*/
static void gk20a_tegra_prescale(struct platform_device *pdev)
static void gk20a_tegra_prescale(struct device *dev)
{
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
u32 avg = 0;
gk20a_pmu_load_norm(g, &avg);
@@ -271,7 +268,7 @@ static void gk20a_tegra_prescale(struct platform_device *pdev)
*
*/
static void gk20a_tegra_calibrate_emc(struct platform_device *pdev,
static void gk20a_tegra_calibrate_emc(struct device *dev,
struct gk20a_emc_params *emc_params)
{
enum tegra_chipid cid = tegra_get_chipid();
@@ -308,9 +305,9 @@ static void gk20a_tegra_calibrate_emc(struct platform_device *pdev,
* Check status of gk20a power rail
*/
static bool gk20a_tegra_is_railgated(struct platform_device *pdev)
static bool gk20a_tegra_is_railgated(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
bool ret = false;
if (!tegra_platform_is_linsim())
@@ -325,9 +322,9 @@ static bool gk20a_tegra_is_railgated(struct platform_device *pdev)
* Gate (disable) gk20a power rail
*/
static int gk20a_tegra_railgate(struct platform_device *pdev)
static int gk20a_tegra_railgate(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
int ret = 0;
if (tegra_platform_is_linsim() ||
@@ -344,7 +341,7 @@ static int gk20a_tegra_railgate(struct platform_device *pdev)
udelay(10);
platform->reset_assert(pdev);
platform->reset_assert(dev);
udelay(10);
@@ -367,7 +364,7 @@ static int gk20a_tegra_railgate(struct platform_device *pdev)
return 0;
err_power_off:
gk20a_err(&pdev->dev, "Could not railgate GPU");
gk20a_err(dev, "Could not railgate GPU");
return ret;
}
@@ -377,9 +374,9 @@ err_power_off:
* Gate (disable) gm20b power rail
*/
static int gm20b_tegra_railgate(struct platform_device *pdev)
static int gm20b_tegra_railgate(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
int ret = 0;
if (tegra_platform_is_linsim() ||
@@ -396,7 +393,7 @@ static int gm20b_tegra_railgate(struct platform_device *pdev)
udelay(10);
platform->reset_assert(pdev);
platform->reset_assert(dev);
udelay(10);
@@ -422,7 +419,7 @@ static int gm20b_tegra_railgate(struct platform_device *pdev)
return 0;
err_power_off:
gk20a_err(&pdev->dev, "Could not railgate GPU");
gk20a_err(dev, "Could not railgate GPU");
return ret;
}
@@ -432,9 +429,9 @@ err_power_off:
* Ungate (enable) gk20a power rail
*/
static int gk20a_tegra_unrailgate(struct platform_device *pdev)
static int gk20a_tegra_unrailgate(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
int ret = 0;
bool first = false;
@@ -457,19 +454,19 @@ static int gk20a_tegra_unrailgate(struct platform_device *pdev)
if (!first) {
ret = clk_enable(platform->clk[0]);
if (ret) {
gk20a_err(&pdev->dev, "could not turn on gpu pll");
gk20a_err(dev, "could not turn on gpu pll");
goto err_clk_on;
}
ret = clk_enable(platform->clk[1]);
if (ret) {
gk20a_err(&pdev->dev, "could not turn on pwr clock");
gk20a_err(dev, "could not turn on pwr clock");
goto err_clk_on;
}
}
udelay(10);
platform->reset_assert(pdev);
platform->reset_assert(dev);
udelay(10);
@@ -478,7 +475,7 @@ static int gk20a_tegra_unrailgate(struct platform_device *pdev)
udelay(10);
platform->reset_deassert(pdev);
platform->reset_deassert(dev);
/* Flush MC after boot/railgate/SC7 */
tegra_mc_flush(MC_CLIENT_GPU);
@@ -503,9 +500,9 @@ err_clk_on:
* Ungate (enable) gm20b power rail
*/
static int gm20b_tegra_unrailgate(struct platform_device *pdev)
static int gm20b_tegra_unrailgate(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
int ret = 0;
bool first = false;
@@ -530,25 +527,25 @@ static int gm20b_tegra_unrailgate(struct platform_device *pdev)
if (!first) {
ret = clk_enable(platform->clk_reset);
if (ret) {
gk20a_err(&pdev->dev, "could not turn on gpu_gate");
gk20a_err(dev, "could not turn on gpu_gate");
goto err_clk_on;
}
ret = clk_enable(platform->clk[0]);
if (ret) {
gk20a_err(&pdev->dev, "could not turn on gpu pll");
gk20a_err(dev, "could not turn on gpu pll");
goto err_clk_on;
}
ret = clk_enable(platform->clk[1]);
if (ret) {
gk20a_err(&pdev->dev, "could not turn on pwr clock");
gk20a_err(dev, "could not turn on pwr clock");
goto err_clk_on;
}
}
udelay(10);
platform->reset_assert(pdev);
platform->reset_assert(dev);
udelay(10);
@@ -558,7 +555,7 @@ static int gm20b_tegra_unrailgate(struct platform_device *pdev)
udelay(10);
clk_disable(platform->clk_reset);
platform->reset_deassert(pdev);
platform->reset_deassert(dev);
clk_enable(platform->clk_reset);
/* Flush MC after boot/railgate/SC7 */
@@ -594,16 +591,14 @@ static struct {
* the clock information to gk20a platform data.
*/
static int gk20a_tegra_get_clocks(struct platform_device *pdev)
static int gk20a_tegra_get_clocks(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
char devname[16];
int i;
int ret = 0;
snprintf(devname, sizeof(devname),
(pdev->id <= 0) ? "tegra_%s" : "tegra_%s.%d\n",
pdev->name, pdev->id);
snprintf(devname, sizeof(devname), "tegra_%s", dev_name(dev));
platform->num_clks = 0;
for (i = 0; i < ARRAY_SIZE(tegra_gk20a_clocks); i++) {
@@ -630,7 +625,7 @@ err_get_clock:
return ret;
}
static int gk20a_tegra_reset_assert(struct platform_device *dev)
static int gk20a_tegra_reset_assert(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
@@ -642,7 +637,7 @@ static int gk20a_tegra_reset_assert(struct platform_device *dev)
return 0;
}
static int gk20a_tegra_reset_deassert(struct platform_device *dev)
static int gk20a_tegra_reset_deassert(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
@@ -654,14 +649,14 @@ static int gk20a_tegra_reset_deassert(struct platform_device *dev)
return 0;
}
static int gm20b_tegra_reset_assert(struct platform_device *dev)
static int gm20b_tegra_reset_assert(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
if (!platform->clk_reset) {
platform->clk_reset = clk_get(&dev->dev, "gpu_gate");
platform->clk_reset = clk_get(dev, "gpu_gate");
if (IS_ERR(platform->clk_reset)) {
gk20a_err(&dev->dev, "fail to get gpu reset clk\n");
gk20a_err(dev, "fail to get gpu reset clk\n");
return PTR_ERR(platform->clk_reset);
}
}
@@ -671,9 +666,9 @@ static int gm20b_tegra_reset_assert(struct platform_device *dev)
return 0;
}
static void gk20a_tegra_scale_init(struct platform_device *pdev)
static void gk20a_tegra_scale_init(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(pdev);
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a_scale_profile *profile = platform->g->scale_profile;
struct gk20a_emc_params *emc_params;
@@ -685,28 +680,28 @@ static void gk20a_tegra_scale_init(struct platform_device *pdev)
return;
emc_params->freq_last_set = -1;
gk20a_tegra_calibrate_emc(pdev, emc_params);
gk20a_tegra_calibrate_emc(dev, emc_params);
profile->private_data = emc_params;
}
static void gk20a_tegra_scale_exit(struct platform_device *pdev)
static void gk20a_tegra_scale_exit(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct gk20a_scale_profile *profile = platform->g->scale_profile;
if (profile)
kfree(profile->private_data);
}
void gk20a_tegra_debug_dump(struct platform_device *pdev)
void gk20a_tegra_debug_dump(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(pdev);
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g;
nvhost_debug_dump_device(g->host1x_dev);
}
int gk20a_tegra_busy(struct platform_device *dev)
int gk20a_tegra_busy(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g;
@@ -716,7 +711,7 @@ int gk20a_tegra_busy(struct platform_device *dev)
return 0;
}
void gk20a_tegra_idle(struct platform_device *dev)
void gk20a_tegra_idle(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g;
@@ -725,10 +720,10 @@ void gk20a_tegra_idle(struct platform_device *dev)
nvhost_module_idle_ext(g->host1x_dev);
}
static int gk20a_tegra_probe(struct platform_device *dev)
static int gk20a_tegra_probe(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct device_node *np = dev->dev.of_node;
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct device_node *np = dev->of_node;
const __be32 *host1x_ptr;
struct platform_device *host1x_pdev = NULL;
@@ -739,13 +734,13 @@ static int gk20a_tegra_probe(struct platform_device *dev)
host1x_pdev = of_find_device_by_node(host1x_node);
if (!host1x_pdev) {
dev_warn(&dev->dev, "host1x device not available");
dev_warn(dev, "host1x device not available");
return -EPROBE_DEFER;
}
} else {
host1x_pdev = to_platform_device(dev->dev.parent);
dev_warn(&dev->dev, "host1x reference not found. assuming host1x to be parent");
host1x_pdev = to_platform_device(dev->parent);
dev_warn(dev, "host1x reference not found. assuming host1x to be parent");
}
platform->g->host1x_dev = host1x_pdev;
@@ -761,7 +756,7 @@ static int gk20a_tegra_probe(struct platform_device *dev)
np = of_find_node_by_path("/gpu-dvfs-rework");
if (!(np && of_device_is_available(np))) {
platform->devfreq_governor = "";
dev_warn(&dev->dev, "board does not support scaling");
dev_warn(dev, "board does not support scaling");
}
}
@@ -770,10 +765,10 @@ static int gk20a_tegra_probe(struct platform_device *dev)
return 0;
}
static int gk20a_tegra_late_probe(struct platform_device *dev)
static int gk20a_tegra_late_probe(struct device *dev)
{
/* Make gk20a power domain a subdomain of host1x */
nvhost_register_client_domain(dev_to_genpd(&dev->dev));
nvhost_register_client_domain(dev_to_genpd(dev));
/* Initialise tegra specific scaling quirks */
gk20a_tegra_scale_init(dev);
@@ -781,15 +776,15 @@ static int gk20a_tegra_late_probe(struct platform_device *dev)
return 0;
}
static int gk20a_tegra_remove(struct platform_device *dev)
static int gk20a_tegra_remove(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
if (platform->g->host1x_dev)
nvhost_unregister_dump_device(platform->g->host1x_dev);
/* remove gk20a power subdomain from host1x */
nvhost_unregister_client_domain(dev_to_genpd(&dev->dev));
nvhost_unregister_client_domain(dev_to_genpd(dev));
/* deinitialise tegra specific scaling quirks */
gk20a_tegra_scale_exit(dev);
@@ -804,7 +799,7 @@ static int gk20a_tegra_suspend(struct device *dev)
}
#ifdef CONFIG_TEGRA_CLK_FRAMEWORK
static unsigned long gk20a_get_clk_rate(struct platform_device *dev)
static unsigned long gk20a_get_clk_rate(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g;
@@ -813,8 +808,7 @@ static unsigned long gk20a_get_clk_rate(struct platform_device *dev)
}
static long gk20a_round_clk_rate(struct platform_device *dev,
unsigned long rate)
static long gk20a_round_clk_rate(struct device *dev, unsigned long rate)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g;
@@ -822,7 +816,7 @@ static long gk20a_round_clk_rate(struct platform_device *dev,
return gk20a_clk_round_rate(g, rate);
}
static int gk20a_set_clk_rate(struct platform_device *dev, unsigned long rate)
static int gk20a_set_clk_rate(struct device *dev, unsigned long rate)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g;
@@ -830,7 +824,7 @@ static int gk20a_set_clk_rate(struct platform_device *dev, unsigned long rate)
return gk20a_clk_set_rate(g, rate);
}
static int gk20a_clk_get_freqs(struct platform_device *dev,
static int gk20a_clk_get_freqs(struct device *dev,
unsigned long **freqs, int *num_freqs)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
@@ -900,6 +894,8 @@ struct gk20a_platform gk20a_tegra_platform = {
.secure_alloc = gk20a_tegra_secure_alloc,
.secure_page_alloc = gk20a_tegra_secure_page_alloc,
.dump_platform_dependencies = gk20a_tegra_debug_dump,
.soc_name = "tegra12x",
};
struct gk20a_platform gm20b_tegra_platform = {
@@ -958,4 +954,6 @@ struct gk20a_platform gm20b_tegra_platform = {
.dump_platform_dependencies = gk20a_tegra_debug_dump,
.has_cde = true,
.soc_name = "tegra21x",
};

View File

@@ -1,7 +1,7 @@
/*
* Tegra Virtualized GPU Platform Interface
*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -19,10 +19,10 @@
#include "hal_gk20a.h"
#include "platform_gk20a.h"
static int gk20a_tegra_probe(struct platform_device *dev)
static int gk20a_tegra_probe(struct device *dev)
{
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct device_node *np = dev->dev.of_node;
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct device_node *np = dev->of_node;
const __be32 *host1x_ptr;
struct platform_device *host1x_pdev = NULL;
@@ -33,13 +33,13 @@ static int gk20a_tegra_probe(struct platform_device *dev)
host1x_pdev = of_find_device_by_node(host1x_node);
if (!host1x_pdev) {
dev_warn(&dev->dev, "host1x device not available");
dev_warn(dev, "host1x device not available");
return -EPROBE_DEFER;
}
} else {
host1x_pdev = to_platform_device(dev->dev.parent);
dev_warn(&dev->dev, "host1x reference not found. assuming host1x to be parent");
host1x_pdev = to_platform_device(dev->parent);
dev_warn(dev, "host1x reference not found. assuming host1x to be parent");
}
platform->g->host1x_dev = host1x_pdev;

View File

@@ -1814,7 +1814,7 @@ int pmu_reset(struct pmu_gk20a *pmu)
int pmu_bootstrap(struct pmu_gk20a *pmu)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct gk20a_platform *platform = platform_get_drvdata(g->dev);
struct gk20a_platform *platform = dev_get_drvdata(g->dev);
struct mm_gk20a *mm = &g->mm;
struct pmu_ucode_desc *desc = pmu->desc;
u64 addr_code, addr_data, addr_load;
@@ -2955,7 +2955,7 @@ static u8 get_perfmon_id(struct pmu_gk20a *pmu)
break;
#endif
default:
gk20a_err(&g->dev->dev, "no support for %x", ver);
gk20a_err(g->dev, "no support for %x", ver);
BUG();
}
@@ -4667,10 +4667,10 @@ static const struct file_operations security_fops = {
.release = single_release,
};
int gk20a_pmu_debugfs_init(struct platform_device *dev)
int gk20a_pmu_debugfs_init(struct device *dev)
{
struct dentry *d;
struct gk20a_platform *platform = platform_get_drvdata(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct gk20a *g = get_gk20a(dev);
d = debugfs_create_file(

View File

@@ -1394,7 +1394,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token);
int gk20a_pmu_destroy(struct gk20a *g);
int gk20a_pmu_load_norm(struct gk20a *g, u32 *load);
int gk20a_pmu_load_update(struct gk20a *g);
int gk20a_pmu_debugfs_init(struct platform_device *dev);
int gk20a_pmu_debugfs_init(struct device *dev);
void gk20a_pmu_reset_load_counters(struct gk20a *g);
void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
u32 *total_cycles);

View File

@@ -400,7 +400,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
* regops implementation, so we return -ENOSYS. This will allow
* compute apps to run with vgpu. Tools will not work in this
* configuration and are not required to work at this time. */
if (gk20a_gpu_is_virtual(dbg_s->pdev))
if (gk20a_gpu_is_virtual(dbg_s->dev))
return -ENOSYS;
ok = validate_reg_ops(dbg_s,

View File

@@ -470,7 +470,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
{
err = gk20a_busy(g->dev);
if (err) {
gk20a_err(&g->dev->dev,
gk20a_err(g->dev,
"failed to host gk20a for ioctl cmd: 0x%x", cmd);
return err;
}
@@ -483,7 +483,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
{
err = gk20a_busy(g->dev);
if (err) {
gk20a_err(&g->dev->dev,
gk20a_err(g->dev,
"failed to host gk20a for ioctl cmd: 0x%x", cmd);
return err;
}
@@ -496,7 +496,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
{
err = gk20a_busy(g->dev);
if (err) {
gk20a_err(&g->dev->dev,
gk20a_err(g->dev,
"failed to host gk20a for ioctl cmd: 0x%x", cmd);
return err;
}

View File

@@ -1257,7 +1257,7 @@ static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
struct pmu_gk20a *pmu = &g->pmu;
int err;
struct gk20a_platform *platform = platform_get_drvdata(g->dev);
struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");

View File

@@ -1,7 +1,7 @@
/*
* GM20B Clocks
*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1651,7 +1651,7 @@ static const struct file_operations pll_param_fops = {
static int clk_gm20b_debugfs_init(struct gk20a *g)
{
struct dentry *d;
struct gk20a_platform *platform = platform_get_drvdata(g->dev);
struct gk20a_platform *platform = dev_get_drvdata(g->dev);
d = debugfs_create_file(
"rate", S_IRUGO|S_IWUSR, platform->debugfs, g, &rate_fops);

View File

@@ -112,7 +112,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
gk20a_dbg_fn("");
trace_gk20a_ltc_cbc_ctrl_start(g->dev->name, op, min, max);
trace_gk20a_ltc_cbc_ctrl_start(dev_name(g->dev), op, min, max);
if (gr->compbit_store.mem.size == 0)
return 0;
@@ -162,7 +162,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
}
}
out:
trace_gk20a_ltc_cbc_ctrl_done(g->dev->name);
trace_gk20a_ltc_cbc_ctrl_done(dev_name(g->dev));
mutex_unlock(&g->mm.l2_op_lock);
return err;
}
@@ -230,7 +230,7 @@ void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g)
gk20a_dbg_fn("");
trace_gk20a_mm_g_elpg_flush_locked(g->dev->name);
trace_gk20a_mm_g_elpg_flush_locked(dev_name(g->dev));
for (i = 0; i < g->ltc_count; i++)
done[i] = 0;
@@ -264,7 +264,7 @@ void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g)
gk20a_warn(dev_from_gk20a(g),
"g_elpg_flush too many retries");
trace_gk20a_mm_g_elpg_flush_locked_done(g->dev->name);
trace_gk20a_mm_g_elpg_flush_locked_done(dev_name(g->dev));
}
u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)

View File

@@ -1,7 +1,7 @@
/*
* GM20B MMU
*
* Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -52,7 +52,7 @@ int gm20b_mm_mmu_vpr_info_fetch(struct gk20a *g)
gk20a_busy_noresume(g->dev);
#ifdef CONFIG_PM
if (!pm_runtime_active(&g->dev->dev))
if (!pm_runtime_active(g->dev))
goto fail;
#endif
@@ -67,7 +67,7 @@ int gm20b_mm_mmu_vpr_info_fetch(struct gk20a *g)
ret = gm20b_mm_mmu_vpr_info_fetch_wait(g, VPR_INFO_FETCH_WAIT);
fail:
pm_runtime_put(&g->dev->dev);
pm_runtime_put(g->dev);
return ret;
}

View File

@@ -1,9 +1,7 @@
/*
* drivers/video/tegra/host/gk20a/soc/platform_gk20a.h
*
* GK20A Platform (SoC) Interface
*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -23,9 +21,9 @@
struct platform_device;
struct gr_ctx_buffer_desc;
int gk20a_tegra_secure_alloc(struct platform_device *pdev,
int gk20a_tegra_secure_alloc(struct device *dev,
struct gr_ctx_buffer_desc *desc,
size_t size);
int gk20a_tegra_secure_page_alloc(struct platform_device *pdev);
int gk20a_tegra_secure_page_alloc(struct device *dev);
#endif

View File

@@ -140,7 +140,7 @@ static int vgpu_intr_thread(void *dev_id)
return 0;
}
static void vgpu_remove_support(struct platform_device *dev)
static void vgpu_remove_support(struct device *dev)
{
struct gk20a *g = get_gk20a(dev);
struct gk20a_platform *platform = gk20a_get_platform(dev);
@@ -174,10 +174,10 @@ static void vgpu_remove_support(struct platform_device *dev)
}
}
static int vgpu_init_support(struct platform_device *dev)
static int vgpu_init_support(struct platform_device *pdev)
{
struct resource *r = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct gk20a *g = get_gk20a(dev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct gk20a *g = get_gk20a(&pdev->dev);
int err = 0;
if (!r) {
@@ -186,7 +186,7 @@ static int vgpu_init_support(struct platform_device *dev)
goto fail;
}
g->bar1 = devm_ioremap_resource(&dev->dev, r);
g->bar1 = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(g->bar1)) {
dev_err(dev_from_gk20a(g), "failed to remap gk20a bar1\n");
err = PTR_ERR(g->bar1);
@@ -200,14 +200,13 @@ static int vgpu_init_support(struct platform_device *dev)
return 0;
fail:
vgpu_remove_support(dev);
vgpu_remove_support(&pdev->dev);
return err;
}
int vgpu_pm_prepare_poweroff(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
int ret = 0;
gk20a_dbg_fn("");
@@ -284,7 +283,7 @@ static int vgpu_init_hal(struct gk20a *g)
break;
#endif
default:
gk20a_err(&g->dev->dev, "no support for %x", ver);
gk20a_err(g->dev, "no support for %x", ver);
err = -ENODEV;
break;
}
@@ -294,8 +293,7 @@ static int vgpu_init_hal(struct gk20a *g)
int vgpu_pm_finalize_poweron(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct gk20a *g = get_gk20a(pdev);
struct gk20a *g = get_gk20a(dev);
int err;
gk20a_dbg_fn("");
@@ -342,9 +340,9 @@ done:
return err;
}
static int vgpu_pm_initialise_domain(struct platform_device *pdev)
static int vgpu_pm_initialise_domain(struct device *dev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct gk20a_platform *platform = dev_get_drvdata(dev);
struct dev_power_governor *pm_domain_gov = NULL;
struct gk20a_domain_data *vgpu_pd_data;
struct generic_pm_domain *domain;
@@ -369,17 +367,17 @@ static int vgpu_pm_initialise_domain(struct platform_device *pdev)
domain->dev_ops.save_state = vgpu_pm_prepare_poweroff;
domain->dev_ops.restore_state = vgpu_pm_finalize_poweron;
device_set_wakeup_capable(&pdev->dev, 0);
return pm_genpd_add_device(domain, &pdev->dev);
device_set_wakeup_capable(dev, 0);
return pm_genpd_add_device(domain, dev);
}
static int vgpu_pm_init(struct platform_device *dev)
static int vgpu_pm_init(struct device *dev)
{
int err = 0;
gk20a_dbg_fn("");
pm_runtime_enable(&dev->dev);
pm_runtime_enable(dev);
/* genpd will take care of runtime power management if it is enabled */
if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
@@ -388,14 +386,15 @@ static int vgpu_pm_init(struct platform_device *dev)
return err;
}
int vgpu_probe(struct platform_device *dev)
int vgpu_probe(struct platform_device *pdev)
{
struct gk20a *gk20a;
int err;
struct device *dev = &pdev->dev;
struct gk20a_platform *platform = gk20a_get_platform(dev);
if (!platform) {
dev_err(&dev->dev, "no platform data\n");
dev_err(dev, "no platform data\n");
return -ENODATA;
}
@@ -403,18 +402,18 @@ int vgpu_probe(struct platform_device *dev)
gk20a = kzalloc(sizeof(struct gk20a), GFP_KERNEL);
if (!gk20a) {
dev_err(&dev->dev, "couldn't allocate gk20a support");
dev_err(dev, "couldn't allocate gk20a support");
return -ENOMEM;
}
platform->g = gk20a;
gk20a->dev = dev;
err = gk20a_user_init(dev);
err = gk20a_user_init(dev, INTERFACE_NAME);
if (err)
return err;
vgpu_init_support(dev);
vgpu_init_support(pdev);
vgpu_dbg_init();
init_rwsem(&gk20a->busy_lock);
@@ -424,33 +423,33 @@ int vgpu_probe(struct platform_device *dev)
/* Initialize the platform interface. */
err = platform->probe(dev);
if (err) {
dev_err(&dev->dev, "platform probe failed");
dev_err(dev, "platform probe failed");
return err;
}
err = vgpu_pm_init(dev);
if (err) {
dev_err(&dev->dev, "pm init failed");
dev_err(dev, "pm init failed");
return err;
}
if (platform->late_probe) {
err = platform->late_probe(dev);
if (err) {
dev_err(&dev->dev, "late probe failed");
dev_err(dev, "late probe failed");
return err;
}
}
err = vgpu_comm_init(dev);
err = vgpu_comm_init(pdev);
if (err) {
dev_err(&dev->dev, "failed to init comm interface\n");
dev_err(dev, "failed to init comm interface\n");
return -ENOSYS;
}
platform->virt_handle = vgpu_connect();
if (!platform->virt_handle) {
dev_err(&dev->dev, "failed to connect to server node\n");
dev_err(dev, "failed to connect to server node\n");
vgpu_comm_deinit();
return -ENOSYS;
}
@@ -462,8 +461,8 @@ int vgpu_probe(struct platform_device *dev)
gk20a_debug_init(dev);
/* Set DMA parameters to allow larger sgt lists */
dev->dev.dma_parms = &gk20a->dma_parms;
dma_set_max_seg_size(&dev->dev, UINT_MAX);
dev->dma_parms = &gk20a->dma_parms;
dma_set_max_seg_size(dev, UINT_MAX);
gk20a->gr_idle_timeout_default =
CONFIG_GK20A_DEFAULT_TIMEOUT;
@@ -475,8 +474,9 @@ int vgpu_probe(struct platform_device *dev)
return 0;
}
int vgpu_remove(struct platform_device *dev)
int vgpu_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gk20a *g = get_gk20a(dev);
struct gk20a_domain_data *vgpu_gpd;
gk20a_dbg_fn("");

View File

@@ -1,7 +1,7 @@
/*
* gk20a GPU driver
*
* Copyright (c) 2014, NVIDIA Corporation. All rights reserved.
* Copyright (c) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,11 +26,9 @@ struct platform_device;
#ifdef CONFIG_GK20A
int nvhost_vpr_info_fetch(void);
void gk20a_debug_dump_device(struct platform_device *pdev);
int gk20a_do_idle(void);
int gk20a_do_unidle(void);
#else
static inline void gk20a_debug_dump_device(struct platform_device *pdev) {}
static inline int nvhost_vpr_info_fetch(void)
{
return -ENOSYS;