gpu: nvgpu: Always do full buffer compbits allocs

Remove the parameter 'lines' from gk20a_alloc_or_get_comptags() and
nvgpu_ctag_buffer_info. We always do full-buffer allocations anyway,
so this simplifies the code a bit.

Bug 1902982

Change-Id: Iacfc9cdba8cb75b31a7d44b175660252e09d605d
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1597131
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Sami Kiminki
Date: 2017-11-13 14:32:29 +02:00
Committed by: mobile promotions
Parent: 23396c58db
Commit: 1f28b429a2
3 changed files with 17 additions and 20 deletions
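
In short, gk20a_alloc_or_get_comptags() now derives the comptag line count
from the buffer size itself instead of taking it from the caller. A condensed
C sketch of the resulting interface, assembled from the hunks below (not the
verbatim driver sources):

    /* New prototype: the 'lines' argument is gone. */
    int gk20a_alloc_or_get_comptags(struct gk20a *g,
                                    struct nvgpu_os_buffer *buf,
                                    struct gk20a_comptag_allocator *allocator,
                                    struct gk20a_comptags *comptags);

    /* Inside the helper, the line count now always covers the whole buffer: */
    ctag_granularity = g->ops.fb.compression_page_size(g);
    lines = DIV_ROUND_UP_ULL(buf->dmabuf->size, ctag_granularity);

    /* Callers such as nvgpu_vm_map() simply drop the line-count argument: */
    err = gk20a_alloc_or_get_comptags(g, os_buf, &g->gr.comp_tags, &comptags);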


@@ -20,6 +20,7 @@
#include <nvgpu/linux/vm.h>
#include "gk20a/gk20a.h"
#include "dmabuf.h"
void gk20a_get_comptags(struct nvgpu_os_buffer *buf,
@@ -42,26 +43,33 @@ void gk20a_get_comptags(struct nvgpu_os_buffer *buf,
 int gk20a_alloc_or_get_comptags(struct gk20a *g,
 				struct nvgpu_os_buffer *buf,
 				struct gk20a_comptag_allocator *allocator,
-				u32 lines,
 				struct gk20a_comptags *comptags)
 {
 	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(buf->dmabuf,
 							     buf->dev);
 	u32 offset;
 	int err;
+	unsigned int ctag_granularity;
+	u32 lines;
 
 	if (!priv)
 		return -ENOSYS;
 
-	if (!lines)
-		return -EINVAL;
-
 	if (priv->comptags.allocated) {
-		/* already allocated */
+		/*
+		 * already allocated
+		 */
 		*comptags = priv->comptags;
 		return 0;
 	}
 
+	ctag_granularity = g->ops.fb.compression_page_size(g);
+	lines = DIV_ROUND_UP_ULL(buf->dmabuf->size, ctag_granularity);
+
+	/* 0-sized buffer? Shouldn't occur, but let's check anyways. */
+	if (lines < 1)
+		return -EINVAL;
+
 	/* store the allocator so we can use it when we free the ctags */
 	priv->comptag_allocator = allocator;
 	err = gk20a_comptaglines_alloc(allocator, &offset, lines);


@@ -774,7 +774,8 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 
 	binfo.flags = flags;
 	binfo.size = nvgpu_os_buf_get_size(os_buf);
-	binfo.compr_kind = compr_kind;
+	binfo.compr_kind = (vm->enable_ctag && compr_kind != NV_KIND_INVALID ?
+			    compr_kind : NV_KIND_INVALID);
 	binfo.incompr_kind = incompr_kind;
 
 	if (compr_kind != NV_KIND_INVALID)
@@ -847,13 +848,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 		goto clean_up;
 	}
 
-	/*
-	 * bar1 and pmu VMs don't need ctags.
-	 */
-	if (!vm->enable_ctag)
-		binfo.ctag_lines = 0;
-
-	if (binfo.ctag_lines) {
+	if (binfo.compr_kind != NV_KIND_INVALID) {
 		struct gk20a_comptags comptags = { 0 };
 
 		/*
@@ -861,7 +856,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 		 */
 		err = gk20a_alloc_or_get_comptags(g, os_buf,
 						  &g->gr.comp_tags,
-						  binfo.ctag_lines, &comptags);
+						  &comptags);
 		if (err) {
 			/*
 			 * This is an irrecoverable failure and we need to
@@ -1116,7 +1111,6 @@ static int nvgpu_vm_compute_compression(struct vm_gk20a *vm,
 {
 	bool kind_compressible = (binfo->compr_kind != NV_KIND_INVALID);
 	struct gk20a *g = gk20a_from_vm(vm);
-	int ctag_granularity = g->ops.fb.compression_page_size(g);
 
 	if (kind_compressible &&
 	    vm->gmmu_page_sizes[binfo->pgsz_idx] <
@@ -1138,9 +1132,5 @@ static int nvgpu_vm_compute_compression(struct vm_gk20a *vm,
 		}
 	}
 
-	if (kind_compressible)
-		binfo->ctag_lines = DIV_ROUND_UP_ULL(binfo->size,
-						     ctag_granularity);
-
 	return 0;
 }


@@ -71,7 +71,6 @@ void gk20a_comptaglines_free(struct gk20a_comptag_allocator *allocator,
 int gk20a_alloc_or_get_comptags(struct gk20a *g,
 				struct nvgpu_os_buffer *buf,
 				struct gk20a_comptag_allocator *allocator,
-				u32 lines,
 				struct gk20a_comptags *comptags);
 void gk20a_get_comptags(struct nvgpu_os_buffer *buf,
 			struct gk20a_comptags *comptags);