gpu: nvgpu: changing page_idx from int to u64

page_idx is an element of the struct nvgpu_semaphore_pool, defined in
include/nvgpu/semaphore.h file.

page_idx cannot be negative, so change its type from int to u64 and make
the corresponding changes in the files that use it.

This also fixes MISRA 10.4 violations in these files.

Jira NVGPU-992

Change-Id: Ie9696dab7da9e139bc31563783b422c84144f18b
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1801632
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Sai Nikhil
2018-08-27 12:42:02 +05:30
committed by mobile promotions
parent 7f8226887c
commit 2dd9bb03dd
7 changed files with 22 additions and 22 deletions

View File

@@ -79,7 +79,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
* integer range. This way any buggy comparisons would start to fail * integer range. This way any buggy comparisons would start to fail
* sooner rather than later. * sooner rather than later.
*/ */
for (i = 0; i < PAGE_SIZE * SEMAPHORE_POOL_COUNT; i += 4) { for (i = 0U; i < PAGE_SIZE * SEMAPHORE_POOL_COUNT; i += 4U) {
nvgpu_mem_wr(gk20a, &sea->sea_mem, i, 0xfffffff0); nvgpu_mem_wr(gk20a, &sea->sea_mem, i, 0xfffffff0);
} }
@@ -192,7 +192,7 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
__unlock_sema_sea(sea); __unlock_sema_sea(sea);
gpu_sema_dbg(sea->gk20a, gpu_sema_dbg(sea->gk20a,
"Allocated semaphore pool: page-idx=%d", p->page_idx); "Allocated semaphore pool: page-idx=%llu", p->page_idx);
*pool = p; *pool = p;
return 0; return 0;
@@ -221,7 +221,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
} }
gpu_sema_dbg(pool_to_gk20a(p), gpu_sema_dbg(pool_to_gk20a(p),
"Mapping semaphore pool! (idx=%d)", p->page_idx); "Mapping semaphore pool! (idx=%llu)", p->page_idx);
/* /*
* Take the sea lock so that we don't race with a possible change to the * Take the sea lock so that we don't race with a possible change to the
@@ -243,7 +243,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
p->mapped = true; p->mapped = true;
gpu_sema_dbg(pool_to_gk20a(p), gpu_sema_dbg(pool_to_gk20a(p),
" %d: GPU read-only VA = 0x%llx", " %llu: GPU read-only VA = 0x%llx",
p->page_idx, p->gpu_va_ro); p->page_idx, p->gpu_va_ro);
/* /*
@@ -272,10 +272,10 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
__unlock_sema_sea(p->sema_sea); __unlock_sema_sea(p->sema_sea);
gpu_sema_dbg(pool_to_gk20a(p), gpu_sema_dbg(pool_to_gk20a(p),
" %d: GPU read-write VA = 0x%llx", " %llu: GPU read-write VA = 0x%llx",
p->page_idx, p->gpu_va); p->page_idx, p->gpu_va);
gpu_sema_dbg(pool_to_gk20a(p), gpu_sema_dbg(pool_to_gk20a(p),
" %d: CPU VA = 0x%p", " %llu: CPU VA = 0x%p",
p->page_idx, p->rw_mem.cpu_va); p->page_idx, p->rw_mem.cpu_va);
return 0; return 0;
@@ -285,7 +285,7 @@ fail_free_submem:
fail_unmap: fail_unmap:
nvgpu_gmmu_unmap(vm, &p->sema_sea->sea_mem, p->gpu_va_ro); nvgpu_gmmu_unmap(vm, &p->sema_sea->sea_mem, p->gpu_va_ro);
gpu_sema_dbg(pool_to_gk20a(p), gpu_sema_dbg(pool_to_gk20a(p),
" %d: Failed to map semaphore pool!", p->page_idx); " %llu: Failed to map semaphore pool!", p->page_idx);
fail_unlock: fail_unlock:
__unlock_sema_sea(p->sema_sea); __unlock_sema_sea(p->sema_sea);
return err; return err;
@@ -310,7 +310,7 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,
__unlock_sema_sea(p->sema_sea); __unlock_sema_sea(p->sema_sea);
gpu_sema_dbg(pool_to_gk20a(p), gpu_sema_dbg(pool_to_gk20a(p),
"Unmapped semaphore pool! (idx=%d)", p->page_idx); "Unmapped semaphore pool! (idx=%llu)", p->page_idx);
} }
/* /*
@@ -330,14 +330,14 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
__lock_sema_sea(s); __lock_sema_sea(s);
nvgpu_list_del(&p->pool_list_entry); nvgpu_list_del(&p->pool_list_entry);
clear_bit(p->page_idx, s->pools_alloced); clear_bit((int)p->page_idx, s->pools_alloced);
s->page_count--; s->page_count--;
__unlock_sema_sea(s); __unlock_sema_sea(s);
nvgpu_mutex_destroy(&p->pool_lock); nvgpu_mutex_destroy(&p->pool_lock);
gpu_sema_dbg(pool_to_gk20a(p), gpu_sema_dbg(pool_to_gk20a(p),
"Freed semaphore pool! (idx=%d)", p->page_idx); "Freed semaphore pool! (idx=%llu)", p->page_idx);
nvgpu_kfree(p->sema_sea->gk20a, p); nvgpu_kfree(p->sema_sea->gk20a, p);
} }
@@ -393,7 +393,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
ch->hw_sema = hw_sema; ch->hw_sema = hw_sema;
hw_sema->ch = ch; hw_sema->ch = ch;
hw_sema->location.pool = p; hw_sema->location.pool = p;
hw_sema->location.offset = SEMAPHORE_SIZE * hw_sema_idx; hw_sema->location.offset = SEMAPHORE_SIZE * (u32)hw_sema_idx;
current_value = nvgpu_mem_rd(ch->g, &p->rw_mem, current_value = nvgpu_mem_rd(ch->g, &p->rw_mem,
hw_sema->location.offset); hw_sema->location.offset);
nvgpu_atomic_set(&hw_sema->next_value, current_value); nvgpu_atomic_set(&hw_sema->next_value, current_value);
@@ -590,7 +590,7 @@ bool nvgpu_semaphore_reset(struct nvgpu_semaphore_int *hw_sema)
* more than what we expect to be the max. * more than what we expect to be the max.
*/ */
if (WARN_ON(__nvgpu_semaphore_value_released(threshold + 1, if (WARN_ON(__nvgpu_semaphore_value_released(threshold + 1U,
current_val))) current_val)))
return false; return false;

View File

@@ -366,13 +366,13 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
g->ops.fifo.add_sema_cmd(g, s, va, cmd, off, acquire, wfi); g->ops.fifo.add_sema_cmd(g, s, va, cmd, off, acquire, wfi);
if (acquire) { if (acquire) {
gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u pool=%-3d" gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u pool=%-3llu"
"va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u", "va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
ch, nvgpu_semaphore_get_value(s), ch, nvgpu_semaphore_get_value(s),
s->location.pool->page_idx, va, cmd->gva, s->location.pool->page_idx, va, cmd->gva,
cmd->mem->gpu_va, ob); cmd->mem->gpu_va, ob);
} else { } else {
gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) pool=%-3d" gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) pool=%-3llu"
"va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u", "va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
ch, nvgpu_semaphore_get_value(s), ch, nvgpu_semaphore_get_value(s),
nvgpu_semaphore_read(s), nvgpu_semaphore_read(s),

View File

@@ -301,7 +301,7 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt);
*/ */
int nvgpu_mem_create_from_mem(struct gk20a *g, int nvgpu_mem_create_from_mem(struct gk20a *g,
struct nvgpu_mem *dest, struct nvgpu_mem *src, struct nvgpu_mem *dest, struct nvgpu_mem *src,
int start_page, int nr_pages); u64 start_page, int nr_pages);
/* /*
* Really free a vidmem buffer. There's a fair amount of work involved in * Really free a vidmem buffer. There's a fair amount of work involved in

View File

@@ -41,9 +41,9 @@
* Max number of channels that can be used is 512. This of course needs to be * Max number of channels that can be used is 512. This of course needs to be
* fixed to be dynamic but still fast. * fixed to be dynamic but still fast.
*/ */
#define SEMAPHORE_POOL_COUNT 512 #define SEMAPHORE_POOL_COUNT 512U
#define SEMAPHORE_SIZE 16 #define SEMAPHORE_SIZE 16U
#define SEMAPHORE_SEA_GROWTH_RATE 32 #define SEMAPHORE_SEA_GROWTH_RATE 32U
struct nvgpu_semaphore_sea; struct nvgpu_semaphore_sea;
@@ -84,7 +84,7 @@ struct nvgpu_semaphore_pool {
struct nvgpu_list_node pool_list_entry; /* Node for list of pools. */ struct nvgpu_list_node pool_list_entry; /* Node for list of pools. */
u64 gpu_va; /* GPU access to the pool. */ u64 gpu_va; /* GPU access to the pool. */
u64 gpu_va_ro; /* GPU access to the pool. */ u64 gpu_va_ro; /* GPU access to the pool. */
int page_idx; /* Index into sea bitmap. */ u64 page_idx; /* Index into sea bitmap. */
DECLARE_BITMAP(semas_alloced, PAGE_SIZE / SEMAPHORE_SIZE); DECLARE_BITMAP(semas_alloced, PAGE_SIZE / SEMAPHORE_SIZE);

View File

@@ -135,7 +135,7 @@ u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
*/ */
int nvgpu_mem_create_from_mem(struct gk20a *g, int nvgpu_mem_create_from_mem(struct gk20a *g,
struct nvgpu_mem *dest, struct nvgpu_mem *src, struct nvgpu_mem *dest, struct nvgpu_mem *src,
int start_page, int nr_pages) u64 start_page, int nr_pages)
{ {
int ret; int ret;
u64 start = start_page * PAGE_SIZE; u64 start = start_page * PAGE_SIZE;

View File

@@ -284,7 +284,7 @@ static void gk20a_sync_pt_value_str_for_sema(struct gk20a_sync_pt *pt,
{ {
struct nvgpu_semaphore *s = pt->sema; struct nvgpu_semaphore *s = pt->sema;
snprintf(str, size, "S: pool=%d [v=%u,r_v=%u]", snprintf(str, size, "S: pool=%llu [v=%u,r_v=%u]",
s->location.pool->page_idx, s->location.pool->page_idx,
nvgpu_semaphore_get_value(s), nvgpu_semaphore_get_value(s),
nvgpu_semaphore_read(s)); nvgpu_semaphore_read(s));

View File

@@ -114,7 +114,7 @@ struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g,
int nvgpu_mem_create_from_mem(struct gk20a *g, int nvgpu_mem_create_from_mem(struct gk20a *g,
struct nvgpu_mem *dest, struct nvgpu_mem *src, struct nvgpu_mem *dest, struct nvgpu_mem *src,
int start_page, int nr_pages) u64 start_page, int nr_pages)
{ {
u64 start = start_page * PAGE_SIZE; u64 start = start_page * PAGE_SIZE;
u64 size = nr_pages * PAGE_SIZE; u64 size = nr_pages * PAGE_SIZE;