Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: common: MISRA rule 15.6 fixes
MISRA rule 15.6 requires that the body of every if/else/loop statement be enclosed in braces. This patch adds braces to single-line if/else/loop bodies in the common directory.

JIRA NVGPU-775

Change-Id: I0dfb38dbf256d49bc0391d889d9fbe5e21da5641
Signed-off-by: Adeel Raza <araza@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2011655
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Scott Long <scottl@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit: d828e013db
Parent: 254253732c
Committed by: mobile promotions
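The change is purely mechanical: wherever a single statement followed an if, else, or loop header without braces, that body is now wrapped in a brace pair so it forms the compound statement MISRA rule 15.6 expects. A minimal sketch of the pattern, illustrative only and not taken from this patch (err and fail are placeholder names):

Before:
	if (err != 0)
		goto fail;

After:
	if (err != 0) {
		goto fail;
	}

The same transformation is applied to else branches and loop bodies throughout the hunks below.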
@@ -2355,11 +2355,13 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 }
 #if defined(CONFIG_GK20A_CYCLE_STATS)
 err = nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
-if (err != 0)
+if (err != 0) {
 goto fail_4;
+}
 err = nvgpu_mutex_init(&c->cs_client_mutex);
-if (err != 0)
+if (err != 0) {
 goto fail_5;
+}
 #endif
 err = nvgpu_mutex_init(&c->dbg_s_lock);
 if (err != 0) {

@@ -346,9 +346,10 @@ void nvgpu_init_mm_ce_context(struct gk20a *g)
 -1,
 -1);

-if (g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)
+if (g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID) {
 nvgpu_err(g,
 "Failed to allocate CE context for vidmem page clearing support");
+}
 }
 #endif
 }

@@ -43,8 +43,9 @@ void nvgpu_vidmem_destroy(struct gk20a *g)
 {
 struct nvgpu_timeout timeout;

-if (!g->ops.fb.get_vidmem_size)
+if (!g->ops.fb.get_vidmem_size) {
 return;
+}

 nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER);

@@ -65,8 +66,9 @@ void nvgpu_vidmem_destroy(struct gk20a *g)
 empty = nvgpu_list_empty(&g->mm.vidmem.clear_list_head);
 nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);

-if (empty)
+if (empty) {
 break;
+}

 nvgpu_msleep(10);
 } while (!nvgpu_timeout_expired(&timeout));
@@ -77,11 +79,13 @@ void nvgpu_vidmem_destroy(struct gk20a *g)
 */
 nvgpu_thread_stop(&g->mm.vidmem.clearing_thread);

-if (nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
+if (nvgpu_alloc_initialized(&g->mm.vidmem.allocator)) {
 nvgpu_alloc_destroy(&g->mm.vidmem.allocator);
+}

-if (nvgpu_alloc_initialized(&g->mm.vidmem.bootstrap_allocator))
+if (nvgpu_alloc_initialized(&g->mm.vidmem.bootstrap_allocator)) {
 nvgpu_alloc_destroy(&g->mm.vidmem.bootstrap_allocator);
+}
 }

 static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
@@ -90,8 +94,9 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
 struct gk20a_fence *gk20a_fence_out = NULL;
 int err = 0;

-if (mm->vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)
+if (mm->vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID) {
 return -EINVAL;
+}

 vidmem_dbg(g, "Clearing all VIDMEM:");

@@ -153,8 +158,9 @@ void nvgpu_vidmem_thread_pause_sync(struct mm_gk20a *mm)
 * released by the clearing thread in case the thread is currently
 * processing work items.
 */
-if (nvgpu_atomic_inc_return(&mm->vidmem.pause_count) == 1)
+if (nvgpu_atomic_inc_return(&mm->vidmem.pause_count) == 1) {
 nvgpu_mutex_acquire(&mm->vidmem.clearing_thread_lock);
+}

 vidmem_dbg(mm->g, "Clearing thread paused; new count=%d",
 nvgpu_atomic_read(&mm->vidmem.pause_count));
@@ -188,8 +194,9 @@ int nvgpu_vidmem_clear_list_enqueue(struct gk20a *g, struct nvgpu_mem *mem)
 * free function which will attempt to enqueue the vidmem into the
 * vidmem clearing thread.
 */
-if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING))
+if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
 return -ENOSYS;
+}

 nvgpu_mutex_acquire(&mm->vidmem.clear_list_mutex);
 nvgpu_list_add_tail(&mem->clear_list_entry,
@@ -265,16 +272,18 @@ static int nvgpu_vidmem_clear_pending_allocs_thr(void *mm_ptr)
 &mm->vidmem.clearing_thread) ||
 !nvgpu_list_empty(&mm->vidmem.clear_list_head),
 0);
-if (ret == -ERESTARTSYS)
+if (ret == -ERESTARTSYS) {
 continue;
+}

 /*
 * Use this lock to implement a pause mechanism. By taking this
 * lock some other code can prevent this thread from processing
 * work items.
 */
-if (!nvgpu_mutex_tryacquire(&mm->vidmem.clearing_thread_lock))
+if (!nvgpu_mutex_tryacquire(&mm->vidmem.clearing_thread_lock)) {
 continue;
+}

 nvgpu_vidmem_clear_pending_allocs(mm);

@@ -297,8 +306,9 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)

 size = g->ops.fb.get_vidmem_size ?
 g->ops.fb.get_vidmem_size(g) : 0;
-if (!size)
+if (!size) {
 return 0;
+}

 vidmem_dbg(g, "init begin");

@@ -342,8 +352,9 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
 mm->vidmem.bootstrap_size = bootstrap_size;

 err = nvgpu_cond_init(&mm->vidmem.clearing_thread_cond);
-if (err != 0)
+if (err != 0) {
 goto fail;
+}

 nvgpu_atomic64_set(&mm->vidmem.bytes_pending, 0);
 nvgpu_init_list_node(&mm->vidmem.clear_list_head);
@@ -364,8 +375,9 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
 err = nvgpu_thread_create(&mm->vidmem.clearing_thread, mm,
 nvgpu_vidmem_clear_pending_allocs_thr,
 "vidmem-clear");
-if (err != 0)
+if (err != 0) {
 goto fail;
+}

 vidmem_dbg(g, "VIDMEM Total: %zu MB", size >> 20);
 vidmem_dbg(g, "VIDMEM Ranges:");
@@ -393,8 +405,9 @@ int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space)

 nvgpu_log_fn(g, " ");

-if (!nvgpu_alloc_initialized(allocator))
+if (!nvgpu_alloc_initialized(allocator)) {
 return -ENOSYS;
+}

 nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
 *space = nvgpu_alloc_space(allocator) +
@@ -411,14 +424,16 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 struct nvgpu_sgl *sgl = NULL;
 int err = 0;

-if (g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)
+if (g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID) {
 return -EINVAL;
+}

 alloc = mem->vidmem_alloc;

 nvgpu_sgt_for_each_sgl(sgl, &alloc->sgt) {
-if (gk20a_last_fence)
+if (gk20a_last_fence) {
 gk20a_fence_put(gk20a_last_fence);
+}

 err = gk20a_ce_execute_ops(g,
 g->mm.vidmem.ce_ctx_id,
@@ -458,9 +473,10 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 !nvgpu_timeout_expired(&timeout));

 gk20a_fence_put(gk20a_last_fence);
-if (err != 0)
+if (err != 0) {
 nvgpu_err(g,
 "fence wait failed for CE execute ops");
+}
 }

 vidmem_dbg(g, " Done");
@@ -472,8 +488,9 @@ static int nvgpu_vidmem_clear_all(struct gk20a *g)
 {
 int err;

-if (g->mm.vidmem.cleared)
+if (g->mm.vidmem.cleared) {
 return 0;
+}

 nvgpu_mutex_acquire(&g->mm.vidmem.first_clear_mutex);
 if (!g->mm.vidmem.cleared) {
@@ -495,12 +512,14 @@ struct nvgpu_vidmem_buf *nvgpu_vidmem_user_alloc(struct gk20a *g, size_t bytes)
 int err;

 err = nvgpu_vidmem_clear_all(g);
-if (err != 0)
+if (err != 0) {
 return ERR_PTR(-ENOMEM);
+}

 buf = nvgpu_kzalloc(g, sizeof(*buf));
-if (!buf)
+if (!buf) {
 return ERR_PTR(-ENOMEM);
+}

 buf->g = g;
 buf->mem = nvgpu_kzalloc(g, sizeof(*buf->mem));
@@ -510,8 +529,9 @@ struct nvgpu_vidmem_buf *nvgpu_vidmem_user_alloc(struct gk20a *g, size_t bytes)
 }

 err = nvgpu_dma_alloc_vid(g, bytes, buf->mem);
-if (err != 0)
+if (err != 0) {
 goto fail;
+}

 /*
 * Alerts the DMA API that when we free this vidmem buf we have to
@@ -533,8 +553,9 @@ void nvgpu_vidmem_buf_free(struct gk20a *g, struct nvgpu_vidmem_buf *buf)
 /*
 * In some error paths it's convenient to be able to "free" a NULL buf.
 */
-if (IS_ERR_OR_NULL(buf))
+if (IS_ERR_OR_NULL(buf)) {
 return;
+}

 nvgpu_dma_free(g, buf->mem);

@@ -527,8 +527,9 @@ clean_up_page_tables:
 nvgpu_pd_free(vm, &vm->pdb);
 clean_up_vgpu_vm:
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
-if (g->is_virtual)
+if (g->is_virtual) {
 vgpu_vm_remove(vm);
+}
 #endif
 return err;
 }
@@ -654,8 +655,9 @@ static void nvgpu_vm_remove(struct vm_gk20a *vm)
 nvgpu_vm_free_entries(vm, &vm->pdb);

 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
-if (g->is_virtual)
+if (g->is_virtual) {
 vgpu_vm_remove(vm);
+}
 #endif

 nvgpu_mutex_release(&vm->update_gmmu_lock);

@@ -1,7 +1,7 @@
 /*
 * Cycle stats snapshots support
 *
-* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -89,12 +89,14 @@ static int css_gr_create_shared_data(struct gr_gk20a *gr)
 {
 struct gk20a_cs_snapshot *data;

-if (gr->cs_data)
+if (gr->cs_data) {
 return 0;
+}

 data = nvgpu_kzalloc(gr->g, sizeof(*data));
-if (!data)
+if (!data) {
 return -ENOMEM;
+}

 nvgpu_init_list_node(&data->clients);
 gr->cs_data = data;
@@ -111,16 +113,19 @@ int nvgpu_css_enable_snapshot(struct channel_gk20a *ch,
 u32 snapshot_size = cs_client->snapshot_size;
 int ret;

-if (data->hw_snapshot)
+if (data->hw_snapshot) {
 return 0;
+}

-if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE)
+if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE) {
 snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE;
+}

 ret = nvgpu_dma_alloc_map_sys(g->mm.pmu.vm, snapshot_size,
 &data->hw_memdesc);
-if (ret != 0)
+if (ret != 0) {
 return ret;
+}

 /* perf output buffer may not cross a 4GB boundary - with a separate */
 /* va smaller than that, it won't but check anyway */
@@ -161,8 +166,9 @@ void nvgpu_css_disable_snapshot(struct gr_gk20a *gr)
 struct gk20a *g = gr->g;
 struct gk20a_cs_snapshot *data = gr->cs_data;

-if (!data->hw_snapshot)
+if (!data->hw_snapshot) {
 return;
+}

 g->ops.perf.membuf_reset_streaming(g);
 g->ops.perf.disable_membuf(g);
@@ -196,8 +202,9 @@ nvgpu_css_gr_search_client(struct nvgpu_list_node *clients, u32 perfmon)

 nvgpu_list_for_each_entry(client, clients,
 gk20a_cs_snapshot_client, list) {
-if (CONTAINS_PERFMON(client, perfmon))
+if (CONTAINS_PERFMON(client, perfmon)) {
 return client;
+}
 }

 return NULL;
@@ -226,19 +233,23 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch)
 struct gk20a_cs_snapshot_fifo_entry *dst_head;
 struct gk20a_cs_snapshot_fifo_entry *dst_tail;

-if (!css)
+if (!css) {
 return -EINVAL;
+}

-if (nvgpu_list_empty(&css->clients))
+if (nvgpu_list_empty(&css->clients)) {
 return -EBADF;
+}

 /* check data available */
 err = g->ops.css.check_data_available(ch, &pending, &hw_overflow);
-if (err != 0)
+if (err != 0) {
 return err;
+}

-if (!pending)
+if (!pending) {
 return 0;
+}

 if (hw_overflow) {
 nvgpu_list_for_each_entry(cur, &css->clients,
@@ -282,8 +293,9 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch)
 dst_tail = CSS_FIFO_ENTRY(dst, dst->end);

 dst_nxt = dst_put + 1;
-if (dst_nxt == dst_tail)
+if (dst_nxt == dst_tail) {
 dst_nxt = dst_head;
+}
 } else {
 /* client not found - skipping this entry */
 nvgpu_warn(g, "cyclestats: orphaned perfmon %u",
@@ -304,19 +316,22 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch)

 dst_put = dst_nxt++;

-if (dst_nxt == dst_tail)
+if (dst_nxt == dst_tail) {
 dst_nxt = dst_head;
+}
 }

 next_hw_fifo_entry:
 sid++;
-if (++src >= css->hw_end)
+if (++src >= css->hw_end) {
 src = css->hw_snapshot;
+}
 }

 /* update client put pointer if necessary */
-if (cur && dst)
+if (cur && dst) {
 dst->put = (char *)dst_put - (char *)dst;
+}

 /* re-set HW buffer after processing taking wrapping into account */
 if (css->hw_get < src) {
@@ -330,8 +345,9 @@ next_hw_fifo_entry:
 }
 gr->cs_data->hw_get = src;

-if (g->ops.css.set_handled_snapshots)
+if (g->ops.css.set_handled_snapshots) {
 g->ops.css.set_handled_snapshots(g, sid);
+}

 if (completed != sid) {
 /* not all entries proceed correctly. some of problems */
@@ -352,10 +368,11 @@ u32 nvgpu_css_allocate_perfmon_ids(struct gk20a_cs_snapshot *data,

 f = bitmap_find_next_zero_area(pids, CSS_MAX_PERFMON_IDS,
 CSS_FIRST_PERFMON_ID, count, 0);
-if (f > CSS_MAX_PERFMON_IDS)
+if (f > CSS_MAX_PERFMON_IDS) {
 f = 0;
-else
+} else {
 bitmap_set(pids, f, count);
+}

 return f;
 }
@@ -383,14 +400,16 @@ static int css_gr_free_client_data(struct gk20a *g,
 {
 int ret = 0;

-if (client->list.next && client->list.prev)
+if (client->list.next && client->list.prev) {
 nvgpu_list_del(&client->list);
+}

 if (client->perfmon_start && client->perfmon_count
 && g->ops.css.release_perfmon_ids) {
 if (client->perfmon_count != g->ops.css.release_perfmon_ids(data,
-client->perfmon_start, client->perfmon_count))
+client->perfmon_start, client->perfmon_count)) {
 ret = -EINVAL;
+}
 }

 return ret;
@@ -428,8 +447,9 @@ static int css_gr_create_client_data(struct gk20a *g,
 if (cur->perfmon_count && g->ops.css.allocate_perfmon_ids) {
 cur->perfmon_start = g->ops.css.allocate_perfmon_ids(data,
 cur->perfmon_count);
-if (!cur->perfmon_start)
+if (!cur->perfmon_start) {
 return -ENOENT;
+}
 }

 nvgpu_list_add_tail(&cur->list, &data->clients);
@@ -448,12 +468,14 @@ int gr_gk20a_css_attach(struct channel_gk20a *ch,
 struct gr_gk20a *gr;

 /* we must have a placeholder to store pointer to client structure */
-if (!cs_client)
+if (!cs_client) {
 return -EINVAL;
+}

 if (!perfmon_count ||
-perfmon_count > CSS_MAX_PERFMON_IDS - CSS_FIRST_PERFMON_ID)
+perfmon_count > CSS_MAX_PERFMON_IDS - CSS_FIRST_PERFMON_ID) {
 return -EINVAL;
+}

 nvgpu_speculation_barrier();

@@ -462,21 +484,25 @@ int gr_gk20a_css_attach(struct channel_gk20a *ch,
 nvgpu_mutex_acquire(&gr->cs_lock);

 ret = css_gr_create_shared_data(gr);
-if (ret != 0)
+if (ret != 0) {
 goto failed;
+}

 ret = css_gr_create_client_data(g, gr->cs_data,
 perfmon_count,
 cs_client);
-if (ret != 0)
+if (ret != 0) {
 goto failed;
+}

 ret = g->ops.css.enable_snapshot(ch, cs_client);
-if (ret != 0)
+if (ret != 0) {
 goto failed;
+}

-if (perfmon_start)
+if (perfmon_start) {
 *perfmon_start = cs_client->perfmon_start;
+}

 nvgpu_mutex_release(&gr->cs_lock);

@@ -489,13 +515,15 @@ failed:
 cs_client = NULL;
 }

-if (nvgpu_list_empty(&gr->cs_data->clients))
+if (nvgpu_list_empty(&gr->cs_data->clients)) {
 css_gr_free_shared_data(gr);
+}
 }
 nvgpu_mutex_release(&gr->cs_lock);

-if (perfmon_start)
+if (perfmon_start) {
 *perfmon_start = 0;
+}

 return ret;
 }
@@ -507,20 +535,23 @@ int gr_gk20a_css_detach(struct channel_gk20a *ch,
 struct gk20a *g = ch->g;
 struct gr_gk20a *gr;

-if (!cs_client)
+if (!cs_client) {
 return -EINVAL;
+}

 gr = &g->gr;
 nvgpu_mutex_acquire(&gr->cs_lock);
 if (gr->cs_data) {
 struct gk20a_cs_snapshot *data = gr->cs_data;

-if (g->ops.css.detach_snapshot)
+if (g->ops.css.detach_snapshot) {
 g->ops.css.detach_snapshot(ch, cs_client);
+}

 ret = css_gr_free_client_data(g, data, cs_client);
-if (nvgpu_list_empty(&data->clients))
+if (nvgpu_list_empty(&data->clients)) {
 css_gr_free_shared_data(gr);
+}
 } else {
 ret = -EBADF;
 }
@@ -536,8 +567,9 @@ int gr_gk20a_css_flush(struct channel_gk20a *ch,
 struct gk20a *g = ch->g;
 struct gr_gk20a *gr;

-if (!cs_client)
+if (!cs_client) {
 return -EINVAL;
+}

 gr = &g->gr;
 nvgpu_mutex_acquire(&gr->cs_lock);
@@ -565,12 +597,14 @@ int nvgpu_css_check_data_available(struct channel_gk20a *ch, u32 *pending,
 struct gr_gk20a *gr = &g->gr;
 struct gk20a_cs_snapshot *css = gr->cs_data;

-if (!css->hw_snapshot)
+if (!css->hw_snapshot) {
 return -EINVAL;
+}

 *pending = nvgpu_css_get_pending_snapshots(g);
-if (!*pending)
+if (!*pending) {
 return 0;
+}

 *hw_overflow = nvgpu_css_get_overflow_status(g);
 return 0;

@@ -46,8 +46,9 @@ int nvgpu_clk_notification_queue_alloc(struct gk20a *g,
 u32 events_number) {
 queue->notifications = nvgpu_kcalloc(g, events_number,
 sizeof(struct nvgpu_clk_notification));
-if (!queue->notifications)
+if (!queue->notifications) {
 return -ENOMEM;
+}
 queue->size = events_number;

 nvgpu_atomic_set(&queue->head, 0);
@@ -200,10 +201,11 @@ int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb)
 clk_cur = table->gpc2clk_points[j].gpc_mhz;

 if ((clk_cur >= p0_info->min_mhz) &&
-(clk_cur <= p0_info->max_mhz))
+(clk_cur <= p0_info->max_mhz)) {
 VF_POINT_SET_PSTATE_SUPPORTED(
 &table->gpc2clk_points[j],
 CTRL_PERF_PSTATE_P0);
+}

 j++;
 num_points++;
@@ -217,9 +219,10 @@ int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb)

 exit_vf_table:

-if (status < 0)
+if (status < 0) {
 nvgpu_clk_arb_set_global_alarm(g,
 EVENT(ALARM_VF_TABLE_UPDATE_FAILED));
+}
 nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item);

 return status;
@@ -280,8 +283,9 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 alarm_detected =
 NV_ACCESS_ONCE(notification->notification);

-if (!(enabled_mask & alarm_detected))
+if (!(enabled_mask & alarm_detected)) {
 continue;
+}

 queue_index++;
 dev->queue.notifications[
@@ -314,8 +318,9 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 }

 /* Check if there is a new VF update */
-if (queue_alarm_mask & EVENT(VF_UPDATE))
+if (queue_alarm_mask & EVENT(VF_UPDATE)) {
 poll_mask |= (NVGPU_POLLIN | NVGPU_POLLRDNORM);
+}

 /* Notify sticky alarms that were not reported on previous run*/
 new_alarms_reported = (queue_alarm_mask |
@@ -323,8 +328,9 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,

 if (new_alarms_reported & ~LOCAL_ALARM_MASK) {
 /* check that we are not re-reporting */
-if (new_alarms_reported & EVENT(ALARM_GPU_LOST))
+if (new_alarms_reported & EVENT(ALARM_GPU_LOST)) {
 poll_mask |= NVGPU_POLLHUP;
+}

 poll_mask |= (NVGPU_POLLIN | NVGPU_POLLPRI);
 /* On next run do not report global alarms that were already
@@ -374,10 +380,11 @@ static void nvgpu_clk_arb_worker_process_item(

 clk_arb_dbg(g, " ");

-if (work_item->item_type == CLK_ARB_WORK_UPDATE_VF_TABLE)
+if (work_item->item_type == CLK_ARB_WORK_UPDATE_VF_TABLE) {
 nvgpu_clk_arb_run_vf_table_cb(work_item->arb);
-else if (work_item->item_type == CLK_ARB_WORK_UPDATE_ARB)
+} else if (work_item->item_type == CLK_ARB_WORK_UPDATE_ARB) {
 g->ops.clk_arb.clk_arb_run_arbiter_cb(work_item->arb);
+}
 }

 /**
@@ -472,8 +479,9 @@ static int nvgpu_clk_arb_poll_worker(void *arg)
 break;
 }

-if (ret == 0)
+if (ret == 0) {
 nvgpu_clk_arb_worker_process(g, &get);
+}
 }
 return 0;
 }
@@ -483,8 +491,9 @@ static int __nvgpu_clk_arb_worker_start(struct gk20a *g)
 char thread_name[64];
 int err = 0;

-if (nvgpu_thread_is_running(&g->clk_arb_worker.poll_task))
+if (nvgpu_thread_is_running(&g->clk_arb_worker.poll_task)) {
 return err;
+}

 nvgpu_mutex_acquire(&g->clk_arb_worker.start_lock);

@@ -557,8 +566,9 @@ int nvgpu_clk_arb_worker_init(struct gk20a *g)
 nvgpu_init_list_node(&g->clk_arb_worker.items);
 nvgpu_spinlock_init(&g->clk_arb_worker.items_lock);
 err = nvgpu_mutex_init(&g->clk_arb_worker.start_lock);
-if (err != 0)
+if (err != 0) {
 goto error_check;
+}

 err = __nvgpu_clk_arb_worker_start(g);
 error_check:
@@ -642,8 +652,9 @@ int nvgpu_clk_arb_init_session(struct gk20a *g,
 }

 session = nvgpu_kzalloc(g, sizeof(struct nvgpu_clk_session));
-if (!session)
+if (!session) {
 return -ENOMEM;
+}
 session->g = g;

 nvgpu_ref_init(&session->refcount);
@@ -730,8 +741,9 @@ void nvgpu_clk_arb_release_session(struct gk20a *g,

 session->zombie = true;
 nvgpu_ref_put(&session->refcount, nvgpu_clk_arb_free_session);
-if (arb)
+if (arb) {
 nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item);
+}
 }

 void nvgpu_clk_arb_schedule_vf_table_update(struct gk20a *g)
@@ -753,10 +765,11 @@ void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock)
 {
 struct nvgpu_clk_arb *arb = g->clk_arb;

-if (lock)
+if (lock) {
 nvgpu_mutex_acquire(&arb->pstate_lock);
-else
+} else {
 nvgpu_mutex_release(&arb->pstate_lock);
+}
 }

 bool nvgpu_clk_arb_is_valid_domain(struct gk20a *g, u32 api_domain)
@@ -805,8 +818,9 @@ int nvgpu_clk_arb_get_arbiter_clk_f_points(struct gk20a *g,
 case NVGPU_CLK_DOMAIN_GPCCLK:
 err = g->ops.clk_arb.get_arbiter_f_points(g,
 CTRL_CLK_DOMAIN_GPCCLK, max_points, fpoints);
-if (err || !fpoints)
+if (err || !fpoints) {
 return err;
+}
 return 0;
 case NVGPU_CLK_DOMAIN_MCLK:
 return g->ops.clk_arb.get_arbiter_f_points(g,

@@ -39,8 +39,9 @@ int nvgpu_alloc_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)

 void nvgpu_free_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
 {
-if (nvgpu_mem_is_valid(mem))
+if (nvgpu_mem_is_valid(mem)) {
 nvgpu_dma_free(g, mem);
+}

 (void) memset(mem, 0, sizeof(*mem));
 }
@@ -54,8 +55,9 @@ void nvgpu_free_sim_support(struct gk20a *g)

 void nvgpu_remove_sim_support(struct gk20a *g)
 {
-if (g->sim)
+if (g->sim) {
 nvgpu_free_sim_support(g);
+}
 }

 static inline u32 sim_msg_header_size(void)
@@ -246,8 +248,9 @@ static void nvgpu_sim_init_late(struct gk20a *g)
 {
 u64 phys;

-if (!g->sim)
+if (!g->sim) {
 return;
+}

 nvgpu_info(g, "sim init late");
 /*mark send ring invalid*/
@@ -291,16 +294,18 @@ int nvgpu_init_sim_support(struct gk20a *g)
 {
 int err = -ENOMEM;

-if (!g->sim)
+if (!g->sim) {
 return 0;
+}

 /* allocate sim event/msg buffers */
 err = nvgpu_alloc_sim_buffer(g, &g->sim->send_bfr);
 err = err || nvgpu_alloc_sim_buffer(g, &g->sim->recv_bfr);
 err = err || nvgpu_alloc_sim_buffer(g, &g->sim->msg_bfr);

-if (err != 0)
+if (err != 0) {
 goto fail;
+}

 g->sim->sim_init_late = nvgpu_sim_init_late;
 g->sim->remove_support = nvgpu_remove_sim_support;

@@ -215,8 +215,9 @@ static void nvgpu_sim_init_late(struct gk20a *g)
 {
 u64 phys;

-if (!g->sim)
+if (!g->sim) {
 return;
+}

 nvgpu_info(g, "sim init late pci");
 /* mark send ring invalid */
@@ -258,16 +259,18 @@ int nvgpu_init_sim_support_pci(struct gk20a *g)
 {
 int err = -ENOMEM;

-if(!g->sim)
+if(!g->sim) {
 return 0;
+}

 /* allocate sim event/msg buffers */
 err = nvgpu_alloc_sim_buffer(g, &g->sim->send_bfr);
 err = err || nvgpu_alloc_sim_buffer(g, &g->sim->recv_bfr);
 err = err || nvgpu_alloc_sim_buffer(g, &g->sim->msg_bfr);

-if (err != 0)
+if (err != 0) {
 goto fail;
+}

 g->sim->sim_init_late = nvgpu_sim_init_late;
 g->sim->remove_support = nvgpu_remove_sim_support;

@@ -1,7 +1,7 @@
 /*
 * GK20A syncpt cmdbuf
 *
-* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -85,10 +85,11 @@ void gk20a_add_syncpt_incr_cmd(struct gk20a *g,

 u32 gk20a_get_syncpt_incr_cmd_size(bool wfi_cmd)
 {
-if (wfi_cmd)
+if (wfi_cmd) {
 return 8U;
-else
+} else {
 return 6U;
+}
 }

 void gk20a_free_syncpt_buf(struct channel_gk20a *c,
@@ -101,4 +102,4 @@ int gk20a_alloc_syncpt_buf(struct channel_gk20a *c,
 u32 syncpt_id, struct nvgpu_mem *syncpt_buf)
 {
 return 0;
-}
+}

@@ -1,7 +1,7 @@
 /*
 * GV11B syncpt cmdbuf
 *
-* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -37,8 +37,9 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
 {
 struct gk20a *g = gk20a_from_vm(vm);

-if (vm->syncpt_ro_map_gpu_va)
+if (vm->syncpt_ro_map_gpu_va) {
 return 0;
+}

 vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map(vm,
 &g->syncpt_mem, g->syncpt_unit_size,
@@ -68,8 +69,9 @@ int gv11b_alloc_syncpt_buf(struct channel_gk20a *c,
 nvgpu_mutex_acquire(&c->vm->syncpt_ro_map_lock);
 err = set_syncpt_ro_map_gpu_va_locked(c->vm);
 nvgpu_mutex_release(&c->vm->syncpt_ro_map_lock);
-if (err != 0)
+if (err != 0) {
 return err;
+}

 nr_pages = DIV_ROUND_UP(g->syncpt_size, PAGE_SIZE);
 nvgpu_mem_create_from_phys(g, syncpt_buf,
@@ -104,8 +106,9 @@ int gv11b_get_sync_ro_map(struct vm_gk20a *vm,
 nvgpu_mutex_acquire(&vm->syncpt_ro_map_lock);
 err = set_syncpt_ro_map_gpu_va_locked(vm);
 nvgpu_mutex_release(&vm->syncpt_ro_map_lock);
-if (err != 0)
+if (err != 0) {
 return err;
+}

 *base_gpuva = vm->syncpt_ro_map_gpu_va;
 *sync_size = g->syncpt_size;