Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Fix LibC MISRA 17.7 in common
MISRA Rule 17.7 requires the return value of all non-void functions to be used. The fix is either to use the return value or to change the function to return void. This patch contains fixes for all 17.7 violations in standard C functions in common code.

JIRA NVGPU-1036

Change-Id: Id6dea92df371e71b22b54cd7a521fc22812f9b69
Signed-off-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1929899
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by: mobile promotions
parent: 27b47c20bd
commit: cb2a05dd92
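The mechanical pattern applied throughout the diff below is the MISRA C:2012 Rule 17.7 idiom for intentionally ignored return values: cast the call to void so the discard is explicit rather than silent. A minimal standalone sketch of the before/after shape (illustrative only; fill_name is a made-up helper, not code from this change):

#include <stdio.h>

static void fill_name(char *buf, size_t len, int id)
{
	/* Non-compliant: snprintf() returns the number of characters
	 * that would have been written, and silently dropping that
	 * value violates Rule 17.7. */
	snprintf(buf, len, "as_%d", id);

	/* Compliant: the (void) cast documents that the return value
	 * is deliberately discarded. */
	(void) snprintf(buf, len, "as_%d", id);
}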
@@ -73,7 +73,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 		}
 	}
 
-	snprintf(name, sizeof(name), "as_%d", as_share->id);
+	(void) snprintf(name, sizeof(name), "as_%d", as_share->id);
 
 	vm = nvgpu_vm_init(g, big_page_size,
 			U64(big_page_size) << U64(10),
@@ -500,7 +500,7 @@ int boardobjgrp_pmuset_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp)
 	}
 
 	/* Initialize PMU buffer with BOARDOBJGRP data. */
-	memset(pcmd->buf, 0x0, pcmd->fbsize);
+	(void) memset(pcmd->buf, 0x0, pcmd->fbsize);
 	status = pboardobjgrp->pmudatainit(g, pboardobjgrp,
 			pcmd->buf);
 	if (status != 0) {

@@ -559,7 +559,7 @@ int boardobjgrp_pmuset_impl_v1(struct gk20a *g,
 	}
 
 	/* Initialize PMU buffer with BOARDOBJGRP data. */
-	memset(pcmd->buf, 0x0, pcmd->fbsize);
+	(void) memset(pcmd->buf, 0x0, pcmd->fbsize);
 	status = pboardobjgrp->pmudatainit(g, pboardobjgrp,
 			pcmd->buf);
 	if (status != 0) {

@@ -643,7 +643,7 @@ boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp,
 	 * retrieve status
 	 */
 
-	memset(pcmd->buf, 0x0, pcmd->fbsize);
+	(void) memset(pcmd->buf, 0x0, pcmd->fbsize);
 	status = pboardobjgrp->pmuhdrdatainit(g, pboardobjgrp,
 			pcmd->buf, mask);
 	if (status != 0) {

@@ -700,7 +700,7 @@ boardobjgrp_pmugetstatus_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjg
 	 * Initialize PMU buffer with the mask of
 	 * BOARDOBJGRPs for which to retrieve status
 	 */
-	memset(pcmd->buf, 0x0, pcmd->fbsize);
+	(void) memset(pcmd->buf, 0x0, pcmd->fbsize);
 	status = pboardobjgrp->pmuhdrdatainit(g, pboardobjgrp,
 			pcmd->buf, mask);
 	if (status != 0) {

@@ -953,9 +953,9 @@ static int boardobjgrp_pmucmdsend(struct gk20a *g,
 
 	nvgpu_log_info(g, " ");
 
-	memset(&payload, 0, sizeof(payload));
-	memset(&handlerparams, 0, sizeof(handlerparams));
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&payload, 0, sizeof(payload));
+	(void) memset(&handlerparams, 0, sizeof(handlerparams));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = pboardobjgrp->pmu.unitid;
 	cmd.hdr.size = sizeof(struct nv_pmu_boardobj_cmd_grp) +
 			sizeof(struct pmu_hdr);

@@ -1022,7 +1022,8 @@ static int boardobjgrp_pmucmdsend_rpc(struct gk20a *g,
 
 	nvgpu_log_fn(g, " ");
 
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_board_obj_grp_cmd));
+	(void) memset(&rpc, 0,
+		sizeof(struct nv_pmu_rpc_struct_board_obj_grp_cmd));
 
 	rpc.class_id = pboardobjgrp->pmu.classid;
 	rpc.command_id = copy_out ?
@@ -71,7 +71,7 @@ int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
 
 	for (gpc = 0; gpc < gr->gpc_count; gpc++) {
 		for (tpc = 0; tpc < gr->gpc_tpc_count[gpc]; tpc++) {
-			snprintf(stats[gpc][tpc].name,
+			(void) snprintf(stats[gpc][tpc].name,
 					NVGPU_ECC_STAT_NAME_MAX_SIZE,
 					"gpc%d_tpc%d_%s", gpc, tpc, name);
 			nvgpu_ecc_stat_add(g, &stats[gpc][tpc]);

@@ -94,7 +94,7 @@ int nvgpu_ecc_counter_init_per_gpc(struct gk20a *g,
 		return -ENOMEM;
 	}
 	for (gpc = 0; gpc < gr->gpc_count; gpc++) {
-		snprintf(stats[gpc].name, NVGPU_ECC_STAT_NAME_MAX_SIZE,
+		(void) snprintf(stats[gpc].name, NVGPU_ECC_STAT_NAME_MAX_SIZE,
 				"gpc%d_%s", gpc, name);
 		nvgpu_ecc_stat_add(g, &stats[gpc]);
 	}

@@ -151,7 +151,7 @@ int nvgpu_ecc_counter_init_per_lts(struct gk20a *g,
 
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
 		for (lts = 0; lts < gr->slices_per_ltc; lts++) {
-			snprintf(stats[ltc][lts].name,
+			(void) snprintf(stats[ltc][lts].name,
 					NVGPU_ECC_STAT_NAME_MAX_SIZE,
 					"ltc%d_lts%d_%s", ltc, lts, name);
 			nvgpu_ecc_stat_add(g, &stats[ltc][lts]);

@@ -175,7 +175,7 @@ int nvgpu_ecc_counter_init_per_fbpa(struct gk20a *g,
 	}
 
 	for (i = 0; i < num_fbpa; i++) {
-		snprintf(stats[i].name, NVGPU_ECC_STAT_NAME_MAX_SIZE,
+		(void) snprintf(stats[i].name, NVGPU_ECC_STAT_NAME_MAX_SIZE,
 				"fbpa%d_%s", i, name);
 		nvgpu_ecc_stat_add(g, &stats[i]);
 	}
@@ -469,7 +469,7 @@ void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn,
 	nvgpu_mutex_destroy(&queue->mutex);
 
 	/* clear data*/
-	memset(queue, 0, sizeof(struct nvgpu_falcon_queue));
+	(void) memset(queue, 0, sizeof(struct nvgpu_falcon_queue));
 }
 
 int nvgpu_flcn_queue_init(struct nvgpu_falcon *flcn,
@@ -742,7 +742,7 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
 	u32 chid = FIFO_INVAL_CHANNEL_ID;
 	struct channel_gk20a *refch;
 
-	memset(mmfault, 0, sizeof(*mmfault));
+	(void) memset(mmfault, 0, sizeof(*mmfault));
 
 	rd32_val = nvgpu_mem_rd32(g, mem, offset +
 			gmmu_fault_buf_entry_inst_lo_w());

@@ -1086,7 +1086,7 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 	int chid = FIFO_INVAL_CHANNEL_ID;
 	struct channel_gk20a *refch;
 
-	memset(mmfault, 0, sizeof(*mmfault));
+	(void) memset(mmfault, 0, sizeof(*mmfault));
 
 	if ((fault_status & fb_mmu_fault_status_valid_set_f()) == 0U) {
 
@@ -408,7 +408,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 
 	nvgpu_dma_unmap_free(ch_vm, &ch->gpfifo.mem);
 	nvgpu_big_free(g, ch->gpfifo.pipe);
-	memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
+	(void) memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
 
 	channel_gk20a_free_priv_cmdbuf(ch);
 

@@ -495,7 +495,7 @@ unbind:
 	}
 
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
-	memset(ch->ref_actions, 0, sizeof(ch->ref_actions));
+	(void) memset(ch->ref_actions, 0, sizeof(ch->ref_actions));
 	ch->ref_actions_put = 0;
 #endif
 

@@ -804,7 +804,7 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c)
 
 	nvgpu_dma_unmap_free(ch_vm, &q->mem);
 
-	memset(q, 0, sizeof(struct priv_cmd_queue));
+	(void) memset(q, 0, sizeof(struct priv_cmd_queue));
 }
 
 /* allocate a cmd buffer with given size. size is number of u32 entries */

@@ -875,7 +875,7 @@ void free_priv_cmdbuf(struct channel_gk20a *c,
 	struct priv_cmd_entry *e)
 {
 	if (channel_gk20a_is_prealloc_enabled(c)) {
-		memset(e, 0, sizeof(struct priv_cmd_entry));
+		(void) memset(e, 0, sizeof(struct priv_cmd_entry));
 	} else {
 		nvgpu_kfree(c->g, e);
 	}

@@ -926,7 +926,7 @@ void channel_gk20a_free_job(struct channel_gk20a *c,
 	if (channel_gk20a_is_prealloc_enabled(c)) {
 		struct priv_cmd_entry *wait_cmd = job->wait_cmd;
 		struct priv_cmd_entry *incr_cmd = job->incr_cmd;
-		memset(job, 0, sizeof(*job));
+		(void) memset(job, 0, sizeof(*job));
 		job->wait_cmd = wait_cmd;
 		job->incr_cmd = incr_cmd;
 	} else {

@@ -1088,7 +1088,7 @@ clean_up_priv_cmd:
 clean_up_joblist:
 	nvgpu_vfree(c->g, c->joblist.pre_alloc.jobs);
 clean_up:
-	memset(&c->joblist.pre_alloc, 0, sizeof(c->joblist.pre_alloc));
+	(void) memset(&c->joblist.pre_alloc, 0, sizeof(c->joblist.pre_alloc));
 	return err;
 }
 

@@ -1286,7 +1286,7 @@ clean_up_unmap:
 		c->usermode_submit_enabled = false;
 	}
 clean_up:
-	memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
+	(void) memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
 clean_up_idle:
 	if (c->deterministic) {
 		nvgpu_rwsem_down_read(&g->deterministic_busy);

@@ -1741,7 +1741,7 @@ static int __nvgpu_channel_worker_start(struct gk20a *g)
 		return err;
 	}
 
-	snprintf(thread_name, sizeof(thread_name),
+	(void) snprintf(thread_name, sizeof(thread_name),
 			"nvgpu_channel_poll_%s", g->name);
 
 	err = nvgpu_thread_create(&g->channel_worker.poll_task, g,
@@ -133,7 +133,7 @@ static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a,
 		return NULL;
 	}
 
-	memset(new_buddy, 0, sizeof(struct nvgpu_buddy));
+	(void) memset(new_buddy, 0, sizeof(struct nvgpu_buddy));
 
 	new_buddy->parent = parent;
 	new_buddy->start = start;
@@ -131,7 +131,7 @@ void nvgpu_alloc_destroy(struct nvgpu_allocator *a)
 {
 	a->ops->fini(a);
 	nvgpu_mutex_destroy(&a->lock);
-	memset(a, 0, sizeof(*a));
+	(void) memset(a, 0, sizeof(*a));
 }
 
 #ifdef __KERNEL__

@@ -173,7 +173,7 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
 	a->priv = priv;
 	a->debug = dbg;
 
-	strncpy(a->name, name, sizeof(a->name));
+	(void) strncpy(a->name, name, sizeof(a->name));
 	a->name[sizeof(a->name) - 1U] = '\0';
 
 	return 0;
@@ -240,7 +240,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 		u8 *src = (u8 *)mem->cpu_va + offset;
 
 		WARN_ON(mem->cpu_va == NULL);
-		memcpy(dest, src, size);
+		(void) memcpy(dest, src, size);
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_rd_n(g, mem, offset, size, dest);
 	} else {

@@ -281,7 +281,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		u8 *dest = (u8 *)mem->cpu_va + offset;
 
 		WARN_ON(mem->cpu_va == NULL);
-		memcpy(dest, src, size);
+		(void) memcpy(dest, src, size);
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_wr_n(g, mem, offset, size, src);
 		if (!mem->skip_wmb) {

@@ -305,7 +305,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		u8 *dest = (u8 *)mem->cpu_va + offset;
 
 		WARN_ON(mem->cpu_va == NULL);
-		memset(dest, c, size);
+		(void) memset(dest, c, size);
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
 
@@ -294,7 +294,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 		return NULL;
 	}
 
-	memset(slab_page, 0, sizeof(*slab_page));
+	(void) memset(slab_page, 0, sizeof(*slab_page));
 
 	slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size);
 	if (slab_page->page_addr == 0ULL) {

@@ -535,7 +535,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 		goto fail;
 	}
 
-	memset(alloc, 0, sizeof(*alloc));
+	(void) memset(alloc, 0, sizeof(*alloc));
 
 	alloc->length = pages << a->page_shift;
 	alloc->sgt.ops = &page_alloc_sgl_ops;

@@ -1060,7 +1060,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		}
 	}
 
-	snprintf(buddy_name, sizeof(buddy_name), "%s-src", name);
+	(void) snprintf(buddy_name, sizeof(buddy_name), "%s-src", name);
 
 	err = nvgpu_buddy_allocator_init(g, &a->source_allocator, NULL,
 			buddy_name, base, length, blk_size,
@@ -169,7 +169,7 @@ void __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
 
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch)
 {
-	memset(mapping_batch, 0, sizeof(*mapping_batch));
+	(void) memset(mapping_batch, 0, sizeof(*mapping_batch));
 	mapping_batch->gpu_l2_flushed = false;
 	mapping_batch->need_tlb_invalidate = false;
 }

@@ -337,7 +337,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 #endif
 
 	/* Initialize the page table data structures. */
-	strncpy(vm->name, name, min(strlen(name), sizeof(vm->name)));
+	(void) strncpy(vm->name, name, min(strlen(name), sizeof(vm->name)));
 	err = nvgpu_gmmu_init_page_table(vm);
 	if (err != 0) {
 		goto clean_up_vgpu_vm;

@@ -418,7 +418,8 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	 * User VMA.
 	 */
 	if (user_vma_start < user_vma_limit) {
-		snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s", name);
+		(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s",
+				name);
 		err = nvgpu_buddy_allocator_init(g, &vm->user,
 				vm, alloc_name,
 				user_vma_start,

@@ -444,7 +445,8 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	 * User VMA for large pages when a split address range is used.
 	 */
 	if (user_lp_vma_start < user_lp_vma_limit) {
-		snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s_lp", name);
+		(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s_lp",
+				name);
 		err = nvgpu_buddy_allocator_init(g, &vm->user_lp,
 				vm, alloc_name,
 				user_lp_vma_start,

@@ -461,7 +463,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	/*
 	 * Kernel VMA. Must always exist for an address space.
 	 */
-	snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-sys", name);
+	(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-sys", name);
 	err = nvgpu_buddy_allocator_init(g, &vm->kernel,
 			vm, alloc_name,
 			kernel_vma_start,
@@ -136,7 +136,7 @@ int nvgpu_css_enable_snapshot(struct channel_gk20a *ch,
 	data->hw_end = data->hw_snapshot +
 		snapshot_size / sizeof(struct gk20a_cs_snapshot_fifo_entry);
 	data->hw_get = data->hw_snapshot;
-	memset(data->hw_snapshot, 0xff, snapshot_size);
+	(void) memset(data->hw_snapshot, 0xff, snapshot_size);
 
 	g->ops.perf.membuf_reset_streaming(g);
 	g->ops.perf.enable_membuf(g, snapshot_size, data->hw_memdesc.gpu_va,

@@ -149,7 +149,7 @@ int nvgpu_css_enable_snapshot(struct channel_gk20a *ch,
 failed_allocation:
 	if (data->hw_memdesc.size) {
 		nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc);
-		memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
+		(void) memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	}
 	data->hw_snapshot = NULL;
 

@@ -168,7 +168,7 @@ void nvgpu_css_disable_snapshot(struct gr_gk20a *gr)
 	g->ops.perf.disable_membuf(g);
 
 	nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc);
-	memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
+	(void) memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	data->hw_snapshot = NULL;
 
 	nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n");

@@ -320,11 +320,12 @@ next_hw_fifo_entry:
 
 	/* re-set HW buffer after processing taking wrapping into account */
 	if (css->hw_get < src) {
-		memset(css->hw_get, 0xff, (src - css->hw_get) * sizeof(*src));
+		(void) memset(css->hw_get, 0xff,
+				(src - css->hw_get) * sizeof(*src));
 	} else {
-		memset(css->hw_snapshot, 0xff,
+		(void) memset(css->hw_snapshot, 0xff,
 				(src - css->hw_snapshot) * sizeof(*src));
-		memset(css->hw_get, 0xff,
+		(void) memset(css->hw_get, 0xff,
 				(css->hw_end - css->hw_get) * sizeof(*src));
 	}
 	gr->cs_data->hw_get = src;

@@ -408,7 +409,7 @@ static int css_gr_create_client_data(struct gk20a *g,
 	 * guest side
 	 */
 	if (cur->snapshot) {
-		memset(cur->snapshot, 0, sizeof(*cur->snapshot));
+		(void) memset(cur->snapshot, 0, sizeof(*cur->snapshot));
 		cur->snapshot->start = sizeof(*cur->snapshot);
 		/* we should be ensure that can fit all fifo entries here */
 		cur->snapshot->end =
@@ -136,7 +136,7 @@ static int nvgpu_init_task_pg_init(struct gk20a *g)
 
 	nvgpu_cond_init(&pmu->pg_init.wq);
 
-	snprintf(thread_name, sizeof(thread_name),
+	(void) snprintf(thread_name, sizeof(thread_name),
 			"nvgpu_pg_init_%s", g->name);
 
 	err = nvgpu_thread_create(&pmu->pg_init.state_task, g,

@@ -411,7 +411,7 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 		BUG_ON(sizeof(pmu->gid_info.gid) !=
 			sizeof(gid_data.gid));
 
-		memcpy(pmu->gid_info.gid, gid_data.gid,
+		(void) memcpy(pmu->gid_info.gid, gid_data.gid,
 			sizeof(pmu->gid_info.gid));
 	}
 }

@@ -658,7 +658,7 @@ int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
 void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	nvgpu_dma_free(g, mem);
-	memset(mem, 0, sizeof(struct nvgpu_mem));
+	(void) memset(mem, 0, sizeof(struct nvgpu_mem));
 }
 
 struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu)
@@ -33,9 +33,9 @@ void nvgpu_pmu_seq_init(struct nvgpu_pmu *pmu)
 {
 	u32 i;
 
-	memset(pmu->seq, 0,
+	(void) memset(pmu->seq, 0,
 		sizeof(struct pmu_sequence) * PMU_MAX_NUM_SEQUENCES);
-	memset(pmu->pmu_seq_tbl, 0,
+	(void) memset(pmu->pmu_seq_tbl, 0,
 		sizeof(pmu->pmu_seq_tbl));
 
 	for (i = 0; i < PMU_MAX_NUM_SEQUENCES; i++) {

@@ -527,7 +527,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 	} else if (seq->state != PMU_SEQ_STATE_CANCELLED) {
 		if (seq->msg) {
 			if (seq->msg->hdr.size >= msg->hdr.size) {
-				memcpy(seq->msg, msg, msg->hdr.size);
+				(void) memcpy(seq->msg, msg, msg->hdr.size);
 			} else {
 				nvgpu_err(g, "sequence %d msg buffer too small",
 					seq->id);

@@ -559,7 +559,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 	}
 
 	if (seq->out_mem != NULL) {
-		memset(pv->pmu_allocation_get_fb_addr(pmu,
+		(void) memset(pv->pmu_allocation_get_fb_addr(pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)), 0x0,
 			pv->pmu_allocation_get_fb_size(pmu,
 				pv->get_pmu_seq_out_a_ptr(seq)));

@@ -573,7 +573,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 	}
 
 	if (seq->in_mem != NULL) {
-		memset(pv->pmu_allocation_get_fb_addr(pmu,
+		(void) memset(pv->pmu_allocation_get_fb_addr(pmu,
 			pv->get_pmu_seq_in_a_ptr(seq)), 0x0,
 			pv->pmu_allocation_get_fb_size(pmu,
 				pv->get_pmu_seq_in_a_ptr(seq)));

@@ -766,8 +766,9 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 		(struct rpc_handler_payload *)param;
 	struct nv_pmu_rpc_struct_perfmon_query *rpc_param;
 
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
-	memcpy(&rpc, rpc_payload->rpc_buff, sizeof(struct nv_pmu_rpc_header));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
+	(void) memcpy(&rpc, rpc_payload->rpc_buff,
+		sizeof(struct nv_pmu_rpc_header));
 
 	if (rpc.flcn_status) {
 		nvgpu_err(g, " failed RPC response, status=0x%x, func=0x%x",

@@ -922,15 +923,15 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
 	}
 
 	rpc_buff = rpc_payload->rpc_buff;
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
-	memset(&payload, 0, sizeof(struct pmu_payload));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&payload, 0, sizeof(struct pmu_payload));
 
 	cmd.hdr.unit_id = rpc->unit_id;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct nv_pmu_rpc_cmd);
 	cmd.cmd.rpc.cmd_type = NV_PMU_RPC_CMD_ID;
 	cmd.cmd.rpc.flags = rpc->flags;
 
-	memcpy(rpc_buff, rpc, size_rpc);
+	(void) memcpy(rpc_buff, rpc, size_rpc);
 	payload.rpc.prpc = rpc_buff;
 	payload.rpc.size_rpc = size_rpc;
 	payload.rpc.size_scratch = size_scratch;

@@ -954,7 +955,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
 		pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
 			&rpc_payload->complete, true);
 		/* copy back data to caller */
-		memcpy(rpc, rpc_buff, size_rpc);
+		(void) memcpy(rpc, rpc_buff, size_rpc);
 		/* free allocated memory */
 		nvgpu_kfree(g, rpc_payload);
 	}
@@ -84,7 +84,7 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 	}
 
 	/* init PERFMON */
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 
 	cmd.hdr.unit_id = get_perfmon_id(pmu);
 	if (cmd.hdr.unit_id == PMU_UNIT_INVALID) {

@@ -115,7 +115,7 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 	 */
 	pv->perfmon_cmd_init_set_mov_avg(&cmd.cmd.perfmon, 17);
 
-	memset(&payload, 0, sizeof(struct pmu_payload));
+	(void) memset(&payload, 0, sizeof(struct pmu_payload));
 	payload.in.buf = pv->get_perfmon_cntr_ptr(pmu);
 	payload.in.size = pv->get_perfmon_cntr_sz(pmu);
 	payload.in.offset = pv->get_perfmon_cmd_init_offsetofvar(COUNTER_ALLOC);

@@ -140,7 +140,7 @@ int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu)
 	}
 
 	/* PERFMON Start */
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = get_perfmon_id(pmu);
 	if (cmd.hdr.unit_id == PMU_UNIT_INVALID) {
 		nvgpu_err(g, "failed to get perfmon UNIT ID, command skipped");

@@ -159,7 +159,7 @@ int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu)
 			PMU_PERFMON_FLAG_ENABLE_DECREASE |
 			PMU_PERFMON_FLAG_CLEAR_PREV);
 
-	memset(&payload, 0, sizeof(struct pmu_payload));
+	(void) memset(&payload, 0, sizeof(struct pmu_payload));
 
 	/* TBD: PMU_PERFMON_PCT_TO_INC * 100 */
 	pv->set_perfmon_cntr_ut(pmu, 3000); /* 30% */

@@ -190,7 +190,7 @@ int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu)
 	}
 
 	/* PERFMON Stop */
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = get_perfmon_id(pmu);
 	if (cmd.hdr.unit_id == PMU_UNIT_INVALID) {
 		nvgpu_err(g, "failed to get perfmon UNIT ID, command skipped");

@@ -312,7 +312,7 @@ int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu)
 
 	nvgpu_log_fn(g, " ");
 
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_init));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_init));
 	pmu->perfmon_ready = 0;
 
 	g->ops.pmu.pmu_init_perfmon_counter(g);

@@ -332,7 +332,7 @@ int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu)
 	 */
 	rpc.num_counters = 1;
 
-	memset(rpc.counter, 0, sizeof(struct pmu_perfmon_counter_v3) *
+	(void) memset(rpc.counter, 0, sizeof(struct pmu_perfmon_counter_v3) *
 		NV_PMU_PERFMON_MAX_COUNTERS);
 	/* Counter used to count GR busy cycles */
 	rpc.counter[0].index = 3;

@@ -360,7 +360,7 @@ int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu)
 
 	nvgpu_log_fn(g, " ");
 
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_start));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_start));
 	rpc.group_id = PMU_DOMAIN_GROUP_PSTATE;
 	rpc.state_id = pmu->perfmon_state_id[PMU_DOMAIN_GROUP_PSTATE];
 	rpc.flags = PMU_PERFMON_FLAG_ENABLE_INCREASE |

@@ -391,7 +391,7 @@ int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu)
 
 	nvgpu_log_fn(g, " ");
 
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_stop));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_stop));
 	/* PERFMON Stop */
 	nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_STOP\n");
 	PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, STOP, &rpc, 0);

@@ -414,7 +414,7 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu)
 
 	nvgpu_log_fn(g, " ");
 	pmu->perfmon_query = 0;
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_query));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_query));
 	/* PERFMON QUERY */
 	nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_QUERY\n");
 	PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, QUERY, &rpc, 0);
@@ -152,7 +152,7 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
 
 	nvgpu_log_fn(g, " ");
 
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE +
 		sizeof(struct pmu_pg_cmd_elpg_cmd);

@@ -314,7 +314,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 		}
 
 		if (BIT(pg_engine_id) & pg_engine_id_list) {
-			memset(&cmd, 0, sizeof(struct pmu_cmd));
+			(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 			cmd.hdr.unit_id = PMU_UNIT_PG;
 			cmd.hdr.size = PMU_CMD_HDR_SIZE +
 				sizeof(struct pmu_pg_cmd_elpg_cmd);

@@ -400,7 +400,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	}
 
 	/* init ELPG */
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
 	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;

@@ -416,7 +416,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 
 	/* alloc dmem for powergating state log */
 	pmu->stat_dmem_offset[pg_engine_id] = 0;
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
 	cmd.cmd.pg.stat.cmd_type = PMU_PG_CMD_ID_PG_STAT;

@@ -440,7 +440,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
 		pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
 	}
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
 	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;

@@ -534,7 +534,7 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
 
 	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
 
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE +
 		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);

@@ -575,7 +575,7 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
 
 	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
 
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE +
 		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);

@@ -664,7 +664,7 @@ int nvgpu_pmu_ap_send_command(struct gk20a *g,
 	u32 seq;
 	pmu_callback p_callback = NULL;
 
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 
 	/* Copy common members */
 	cmd.hdr.unit_id = PMU_UNIT_PG;

@@ -685,7 +685,7 @@ int nvgpu_pmu_ap_send_command(struct gk20a *g,
 		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL");
 		cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.ctrl_id =
 			p_ap_cmd->init_and_enable_ctrl.ctrl_id;
-		memcpy(
+		(void) memcpy(
 			(void *)&(cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.params),
 			(void *)&(p_ap_cmd->init_and_enable_ctrl.params),
 			sizeof(struct pmu_ap_ctrl_init_params));
@@ -84,10 +84,10 @@ static void sec2_seq_init(struct nvgpu_sec2 *sec2)
 
 	nvgpu_log_fn(sec2->g, " ");
 
-	memset(sec2->seq, 0,
+	(void) memset(sec2->seq, 0,
 		sizeof(struct sec2_sequence) * SEC2_MAX_NUM_SEQUENCES);
 
-	memset(sec2->sec2_seq_tbl, 0, sizeof(sec2->sec2_seq_tbl));
+	(void) memset(sec2->sec2_seq_tbl, 0, sizeof(sec2->sec2_seq_tbl));
 
 	for (i = 0; i < SEC2_MAX_NUM_SEQUENCES; i++) {
 		sec2->seq[i].id = (u8)i;

@@ -218,7 +218,7 @@ static void sec2_load_ls_falcons(struct gk20a *g, struct nvgpu_sec2 *sec2,
 	nvgpu_log_fn(g, " ");
 
 	/* send message to load falcon */
-	memset(&cmd, 0, sizeof(struct nv_flcn_cmd_sec2));
+	(void) memset(&cmd, 0, sizeof(struct nv_flcn_cmd_sec2));
 	cmd.hdr.unit_id = NV_SEC2_UNIT_ACR;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE +
 		sizeof(struct nv_sec2_acr_cmd_bootstrap_falcon);
@@ -41,7 +41,7 @@ void nvgpu_free_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
 	if (nvgpu_mem_is_valid(mem))
 		nvgpu_dma_free(g, mem);
 
-	memset(mem, 0, sizeof(*mem));
+	(void) memset(mem, 0, sizeof(*mem));
 }
 
 void nvgpu_free_sim_support(struct gk20a *g)
@@ -233,7 +233,8 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 	err = issue_rpc_and_wait(g);
 
 	if (err == 0) {
-		memcpy(data, sim_msg_param(g, data_offset), sizeof(u32));
+		(void) memcpy(data, sim_msg_param(g, data_offset),
+			sizeof(u32));
 	} else {
 		*data = 0xffffffff;
 		WARN(1, "issue_rpc_and_wait failed err=%d", err);
@@ -201,7 +201,8 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 	err = issue_rpc_and_wait(g);
 
 	if (err == 0) {
-		memcpy(data, sim_msg_param(g, data_offset + 0xc), sizeof(u32));
+		(void) memcpy(data, sim_msg_param(g, data_offset + 0xc),
+			sizeof(u32));
 	} else {
 		*data = 0xffffffff;
 		WARN(1, "issue_rpc_and_wait failed err=%d", err);
@@ -400,7 +400,8 @@ u32 nvgpu_bios_get_nvlink_config_data(struct gk20a *g)
 		return -EINVAL;
 	}
 
-	memcpy(&config, &g->bios.data[g->bios.nvlink_config_data_offset],
+	(void) memcpy(&config,
+		&g->bios.data[g->bios.nvlink_config_data_offset],
 		sizeof(config));
 
 	if (config.version != NVLINK_CONFIG_DATA_HDR_VER_10) {

@@ -475,7 +476,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
 	struct application_interface_table_hdr_v1 hdr;
 	int i;
 
-	memcpy((u8 *)&hdr, &g->bios.data[offset], sizeof(hdr));
+	nvgpu_memcpy((u8 *)&hdr, &g->bios.data[offset], sizeof(hdr));
 
 	nvgpu_log_fn(g, "appInfoHdr ver %d size %d entrySize %d entryCount %d",
 		hdr.version, hdr.header_size,

@@ -731,7 +732,7 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
 	int i;
 
 	nvgpu_log_fn(g, " ");
-	memcpy(&bit, &g->bios.data[offset], sizeof(bit));
+	(void) memcpy(&bit, &g->bios.data[offset], sizeof(bit));
 
 	nvgpu_log_info(g, "BIT header: %04x %08x", bit.id, bit.signature);
 	nvgpu_log_info(g, "tokens: %d entries * %d bytes",

@@ -739,7 +740,8 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
 
 	offset += bit.header_size;
 	for (i = 0; i < bit.token_entries; i++) {
-		memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token));
+		(void) memcpy(&bit_token, &g->bios.data[offset],
+			sizeof(bit_token));
 
 		nvgpu_log_info(g, "BIT token id %d ptr %d size %d ver %d",
 			bit_token.token_id, bit_token.data_ptr,
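Rule 17.7 allows two remedies, and this patch overwhelmingly takes the cast-to-void route shown above. Where truncation or copy failure actually matters, the other remedy is to consume the return value; a hedged sketch of that alternative (illustrative only; set_alloc_name is a made-up helper, not part of this change):

#include <stdio.h>

static int set_alloc_name(char *buf, size_t len, const char *name)
{
	int n;

	/* Compliant alternative: act on the return value instead of
	 * discarding it, so truncation is detected explicitly. */
	n = snprintf(buf, len, "gk20a_%s", name);
	if (n < 0 || (size_t)n >= len)
		return -1; /* encoding error or truncated output */

	return 0;
}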