diff --git a/drivers/gpu/nvgpu/common/as.c b/drivers/gpu/nvgpu/common/as.c
index 8599da288..1b5e2b563 100644
--- a/drivers/gpu/nvgpu/common/as.c
+++ b/drivers/gpu/nvgpu/common/as.c
@@ -73,7 +73,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 		}
 	}
-	snprintf(name, sizeof(name), "as_%d", as_share->id);
+	(void) snprintf(name, sizeof(name), "as_%d", as_share->id);
 	vm = nvgpu_vm_init(g, big_page_size,
 			U64(big_page_size) << U64(10),
diff --git a/drivers/gpu/nvgpu/common/boardobj/boardobjgrp.c b/drivers/gpu/nvgpu/common/boardobj/boardobjgrp.c
index b2f140723..3bcda968a 100644
--- a/drivers/gpu/nvgpu/common/boardobj/boardobjgrp.c
+++ b/drivers/gpu/nvgpu/common/boardobj/boardobjgrp.c
@@ -500,7 +500,7 @@ int boardobjgrp_pmuset_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp)
 	}
 	/* Initialize PMU buffer with BOARDOBJGRP data. */
-	memset(pcmd->buf, 0x0, pcmd->fbsize);
+	(void) memset(pcmd->buf, 0x0, pcmd->fbsize);
 	status = pboardobjgrp->pmudatainit(g, pboardobjgrp, pcmd->buf);
 	if (status != 0) {
@@ -559,7 +559,7 @@ int boardobjgrp_pmuset_impl_v1(struct gk20a *g,
 	}
 	/* Initialize PMU buffer with BOARDOBJGRP data. */
-	memset(pcmd->buf, 0x0, pcmd->fbsize);
+	(void) memset(pcmd->buf, 0x0, pcmd->fbsize);
 	status = pboardobjgrp->pmudatainit(g, pboardobjgrp, pcmd->buf);
 	if (status != 0) {
@@ -643,7 +643,7 @@ boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp,
 	 * retrieve status
 	 */
-	memset(pcmd->buf, 0x0, pcmd->fbsize);
+	(void) memset(pcmd->buf, 0x0, pcmd->fbsize);
 	status = pboardobjgrp->pmuhdrdatainit(g, pboardobjgrp, pcmd->buf, mask);
 	if (status != 0) {
@@ -700,7 +700,7 @@ boardobjgrp_pmugetstatus_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjg
 	 * Initialize PMU buffer with the mask of
 	 * BOARDOBJGRPs for which to retrieve status
 	 */
-	memset(pcmd->buf, 0x0, pcmd->fbsize);
+	(void) memset(pcmd->buf, 0x0, pcmd->fbsize);
 	status = pboardobjgrp->pmuhdrdatainit(g, pboardobjgrp, pcmd->buf, mask);
 	if (status != 0) {
@@ -953,9 +953,9 @@ static int boardobjgrp_pmucmdsend(struct gk20a *g,
 	nvgpu_log_info(g, " ");
-	memset(&payload, 0, sizeof(payload));
-	memset(&handlerparams, 0, sizeof(handlerparams));
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&payload, 0, sizeof(payload));
+	(void) memset(&handlerparams, 0, sizeof(handlerparams));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = pboardobjgrp->pmu.unitid;
 	cmd.hdr.size = sizeof(struct nv_pmu_boardobj_cmd_grp) +
 			sizeof(struct pmu_hdr);
@@ -1022,7 +1022,8 @@ static int boardobjgrp_pmucmdsend_rpc(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_board_obj_grp_cmd));
+	(void) memset(&rpc, 0,
+		sizeof(struct nv_pmu_rpc_struct_board_obj_grp_cmd));
 	rpc.class_id = pboardobjgrp->pmu.classid;
 	rpc.command_id = copy_out ?
diff --git a/drivers/gpu/nvgpu/common/ecc.c b/drivers/gpu/nvgpu/common/ecc.c
index 81ad37ae3..47ddfae16 100644
--- a/drivers/gpu/nvgpu/common/ecc.c
+++ b/drivers/gpu/nvgpu/common/ecc.c
@@ -71,7 +71,7 @@ int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
 	for (gpc = 0; gpc < gr->gpc_count; gpc++) {
 		for (tpc = 0; tpc < gr->gpc_tpc_count[gpc]; tpc++) {
-			snprintf(stats[gpc][tpc].name,
+			(void) snprintf(stats[gpc][tpc].name,
 				NVGPU_ECC_STAT_NAME_MAX_SIZE,
 				"gpc%d_tpc%d_%s", gpc, tpc, name);
 			nvgpu_ecc_stat_add(g, &stats[gpc][tpc]);
@@ -94,7 +94,7 @@ int nvgpu_ecc_counter_init_per_gpc(struct gk20a *g,
 		return -ENOMEM;
 	}
 	for (gpc = 0; gpc < gr->gpc_count; gpc++) {
-		snprintf(stats[gpc].name, NVGPU_ECC_STAT_NAME_MAX_SIZE,
+		(void) snprintf(stats[gpc].name, NVGPU_ECC_STAT_NAME_MAX_SIZE,
 			"gpc%d_%s", gpc, name);
 		nvgpu_ecc_stat_add(g, &stats[gpc]);
 	}
@@ -151,7 +151,7 @@ int nvgpu_ecc_counter_init_per_lts(struct gk20a *g,
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
 		for (lts = 0; lts < gr->slices_per_ltc; lts++) {
-			snprintf(stats[ltc][lts].name,
+			(void) snprintf(stats[ltc][lts].name,
 				NVGPU_ECC_STAT_NAME_MAX_SIZE,
 				"ltc%d_lts%d_%s", ltc, lts, name);
 			nvgpu_ecc_stat_add(g, &stats[ltc][lts]);
@@ -175,7 +175,7 @@ int nvgpu_ecc_counter_init_per_fbpa(struct gk20a *g,
 	}
 	for (i = 0; i < num_fbpa; i++) {
-		snprintf(stats[i].name, NVGPU_ECC_STAT_NAME_MAX_SIZE,
+		(void) snprintf(stats[i].name, NVGPU_ECC_STAT_NAME_MAX_SIZE,
 			"fbpa%d_%s", i, name);
 		nvgpu_ecc_stat_add(g, &stats[i]);
 	}
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
index 1d0a7a6cb..084136907 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
@@ -469,7 +469,7 @@ void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn,
 	nvgpu_mutex_destroy(&queue->mutex);
 	/* clear data*/
-	memset(queue, 0, sizeof(struct nvgpu_falcon_queue));
+	(void) memset(queue, 0, sizeof(struct nvgpu_falcon_queue));
 }
 int nvgpu_flcn_queue_init(struct nvgpu_falcon *flcn,
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
index 3f99dc3e9..8df11ff83 100644
--- a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
@@ -742,7 +742,7 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
 	u32 chid = FIFO_INVAL_CHANNEL_ID;
 	struct channel_gk20a *refch;
-	memset(mmfault, 0, sizeof(*mmfault));
+	(void) memset(mmfault, 0, sizeof(*mmfault));
 	rd32_val = nvgpu_mem_rd32(g, mem, offset +
 			gmmu_fault_buf_entry_inst_lo_w());
@@ -1086,7 +1086,7 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 	int chid = FIFO_INVAL_CHANNEL_ID;
 	struct channel_gk20a *refch;
-	memset(mmfault, 0, sizeof(*mmfault));
+	(void) memset(mmfault, 0, sizeof(*mmfault));
 	if ((fault_status & fb_mmu_fault_status_valid_set_f()) == 0U) {
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index eea371d0b..670ff7147 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -408,7 +408,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	nvgpu_dma_unmap_free(ch_vm, &ch->gpfifo.mem);
 	nvgpu_big_free(g, ch->gpfifo.pipe);
-	memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
+	(void) memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
 	channel_gk20a_free_priv_cmdbuf(ch);
@@ -495,7 +495,7 @@ unbind:
 	}
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
-	memset(ch->ref_actions, 0, sizeof(ch->ref_actions));
+	(void) memset(ch->ref_actions, 0, sizeof(ch->ref_actions));
 	ch->ref_actions_put = 0;
 #endif
@@ -804,7 +804,7 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c)
 	nvgpu_dma_unmap_free(ch_vm, &q->mem);
-	memset(q, 0, sizeof(struct priv_cmd_queue));
+	(void) memset(q, 0, sizeof(struct priv_cmd_queue));
 }
 /* allocate a cmd buffer with given size. size is number of u32 entries */
@@ -875,7 +875,7 @@ void free_priv_cmdbuf(struct channel_gk20a *c,
 	struct priv_cmd_entry *e)
 {
 	if (channel_gk20a_is_prealloc_enabled(c)) {
-		memset(e, 0, sizeof(struct priv_cmd_entry));
+		(void) memset(e, 0, sizeof(struct priv_cmd_entry));
 	} else {
 		nvgpu_kfree(c->g, e);
 	}
@@ -926,7 +926,7 @@ void channel_gk20a_free_job(struct channel_gk20a *c,
 	if (channel_gk20a_is_prealloc_enabled(c)) {
 		struct priv_cmd_entry *wait_cmd = job->wait_cmd;
 		struct priv_cmd_entry *incr_cmd = job->incr_cmd;
-		memset(job, 0, sizeof(*job));
+		(void) memset(job, 0, sizeof(*job));
 		job->wait_cmd = wait_cmd;
 		job->incr_cmd = incr_cmd;
 	} else {
@@ -1088,7 +1088,7 @@ clean_up_priv_cmd:
clean_up_joblist:
 	nvgpu_vfree(c->g, c->joblist.pre_alloc.jobs);
clean_up:
-	memset(&c->joblist.pre_alloc, 0, sizeof(c->joblist.pre_alloc));
+	(void) memset(&c->joblist.pre_alloc, 0, sizeof(c->joblist.pre_alloc));
 	return err;
 }
@@ -1286,7 +1286,7 @@ clean_up_unmap:
 		c->usermode_submit_enabled = false;
 	}
clean_up:
-	memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
+	(void) memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
clean_up_idle:
 	if (c->deterministic) {
 		nvgpu_rwsem_down_read(&g->deterministic_busy);
@@ -1741,7 +1741,7 @@ static int __nvgpu_channel_worker_start(struct gk20a *g)
 		return err;
 	}
-	snprintf(thread_name, sizeof(thread_name),
+	(void) snprintf(thread_name, sizeof(thread_name),
 		"nvgpu_channel_poll_%s", g->name);
 	err = nvgpu_thread_create(&g->channel_worker.poll_task, g,
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index 702e20e81..43253c4bb 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -133,7 +133,7 @@ static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a,
 		return NULL;
 	}
-	memset(new_buddy, 0, sizeof(struct nvgpu_buddy));
+	(void) memset(new_buddy, 0, sizeof(struct nvgpu_buddy));
 	new_buddy->parent = parent;
 	new_buddy->start = start;
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
index 11ddad2c1..0e2749712 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
@@ -131,7 +131,7 @@ void nvgpu_alloc_destroy(struct nvgpu_allocator *a)
 {
 	a->ops->fini(a);
 	nvgpu_mutex_destroy(&a->lock);
-	memset(a, 0, sizeof(*a));
+	(void) memset(a, 0, sizeof(*a));
 }
 #ifdef __KERNEL__
@@ -173,7 +173,7 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
 	a->priv = priv;
 	a->debug = dbg;
-	strncpy(a->name, name, sizeof(a->name));
+	(void) strncpy(a->name, name, sizeof(a->name));
 	a->name[sizeof(a->name) - 1U] = '\0';
 	return 0;
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 1ddaa9a00..e542c757b 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -240,7 +240,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 		u8 *src = (u8 *)mem->cpu_va + offset;
 		WARN_ON(mem->cpu_va == NULL);
-		memcpy(dest, src, size);
+		(void) memcpy(dest, src, size);
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_rd_n(g, mem, offset, size, dest);
 	} else {
@@ -281,7 +281,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		u8 *dest = (u8 *)mem->cpu_va + offset;
 		WARN_ON(mem->cpu_va == NULL);
-		memcpy(dest, src, size);
+		(void) memcpy(dest, src, size);
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_wr_n(g, mem, offset, size, src);
 		if (!mem->skip_wmb) {
@@ -305,7 +305,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		u8 *dest = (u8 *)mem->cpu_va + offset;
 		WARN_ON(mem->cpu_va == NULL);
-		memset(dest, c, size);
+		(void) memset(dest, c, size);
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 6c7cb96e4..c9fa929b9 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -294,7 +294,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 		return NULL;
 	}
-	memset(slab_page, 0, sizeof(*slab_page));
+	(void) memset(slab_page, 0, sizeof(*slab_page));
 	slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size);
 	if (slab_page->page_addr == 0ULL) {
@@ -535,7 +535,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 		goto fail;
 	}
-	memset(alloc, 0, sizeof(*alloc));
+	(void) memset(alloc, 0, sizeof(*alloc));
 	alloc->length = pages << a->page_shift;
 	alloc->sgt.ops = &page_alloc_sgl_ops;
@@ -1060,7 +1060,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		}
 	}
-	snprintf(buddy_name, sizeof(buddy_name), "%s-src", name);
+	(void) snprintf(buddy_name, sizeof(buddy_name), "%s-src", name);
 	err = nvgpu_buddy_allocator_init(g, &a->source_allocator, NULL,
 					buddy_name, base, length, blk_size,
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 64ab4a9a7..1f93d6928 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -169,7 +169,7 @@ void __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch)
 {
-	memset(mapping_batch, 0, sizeof(*mapping_batch));
+	(void) memset(mapping_batch, 0, sizeof(*mapping_batch));
 	mapping_batch->gpu_l2_flushed = false;
 	mapping_batch->need_tlb_invalidate = false;
 }
@@ -337,7 +337,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 #endif
 	/* Initialize the page table data structures. */
-	strncpy(vm->name, name, min(strlen(name), sizeof(vm->name)));
+	(void) strncpy(vm->name, name, min(strlen(name), sizeof(vm->name)));
 	err = nvgpu_gmmu_init_page_table(vm);
 	if (err != 0) {
 		goto clean_up_vgpu_vm;
 	}
@@ -418,7 +418,8 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	 * User VMA.
 	 */
 	if (user_vma_start < user_vma_limit) {
-		snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s", name);
+		(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s",
+			name);
 		err = nvgpu_buddy_allocator_init(g, &vm->user, vm,
 						 alloc_name,
 						 user_vma_start,
@@ -444,7 +445,8 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	 * User VMA for large pages when a split address range is used.
 	 */
 	if (user_lp_vma_start < user_lp_vma_limit) {
-		snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s_lp", name);
+		(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s_lp",
+			name);
 		err = nvgpu_buddy_allocator_init(g, &vm->user_lp, vm,
 						 alloc_name,
 						 user_lp_vma_start,
@@ -461,7 +463,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	/*
 	 * Kernel VMA. Must always exist for an address space.
 	 */
-	snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-sys", name);
+	(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-sys", name);
 	err = nvgpu_buddy_allocator_init(g, &vm->kernel, vm,
 					 alloc_name,
 					 kernel_vma_start,
diff --git a/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c b/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c
index 89c2e6db7..a77015edd 100644
--- a/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c
+++ b/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c
@@ -136,7 +136,7 @@ int nvgpu_css_enable_snapshot(struct channel_gk20a *ch,
 	data->hw_end = data->hw_snapshot +
 		snapshot_size / sizeof(struct gk20a_cs_snapshot_fifo_entry);
 	data->hw_get = data->hw_snapshot;
-	memset(data->hw_snapshot, 0xff, snapshot_size);
+	(void) memset(data->hw_snapshot, 0xff, snapshot_size);
 	g->ops.perf.membuf_reset_streaming(g);
 	g->ops.perf.enable_membuf(g, snapshot_size, data->hw_memdesc.gpu_va,
@@ -149,7 +149,7 @@ int nvgpu_css_enable_snapshot(struct channel_gk20a *ch,
failed_allocation:
 	if (data->hw_memdesc.size) {
 		nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc);
-		memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
+		(void) memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	}
 	data->hw_snapshot = NULL;
@@ -168,7 +168,7 @@ void nvgpu_css_disable_snapshot(struct gr_gk20a *gr)
 	g->ops.perf.disable_membuf(g);
 	nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc);
-	memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
+	(void) memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	data->hw_snapshot = NULL;
 	nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n");
@@ -320,11 +320,12 @@ next_hw_fifo_entry:
 	/* re-set HW buffer after processing taking wrapping into account */
 	if (css->hw_get < src) {
-		memset(css->hw_get, 0xff, (src - css->hw_get) * sizeof(*src));
+		(void) memset(css->hw_get, 0xff,
+			(src - css->hw_get) * sizeof(*src));
 	} else {
-		memset(css->hw_snapshot, 0xff,
+		(void) memset(css->hw_snapshot, 0xff,
 				(src - css->hw_snapshot) * sizeof(*src));
-		memset(css->hw_get, 0xff,
+		(void) memset(css->hw_get, 0xff,
 				(css->hw_end - css->hw_get) * sizeof(*src));
 	}
 	gr->cs_data->hw_get = src;
@@ -408,7 +409,7 @@ static int css_gr_create_client_data(struct gk20a *g,
 	 * guest side
 	 */
 	if (cur->snapshot) {
-		memset(cur->snapshot, 0, sizeof(*cur->snapshot));
+		(void) memset(cur->snapshot, 0, sizeof(*cur->snapshot));
 		cur->snapshot->start = sizeof(*cur->snapshot);
 		/* we should be ensure that can fit all fifo entries here */
 		cur->snapshot->end =
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index 751f44553..c36b9f3f0 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -136,7 +136,7 @@ static int nvgpu_init_task_pg_init(struct gk20a *g)
 	nvgpu_cond_init(&pmu->pg_init.wq);
-	snprintf(thread_name, sizeof(thread_name),
+	(void) snprintf(thread_name, sizeof(thread_name),
 		"nvgpu_pg_init_%s", g->name);
 	err = nvgpu_thread_create(&pmu->pg_init.state_task, g,
@@ -411,7 +411,7 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 		BUG_ON(sizeof(pmu->gid_info.gid) != sizeof(gid_data.gid));
-		memcpy(pmu->gid_info.gid, gid_data.gid,
+		(void) memcpy(pmu->gid_info.gid, gid_data.gid,
 			sizeof(pmu->gid_info.gid));
 		}
 	}
@@ -658,7 +658,7 @@ int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
 void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	nvgpu_dma_free(g, mem);
-	memset(mem, 0, sizeof(struct nvgpu_mem));
+	(void) memset(mem, 0, sizeof(struct nvgpu_mem));
 }
 struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu)
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index d83c079dc..bb86c216d 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -33,9 +33,9 @@ void nvgpu_pmu_seq_init(struct nvgpu_pmu *pmu)
 {
 	u32 i;
-	memset(pmu->seq, 0,
+	(void) memset(pmu->seq, 0,
 		sizeof(struct pmu_sequence) * PMU_MAX_NUM_SEQUENCES);
-	memset(pmu->pmu_seq_tbl, 0,
+	(void) memset(pmu->pmu_seq_tbl, 0,
 		sizeof(pmu->pmu_seq_tbl));
 	for (i = 0; i < PMU_MAX_NUM_SEQUENCES; i++) {
@@ -527,7 +527,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 	} else if (seq->state != PMU_SEQ_STATE_CANCELLED) {
 		if (seq->msg) {
 			if (seq->msg->hdr.size >= msg->hdr.size) {
-				memcpy(seq->msg, msg, msg->hdr.size);
+				(void) memcpy(seq->msg, msg, msg->hdr.size);
 			} else {
 				nvgpu_err(g, "sequence %d msg buffer too small",
 					seq->id);
@@ -559,7 +559,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 	}
 	if (seq->out_mem != NULL) {
-		memset(pv->pmu_allocation_get_fb_addr(pmu,
+		(void) memset(pv->pmu_allocation_get_fb_addr(pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)), 0x0,
 			pv->pmu_allocation_get_fb_size(pmu,
 				pv->get_pmu_seq_out_a_ptr(seq)));
@@ -573,7 +573,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 	}
 	if (seq->in_mem != NULL) {
-		memset(pv->pmu_allocation_get_fb_addr(pmu,
+		(void) memset(pv->pmu_allocation_get_fb_addr(pmu,
 			pv->get_pmu_seq_in_a_ptr(seq)), 0x0,
 			pv->pmu_allocation_get_fb_size(pmu,
 				pv->get_pmu_seq_in_a_ptr(seq)));
@@ -766,8 +766,9 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 		(struct rpc_handler_payload *)param;
 	struct nv_pmu_rpc_struct_perfmon_query *rpc_param;
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
-	memcpy(&rpc, rpc_payload->rpc_buff, sizeof(struct nv_pmu_rpc_header));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
+	(void) memcpy(&rpc, rpc_payload->rpc_buff,
+		sizeof(struct nv_pmu_rpc_header));
 	if (rpc.flcn_status) {
 		nvgpu_err(g, " failed RPC response, status=0x%x, func=0x%x",
@@ -922,15 +923,15 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
 	}
 	rpc_buff = rpc_payload->rpc_buff;
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
-	memset(&payload, 0, sizeof(struct pmu_payload));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&payload, 0, sizeof(struct pmu_payload));
 	cmd.hdr.unit_id = rpc->unit_id;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct nv_pmu_rpc_cmd);
 	cmd.cmd.rpc.cmd_type = NV_PMU_RPC_CMD_ID;
 	cmd.cmd.rpc.flags = rpc->flags;
-	memcpy(rpc_buff, rpc, size_rpc);
+	(void) memcpy(rpc_buff, rpc, size_rpc);
 	payload.rpc.prpc = rpc_buff;
 	payload.rpc.size_rpc = size_rpc;
 	payload.rpc.size_scratch = size_scratch;
@@ -954,7 +955,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
 		pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
 			&rpc_payload->complete, true);
 		/* copy back data to caller */
-		memcpy(rpc, rpc_buff, size_rpc);
+		(void) memcpy(rpc, rpc_buff, size_rpc);
 		/* free allocated memory */
 		nvgpu_kfree(g, rpc_payload);
 	}
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
index a72f5eb0c..007ccc22b 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
@@ -84,7 +84,7 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 	}
 	/* init PERFMON */
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = get_perfmon_id(pmu);
 	if (cmd.hdr.unit_id == PMU_UNIT_INVALID) {
@@ -115,7 +115,7 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 	 */
 	pv->perfmon_cmd_init_set_mov_avg(&cmd.cmd.perfmon, 17);
-	memset(&payload, 0, sizeof(struct pmu_payload));
+	(void) memset(&payload, 0, sizeof(struct pmu_payload));
 	payload.in.buf = pv->get_perfmon_cntr_ptr(pmu);
 	payload.in.size = pv->get_perfmon_cntr_sz(pmu);
 	payload.in.offset = pv->get_perfmon_cmd_init_offsetofvar(COUNTER_ALLOC);
@@ -140,7 +140,7 @@ int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu)
 	}
 	/* PERFMON Start */
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = get_perfmon_id(pmu);
 	if (cmd.hdr.unit_id == PMU_UNIT_INVALID) {
 		nvgpu_err(g, "failed to get perfmon UNIT ID, command skipped");
@@ -159,7 +159,7 @@ int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu)
 		PMU_PERFMON_FLAG_ENABLE_DECREASE |
 		PMU_PERFMON_FLAG_CLEAR_PREV);
-	memset(&payload, 0, sizeof(struct pmu_payload));
+	(void) memset(&payload, 0, sizeof(struct pmu_payload));
 	/* TBD: PMU_PERFMON_PCT_TO_INC * 100 */
 	pv->set_perfmon_cntr_ut(pmu, 3000); /* 30% */
@@ -190,7 +190,7 @@ int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu)
 	}
 	/* PERFMON Stop */
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = get_perfmon_id(pmu);
 	if (cmd.hdr.unit_id == PMU_UNIT_INVALID) {
 		nvgpu_err(g, "failed to get perfmon UNIT ID, command skipped");
@@ -312,7 +312,7 @@ int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu)
 	nvgpu_log_fn(g, " ");
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_init));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_init));
 	pmu->perfmon_ready = 0;
 	g->ops.pmu.pmu_init_perfmon_counter(g);
@@ -332,7 +332,7 @@ int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu)
 	 */
 	rpc.num_counters = 1;
-	memset(rpc.counter, 0, sizeof(struct pmu_perfmon_counter_v3) *
+	(void) memset(rpc.counter, 0, sizeof(struct pmu_perfmon_counter_v3) *
 		NV_PMU_PERFMON_MAX_COUNTERS);
 	/* Counter used to count GR busy cycles */
 	rpc.counter[0].index = 3;
@@ -360,7 +360,7 @@ int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu)
 	nvgpu_log_fn(g, " ");
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_start));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_start));
 	rpc.group_id = PMU_DOMAIN_GROUP_PSTATE;
 	rpc.state_id = pmu->perfmon_state_id[PMU_DOMAIN_GROUP_PSTATE];
 	rpc.flags = PMU_PERFMON_FLAG_ENABLE_INCREASE |
@@ -391,7 +391,7 @@ int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu)
 	nvgpu_log_fn(g, " ");
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_stop));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_stop));
 	/* PERFMON Stop */
 	nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_STOP\n");
 	PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, STOP, &rpc, 0);
@@ -414,7 +414,7 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu)
 	nvgpu_log_fn(g, " ");
 	pmu->perfmon_query = 0;
-	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_query));
+	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_query));
 	/* PERFMON QUERY */
 	nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_QUERY\n");
 	PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, QUERY, &rpc, 0);
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index f3720b4ab..268687386 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -152,7 +152,7 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
 	nvgpu_log_fn(g, " ");
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE +
 		sizeof(struct pmu_pg_cmd_elpg_cmd);
@@ -314,7 +314,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 		}
 		if (BIT(pg_engine_id) & pg_engine_id_list) {
-			memset(&cmd, 0, sizeof(struct pmu_cmd));
+			(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 			cmd.hdr.unit_id = PMU_UNIT_PG;
 			cmd.hdr.size = PMU_CMD_HDR_SIZE +
 				sizeof(struct pmu_pg_cmd_elpg_cmd);
@@ -400,7 +400,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	}
 	/* init ELPG */
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
 	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
@@ -416,7 +416,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	/* alloc dmem for powergating state log */
 	pmu->stat_dmem_offset[pg_engine_id] = 0;
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
 	cmd.cmd.pg.stat.cmd_type = PMU_PG_CMD_ID_PG_STAT;
@@ -440,7 +440,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
 		pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
 	}
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
 	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
@@ -534,7 +534,7 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
 	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE +
 		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
@@ -575,7 +575,7 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
 	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE +
 		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
@@ -664,7 +664,7 @@ int nvgpu_pmu_ap_send_command(struct gk20a *g,
 	u32 seq;
 	pmu_callback p_callback = NULL;
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	/* Copy common members */
 	cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -685,7 +685,7 @@ int nvgpu_pmu_ap_send_command(struct gk20a *g,
 		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL");
 		cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.ctrl_id =
 			p_ap_cmd->init_and_enable_ctrl.ctrl_id;
-		memcpy(
+		(void) memcpy(
 			(void *)&(cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.params),
 			(void *)&(p_ap_cmd->init_and_enable_ctrl.params),
 			sizeof(struct pmu_ap_ctrl_init_params));
diff --git a/drivers/gpu/nvgpu/common/sec2/sec2.c b/drivers/gpu/nvgpu/common/sec2/sec2.c
index 965fde4f4..4b3c38fde 100644
--- a/drivers/gpu/nvgpu/common/sec2/sec2.c
+++ b/drivers/gpu/nvgpu/common/sec2/sec2.c
@@ -84,10 +84,10 @@ static void sec2_seq_init(struct nvgpu_sec2 *sec2)
 	nvgpu_log_fn(sec2->g, " ");
-	memset(sec2->seq, 0,
+	(void) memset(sec2->seq, 0,
 		sizeof(struct sec2_sequence) * SEC2_MAX_NUM_SEQUENCES);
-	memset(sec2->sec2_seq_tbl, 0, sizeof(sec2->sec2_seq_tbl));
+	(void) memset(sec2->sec2_seq_tbl, 0, sizeof(sec2->sec2_seq_tbl));
 	for (i = 0; i < SEC2_MAX_NUM_SEQUENCES; i++) {
 		sec2->seq[i].id = (u8)i;
@@ -218,7 +218,7 @@ static void sec2_load_ls_falcons(struct gk20a *g, struct nvgpu_sec2 *sec2,
 	nvgpu_log_fn(g, " ");
 	/* send message to load falcon */
-	memset(&cmd, 0, sizeof(struct nv_flcn_cmd_sec2));
+	(void) memset(&cmd, 0, sizeof(struct nv_flcn_cmd_sec2));
 	cmd.hdr.unit_id = NV_SEC2_UNIT_ACR;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE +
 		sizeof(struct nv_sec2_acr_cmd_bootstrap_falcon);
diff --git a/drivers/gpu/nvgpu/common/sim.c b/drivers/gpu/nvgpu/common/sim.c
index df27f7936..f361d10fa 100644
--- a/drivers/gpu/nvgpu/common/sim.c
+++ b/drivers/gpu/nvgpu/common/sim.c
@@ -41,7 +41,7 @@ void nvgpu_free_sim_buffer(struct gk20a *g, struct nvgpu_mem *mem)
 	if (nvgpu_mem_is_valid(mem))
 		nvgpu_dma_free(g, mem);
-	memset(mem, 0, sizeof(*mem));
+	(void) memset(mem, 0, sizeof(*mem));
 }
 void nvgpu_free_sim_support(struct gk20a *g)
@@ -233,7 +233,8 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 	err = issue_rpc_and_wait(g);
 	if (err == 0) {
-		memcpy(data, sim_msg_param(g, data_offset), sizeof(u32));
+		(void) memcpy(data, sim_msg_param(g, data_offset),
+			sizeof(u32));
 	} else {
 		*data = 0xffffffff;
 		WARN(1, "issue_rpc_and_wait failed err=%d", err);
diff --git a/drivers/gpu/nvgpu/common/sim_pci.c b/drivers/gpu/nvgpu/common/sim_pci.c
index 3c4c4f877..75f58e114 100644
--- a/drivers/gpu/nvgpu/common/sim_pci.c
+++ b/drivers/gpu/nvgpu/common/sim_pci.c
@@ -201,7 +201,8 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 	err = issue_rpc_and_wait(g);
 	if (err == 0) {
-		memcpy(data, sim_msg_param(g, data_offset + 0xc), sizeof(u32));
+		(void) memcpy(data, sim_msg_param(g, data_offset + 0xc),
+			sizeof(u32));
 	} else {
 		*data = 0xffffffff;
 		WARN(1, "issue_rpc_and_wait failed err=%d", err);
diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c
index 121c0af76..9620e2fc6 100644
--- a/drivers/gpu/nvgpu/common/vbios/bios.c
+++ b/drivers/gpu/nvgpu/common/vbios/bios.c
@@ -400,7 +400,8 @@ u32 nvgpu_bios_get_nvlink_config_data(struct gk20a *g)
 		return -EINVAL;
 	}
-	memcpy(&config, &g->bios.data[g->bios.nvlink_config_data_offset],
+	(void) memcpy(&config,
+		&g->bios.data[g->bios.nvlink_config_data_offset],
 		sizeof(config));
 	if (config.version != NVLINK_CONFIG_DATA_HDR_VER_10) {
@@ -475,7 +476,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
 	struct application_interface_table_hdr_v1 hdr;
 	int i;
-	memcpy((u8 *)&hdr, &g->bios.data[offset], sizeof(hdr));
+	nvgpu_memcpy((u8 *)&hdr, &g->bios.data[offset], sizeof(hdr));
 	nvgpu_log_fn(g, "appInfoHdr ver %d size %d entrySize %d entryCount %d",
 		hdr.version, hdr.header_size,
@@ -731,7 +732,7 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
 	int i;
 	nvgpu_log_fn(g, " ");
-	memcpy(&bit, &g->bios.data[offset], sizeof(bit));
+	(void) memcpy(&bit, &g->bios.data[offset], sizeof(bit));
 	nvgpu_log_info(g, "BIT header: %04x %08x", bit.id, bit.signature);
 	nvgpu_log_info(g, "tokens: %d entries * %d bytes",
@@ -739,7 +740,8 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
 	offset += bit.header_size;
 	for (i = 0; i < bit.token_entries; i++) {
-		memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token));
+		(void) memcpy(&bit_token, &g->bios.data[offset],
+			sizeof(bit_token));
 		nvgpu_log_info(g, "BIT token id %d ptr %d size %d ver %d",
 			bit_token.token_id, bit_token.data_ptr,
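
Note: the pattern applied throughout this patch is purely mechanical — the return values of memset/memcpy/snprintf/strncpy are explicitly discarded with a (void) cast, which appears intended to satisfy checkers that flag ignored return values (e.g. guideline rules in the spirit of MISRA C Rule 17.7). A minimal standalone sketch of that pattern follows; it is illustrative only, not nvgpu code, and the identifiers (make_label, tmp, base) are hypothetical.

#include <stdio.h>
#include <string.h>

/* Build "base_<id>" into dst, discarding return values the same way the patch does. */
static void make_label(char dst[32], const char *base, int id)
{
	char tmp[32];

	/* memset() returns its first argument; the value is not needed here. */
	(void) memset(tmp, 0, sizeof(tmp));

	/* snprintf() returns the would-be output length; it is deliberately dropped. */
	(void) snprintf(tmp, sizeof(tmp), "%s_%d", base, id);

	/* memcpy() returns the destination pointer, which is likewise ignored. */
	(void) memcpy(dst, tmp, sizeof(tmp));
}

int main(void)
{
	char name[32];

	make_label(name, "as", 3);
	printf("%s\n", name);
	return 0;
}

The cast only documents that the return value is intentionally unused and silences "ignored return value" diagnostics; where truncation matters, checking the snprintf result instead of casting it away would be the stricter alternative.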