mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Fix MISRA rule 8.3 violations
MISRA rule 8.3 requires that all declarations of a function use the
same parameter names and type qualifiers. There are cases where the
parameter names do not match between a function's prototype and its
definition. This patch fixes some of these violations by renaming the
prototype parameters to match the definitions.

JIRA NVGPU-847

Change-Id: I980ca7ba8adc853de9c1b6f6c7e7b3e4ac12f88e
Signed-off-by: smadhavan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1926980
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 74c678f4b8
commit 503b897b45
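To make the rule concrete, here is a minimal, self-contained C sketch of the kind of mismatch this patch removes. The function name clk_example_get_fll is a hypothetical stand-in, not a declaration taken from the nvgpu headers; only the pattern mirrors the changes below.

/*
 * Hypothetical illustration of a MISRA C:2012 rule 8.3 violation.
 * "clk_example_get_fll" is a stand-in name for this sketch.
 */
struct gk20a;
struct set_fll_clk;

/* Prototype (as in a header): parameter named "fllclk". */
int clk_example_get_fll(struct gk20a *g, struct set_fll_clk *fllclk);

/*
 * Definition (as in a .c file): the same parameter named "setfllclk".
 * This compiles cleanly, but the two declarations of one function now
 * disagree, which rule 8.3 forbids.
 */
int clk_example_get_fll(struct gk20a *g, struct set_fll_clk *setfllclk)
{
	(void)g;
	(void)setfllclk;
	return 0;
}

/* Fix, as applied by this patch: rename the prototype parameter. */
int clk_example_get_fll(struct gk20a *g, struct set_fll_clk *setfllclk);

The fix is behavior-preserving: parameter names in a prototype have no effect on linkage or code generation, so renaming them only changes what a reader (and the MISRA checker) sees.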
@@ -130,8 +130,8 @@ int clk_domain_get_f_or_v(
 		u32 *pvoltuv,
 		u8 railidx
 	);
-int clk_get_fll_clks(struct gk20a *g, struct set_fll_clk *fllclk);
-int clk_set_fll_clks(struct gk20a *g, struct set_fll_clk *fllclk);
+int clk_get_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk);
+int clk_set_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk);
 int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx);
 u32 nvgpu_clk_vf_change_inject_data_fill_gv10x(struct gk20a *g,
 	struct nv_pmu_clk_rpc *rpccall,
@@ -35,7 +35,7 @@
 static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs);

 static int devinit_get_clocks_table(struct gk20a *g,
-	struct clk_domains *pdomainobjs);
+	struct clk_domains *pclkdomainobjs);

 static int clk_domain_pmudatainit_super(struct gk20a *g, struct boardobj
 	*board_obj_ptr, struct nv_pmu_boardobj *ppmudata);
@@ -26,6 +26,6 @@
 struct gk20a;
 struct nvgpu_mem;

-int gp10b_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst);
+int gp10b_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst);

 #endif
@@ -50,8 +50,8 @@
 #include "gk20a/fence_gk20a.h"

-static void free_channel(struct fifo_gk20a *f, struct channel_gk20a *c);
-static void gk20a_channel_dump_ref_actions(struct channel_gk20a *c);
+static void free_channel(struct fifo_gk20a *f, struct channel_gk20a *ch);
+static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch);

 static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c);

@@ -105,7 +105,7 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
 int gk20a_init_mm_setup_hw(struct gk20a *g);

 u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
-			  u64 map_offset,
+			  u64 vaddr,
			  struct nvgpu_sgt *sgt,
			  u64 buffer_offset,
			  u64 size,
@@ -233,20 +233,20 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
  */
 int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
				  const char *name, u64 base, u64 length,
-				  u64 struct_size, u64 flags);
+				  u64 blk_size, u64 flags);

 #define GPU_BALLOC_MAX_ORDER 31U

 /*
  * Allocator APIs.
  */
-u64 nvgpu_alloc(struct nvgpu_allocator *allocator, u64 len);
+u64 nvgpu_alloc(struct nvgpu_allocator *a, u64 len);
 u64 nvgpu_alloc_pte(struct nvgpu_allocator *a, u64 len, u32 page_size);
-void nvgpu_free(struct nvgpu_allocator *allocator, u64 addr);
+void nvgpu_free(struct nvgpu_allocator *a, u64 addr);

-u64 nvgpu_alloc_fixed(struct nvgpu_allocator *allocator, u64 base, u64 len,
+u64 nvgpu_alloc_fixed(struct nvgpu_allocator *a, u64 base, u64 len,
		      u32 page_size);
-void nvgpu_free_fixed(struct nvgpu_allocator *allocator, u64 base, u64 len);
+void nvgpu_free_fixed(struct nvgpu_allocator *a, u64 base, u64 len);

 int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a,
				 struct nvgpu_alloc_carveout *co);
@@ -259,7 +259,7 @@ u64 nvgpu_alloc_end(struct nvgpu_allocator *a);
 bool nvgpu_alloc_initialized(struct nvgpu_allocator *a);
 u64 nvgpu_alloc_space(struct nvgpu_allocator *a);

-void nvgpu_alloc_destroy(struct nvgpu_allocator *allocator);
+void nvgpu_alloc_destroy(struct nvgpu_allocator *a);

 #ifdef __KERNEL__
 void nvgpu_alloc_print_stats(struct nvgpu_allocator *a,
@@ -440,7 +440,7 @@ boardobjgrp_from_node(struct nvgpu_list_node *node)

 int is_boardobjgrp_pmucmd_id_valid_v0(struct gk20a *g,
				      struct boardobjgrp *pboardobjgrp,
-				      struct boardobjgrp_pmu_cmd *cmd);
+				      struct boardobjgrp_pmu_cmd *pcmd);
 int is_boardobjgrp_pmucmd_id_valid_v1(struct gk20a *g,
				      struct boardobjgrp *pboardobjgrp,
				      struct boardobjgrp_pmu_cmd *cmd);
@@ -367,8 +367,8 @@ void gk20a_disable_channel(struct channel_gk20a *ch);
 void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt);
 void gk20a_channel_abort_clean_up(struct channel_gk20a *ch);
 void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events);
-int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 size,
-			struct priv_cmd_entry *entry);
+int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
+			struct priv_cmd_entry *e);
 int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e);

 int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch);
@@ -115,7 +115,7 @@ dbg_profiler_object_data_from_prof_obj_entry(struct nvgpu_list_node *node)
 };

 /* used by the interrupt handler to post events */
-void nvgpu_dbg_gpu_post_events(struct channel_gk20a *fault_ch);
+void nvgpu_dbg_gpu_post_events(struct channel_gk20a *ch);

 bool nvgpu_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch);
 int nvgpu_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch);
@@ -213,7 +213,7 @@ void nvgpu_init_mm_ce_context(struct gk20a *g);
 int nvgpu_init_mm_support(struct gk20a *g);
 int nvgpu_init_mm_setup_hw(struct gk20a *g);

-u64 nvgpu_inst_block_addr(struct gk20a *g, struct nvgpu_mem *mem);
+u64 nvgpu_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block);
 void nvgpu_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);

 int nvgpu_mm_suspend(struct gk20a *g);
@@ -508,8 +508,8 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);

 /* PMU RPC */
 int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
-	u16 size_rpc, u16 size_scratch, pmu_callback callback, void *cb_param,
-	bool is_copy_back);
+	u16 size_rpc, u16 size_scratch, pmu_callback caller_cb,
+	void *caller_cb_param, bool is_copy_back);

 /* PMU wait*/
 int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
@@ -30,9 +30,9 @@ struct mm_gk20a;
 struct nvgpu_mem;


-void nvgpu_pramin_rd_n(struct gk20a *g, struct nvgpu_mem *mem, u32 start, u32 words, void *dest);
-void nvgpu_pramin_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 start, u32 words, void *src);
-void nvgpu_pramin_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 start, u32 words, u32 w);
+void nvgpu_pramin_rd_n(struct gk20a *g, struct nvgpu_mem *mem, u32 start, u32 size, void *dest);
+void nvgpu_pramin_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 start, u32 size, void *src);
+void nvgpu_pramin_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 start, u32 size, u32 w);

 void nvgpu_init_pramin(struct mm_gk20a *mm);

@@ -160,7 +160,7 @@ struct nvgpu_semaphore_sea {
 /*
  * Semaphore sea functions.
  */
-struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *gk20a);
+struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g);
 void nvgpu_semaphore_sea_destroy(struct gk20a *g);
 int nvgpu_semaphore_sea_map(struct nvgpu_semaphore_pool *sea,
			    struct vm_gk20a *vm);
@@ -173,9 +173,9 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_get_sea(struct gk20a *g);
  */
 int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
			       struct nvgpu_semaphore_pool **pool);
-int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *pool,
+int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
			     struct vm_gk20a *vm);
-void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *pool,
+void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,
				struct vm_gk20a *vm);
 u64 __nvgpu_semaphore_pool_gpu_va(struct nvgpu_semaphore_pool *p, bool global);
 void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p);
@@ -225,12 +225,12 @@ bool nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size);
 int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm);

 /* batching eliminates redundant cache flushes and invalidates */
-void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *batch);
+void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch);
 void nvgpu_vm_mapping_batch_finish(
-	struct vm_gk20a *vm, struct vm_gk20a_mapping_batch *batch);
+	struct vm_gk20a *vm, struct vm_gk20a_mapping_batch *mapping_batch);
 /* called when holding vm->update_gmmu_lock */
 void nvgpu_vm_mapping_batch_finish_locked(
-	struct vm_gk20a *vm, struct vm_gk20a_mapping_batch *batch);
+	struct vm_gk20a *vm, struct vm_gk20a_mapping_batch *mapping_batch);

 /* get reference to all currently mapped buffers */
 int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
@@ -69,7 +69,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr);

 struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr);
 int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
-				  u64 map_offset, u64 map_size, u32 pgsz_idx,
+				  u64 map_addr, u64 map_size, u32 pgsz_idx,
				  struct nvgpu_vm_area **pvm_area);

 #endif /* NVGPU_VM_AREA_H */
@@ -198,32 +198,32 @@ static u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
 }

 void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
-		       int __event_id)
+		       int event_id)
 {
-	struct gk20a_event_id_data *event_id_data;
-	u32 event_id;
+	struct gk20a_event_id_data *channel_event_id_data;
+	u32 channel_event_id;
 	int err = 0;
 	struct gk20a *g = tsg->g;

-	event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
+	channel_event_id = nvgpu_event_id_to_ioctl_channel_event_id(event_id);
 	if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
 		return;

-	err = gk20a_tsg_get_event_data_from_id(tsg, event_id,
-					       &event_id_data);
+	err = gk20a_tsg_get_event_data_from_id(tsg, channel_event_id,
+					       &channel_event_id_data);
 	if (err)
 		return;

-	nvgpu_mutex_acquire(&event_id_data->lock);
+	nvgpu_mutex_acquire(&channel_event_id_data->lock);

 	nvgpu_log_info(g,
		"posting event for event_id=%d on tsg=%d\n",
-		event_id, tsg->tsgid);
-	event_id_data->event_posted = true;
+		channel_event_id, tsg->tsgid);
+	channel_event_id_data->event_posted = true;

-	nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
+	nvgpu_cond_broadcast_interruptible(&channel_event_id_data->event_id_wq);

-	nvgpu_mutex_release(&event_id_data->lock);
+	nvgpu_mutex_release(&channel_event_id_data->lock);
 }

 static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
@@ -23,6 +23,6 @@
 #include <nvgpu/tsg.h>

 void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
-		       int __event_id)
+		       int channel_event_id)
 {
 }
@@ -33,7 +33,7 @@

 static struct vfe_equ *construct_vfe_equ(struct gk20a *g, void *pargs);
 static int devinit_get_vfe_equ_table(struct gk20a *g,
-	struct vfe_equs *pequobjs);
+	struct vfe_equs *pvfeequobjs);

 static int _vfe_equs_pmudatainit(struct gk20a *g,
	struct boardobjgrp *pboardobjgrp,
@@ -33,7 +33,7 @@
 #include "vfe_var.h"

 static int devinit_get_vfe_var_table(struct gk20a *g,
-				     struct vfe_vars *pvarobjs);
+				     struct vfe_vars *pvfevarobjs);
 static int vfe_var_construct_single(struct gk20a *g,
				    struct boardobj **ppboardobj,
				    u16 size, void *pargs);