Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: MISRA 10.3-Conversions to/from an enum
Fix violations where the conversion is from a non-enum type to an enum type, or vice versa.

JIRA NVGPU-659

Change-Id: I45f43c907b810cc86b2a4480809d0c6757ed3486
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1802322
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 05f45bcfc3
Commit: da43fc5560
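For readers not familiar with MISRA C:2012 Rule 10.3 (roughly: no assignments between different essential type categories, such as plain integers and enums), the sketch below illustrates the two patterns this change applies throughout the hunks that follow: declaring parameters, return types, and struct fields as the named enum rather than int/u32/u8, and making the one unavoidable conversion from a raw register/VBIOS field explicit with a cast to the enum type. All identifiers in the sketch are hypothetical stand-ins for illustration only; the driver's real types are the ones visible in the diff (enum gk20a_mem_rw_flag, enum nv_pmu_clk_clkwhich, enum ctrl_pmgr_pwr_policy_filter_type).

/*
 * Minimal, self-contained sketch of the MISRA 10.3 pattern used in this
 * change. The names below (mem_rw_flag, map_buffer_*, rw_flag_from_hw)
 * are hypothetical and not part of nvgpu.
 */
#include <stdio.h>

enum mem_rw_flag {
	MEM_FLAG_NONE = 0,
	MEM_FLAG_READ_ONLY = 1,
	MEM_FLAG_WRITE_ONLY = 2,
};

/* Before: the flag travels as a plain int, so every call site performs an
 * implicit int <-> enum conversion that the rule flags. */
static void map_buffer_old(unsigned long addr, int rw_flag)
{
	printf("map 0x%lx rw=%d\n", addr, rw_flag);
}

/* After: the parameter is typed as the enum itself, so no conversion is
 * needed at the call site or inside the function. */
static void map_buffer_new(unsigned long addr, enum mem_rw_flag rw_flag)
{
	printf("map 0x%lx rw=%d\n", addr, (int)rw_flag);
}

/* Where a raw value really does come from outside (registers, VBIOS
 * tables), the conversion is written as an explicit cast to the enum,
 * mirroring the pwr_policy filter_type hunk below. */
static enum mem_rw_flag rw_flag_from_hw(unsigned int raw)
{
	return (enum mem_rw_flag)(raw & 0x3U);
}

int main(void)
{
	map_buffer_old(0x1000UL, 1);                   /* implicit conversion, flagged */
	map_buffer_new(0x1000UL, MEM_FLAG_READ_ONLY);  /* enum end to end */
	map_buffer_new(0x2000UL, rw_flag_from_hw(2U)); /* explicit, reviewable cast */
	return 0;
}

Keeping the value typed as the enum end to end lets the compiler and the MISRA checker see every assignment, so only the boundary where a raw hardware value enters the driver needs an explicit cast.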
@@ -35,6 +35,7 @@

 struct clk_domains;
 struct clk_domain;
+enum nv_pmu_clk_clkwhich;

 /*data and function definition to talk to driver*/
 u32 clk_domain_sw_setup(struct gk20a *g);

@@ -78,7 +79,7 @@ struct clk_domain {
 struct boardobj super;
 u32 api_domain;
 u32 part_mask;
-u8 domain;
+enum nv_pmu_clk_clkwhich domain;
 u8 perf_domain_index;
 u8 perf_domain_grp_idx;
 u8 ratio_domain;

@@ -70,7 +70,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 u64 addr,
 u64 size,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool priv,
 enum nvgpu_aperture aperture)
 {

@@ -137,7 +137,7 @@ u64 nvgpu_gmmu_map(struct vm_gk20a *vm,
 struct nvgpu_mem *mem,
 u64 size,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool priv,
 enum nvgpu_aperture aperture)
 {

@@ -153,7 +153,7 @@ u64 nvgpu_gmmu_map_fixed(struct vm_gk20a *vm,
 u64 addr,
 u64 size,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool priv,
 enum nvgpu_aperture aperture)
 {

@@ -680,7 +680,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 u8 kind_v,
 u32 ctag_offset,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool clear_ctags,
 bool sparse,
 bool priv,

@@ -766,7 +766,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 u64 size,
 int pgsz_idx,
 bool va_allocated,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool sparse,
 struct vm_gk20a_mapping_batch *batch)
 {

@@ -2720,7 +2720,7 @@ void gk20a_fifo_isr(struct gk20a *g)
 return;
 }

-u32 gk20a_fifo_nonstall_isr(struct gk20a *g)
+enum gk20a_nonstall_ops gk20a_fifo_nonstall_isr(struct gk20a *g)
 {
 u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
 u32 clear_intr = 0;

@@ -31,6 +31,7 @@

 struct gk20a_debug_output;
 struct mmu_fault_info;
+enum gk20a_nonstall_ops;

 enum {
 NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW = 0,

@@ -228,7 +229,7 @@ int gk20a_init_fifo_support(struct gk20a *g);
 int gk20a_init_fifo_setup_hw(struct gk20a *g);

 void gk20a_fifo_isr(struct gk20a *g);
-u32 gk20a_fifo_nonstall_isr(struct gk20a *g);
+enum gk20a_nonstall_ops gk20a_fifo_nonstall_isr(struct gk20a *g);

 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
 int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);

@@ -139,6 +139,7 @@ enum gk20a_cbc_op {
 enum nvgpu_unit;

 enum nvgpu_flush_op;
+enum gk20a_mem_rw_flag;

 struct _resmgr_context;
 struct nvgpu_gpfifo_entry;

@@ -924,7 +925,7 @@ struct gpu_ops {
 u8 kind_v,
 u32 ctag_offset,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool clear_ctags,
 bool sparse,
 bool priv,

@@ -935,7 +936,7 @@ struct gpu_ops {
 u64 size,
 int pgsz_idx,
 bool va_allocated,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool sparse,
 struct vm_gk20a_mapping_batch *batch);
 int (*vm_bind_channel)(struct vm_gk20a *vm,

@@ -30,6 +30,8 @@
 #include <nvgpu/rbtree.h>
 #include <nvgpu/kref.h>

+enum gk20a_mem_rw_flag;
+
 struct gpfifo_desc {
 struct nvgpu_mem mem;
 u32 entry_num;

@@ -141,7 +143,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 u8 kind_v,
 u32 ctag_offset,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool clear_ctags,
 bool sparse,
 bool priv,

@@ -153,7 +155,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 u64 size,
 int pgsz_idx,
 bool va_allocated,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool sparse,
 struct vm_gk20a_mapping_batch *batch);

@@ -168,7 +168,7 @@ struct nvgpu_gmmu_attrs {
 u32 kind_v;
 u64 ctag;
 bool cacheable;
-int rw_flag;
+enum gk20a_mem_rw_flag rw_flag;
 bool sparse;
 bool priv;
 bool coherent;

@@ -227,7 +227,7 @@ u64 nvgpu_gmmu_map(struct vm_gk20a *vm,
 struct nvgpu_mem *mem,
 u64 size,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool priv,
 enum nvgpu_aperture aperture);

@@ -241,7 +241,7 @@ u64 nvgpu_gmmu_map_fixed(struct vm_gk20a *vm,
 u64 addr,
 u64 size,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool priv,
 enum nvgpu_aperture aperture);

@@ -643,7 +643,8 @@ static u32 devinit_get_pwr_policy_table(struct gk20a *g,
 pwr_policy_data.pwrpolicy.limit_unit = (u8)
 BIOS_GET_FIELD(entry.flags0,
 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT);
-pwr_policy_data.pwrpolicy.filter_type = (u8)
+pwr_policy_data.pwrpolicy.filter_type =
+(enum ctrl_pmgr_pwr_policy_filter_type)
 BIOS_GET_FIELD(entry.flags1,
 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE);

@@ -57,7 +57,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 u8 kind_v,
 u32 ctag_offset,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool clear_ctags,
 bool sparse,
 bool priv,

@@ -34,7 +34,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 u8 kind_v,
 u32 ctag_offset,
 u32 flags,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool clear_ctags,
 bool sparse,
 bool priv,

@@ -86,7 +86,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 u64 size,
 int pgsz_idx,
 bool va_allocated,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool sparse,
 struct vm_gk20a_mapping_batch *batch)
 {

@@ -28,13 +28,14 @@ struct channel_gk20a;
 struct vm_gk20a_mapping_batch;
 struct gk20a_as_share;
 struct vm_gk20a;
+enum gk20a_mem_rw_flag;

 void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 u64 vaddr,
 u64 size,
 int pgsz_idx,
 bool va_allocated,
-int rw_flag,
+enum gk20a_mem_rw_flag rw_flag,
 bool sparse,
 struct vm_gk20a_mapping_batch *batch);
 int vgpu_vm_bind_channel(struct vm_gk20a *vm,