Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: common: fix MISRA violations
Fix 8.2 violation for not specifying a parameter name in the prototype of
secure_alloc(). Fix 21.3 & 21.8 violations for using the reserved names
"free" and "exit". Fix 8.6 and 21.2 violations for __gk20a_do_idle() and
__gk20a_do_unidle() by renaming the functions and wrapping them in a
missing #ifdef CONFIG_PM. Fix 5.7 violation for reusing "class" as a
parameter name when it is already defined as a struct.

JIRA NVGPU-3343

Change-Id: I976e95a32868fa0a657f4baf0845a32bd7aceb9e
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2117913
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 5c09935297
parent 78c7e601f8
committed by mobile promotions
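For readers less familiar with the MISRA C:2012 rules cited in the message, the sketch below shows the declaration style the change moves to. It is a minimal, hypothetical illustration; the sample_ctx and sample_ops names are invented for this sketch and are not taken from the nvgpu sources.

#include <stddef.h>

/* Hypothetical sketch of the naming rules this commit applies. */
struct sample_ctx;

struct sample_ops {
        /* Rule 8.2: every parameter in a prototype is named ("ctx", "size",
         * "release_fn"), including parameters that are function pointers. */
        int (*secure_alloc)(struct sample_ctx *ctx, size_t size,
                        void (**release_fn)(struct sample_ctx *ctx));

        /* Rules 21.3/21.8 and 5.7: names that collide with standard library
         * functions ("free", "exit") or with an existing tag ("class") are
         * avoided; a prefixed name such as "gfree" is used instead. */
        void (*gfree)(struct sample_ctx *ctx);
};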
@@ -628,8 +628,8 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount)
 
         nvgpu_ltc_remove_support(g);
 
-        if (g->free != NULL) {
-                g->free(g);
+        if (g->gfree != NULL) {
+                g->gfree(g);
         }
 }
 
@@ -1315,11 +1315,9 @@ struct gpu_ops {
          */
        int (*secure_alloc)(struct gk20a *g, struct nvgpu_mem *desc_mem,
                        size_t size,
-                       void (**)(struct gk20a *g, struct nvgpu_mem *mem));
+                       void (**fn)(struct gk20a *g, struct nvgpu_mem *mem));
 
        struct {
-               void (*exit)(struct gk20a *g, struct nvgpu_mem *mem,
-                       struct nvgpu_sgl *sgl);
                u32 (*data032_r)(u32 i);
        } pramin;
        struct {
@@ -1891,7 +1889,7 @@ struct nvgpu_gpu_params {
 };
 
 struct gk20a {
-        void (*free)(struct gk20a *g);
+        void (*gfree)(struct gk20a *g);
         struct nvgpu_nvhost_dev *nvhost_dev;
 
         /*
@@ -2214,8 +2212,10 @@ void gk20a_busy_noresume(struct gk20a *g);
 void gk20a_idle_nosuspend(struct gk20a *g);
 int __must_check gk20a_busy(struct gk20a *g);
 void gk20a_idle(struct gk20a *g);
-int __gk20a_do_idle(struct gk20a *g, bool force_reset);
-int __gk20a_do_unidle(struct gk20a *g);
+#ifdef CONFIG_PM
+int gk20a_do_idle_impl(struct gk20a *g, bool force_reset);
+int gk20a_do_unidle_impl(struct gk20a *g);
+#endif
 
 int nvgpu_can_busy(struct gk20a *g);
 int gk20a_wait_for_idle(struct gk20a *g);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,7 +29,7 @@ struct class;
 int nvgpu_probe(struct gk20a *g,
                const char *debugfs_symlink,
                const char *interface_name,
-               struct class *class);
+               struct class *device_class);
 
 void nvgpu_kernel_restart(void *cmd);
 
@@ -243,7 +243,7 @@ static void nvgpu_init_mm_vars(struct gk20a *g)
 int nvgpu_probe(struct gk20a *g,
                const char *debugfs_symlink,
                const char *interface_name,
-               struct class *class)
+               struct class *device_class)
 {
         struct device *dev = dev_from_gk20a(g);
         struct gk20a_platform *platform = dev_get_drvdata(dev);
@@ -275,7 +275,7 @@ int nvgpu_probe(struct gk20a *g,
         nvgpu_init_mm_vars(g);
 
         /* platform probe can defer do user init only if probe succeeds */
-        err = gk20a_user_init(dev, interface_name, class);
+        err = gk20a_user_init(dev, interface_name, device_class);
         if (err)
                 return err;
 
@@ -317,5 +317,5 @@ static void nvgpu_free_gk20a(struct gk20a *g)
 
 void nvgpu_init_gk20a(struct gk20a *g)
 {
-        g->free = nvgpu_free_gk20a;
+        g->gfree = nvgpu_free_gk20a;
 }
@@ -495,15 +495,15 @@ MODULE_DEVICE_TABLE(of, tegra_gk20a_of_match);
 
 #ifdef CONFIG_PM
 /**
- * __gk20a_do_idle() - force the GPU to idle and railgate
+ * gk20a_do_idle_impl() - force the GPU to idle and railgate
  *
- * In success, this call MUST be balanced by caller with __gk20a_do_unidle()
+ * In success, this call MUST be balanced by caller with gk20a_do_unidle_impl()
  *
  * Acquires two locks : &l->busy_lock and &platform->railgate_lock
  * In success, we hold these locks and return
  * In failure, we release these locks and return
  */
-int __gk20a_do_idle(struct gk20a *g, bool force_reset)
+int gk20a_do_idle_impl(struct gk20a *g, bool force_reset)
 {
         struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
         struct device *dev = dev_from_gk20a(g);
@@ -601,7 +601,7 @@ int __gk20a_do_idle(struct gk20a *g, bool force_reset)
         * if GPU is now idle, call prepare_poweroff() to save the
         * state and then do explicit railgate
         *
-        * __gk20a_do_unidle() needs to unrailgate, call
+        * gk20a_do_unidle_impl() needs to unrailgate, call
         * finalize_poweron(), and then call pm_runtime_put_sync()
         * to balance the GPU usage counter
         */
@@ -630,7 +630,7 @@ fail_timeout:
 }
 
 /**
- * gk20a_do_idle() - wrap up for __gk20a_do_idle() to be called
+ * gk20a_do_idle() - wrap up for gk20a_do_idle_impl() to be called
  * from outside of GPU driver
  *
  * In success, this call MUST be balanced by caller with gk20a_do_unidle()
@@ -639,13 +639,14 @@ static int gk20a_do_idle(void *_g)
 {
         struct gk20a *g = (struct gk20a *)_g;
 
-        return __gk20a_do_idle(g, true);
+        return gk20a_do_idle_impl(g, true);
 }
 
 /**
- * __gk20a_do_unidle() - unblock all the tasks blocked by __gk20a_do_idle()
+ * gk20a_do_unidle_impl() - unblock all the tasks blocked by
+ * gk20a_do_idle_impl()
  */
-int __gk20a_do_unidle(struct gk20a *g)
+int gk20a_do_unidle_impl(struct gk20a *g)
 {
         struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
         struct device *dev = dev_from_gk20a(g);
@@ -680,13 +681,13 @@ int __gk20a_do_unidle(struct gk20a *g)
 }
 
 /**
- * gk20a_do_unidle() - wrap up for __gk20a_do_unidle()
+ * gk20a_do_unidle() - wrap up for gk20a_do_unidle_impl()
  */
 static int gk20a_do_unidle(void *_g)
 {
         struct gk20a *g = (struct gk20a *)_g;
 
-        return __gk20a_do_unidle(g);
+        return gk20a_do_unidle_impl(g);
 }
 #endif
 
@@ -769,7 +769,7 @@ static ssize_t force_idle_store(struct device *dev,
         if (g->forced_idle)
                 return count; /* do nothing */
         else {
-                err = __gk20a_do_idle(g, false);
+                err = gk20a_do_idle_impl(g, false);
                 if (!err) {
                         g->forced_idle = 1;
                         nvgpu_info(g, "gpu is idle : %d",
@@ -780,7 +780,7 @@ static ssize_t force_idle_store(struct device *dev,
         if (!g->forced_idle)
                 return count; /* do nothing */
         else {
-                err = __gk20a_do_unidle(g);
+                err = gk20a_do_unidle_impl(g);
                 if (!err) {
                         g->forced_idle = 0;
                         nvgpu_info(g, "gpu is idle : %d",
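The CONFIG_PM portion of the change follows the pattern sketched below. This is a hypothetical, self-contained illustration (the sample_* names are not the nvgpu declarations), assuming the corresponding definitions are themselves compiled only when CONFIG_PM is set, as the commit message describes.

#include <stdbool.h>

struct sample_ctx;

/* Rule 21.2: no reserved double-underscore prefixes; an _impl suffix is used
 * instead. Rule 8.6: the prototypes live under the same #ifdef as their
 * definitions, so a non-PM build never declares functions it never defines. */
#ifdef CONFIG_PM
int sample_do_idle_impl(struct sample_ctx *ctx, bool force_reset);
int sample_do_unidle_impl(struct sample_ctx *ctx);
#endif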