gpu: nvgpu: fix sparse warnings

Fix the following sparse warnings (each pattern is illustrated below):

warning: Using plain integer as NULL pointer
warning: symbol <variable/function> was not declared. Should it be static?
warning: Initializer entry defined twice

Also remove dead functions.
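
These warnings come from sparse, typically run as "make C=1" (checks files being
recompiled) or "make C=2" (checks all source files). Below is a minimal,
hypothetical C sketch of each warning pattern and its fix; the names (struct foo,
helper, my_ops) are made up for illustration and are not taken from the driver.

#include <stddef.h>  /* NULL; kernel code gets it via <linux/stddef.h> */

/* 1. "Using plain integer as NULL pointer":
 *    was  ->  static struct foo *p = 0;
 */
struct foo { int x; };
static struct foo *p = NULL;

/* 2. "symbol 'helper' was not declared. Should it be static?":
 *    a function used only in this file had external linkage and no
 *    prototype in any header; marking it static silences the warning.
 */
static int helper(void)
{
	return p == NULL;
}

/* 3. "Initializer entry defined twice":
 *    a field was initialized both unconditionally and again under an
 *    #ifdef; keeping only the guarded entry fixes it.
 */
struct ops { int (*ioctl)(void); int (*compat_ioctl)(void); };

static const struct ops my_ops = {
	.ioctl        = helper,
#ifdef CONFIG_COMPAT
	.compat_ioctl = helper,
#endif
};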

Bug 1573254

Change-Id: I29d71ecc01c841233cf6b26c9088ca8874773469
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/593363
Reviewed-by: Amit Sharma (SW-TEGRA) <amisharma@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Author:    Deepak Nibade <dnibade@nvidia.com>
Date:      2014-11-04 18:44:28 +05:30
Committed: Dan Willemsen
Parent:    797e4dd319
Commit:    b3f575074b
28 changed files with 155 additions and 205 deletions


@@ -46,7 +46,7 @@ int gk20a_as_alloc_share(struct gk20a_as *as,
gk20a_dbg_fn("");
*out = 0;
*out = NULL;
as_share = kzalloc(sizeof(*as_share), GFP_KERNEL);
if (!as_share)
return -ENOMEM;


@@ -122,7 +122,7 @@ static void gk20a_cde_deallocate_contexts(struct gk20a *g)
}
}
void gk20a_cde_stop(struct gk20a *g)
static void gk20a_cde_stop(struct gk20a *g)
{
struct gk20a_cde_app *cde_app = &g->cde_app;


@@ -603,9 +603,9 @@ static void gk20a_free_error_notifiers(struct channel_gk20a *ch)
if (ch->error_notifier_ref) {
dma_buf_vunmap(ch->error_notifier_ref, ch->error_notifier_va);
dma_buf_put(ch->error_notifier_ref);
ch->error_notifier_ref = 0;
ch->error_notifier = 0;
ch->error_notifier_va = 0;
ch->error_notifier_ref = NULL;
ch->error_notifier = NULL;
ch->error_notifier_va = NULL;
}
}
@@ -785,7 +785,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g)
if (ch == NULL) {
/* TBD: we want to make this virtualizable */
gk20a_err(dev_from_gk20a(g), "out of hw chids");
return 0;
return NULL;
}
ch->g = g;
@@ -795,7 +795,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g)
gk20a_err(dev_from_gk20a(g),
"failed to open gk20a channel, out of inst mem");
return 0;
return NULL;
}
g->ops.fifo.bind_channel(ch);
ch->pid = current->pid;
@@ -1265,18 +1265,6 @@ clean_up:
return err;
}
static inline int wfi_cmd_size(void)
{
return 2;
}
void add_wfi_cmd(struct priv_cmd_entry *cmd, int *i)
{
/* wfi */
cmd->ptr[(*i)++] = 0x2001001E;
/* handle, ignored */
cmd->ptr[(*i)++] = 0x00000000;
}
static inline bool check_gp_put(struct gk20a *g,
struct channel_gk20a *c)
{
@@ -1529,18 +1517,6 @@ void gk20a_channel_update(struct channel_gk20a *c, int nr_completed)
schedule_work(&c->update_fn_work);
}
void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
{
/* syncpoint_a */
ptr[0] = 0x2001001C;
/* payload */
ptr[1] = thresh;
/* syncpoint_b */
ptr[2] = 0x2001001D;
/* syncpt_id, switch_en, wait */
ptr[3] = (id << 8) | 0x10;
}
int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
struct nvgpu_gpfifo *gpfifo,
u32 num_entries,
@@ -1760,11 +1736,6 @@ clean_up:
return err;
}
void gk20a_remove_channel_support(struct channel_gk20a *c)
{
}
int gk20a_init_channel_support(struct gk20a *g, u32 chid)
{
struct channel_gk20a *c = g->fifo.channel+chid;
@@ -1772,7 +1743,6 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
c->in_use = false;
c->hw_chid = chid;
c->bound = false;
c->remove_support = gk20a_remove_channel_support;
mutex_init(&c->jobs_lock);
mutex_init(&c->submit_lock);
INIT_LIST_HEAD(&c->jobs);


@@ -53,8 +53,8 @@ static void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
ptr[3] = (id << 8) | 0x10;
}
int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, u32 id,
u32 thresh, struct priv_cmd_entry **entry,
static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
u32 id, u32 thresh, struct priv_cmd_entry **entry,
struct gk20a_fence **fence)
{
struct gk20a_channel_syncpt *sp =
@@ -84,7 +84,7 @@ int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, u32 id,
return 0;
}
int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
struct priv_cmd_entry **entry,
struct gk20a_fence **fence)
{
@@ -221,7 +221,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
return 0;
}
int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
static int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
struct priv_cmd_entry **entry,
struct gk20a_fence **fence)
{
@@ -231,7 +231,7 @@ int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
entry, fence);
}
int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
static int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
struct priv_cmd_entry **entry,
struct gk20a_fence **fence)
{
@@ -243,7 +243,7 @@ int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
entry, fence);
}
int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
static int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
int wait_fence_fd,
struct priv_cmd_entry **entry,
struct gk20a_fence **fence,
@@ -257,7 +257,7 @@ int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
entry, fence);
}
void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
static void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
{
struct gk20a_channel_syncpt *sp =
container_of(s, struct gk20a_channel_syncpt, ops);


@@ -58,7 +58,8 @@ static int alloc_session(struct dbg_session_gk20a **_dbg_s)
return 0;
}
int gk20a_dbg_gpu_do_dev_open(struct inode *inode, struct file *filp, bool is_profiler)
static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
struct file *filp, bool is_profiler)
{
struct dbg_session_gk20a *dbg_session;
struct gk20a *g;
@@ -504,7 +505,8 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
gk20a_dbg_fn("Copying regops from userspace");
if (copy_from_user(ops, (void *)(uintptr_t)args->ops, ops_size)) {
if (copy_from_user(ops, (void __user *)(uintptr_t)args->ops,
ops_size)) {
dev_err(dev, "copy_from_user failed!");
err = -EFAULT;
goto clean_up;
@@ -542,7 +544,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
gk20a_dbg_fn("Copying result to userspace");
if (copy_to_user((void *)(uintptr_t)args->ops, ops, ops_size)) {
if (copy_to_user((void __user *)(uintptr_t)args->ops, ops, ops_size)) {
dev_err(dev, "copy_to_user failed!");
err = -EFAULT;
goto clean_up;


@@ -32,7 +32,7 @@
#include "hw_pbdma_gk20a.h"
unsigned int gk20a_debug_trace_cmdbuf;
struct platform_device *gk20a_device;
static struct platform_device *gk20a_device;
struct gk20a_debug_output {
void (*fn)(void *ctx, const char *str, size_t len);
@@ -89,7 +89,8 @@ static inline void gk20a_debug_write_to_seqfile(void *ctx, const char *str,
seq_write((struct seq_file *)ctx, str, len);
}
void gk20a_debug_output(struct gk20a_debug_output *o, const char *fmt, ...)
static void gk20a_debug_output(struct gk20a_debug_output *o,
const char *fmt, ...)
{
va_list args;
int len;
@@ -159,7 +160,7 @@ static void gk20a_debug_show_channel(struct gk20a *g,
gk20a_debug_output(o, "\n");
}
void gk20a_debug_show_dump(struct platform_device *pdev,
static void gk20a_debug_show_dump(struct platform_device *pdev,
struct gk20a_debug_output *o)
{
struct gk20a_platform *platform = gk20a_get_platform(pdev);


@@ -152,7 +152,7 @@ static int init_engine_info(struct fifo_gk20a *f)
return 0;
}
void gk20a_remove_fifo_support(struct fifo_gk20a *f)
static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
{
struct gk20a *g = f->g;
struct device *d = dev_from_gk20a(g);


@@ -150,7 +150,6 @@ static const struct file_operations gk20a_prof_ops = {
.unlocked_ioctl = gk20a_dbg_gpu_dev_ioctl,
/* .mmap = gk20a_prof_gpu_dev_mmap,*/
/*int (*mmap) (struct file *, struct vm_area_struct *);*/
.compat_ioctl = gk20a_dbg_gpu_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = gk20a_dbg_gpu_dev_ioctl,
#endif
@@ -206,11 +205,11 @@ static void kunmap_and_free_iopage(void **kvaddr, struct page **page)
{
if (*kvaddr) {
kunmap(*kvaddr);
*kvaddr = 0;
*kvaddr = NULL;
}
if (*page) {
__free_page(*page);
*page = 0;
*page = NULL;
}
}
@@ -606,11 +605,11 @@ static void gk20a_remove_support(struct platform_device *dev)
if (g->regs) {
iounmap(g->regs);
g->regs = 0;
g->regs = NULL;
}
if (g->bar1) {
iounmap(g->bar1);
g->bar1 = 0;
g->bar1 = NULL;
}
}
@@ -1063,11 +1062,11 @@ struct channel_gk20a *gk20a_get_channel_from_file(int fd)
struct channel_gk20a *ch;
struct file *f = fget(fd);
if (!f)
return 0;
return NULL;
if (f->f_op != &gk20a_channel_ops) {
fput(f);
return 0;
return NULL;
}
ch = (struct channel_gk20a *)f->private_data;
@@ -1119,7 +1118,7 @@ static void gk20a_pm_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM
const struct dev_pm_ops gk20a_pm_ops = {
static const struct dev_pm_ops gk20a_pm_ops = {
#if defined(CONFIG_PM_RUNTIME) && !defined(CONFIG_PM_GENERIC_DOMAINS)
.runtime_resume = gk20a_pm_enable_clk,
.runtime_suspend = gk20a_pm_disable_clk,
@@ -1261,7 +1260,7 @@ static int gk20a_pm_init(struct platform_device *dev)
return err;
}
int gk20a_secure_page_alloc(struct platform_device *pdev)
static int gk20a_secure_page_alloc(struct platform_device *pdev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
int err = 0;
@@ -1466,13 +1465,15 @@ static int __exit gk20a_remove(struct platform_device *dev)
gk20a_user_deinit(dev);
set_gk20a(dev, 0);
set_gk20a(dev, NULL);
#ifdef CONFIG_DEBUG_FS
debugfs_remove(g->debugfs_ltc_enabled);
debugfs_remove(g->debugfs_gr_idle_timeout_default);
debugfs_remove(g->debugfs_timeouts_enabled);
#endif
gk20a_remove_sysfs(&dev->dev);
kfree(g);
#ifdef CONFIG_PM_RUNTIME


@@ -715,6 +715,7 @@ static inline int support_gk20a_pmu(struct platform_device *dev)
}
void gk20a_create_sysfs(struct platform_device *dev);
void gk20a_remove_sysfs(struct device *dev);
#define GK20A_BAR0_IORESOURCE_MEM 0
#define GK20A_BAR1_IORESOURCE_MEM 1


@@ -29,7 +29,7 @@ struct gating_desc {
u32 disable;
};
/* slcg gr */
const struct gating_desc gk20a_slcg_gr[] = {
static const struct gating_desc gk20a_slcg_gr[] = {
{.addr = 0x004041f4, .prod = 0x00000000, .disable = 0x03fffffe},
{.addr = 0x00409894, .prod = 0x00000040, .disable = 0x0003fffe},
{.addr = 0x004078c4, .prod = 0x00000000, .disable = 0x000001fe},
@@ -97,7 +97,7 @@ const struct gating_desc gk20a_slcg_gr[] = {
};
/* slcg perf */
const struct gating_desc gk20a_slcg_perf[] = {
static const struct gating_desc gk20a_slcg_perf[] = {
{.addr = 0x001be018, .prod = 0x000001ff, .disable = 0x00000000},
{.addr = 0x001bc018, .prod = 0x000001ff, .disable = 0x00000000},
{.addr = 0x001b8018, .prod = 0x000001ff, .disable = 0x00000000},
@@ -105,7 +105,7 @@ const struct gating_desc gk20a_slcg_perf[] = {
};
/* blcg gr */
const struct gating_desc gk20a_blcg_gr[] = {
static const struct gating_desc gk20a_blcg_gr[] = {
{.addr = 0x004041f0, .prod = 0x00004046, .disable = 0x00000000},
{.addr = 0x00409890, .prod = 0x0000007f, .disable = 0x00000000},
{.addr = 0x004098b0, .prod = 0x0000007f, .disable = 0x00000000},
@@ -185,7 +185,7 @@ const struct gating_desc gk20a_blcg_gr[] = {
};
/* pg gr */
const struct gating_desc gk20a_pg_gr[] = {
static const struct gating_desc gk20a_pg_gr[] = {
{.addr = 0x004041f8, .prod = 0x10940000, .disable = 0x00000000},
{.addr = 0x004041fc, .prod = 0xff00a725, .disable = 0x00000000},
{.addr = 0x00409898, .prod = 0x10140000, .disable = 0x00000000},
@@ -291,7 +291,7 @@ const struct gating_desc gk20a_pg_gr[] = {
};
/* therm gr */
const struct gating_desc gk20a_slcg_therm[] = {
static const struct gating_desc gk20a_slcg_therm[] = {
{.addr = 0x000206b8, .prod = 0x00000000, .disable = 0x0000000f},
};


@@ -96,7 +96,7 @@ static int gr_gk20a_get_netlist_name(int index, char *name)
return -1;
}
bool gr_gk20a_is_firmware_defined(void)
static bool gr_gk20a_is_firmware_defined(void)
{
#ifdef GK20A_NETLIST_IMAGE_FW_NAME
return true;


@@ -491,7 +491,7 @@ struct fecs_method_op_gk20a {
};
int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
static int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
struct fecs_method_op_gk20a op)
{
struct gr_gk20a *gr = &g->gr;
@@ -524,7 +524,7 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
return ret;
}
int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret)
static int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret)
{
return gr_gk20a_submit_fecs_method_op(g,
(struct fecs_method_op_gk20a) {
@@ -544,14 +544,16 @@ int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret)
int gr_gk20a_disable_ctxsw(struct gk20a *g)
{
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_stop_ctxsw_v(), 0);
return gr_gk20a_ctrl_ctxsw(g,
gr_fecs_method_push_adr_stop_ctxsw_v(), NULL);
}
/* Start processing (continue) context switches at FECS */
int gr_gk20a_enable_ctxsw(struct gk20a *g)
{
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_start_ctxsw_v(), 0);
return gr_gk20a_ctrl_ctxsw(g,
gr_fecs_method_push_adr_start_ctxsw_v(), NULL);
}
@@ -2105,7 +2107,7 @@ void gr_gk20a_load_ctxsw_ucode_boot(struct gk20a *g, u64 addr_base,
gr_fecs_bootvec_vec_f(segments->boot_entry));
}
int gr_gk20a_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
static int gr_gk20a_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset)
{
gk20a_writel(g, reg_offset + gr_fecs_dmactl_r(),
@@ -2176,7 +2178,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
gk20a_dbg_fn("");
ret = gr_gk20a_ctx_wait_ucode(g, 0, 0,
ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL,
GR_IS_UCODE_OP_EQUAL,
eUcodeHandshakeInitComplete,
GR_IS_UCODE_OP_SKIP, 0);
@@ -3794,7 +3796,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
return 0;
}
int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
{
int i, ret;
@@ -4453,7 +4455,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
return -ETIMEDOUT;
}
int gr_gk20a_init_ctxsw(struct gk20a *g)
static int gr_gk20a_init_ctxsw(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
u32 err = 0;
@@ -4481,7 +4483,7 @@ out:
return 0;
}
int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
{
struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load;
unsigned long end_jiffies = jiffies +
@@ -5859,7 +5861,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
u32 *priv_offset);
/* This function will decode a priv address and return the partition type and numbers. */
int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr,
static int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr,
int *addr_type, /* enum ctxsw_addr_type */
u32 *gpc_num, u32 *tpc_num, u32 *ppc_num, u32 *be_num,
u32 *broadcast_flags)
@@ -6154,7 +6156,7 @@ static void init_sm_dsm_reg_info(void)
* which makes it impossible to know externally whether a ctx
* write will actually occur. so later we should put a lazy,
* map-and-hold system in the patch write state */
int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
struct channel_ctx_gk20a *ch_ctx,
u32 addr, u32 data,
u8 *context)


@@ -28,7 +28,7 @@
#include "clk_gk20a.h"
#include "regops_gk20a.h"
struct gpu_ops gk20a_ops = {
static struct gpu_ops gk20a_ops = {
.clock_gating = {
.slcg_gr_load_gating_prod =
gr_gk20a_slcg_gr_load_gating_prod,


@@ -181,7 +181,7 @@ static void gk20a_ltc_init_fs_state(struct gk20a *g)
g->max_ltc_count = g->ltc_count = 1;
}
void gk20a_ltc_isr(struct gk20a *g)
static void gk20a_ltc_isr(struct gk20a *g)
{
u32 intr;


@@ -263,7 +263,7 @@ static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
return 0;
}
void gk20a_remove_mm_support(struct mm_gk20a *mm)
static void gk20a_remove_mm_support(struct mm_gk20a *mm)
{
struct gk20a *g = mm->g;
struct device *d = dev_from_gk20a(g);
@@ -405,7 +405,7 @@ err_out:
return -ENOMEM;
}
void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle,
static void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle,
struct sg_table *sgt, u32 order,
size_t size)
{
@@ -415,7 +415,7 @@ void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle,
kfree(sgt);
}
int map_gmmu_phys_pages(void *handle, struct sg_table *sgt,
static int map_gmmu_phys_pages(void *handle, struct sg_table *sgt,
void **va, size_t size)
{
FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
@@ -423,7 +423,7 @@ int map_gmmu_phys_pages(void *handle, struct sg_table *sgt,
return 0;
}
void unmap_gmmu_phys_pages(void *handle, struct sg_table *sgt, void *va)
static void unmap_gmmu_phys_pages(void *handle, struct sg_table *sgt, void *va)
{
FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
}
@@ -913,7 +913,7 @@ static struct mapped_buffer_node *find_mapped_buffer_reverse_locked(
return mapped_buffer;
node = rb_next(&mapped_buffer->node);
}
return 0;
return NULL;
}
static struct mapped_buffer_node *find_mapped_buffer_locked(
@@ -931,7 +931,7 @@ static struct mapped_buffer_node *find_mapped_buffer_locked(
else
return mapped_buffer;
}
return 0;
return NULL;
}
static struct mapped_buffer_node *find_mapped_buffer_range_locked(
@@ -948,7 +948,7 @@ static struct mapped_buffer_node *find_mapped_buffer_range_locked(
else
node = node->rb_right;
}
return 0;
return NULL;
}
#define BFR_ATTRS (sizeof(nvmap_bfr_param)/sizeof(nvmap_bfr_param[0]))
@@ -1177,7 +1177,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
/* unmap here needs to know the page size we assigned at mapping */
err = update_gmmu_ptes_locked(vm,
pgsz_idx,
0, /* n/a for unmap */
NULL, /* n/a for unmap */
0,
vaddr,
vaddr + size - 1,
@@ -1209,7 +1209,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
bool user_mapped,
int rw_flag)
{
struct mapped_buffer_node *mapped_buffer = 0;
struct mapped_buffer_node *mapped_buffer = NULL;
mapped_buffer =
find_mapped_buffer_reverse_locked(&vm->mapped_buffers,
@@ -1278,7 +1278,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
struct gk20a *g = gk20a_from_vm(vm);
struct gk20a_allocator *ctag_allocator = &g->gr.comp_tags;
struct device *d = dev_from_vm(vm);
struct mapped_buffer_node *mapped_buffer = 0;
struct mapped_buffer_node *mapped_buffer = NULL;
bool inserted = false, va_allocated = false;
u32 gmmu_page_size = 0;
u64 map_offset = 0;
@@ -1991,7 +1991,7 @@ static int gk20a_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
return gk20a_vm_put_empty(vm, vaddr, num_pages, pgsz_idx);
}
void gk20a_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
static void gk20a_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
u64 size, u32 pgsz_idx) {
struct gk20a *g = vm->mm->g;
@@ -2536,7 +2536,7 @@ int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
ch->vm = vm;
err = channel_gk20a_commit_va(ch);
if (err)
ch->vm = 0;
ch->vm = NULL;
return err;
}


@@ -37,7 +37,7 @@
#define TEGRA_DDR4_BW_PER_FREQ 16
extern struct device tegra_vpr_dev;
struct gk20a_platform t132_gk20a_tegra_platform;
static struct gk20a_platform t132_gk20a_tegra_platform;
struct gk20a_emc_params {
long bw_ratio;
@@ -142,7 +142,7 @@ fail:
* This function returns the minimum emc clock based on gpu frequency
*/
long gk20a_tegra_get_emc_rate(struct gk20a *g,
static long gk20a_tegra_get_emc_rate(struct gk20a *g,
struct gk20a_emc_params *emc_params, long freq)
{
long hz;
@@ -197,7 +197,7 @@ static void gk20a_tegra_prescale(struct platform_device *pdev)
*
*/
void gk20a_tegra_calibrate_emc(struct platform_device *pdev,
static void gk20a_tegra_calibrate_emc(struct platform_device *pdev,
struct gk20a_emc_params *emc_params)
{
enum tegra_chipid cid = tegra_get_chipid();
@@ -271,7 +271,7 @@ static int gk20a_tegra_unrailgate(struct platform_device *pdev)
return ret;
}
struct {
static struct {
char *name;
unsigned long default_rate;
} tegra_gk20a_clocks[] = {
@@ -472,7 +472,7 @@ static int gk20a_tegra_suspend(struct device *dev)
return 0;
}
struct gk20a_platform t132_gk20a_tegra_platform = {
static struct gk20a_platform t132_gk20a_tegra_platform = {
.has_syncpoints = true,
/* power management configuration */


@@ -201,7 +201,7 @@ static void set_pmu_cmdline_args_falctracesize_v1(
pmu->args_v1.falc_trace_size = size;
}
int find_hex_in_string(char *strings, struct gk20a *g)
static int find_hex_in_string(char *strings, struct gk20a *g)
{
u32 i = 0, j = strlen(strings);
for (; i < j; i++) {
@@ -212,7 +212,7 @@ int find_hex_in_string(char *strings, struct gk20a *g)
return 0xFF;
}
void printtrace(struct pmu_gk20a *pmu)
static void printtrace(struct pmu_gk20a *pmu)
{
u32 i = 0, j = 0, k, l, m, count;
char *trace = pmu->trace_buf.cpuva;
@@ -1926,7 +1926,7 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu)
gk20a_allocator_destroy(&pmu->dmem);
}
int gk20a_init_pmu_reset_enable_hw(struct gk20a *g)
static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g)
{
struct pmu_gk20a *pmu = &g->pmu;
@@ -2022,7 +2022,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
return err;
}
int gk20a_init_pmu_setup_sw(struct gk20a *g)
static int gk20a_init_pmu_setup_sw(struct gk20a *g)
{
struct pmu_gk20a *pmu = &g->pmu;
struct mm_gk20a *mm = &g->mm;
@@ -2219,7 +2219,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
}
}
int gk20a_init_pmu_setup_hw1(struct gk20a *g)
static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
{
struct pmu_gk20a *pmu = &g->pmu;
int err;


@@ -577,7 +577,7 @@ static bool check_whitelists(struct dbg_session_gk20a *dbg_s,
if (!dbg_s->ch) {
gk20a_err(dbg_s->dev, "can't perform ctx regop unless bound");
op->status = REGOP(STATUS_UNSUPPORTED_OP);
return -ENODEV;
return valid;
}
/* binary search context list */
@@ -697,67 +697,68 @@ bool is_bar0_global_offset_whitelisted_gk20a(struct gk20a *g, u32 offset)
return valid;
}
const struct regop_offset_range *gk20a_get_global_whitelist_ranges(void)
static const struct regop_offset_range *gk20a_get_global_whitelist_ranges(void)
{
return gk20a_global_whitelist_ranges;
}
int gk20a_get_global_whitelist_ranges_count(void)
static int gk20a_get_global_whitelist_ranges_count(void)
{
return gk20a_global_whitelist_ranges_count;
}
const struct regop_offset_range *gk20a_get_context_whitelist_ranges(void)
static const struct regop_offset_range *gk20a_get_context_whitelist_ranges(void)
{
return gk20a_context_whitelist_ranges;
}
int gk20a_get_context_whitelist_ranges_count(void)
static int gk20a_get_context_whitelist_ranges_count(void)
{
return gk20a_context_whitelist_ranges_count;
}
const u32 *gk20a_get_runcontrol_whitelist(void)
static const u32 *gk20a_get_runcontrol_whitelist(void)
{
return gk20a_runcontrol_whitelist;
}
int gk20a_get_runcontrol_whitelist_count(void)
static int gk20a_get_runcontrol_whitelist_count(void)
{
return gk20a_runcontrol_whitelist_count;
}
const struct regop_offset_range *gk20a_get_runcontrol_whitelist_ranges(void)
static const
struct regop_offset_range *gk20a_get_runcontrol_whitelist_ranges(void)
{
return gk20a_runcontrol_whitelist_ranges;
}
int gk20a_get_runcontrol_whitelist_ranges_count(void)
static int gk20a_get_runcontrol_whitelist_ranges_count(void)
{
return gk20a_runcontrol_whitelist_ranges_count;
}
const u32 *gk20a_get_qctl_whitelist(void)
static const u32 *gk20a_get_qctl_whitelist(void)
{
return gk20a_qctl_whitelist;
}
int gk20a_get_qctl_whitelist_count(void)
static int gk20a_get_qctl_whitelist_count(void)
{
return gk20a_qctl_whitelist_count;
}
const struct regop_offset_range *gk20a_get_qctl_whitelist_ranges(void)
static const struct regop_offset_range *gk20a_get_qctl_whitelist_ranges(void)
{
return gk20a_qctl_whitelist_ranges;
}
int gk20a_get_qctl_whitelist_ranges_count(void)
static int gk20a_get_qctl_whitelist_ranges_count(void)
{
return gk20a_qctl_whitelist_ranges_count;
}
int gk20a_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
static int gk20a_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
{
/* The following regops are a hack/war to make up for the fact that we
* just scribbled into the ctxsw image w/o really knowing whether


@@ -55,13 +55,13 @@ static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm);
/*Globals*/
static void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
get_ucode_details pmu_acr_supp_ucode_list[] = {
static get_ucode_details pmu_acr_supp_ucode_list[] = {
pmu_ucode_details,
fecs_ucode_details,
};
/*Once is LS mode, cpuctl_alias is only accessible*/
void start_gm20b_pmu(struct gk20a *g)
static void start_gm20b_pmu(struct gk20a *g)
{
/*disable irqs for hs falcon booting as we will poll for halt*/
mutex_lock(&g->pmu.isr_mutex);
@@ -272,7 +272,7 @@ int prepare_ucode_blob(struct gk20a *g)
return 0;
}
u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr *plsfm,
static u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr *plsfm,
u32 falcon_id)
{
return (plsfm->disable_mask >> falcon_id) & 0x1;
@@ -364,7 +364,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
}
int pmu_populate_loader_cfg(struct gk20a *g,
static int pmu_populate_loader_cfg(struct gk20a *g,
struct lsfm_managed_ucode_img *lsfm,
union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size)
{
@@ -431,7 +431,7 @@ int pmu_populate_loader_cfg(struct gk20a *g,
return 0;
}
int flcn_populate_bl_dmem_desc(struct gk20a *g,
static int flcn_populate_bl_dmem_desc(struct gk20a *g,
struct lsfm_managed_ucode_img *lsfm,
union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size)
{
@@ -1019,7 +1019,7 @@ err_release_acr_fw:
return err;
}
u8 pmu_is_debug_mode_en(struct gk20a *g)
static u8 pmu_is_debug_mode_en(struct gk20a *g)
{
u32 ctl_stat = gk20a_readl(g, pwr_pmu_scpctl_stat_r());
return pwr_pmu_scpctl_stat_debug_mode_v(ctl_stat);
@@ -1125,8 +1125,8 @@ static int bl_bootstrap(struct pmu_gk20a *pmu,
return 0;
}
int gm20b_init_pmu_setup_hw1(struct gk20a *g, struct flcn_bl_dmem_desc *desc,
u32 bl_sz)
static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
struct flcn_bl_dmem_desc *desc, u32 bl_sz)
{
struct pmu_gk20a *pmu = &g->pmu;


@@ -64,7 +64,7 @@ static bool gm20b_kind_zbc(u8 k)
k <= gmmu_pte_kind_s8_2s_v());
}
void gm20b_init_kind_attr(void)
static void gm20b_init_kind_attr(void)
{
u16 k;


@@ -30,17 +30,17 @@ struct gating_desc {
u32 disable;
};
/* slcg bus */
const struct gating_desc gm20b_slcg_bus[] = {
static const struct gating_desc gm20b_slcg_bus[] = {
{.addr = 0x00001c04, .prod = 0x00000000, .disable = 0x000003fe},
};
/* slcg ce2 */
const struct gating_desc gm20b_slcg_ce2[] = {
static const struct gating_desc gm20b_slcg_ce2[] = {
{.addr = 0x00106f28, .prod = 0x00000000, .disable = 0x000007fe},
};
/* slcg chiplet */
const struct gating_desc gm20b_slcg_chiplet[] = {
static const struct gating_desc gm20b_slcg_chiplet[] = {
{.addr = 0x0010c07c, .prod = 0x00000000, .disable = 0x00000007},
{.addr = 0x0010e07c, .prod = 0x00000000, .disable = 0x00000007},
{.addr = 0x0010d07c, .prod = 0x00000000, .disable = 0x00000007},
@@ -48,23 +48,23 @@ const struct gating_desc gm20b_slcg_chiplet[] = {
};
/* slcg ctxsw firmware */
const struct gating_desc gm20b_slcg_ctxsw_firmware[] = {
static const struct gating_desc gm20b_slcg_ctxsw_firmware[] = {
{.addr = 0x00005f00, .prod = 0x00020008, .disable = 0x0003fffe},
};
/* slcg fb */
const struct gating_desc gm20b_slcg_fb[] = {
static const struct gating_desc gm20b_slcg_fb[] = {
{.addr = 0x00100d14, .prod = 0xfffffffe, .disable = 0xfffffffe},
{.addr = 0x00100c9c, .prod = 0x000001fe, .disable = 0x000001fe},
};
/* slcg fifo */
const struct gating_desc gm20b_slcg_fifo[] = {
static const struct gating_desc gm20b_slcg_fifo[] = {
{.addr = 0x000026ac, .prod = 0x00000100, .disable = 0x0001fffe},
};
/* slcg gr */
const struct gating_desc gm20b_slcg_gr[] = {
static const struct gating_desc gm20b_slcg_gr[] = {
{.addr = 0x004041f4, .prod = 0x00000000, .disable = 0x03fffffe},
{.addr = 0x0040917c, .prod = 0x00020008, .disable = 0x0003fffe},
{.addr = 0x00409894, .prod = 0x00000000, .disable = 0x0003fffe},
@@ -126,13 +126,13 @@ const struct gating_desc gm20b_slcg_gr[] = {
};
/* slcg ltc */
const struct gating_desc gm20b_slcg_ltc[] = {
static const struct gating_desc gm20b_slcg_ltc[] = {
{.addr = 0x0017e050, .prod = 0x00000000, .disable = 0xfffffffe},
{.addr = 0x0017e35c, .prod = 0x00000000, .disable = 0xfffffffe},
};
/* slcg perf */
const struct gating_desc gm20b_slcg_perf[] = {
static const struct gating_desc gm20b_slcg_perf[] = {
{.addr = 0x001be018, .prod = 0x000001ff, .disable = 0x00000000},
{.addr = 0x001bc018, .prod = 0x000001ff, .disable = 0x00000000},
{.addr = 0x001b8018, .prod = 0x000001ff, .disable = 0x00000000},
@@ -140,12 +140,12 @@ const struct gating_desc gm20b_slcg_perf[] = {
};
/* slcg PriRing */
const struct gating_desc gm20b_slcg_priring[] = {
static const struct gating_desc gm20b_slcg_priring[] = {
{.addr = 0x001200a8, .prod = 0x00000000, .disable = 0x00000001},
};
/* slcg pwr_csb */
const struct gating_desc gm20b_slcg_pwr_csb[] = {
static const struct gating_desc gm20b_slcg_pwr_csb[] = {
{.addr = 0x0000017c, .prod = 0x00020008, .disable = 0x0003fffe},
{.addr = 0x00000e74, .prod = 0x00000000, .disable = 0x0000000f},
{.addr = 0x00000a74, .prod = 0x00000000, .disable = 0x00007ffe},
@@ -153,35 +153,35 @@ const struct gating_desc gm20b_slcg_pwr_csb[] = {
};
/* slcg pmu */
const struct gating_desc gm20b_slcg_pmu[] = {
static const struct gating_desc gm20b_slcg_pmu[] = {
{.addr = 0x0010a17c, .prod = 0x00020008, .disable = 0x0003fffe},
{.addr = 0x0010aa74, .prod = 0x00000000, .disable = 0x00007ffe},
{.addr = 0x0010ae74, .prod = 0x00000000, .disable = 0x0000000f},
};
/* therm gr */
const struct gating_desc gm20b_slcg_therm[] = {
static const struct gating_desc gm20b_slcg_therm[] = {
{.addr = 0x000206b8, .prod = 0x00000000, .disable = 0x0000000f},
};
/* slcg Xbar */
const struct gating_desc gm20b_slcg_xbar[] = {
static const struct gating_desc gm20b_slcg_xbar[] = {
{.addr = 0x0013cbe4, .prod = 0x00000000, .disable = 0x1ffffffe},
{.addr = 0x0013cc04, .prod = 0x00000000, .disable = 0x1ffffffe},
};
/* blcg bus */
const struct gating_desc gm20b_blcg_bus[] = {
static const struct gating_desc gm20b_blcg_bus[] = {
{.addr = 0x00001c00, .prod = 0x00000042, .disable = 0x00000000},
};
/* blcg ctxsw firmware */
const struct gating_desc gm20b_blcg_ctxsw_firmware[] = {
static const struct gating_desc gm20b_blcg_ctxsw_firmware[] = {
{.addr = 0x00022400, .prod = 0x00000000, .disable = 0x00000000},
};
/* blcg fb */
const struct gating_desc gm20b_blcg_fb[] = {
static const struct gating_desc gm20b_blcg_fb[] = {
{.addr = 0x00100d10, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00100d30, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00100d3c, .prod = 0x00000242, .disable = 0x00000000},
@@ -191,12 +191,12 @@ const struct gating_desc gm20b_blcg_fb[] = {
};
/* blcg fifo */
const struct gating_desc gm20b_blcg_fifo[] = {
static const struct gating_desc gm20b_blcg_fifo[] = {
{.addr = 0x000026a4, .prod = 0x0000c242, .disable = 0x00000000},
};
/* blcg gr */
const struct gating_desc gm20b_blcg_gr[] = {
static const struct gating_desc gm20b_blcg_gr[] = {
{.addr = 0x004041f0, .prod = 0x00004046, .disable = 0x00000000},
{.addr = 0x00409890, .prod = 0x0000007f, .disable = 0x00000000},
{.addr = 0x004098b0, .prod = 0x0000007f, .disable = 0x00000000},
@@ -261,7 +261,7 @@ const struct gating_desc gm20b_blcg_gr[] = {
};
/* blcg ltc */
const struct gating_desc gm20b_blcg_ltc[] = {
static const struct gating_desc gm20b_blcg_ltc[] = {
{.addr = 0x0017e030, .prod = 0x00000044, .disable = 0x00000000},
{.addr = 0x0017e040, .prod = 0x00000044, .disable = 0x00000000},
{.addr = 0x0017e3e0, .prod = 0x00000044, .disable = 0x00000000},
@@ -269,23 +269,23 @@ const struct gating_desc gm20b_blcg_ltc[] = {
};
/* blcg pwr_csb */
const struct gating_desc gm20b_blcg_pwr_csb[] = {
static const struct gating_desc gm20b_blcg_pwr_csb[] = {
{.addr = 0x00000a70, .prod = 0x00000045, .disable = 0x00000000},
};
/* blcg pmu */
const struct gating_desc gm20b_blcg_pmu[] = {
static const struct gating_desc gm20b_blcg_pmu[] = {
{.addr = 0x0010aa70, .prod = 0x00000045, .disable = 0x00000000},
};
/* blcg Xbar */
const struct gating_desc gm20b_blcg_xbar[] = {
static const struct gating_desc gm20b_blcg_xbar[] = {
{.addr = 0x0013cbe0, .prod = 0x00000042, .disable = 0x00000000},
{.addr = 0x0013cc00, .prod = 0x00000042, .disable = 0x00000000},
};
/* pg gr */
const struct gating_desc gm20b_pg_gr[] = {
static const struct gating_desc gm20b_pg_gr[] = {
};
/* static inline functions */
@@ -440,21 +440,6 @@ void gm20b_slcg_priring_load_gating_prod(struct gk20a *g,
}
}
void gm20b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
bool prod)
{
u32 i;
u32 size = sizeof(gm20b_slcg_pwr_csb) / sizeof(struct gating_desc);
for (i = 0; i < size; i++) {
if (prod)
gk20a_writel(g, gm20b_slcg_pwr_csb[i].addr,
gm20b_slcg_pwr_csb[i].prod);
else
gk20a_writel(g, gm20b_slcg_pwr_csb[i].addr,
gm20b_slcg_pwr_csb[i].disable);
}
}
void gm20b_slcg_pmu_load_gating_prod(struct gk20a *g,
bool prod)
{
@@ -620,21 +605,6 @@ void gm20b_blcg_pmu_load_gating_prod(struct gk20a *g,
}
}
void gm20b_blcg_xbar_load_gating_prod(struct gk20a *g,
bool prod)
{
u32 i;
u32 size = sizeof(gm20b_blcg_xbar) / sizeof(struct gating_desc);
for (i = 0; i < size; i++) {
if (prod)
gk20a_writel(g, gm20b_blcg_xbar[i].addr,
gm20b_blcg_xbar[i].prod);
else
gk20a_writel(g, gm20b_blcg_xbar[i].addr,
gm20b_blcg_xbar[i].disable);
}
}
void gr_gm20b_pg_gr_load_gating_prod(struct gk20a *g,
bool prod)
{


@@ -57,7 +57,7 @@ static int gr_gm20b_get_netlist_name(int index, char *name)
return -1;
}
bool gr_gm20b_is_firmware_defined(void)
static bool gr_gm20b_is_firmware_defined(void)
{
#ifdef GM20B_NETLIST_IMAGE_FW_NAME
return true;


@@ -342,7 +342,7 @@ static void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
}
}
void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
static void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
{
struct gr_gk20a *gr = &g->gr;
u32 gpc_index, ppc_index, stride, val;


@@ -34,7 +34,7 @@
#define FUSE_OPT_PRIV_SEC_DIS_0 0x264
#define PRIV_SECURITY_DISABLE 0x01
struct gpu_ops gm20b_ops = {
static struct gpu_ops gm20b_ops = {
.clock_gating = {
.slcg_bus_load_gating_prod =
gm20b_slcg_bus_load_gating_prod,


@@ -197,7 +197,7 @@ static void gm20b_ltc_init_fs_state(struct gk20a *g)
gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg);
}
void gm20b_ltc_isr(struct gk20a *g)
static void gm20b_ltc_isr(struct gk20a *g)
{
u32 mc_intr, ltc_intr;
int ltc, slice;
@@ -266,7 +266,7 @@ static void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g)
"g_elpg_flush too many retries");
}
u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
static u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
{
u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
if (val == 2) {
@@ -282,7 +282,7 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
/*
* Performs a full flush of the L2 cache.
*/
void gm20b_flush_ltc(struct gk20a *g)
static void gm20b_flush_ltc(struct gk20a *g)
{
u32 op_pending;
unsigned long now, timeout;


@@ -220,7 +220,7 @@ fail:
return ret;
}
void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
static void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
u64 size, u32 pgsz_idx) {
u64 vaddr_hi;
u32 pde_lo, pde_hi, pde_i;
@@ -253,14 +253,15 @@ void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
return;
}
bool gm20b_mm_mmu_debug_mode_enabled(struct gk20a *g)
static bool gm20b_mm_mmu_debug_mode_enabled(struct gk20a *g)
{
u32 debug_ctrl = gk20a_readl(g, gr_gpcs_pri_mmu_debug_ctrl_r());
return gr_gpcs_pri_mmu_debug_ctrl_debug_v(debug_ctrl) ==
gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v();
}
void gm20b_mm_set_big_page_size(struct gk20a *g, void *inst_ptr, int size)
static void gm20b_mm_set_big_page_size(struct gk20a *g,
void *inst_ptr, int size)
{
u32 val;


@@ -131,7 +131,7 @@ static struct pg_init_sequence_list _pginitseq_gm20b[] = {
{ 0x0010e040, 0x00000000},
};
int gm20b_pmu_setup_elpg(struct gk20a *g)
static int gm20b_pmu_setup_elpg(struct gk20a *g)
{
int ret = 0;
u32 reg_writes;
@@ -153,7 +153,7 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
return ret;
}
void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status)
{
gk20a_dbg_fn("");
@@ -166,7 +166,7 @@ void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
}
int gm20b_pmu_init_acr(struct gk20a *g)
static int gm20b_pmu_init_acr(struct gk20a *g)
{
struct pmu_gk20a *pmu = &g->pmu;
struct pmu_cmd cmd;
@@ -190,7 +190,7 @@ int gm20b_pmu_init_acr(struct gk20a *g)
return 0;
}
void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
static void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status)
{


@@ -434,67 +434,68 @@ static const struct regop_offset_range gm20b_qctl_whitelist_ranges[] = {
static const u32 gm20b_qctl_whitelist_ranges_count =
ARRAY_SIZE(gm20b_qctl_whitelist_ranges);
const struct regop_offset_range *gm20b_get_global_whitelist_ranges(void)
static const struct regop_offset_range *gm20b_get_global_whitelist_ranges(void)
{
return gm20b_global_whitelist_ranges;
}
int gm20b_get_global_whitelist_ranges_count(void)
static int gm20b_get_global_whitelist_ranges_count(void)
{
return gm20b_global_whitelist_ranges_count;
}
const struct regop_offset_range *gm20b_get_context_whitelist_ranges(void)
static const struct regop_offset_range *gm20b_get_context_whitelist_ranges(void)
{
return gm20b_context_whitelist_ranges;
}
int gm20b_get_context_whitelist_ranges_count(void)
static int gm20b_get_context_whitelist_ranges_count(void)
{
return gm20b_context_whitelist_ranges_count;
}
const u32 *gm20b_get_runcontrol_whitelist(void)
static const u32 *gm20b_get_runcontrol_whitelist(void)
{
return gm20b_runcontrol_whitelist;
}
int gm20b_get_runcontrol_whitelist_count(void)
static int gm20b_get_runcontrol_whitelist_count(void)
{
return gm20b_runcontrol_whitelist_count;
}
const struct regop_offset_range *gm20b_get_runcontrol_whitelist_ranges(void)
static const
struct regop_offset_range *gm20b_get_runcontrol_whitelist_ranges(void)
{
return gm20b_runcontrol_whitelist_ranges;
}
int gm20b_get_runcontrol_whitelist_ranges_count(void)
static int gm20b_get_runcontrol_whitelist_ranges_count(void)
{
return gm20b_runcontrol_whitelist_ranges_count;
}
const u32 *gm20b_get_qctl_whitelist(void)
static const u32 *gm20b_get_qctl_whitelist(void)
{
return gm20b_qctl_whitelist;
}
int gm20b_get_qctl_whitelist_count(void)
static int gm20b_get_qctl_whitelist_count(void)
{
return gm20b_qctl_whitelist_count;
}
const struct regop_offset_range *gm20b_get_qctl_whitelist_ranges(void)
static const struct regop_offset_range *gm20b_get_qctl_whitelist_ranges(void)
{
return gm20b_qctl_whitelist_ranges;
}
int gm20b_get_qctl_whitelist_ranges_count(void)
static int gm20b_get_qctl_whitelist_ranges_count(void)
{
return gm20b_qctl_whitelist_ranges_count;
}
int gm20b_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
static int gm20b_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
{
/* Not needed on gm20b */
return 0;