gpu: nvgpu: Fix signed comparison bugs

Fix small problems related to signed versus unsigned comparisons
throughout the driver. Bump up the warning level to prevent such
problems from occurring in the future.
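
As an illustration of the bug class (a made-up snippet, not code from
this driver), mixing a signed index with an unsigned bound promotes the
signed operand, so a negative value would silently become a huge
unsigned one:

	u32 count = n_entries;		/* hypothetical unsigned bound */
	int i;				/* signed loop index */

	for (i = 0; i < count; i++)	/* i is converted to u32 here */
		process(i);		/* process() is hypothetical */

gcc diagnoses each such comparison with -Wsign-compare (enabled by
-Wextra, presumably part of the warning-level bump this commit refers
to); most of the fixes below simply give the index or field the
signedness of the bound it is compared against.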

Change-Id: I8ff5efb419f664e8a2aedadd6515ae4d18502ae0
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1252068
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Terje Bergstrom
Date:      2016-11-09 15:53:16 -08:00
Committer: mobile promotions
Commit:    d29afd2c9e (parent: 5494e846c7)

38 changed files with 143 additions and 128 deletions


@@ -263,7 +263,7 @@ static int gk20a_as_ioctl_get_va_regions(
 	unsigned int write_entries;
 	struct nvgpu_as_va_region __user *user_region_ptr;
 	struct vm_gk20a *vm = as_share->vm;
-	int page_sizes = gmmu_page_size_kernel;
+	unsigned int page_sizes = gmmu_page_size_kernel;

 	gk20a_dbg_fn("");


@@ -47,7 +47,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct gk20a *g);
 static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
 {
-	int i;
+	unsigned int i;

 	for (i = 0; i < cde_ctx->num_bufs; i++) {
 		struct mem_desc *mem = cde_ctx->mem + i;
@@ -361,7 +361,8 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
 	struct mem_desc *target_mem;
 	u32 *target_mem_ptr;
 	u64 new_data;
-	int user_id = 0, i, err;
+	int user_id = 0, err;
+	unsigned int i;

 	for (i = 0; i < cde_ctx->num_params; i++) {
 		struct gk20a_cde_hdr_param *param = cde_ctx->params + i;
@@ -515,7 +516,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 {
 	struct nvgpu_gpfifo **gpfifo, *gpfifo_elem;
 	u32 *num_entries;
-	int i;
+	unsigned int i;

 	/* check command type */
 	if (op == TYPE_BUF_COMMAND_INIT) {
@@ -615,7 +616,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
 	struct gk20a_cde_hdr_elem *elem;
 	u32 min_size = 0;
 	int err = 0;
-	int i;
+	unsigned int i;

 	min_size += 2 * sizeof(u32);
 	if (img->size < min_size) {


@@ -67,8 +67,8 @@ struct gk20a_cde_hdr_replace {
 	u32 source_buf;
 	s32 shift;
 	u32 type;
-	s64 target_byte_offset;
-	s64 source_byte_offset;
+	u64 target_byte_offset;
+	u64 source_byte_offset;
 	u64 mask;
 };
@@ -113,7 +113,7 @@ struct gk20a_cde_hdr_param {
 	s32 shift;
 	u32 type;
 	s64 data_offset;
-	s64 target_byte_offset;
+	u64 target_byte_offset;
 	u64 mask;
 };
@@ -223,11 +223,11 @@ struct gk20a_cde_ctx {
 	/* buf converter configuration */
 	struct mem_desc mem[MAX_CDE_BUFS];
-	int num_bufs;
+	unsigned int num_bufs;

 	/* buffer patching params (where should patching be done) */
 	struct gk20a_cde_hdr_param params[MAX_CDE_PARAMS];
-	int num_params;
+	unsigned int num_params;

 	/* storage for user space parameter values */
 	u32 user_param_values[MAX_CDE_USER_PARAMS];


@@ -211,10 +211,10 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 	kfree(ce_ctx);
 }

-static inline int gk20a_ce_get_method_size(int request_operation)
+static inline unsigned int gk20a_ce_get_method_size(int request_operation)
 {
 	/* failure size */
-	int methodsize = ~0;
+	unsigned int methodsize = UINT_MAX;

 	if (request_operation & NVGPU_CE_PHYS_MODE_TRANSFER)
 		methodsize = 10 * 2 * sizeof(u32);
@@ -518,7 +518,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_ALLOCATED;

 end:
-	if (ctx_id == ~0) {
+	if (ctx_id == (u32)~0) {
 		mutex_lock(&ce_app->app_mutex);
 		gk20a_ce_delete_gpu_context(ce_ctx);
 		mutex_unlock(&ce_app->app_mutex);
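
The methodsize fix above is the sentinel variant of the same problem:
with "int methodsize = ~0" the stored value is really -1, so every
later comparison against an unsigned size mixes signedness. A minimal
sketch (hypothetical helper, not the driver function):

	static bool request_fits(u32 request_size)
	{
		unsigned int limit = UINT_MAX;	/* was: int limit = ~0, i.e. -1 */

		return request_size < limit;	/* unsigned vs. unsigned: clean */
	}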


@@ -223,7 +223,7 @@ u32 channel_gk20a_pbdma_acquire_val(struct channel_gk20a *c)
 {
 	u32 val, exp, man;
 	u64 timeout;
-	int val_len;
+	unsigned int val_len;

 	val = pbdma_acquire_retry_man_2_f() |
 		pbdma_acquire_retry_exp_2_f();
@@ -238,7 +238,7 @@ u32 channel_gk20a_pbdma_acquire_val(struct channel_gk20a *c)
 	val_len = fls(timeout >> 32) + 32;
 	if (val_len == 32)
 		val_len = fls(timeout);
-	if (val_len > 16 + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */
+	if (val_len > 16U + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */
 		exp = pbdma_acquire_timeout_exp_max_v();
 		man = pbdma_acquire_timeout_man_max_v();
 	} else if (val_len > 16) {
@@ -1618,7 +1618,8 @@ bool channel_gk20a_is_prealloc_enabled(struct channel_gk20a *c)
 static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 	       unsigned int num_jobs)
 {
-	int i, err;
+	unsigned int i;
+	int err;
 	size_t size;
 	struct priv_cmd_entry *entries = NULL;
@@ -3044,7 +3045,7 @@ const struct file_operations gk20a_event_id_ops = {
 };

 static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
-				int event_id,
+				u32 event_id,
 				struct gk20a_event_id_data **event_id_data)
 {
 	struct gk20a_event_id_data *local_event_id_data;
@@ -3069,7 +3070,7 @@ static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
 }

 void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-				       int event_id)
+				       u32 event_id)
 {
 	struct gk20a_event_id_data *event_id_data;
 	int err = 0;


@@ -188,7 +188,7 @@ struct channel_gk20a {
 	bool has_timedout;
 	u32 timeout_ms_max;
 	bool timeout_debug_dump;
-	u32 timeslice_us;
+	unsigned int timeslice_us;

 	struct dma_buf *error_notifier_ref;
 	struct nvgpu_notification *error_notifier;
@@ -309,11 +309,11 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
 		int timeslice_period,
 		int *__timeslice_timeout, int *__timeslice_scale);
 int gk20a_channel_set_priority(struct channel_gk20a *ch, u32 priority);
-int gk20a_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice);
+int gk20a_channel_set_timeslice(struct channel_gk20a *ch, unsigned int timeslice);
 int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
 					u32 level);
 void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-				       int event_id);
+				       u32 event_id);

 void gk20a_channel_setup_ramfc_for_privileged_channel(struct channel_gk20a *c);


@@ -192,7 +192,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 				       bool need_sync_fence)
 {
 	u32 thresh;
-	int incr_cmd_size;
+	size_t incr_cmd_size;
 	int off;
 	int err;
 	struct gk20a_channel_syncpt *sp =


@@ -581,7 +581,7 @@ static inline int get_timestamps_zipper(struct gk20a *g,
 		struct nvgpu_gpu_get_cpu_time_correlation_info_args *args)
 {
 	int err = 0;
-	int i = 0;
+	unsigned int i = 0;
 	u32 gpu_timestamp_hi_new = 0;
 	u32 gpu_timestamp_hi_old = 0;


@@ -177,7 +177,7 @@ void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	u32 chid;
-	int i;
+	unsigned int i;
 	struct ch_state **ch_state;


@@ -51,12 +51,12 @@ static void gk20a_fb_set_mmu_page_size(struct gk20a *g)
 	gk20a_writel(g, fb_mmu_ctrl_r(), fb_mmu_ctrl);
 }

-static int gk20a_fb_compression_page_size(struct gk20a *g)
+static unsigned int gk20a_fb_compression_page_size(struct gk20a *g)
 {
 	return SZ_128K;
 }

-static int gk20a_fb_compressible_page_size(struct gk20a *g)
+static unsigned int gk20a_fb_compressible_page_size(struct gk20a *g)
 {
 	return SZ_64K;
 }


@@ -114,14 +114,14 @@ int gk20a_fence_install_fd(struct gk20a_fence *f)
 #endif
 }

-int gk20a_alloc_fence_pool(struct channel_gk20a *c, int count)
+int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count)
 {
 	int err;
 	size_t size;
 	struct gk20a_fence *fence_pool = NULL;

 	size = sizeof(struct gk20a_fence);
-	if (count <= ULONG_MAX / size) {
+	if (count <= UINT_MAX / size) {
 		size = count * size;
 		fence_pool = vzalloc(size);
 	}
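
The ULONG_MAX -> UINT_MAX change keeps the overflow guard matched to
the new type of count: dividing first guarantees the later
multiplication stays within bounds. The idiom, sketched as a standalone
(hypothetical) helper:

	static void *alloc_pool(unsigned int count, size_t size)
	{
		/* If count <= UINT_MAX / size, then count * size is
		 * bounded by UINT_MAX and cannot overflow that limit. */
		if (count > UINT_MAX / size)
			return NULL;

		return vzalloc(count * size);
	}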


@@ -67,7 +67,7 @@ int gk20a_fence_from_syncpt(
 int gk20a_alloc_fence_pool(
 		struct channel_gk20a *c,
-		int size);
+		unsigned int count);
 void gk20a_free_fence_pool(
 		struct channel_gk20a *c);


@@ -252,7 +252,7 @@ bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id)
 static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
 {
-	u32 fault_id = ~0;
+	u32 fault_id = FIFO_INVAL_ENGINE_ID;
 	struct fifo_engine_info_gk20a *engine_info;

 	engine_info = gk20a_fifo_get_engine_info(g, engine_id);
@@ -312,7 +312,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 	u32 i;
 	u32 max_info_entries = top_device_info__size_1_v();
 	u32 engine_enum = ENGINE_INVAL_GK20A;
-	u32 engine_id = ~0;
+	u32 engine_id = FIFO_INVAL_ENGINE_ID;
 	u32 runlist_id = ~0;
 	u32 pbdma_id = ~0;
 	u32 intr_id = ~0;
@@ -428,7 +428,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g)
 {
 	u32 eng_intr_mask = 0;
-	int i;
+	unsigned int i;
 	u32 active_engine_id = 0;
 	u32 engine_enum = ENGINE_INVAL_GK20A;
@@ -588,7 +588,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 {
 	struct fifo_runlist_info_gk20a *runlist;
 	struct device *d = dev_from_gk20a(g);
-	s32 runlist_id = -1;
+	unsigned int runlist_id;
 	u32 i;
 	size_t runlist_size;
@@ -653,7 +653,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
 	u32 intr_stall;
 	u32 mask;
 	u32 timeout;
-	int i;
+	unsigned int i;
 	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
@@ -777,7 +777,8 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct device *d = dev_from_gk20a(g);
-	int chid, i, err = 0;
+	unsigned int chid, i;
+	int err = 0;

 	gk20a_dbg_fn("");
@@ -974,7 +975,7 @@ static struct channel_gk20a *
 channel_from_inst_ptr(struct fifo_gk20a *f, u64 inst_ptr)
 {
 	struct gk20a *g = f->g;
-	int ci;
+	unsigned int ci;

 	if (unlikely(!f->channel))
 		return NULL;
 	for (ci = 0; ci < f->num_channels; ci++) {
@@ -1461,7 +1462,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 			gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
 					"sm debugger attached,"
 					" deferring channel recovery to channel free");
-		} else if (engine_id != ~0) {
+		} else if (engine_id != FIFO_INVAL_ENGINE_ID) {
 			was_reset = mutex_is_locked(&g->fifo.gr_reset_mutex);
 			mutex_lock(&g->fifo.gr_reset_mutex);
 			/* if lock is already taken, a reset is taking place
@@ -1565,7 +1566,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g,
 		}

 		mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id);
-		if (mmu_id != ~0)
+		if (mmu_id != FIFO_INVAL_ENGINE_ID)
 			gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id),
 				     fifo_trigger_mmu_fault_id_f(mmu_id) |
 				     fifo_trigger_mmu_fault_enable_f(1));
@@ -1595,7 +1596,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g,
 static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
 {
-	int i;
+	unsigned int i;
 	u32 engines = 0;

 	for (i = 0; i < g->fifo.num_engines; i++) {
@@ -1712,7 +1713,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
 		for_each_set_bit(engine_id, &engine_ids, 32) {
 			u32 mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id);

-			if (mmu_id != ~0)
+			if (mmu_id != FIFO_INVAL_ENGINE_ID)
 				mmu_fault_engines |= BIT(mmu_id);
 		}
 	} else {
@@ -1736,7 +1737,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
 			u32 mmu_id = gk20a_engine_id_to_mmu_id(g, active_engine_id);

 			engine_ids |= BIT(active_engine_id);
-			if (mmu_id != ~0)
+			if (mmu_id != FIFO_INVAL_ENGINE_ID)
 				mmu_fault_engines |= BIT(mmu_id);
 		}
 	}
@@ -2063,7 +2064,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
 		&& print_channel_reset_log;

 	if (print_channel_reset_log) {
-		int engine_id;
+		unsigned int engine_id;
 		gk20a_err(dev_from_gk20a(g),
 			   "channel reset initiated from %s; intr=0x%08x",
 			   __func__, fifo_intr);
@@ -2497,7 +2498,7 @@ int gk20a_fifo_enable_engine_activity(struct gk20a *g,
 int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
 {
-	int i;
+	unsigned int i;
 	int err = 0, ret = 0;

 	for (i = 0; i < g->fifo.num_engines; i++) {
@@ -2519,7 +2520,8 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 			    bool wait_for_idle)
 {
 	u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat;
-	u32 pbdma_chid = ~0, engine_chid = ~0, disable;
+	u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
+	u32 engine_chid = FIFO_INVAL_CHANNEL_ID, disable;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	u32 mutex_ret;
 	u32 err = 0;
@@ -2551,7 +2553,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 	    chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v())
 		pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat);

-	if (pbdma_chid != ~0) {
+	if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) {
 		err = g->ops.fifo.preempt_channel(g, pbdma_chid);
 		if (err)
 			goto clean_up;
@@ -2567,7 +2569,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 		 ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v())
 		engine_chid = fifo_engine_status_next_id_v(eng_stat);

-	if (engine_chid != ~0 && engine_chid != pbdma_chid) {
+	if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) {
 		err = g->ops.fifo.preempt_channel(g, engine_chid);
 		if (err)
 			goto clean_up;
@@ -2591,7 +2593,7 @@ clean_up:
 int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
 				bool wait_for_idle)
 {
-	int i;
+	unsigned int i;
 	int err = 0, ret = 0;
 	u32 active_engine_id;
@@ -2609,7 +2611,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
 	}

 	if (err) {
-		while (--i >= 0) {
+		while (i-- != 0) {
 			active_engine_id = g->fifo.active_engines_list[i];
 			err = gk20a_fifo_enable_engine_activity(g,
 					&g->fifo.engine_info[active_engine_id]);
@@ -2626,7 +2628,7 @@ static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	u32 engines = 0;
-	int i;
+	unsigned int i;

 	for (i = 0; i < f->num_engines; i++) {
 		u32 active_engine_id = g->fifo.active_engines_list[i];
@@ -2852,7 +2854,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 					    u32 hw_chid, bool add,
 					    bool wait_for_finish)
 {
-	u32 ret = 0;
+	int ret = 0;
 	struct fifo_gk20a *f = &g->fifo;
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	u32 *runlist_entry_base = NULL;
@@ -2867,7 +2869,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	/* valid channel, add/remove it from active list.
 	   Otherwise, keep active list untouched for suspend/resume. */
-	if (hw_chid != ~0) {
+	if (hw_chid != FIFO_INVAL_CHANNEL_ID) {
 		ch = &f->channel[hw_chid];
 		if (gk20a_is_channel_marked_as_tsg(ch))
 			tsg = &f->tsg[ch->tsgid];
@@ -2909,7 +2911,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 		goto clean_up;
 	}

-	if (hw_chid != ~0 || /* add/remove a valid channel */
+	if (hw_chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */
 	    add /* resume to add all channels back */) {
 		u32 max_entries = f->num_runlist_entries;
 		u32 *runlist_end;
@@ -3055,7 +3057,7 @@ bool gk20a_fifo_mmu_fault_pending(struct gk20a *g)
 bool gk20a_fifo_is_engine_busy(struct gk20a *g)
 {
-	int i;
+	unsigned int i;

 	for (i = 0; i < fifo_engine_status__size_1_v(); i++) {
 		u32 status = gk20a_readl(g, fifo_engine_status_r(i));
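
The unwind loop in gk20a_fifo_disable_all_engine_activity is the one
spot where flipping the index to unsigned needed a rewrite rather than
a retype: "while (--i >= 0)" can never become false for an unsigned i,
because the decrement wraps to UINT_MAX instead of going negative. A
sketch of the corrected form (undo() is hypothetical):

	static void unwind(unsigned int i)
	{
		/* The test runs before the decrement, so the body sees
		 * i-1 down to 0 and the loop stops once i reaches 0. */
		while (i-- != 0)
			undo(i);
	}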


@@ -26,7 +26,9 @@
 #define MAX_RUNLIST_BUFFERS	2

-#define FIFO_INVAL_ENGINE_ID	~0
+#define FIFO_INVAL_ENGINE_ID	((u32)~0)
+#define FIFO_INVAL_CHANNEL_ID	((u32)~0)
+#define FIFO_INVAL_TSG_ID	((u32)~0)

 /* generally corresponds to the "pbdma" engine */
@@ -96,11 +98,11 @@ struct fifo_engine_info_gk20a {
 struct fifo_gk20a {
 	struct gk20a *g;
-	int num_channels;
-	int runlist_entry_size;
-	int num_runlist_entries;
+	unsigned int num_channels;
+	unsigned int runlist_entry_size;
+	unsigned int num_runlist_entries;

-	int num_pbdma;
+	unsigned int num_pbdma;
 	u32 *pbdma_map;

 	struct fifo_engine_info_gk20a *engine_info;
@@ -114,7 +116,7 @@ struct fifo_gk20a {
 	struct mem_desc userd;
 	u32 userd_entry_size;

-	int used_channels;
+	unsigned int used_channels;
 	struct channel_gk20a *channel;
 	/* zero-kref'd channels here */
 	struct list_head free_chs;
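
Casting the sentinels in the header is what lets comparisons such as
"engine_id != FIFO_INVAL_ENGINE_ID" compile cleanly once the IDs are
u32: a bare ~0 has type int and value -1, so every comparison against a
u32 mixes signedness. An illustrative sketch (macro names invented):

	#define BAD_INVAL	~0		/* type int, value -1 */
	#define GOOD_INVAL	((u32)~0)	/* type u32, value 0xffffffff */

	static bool is_valid(u32 engine_id)
	{
		/* engine_id != BAD_INVAL would compare u32 with int and
		 * trip -Wsign-compare, even though the converted value
		 * is the same 0xffffffff. */
		return engine_id != GOOD_INVAL;
	}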


@@ -322,8 +322,8 @@ struct gpu_ops {
 		void (*init_kind_attr)(struct gk20a *g);
 		void (*set_mmu_page_size)(struct gk20a *g);
 		bool (*set_use_full_comp_tag_line)(struct gk20a *g);
-		int (*compression_page_size)(struct gk20a *g);
-		int (*compressible_page_size)(struct gk20a *g);
+		unsigned int (*compression_page_size)(struct gk20a *g);
+		unsigned int (*compressible_page_size)(struct gk20a *g);
 		void (*dump_vpr_wpr_info)(struct gk20a *g);
 	} fb;
 	struct {


@@ -842,7 +842,7 @@ static const struct gk20a_allocator_ops page_ops = {
 static int gk20a_page_alloc_init_slabs(struct gk20a_page_allocator *a)
 {
 	size_t nr_slabs = ilog2(a->page_size >> 12);
-	int i;
+	unsigned int i;

 	a->slabs = kcalloc(nr_slabs,
 			   sizeof(struct page_alloc_slab),


@@ -124,7 +124,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
 void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 {
-	int i;
+	unsigned int i;

 	gk20a_err(dev_from_gk20a(g), "gr_fecs_os_r : %d",
 		gk20a_readl(g, gr_fecs_os_r()));
@@ -1395,9 +1395,9 @@ int gr_gk20a_init_fs_state(struct gk20a *g)
 	fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0);
 	if (g->tpc_fs_mask_user &&
-		fuse_tpc_mask == (0x1 << gr->max_tpc_count) - 1) {
+		fuse_tpc_mask == (0x1U << gr->max_tpc_count) - 1U) {
 		u32 val = g->tpc_fs_mask_user;
-		val &= (0x1 << gr->max_tpc_count) - 1;
+		val &= (0x1U << gr->max_tpc_count) - 1U;
 		gk20a_writel(g, gr_cwd_fs_r(),
 			gr_cwd_fs_num_gpcs_f(gr->gpc_count) |
 			gr_cwd_fs_num_tpcs_f(hweight32(val)));
@@ -1444,7 +1444,7 @@ static u32 gk20a_init_sw_bundle(struct gk20a *g)
 	struct av_list_gk20a *sw_bundle_init = &g->gr.ctx_vars.sw_bundle_init;
 	u32 last_bundle_data = 0;
 	u32 err = 0;
-	int i;
+	unsigned int i;
 	unsigned long end_jiffies = jiffies +
 		msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
@@ -2110,7 +2110,7 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
 	u32 *bootimage,
 	u32 *code, u32 *data)
 {
-	int i;
+	unsigned int i;

 	gk20a_mem_wr_n(g, dst, segments->boot.offset, bootimage,
 			segments->boot.size);
@@ -4048,7 +4048,8 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
 {
-	int i, ret;
+	unsigned int i;
+	int ret;

 	for (i = 0; i < gr->max_used_color_index; i++) {
 		struct zbc_color_table *c_tbl = &gr->zbc_col_tbl[i];
@@ -4898,7 +4899,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
 		DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size,
 			     PAGE_SIZE);
 	u32 *whitelist = NULL;
-	int num_entries = 0;
+	unsigned int num_entries = 0;

 	if (gk20a_mem_begin(g, mem)) {
 		gk20a_err(dev_from_gk20a(g),
@@ -6996,7 +6997,7 @@ static void gr_gk20a_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset)
 	gk20a_writel(g, gpc_tpc_addr, reg);
 }

-#define ILLEGAL_ID	(~0)
+#define ILLEGAL_ID	((u32)~0)

 static inline bool check_main_image_header_magic(u8 *context)
 {
@@ -8762,7 +8763,8 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
 	struct channel_gk20a *ch, u64 sms, bool enable)
 {
 	struct nvgpu_dbg_gpu_reg_op *ops;
-	int i = 0, sm_id, err;
+	unsigned int i = 0, sm_id;
+	int err;
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);


@@ -334,8 +334,8 @@ struct gr_gk20a {
 	s32 max_default_color_index;
 	s32 max_default_depth_index;
-	s32 max_used_color_index;
-	s32 max_used_depth_index;
+	u32 max_used_color_index;
+	u32 max_used_depth_index;

 #define GR_CHANNEL_MAP_TLB_SIZE	2 /* must of power of 2 */
 	struct gr_channel_map_tlb_entry chid_tlb[GR_CHANNEL_MAP_TLB_SIZE];


@@ -789,10 +789,10 @@ static void gk20a_remove_mm_ce_support(struct mm_gk20a *mm)
 	struct gk20a *g = gk20a_from_mm(mm);
 	struct gk20a_platform *platform = gk20a_get_platform(g->dev);

-	if (mm->vidmem.ce_ctx_id != ~0)
+	if (mm->vidmem.ce_ctx_id != (u32)~0)
 		gk20a_ce_delete_context(g->dev, mm->vidmem.ce_ctx_id);

-	mm->vidmem.ce_ctx_id = ~0;
+	mm->vidmem.ce_ctx_id = (u32)~0;

 	if (platform->has_ce)
 		gk20a_vm_remove_support_nofree(&mm->ce.vm);
@@ -836,7 +836,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
 	u64 region2_base = 0;
 	int err = 0;

-	if (mm->vidmem.ce_ctx_id == ~0)
+	if (mm->vidmem.ce_ctx_id == (u32)~0)
 		return -EINVAL;

 	err = gk20a_ce_execute_ops(g->dev,
@@ -989,7 +989,7 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
 	gk20a_init_pramin(mm);

-	mm->vidmem.ce_ctx_id = ~0;
+	mm->vidmem.ce_ctx_id = (u32)~0;

 	err = gk20a_init_vidmem(mm);
 	if (err)
@@ -1119,7 +1119,7 @@ int gk20a_init_mm_support(struct gk20a *g)
 void gk20a_init_mm_ce_context(struct gk20a *g)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
-	if (g->mm.vidmem.size && (g->mm.vidmem.ce_ctx_id == ~0)) {
+	if (g->mm.vidmem.size && (g->mm.vidmem.ce_ctx_id == (u32)~0)) {
 		g->mm.vidmem.ce_ctx_id =
 			gk20a_ce_create_context_with_cb(g->dev,
 				gk20a_fifo_get_fast_ce_runlist_id(g),
@@ -1128,7 +1128,7 @@ void gk20a_init_mm_ce_context(struct gk20a *g)
 				-1,
 				NULL);

-		if (g->mm.vidmem.ce_ctx_id == ~0)
+		if (g->mm.vidmem.ce_ctx_id == (u32)~0)
 			gk20a_err(g->dev,
 				"Failed to allocate CE context for vidmem page clearing support");
 	}
@@ -3021,7 +3021,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
 	struct page_alloc_chunk *chunk = NULL;
 	int err = 0;

-	if (g->mm.vidmem.ce_ctx_id == ~0)
+	if (g->mm.vidmem.ce_ctx_id == (u32)~0)
 		return -EINVAL;

 	alloc = get_vidmem_page_alloc(mem->sgt->sgl);


@@ -56,7 +56,7 @@ struct gk20a_platform {
 	bool has_syncpoints;

 	/* channel limit after which to start aggressive sync destroy */
-	int aggressive_sync_destroy_thresh;
+	unsigned int aggressive_sync_destroy_thresh;

 	/* flag to set sync destroy aggressiveness */
 	bool aggressive_sync_destroy;


@@ -58,8 +58,8 @@
 extern struct device tegra_vpr_dev;

 struct gk20a_emc_params {
-	long bw_ratio;
-	long freq_last_set;
+	unsigned long bw_ratio;
+	unsigned long freq_last_set;
 };

 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
@@ -217,7 +217,7 @@ static void gk20a_tegra_postscale(struct device *dev,
 	struct clk *emc_clk = platform->clk[2];
 	enum tegra_chipid chip_id = tegra_get_chip_id();
 	unsigned long emc_target;
-	long emc_freq_lower, emc_freq_upper, emc_freq_rounded;
+	unsigned long emc_freq_lower, emc_freq_upper, emc_freq_rounded;

 	emc_target = gk20a_tegra_get_emc_rate(g, emc_params);
@@ -234,8 +234,10 @@ static void gk20a_tegra_postscale(struct device *dev,
 		break;

 	case TEGRA_CHIPID_TEGRA21:
-		emc_freq_lower = tegra_emc_round_rate_updown(emc_target, false);
-		emc_freq_upper = tegra_emc_round_rate_updown(emc_target, true);
+		emc_freq_lower = (unsigned long)
+			tegra_emc_round_rate_updown(emc_target, false);
+		emc_freq_upper = (unsigned long)
+			tegra_emc_round_rate_updown(emc_target, true);

 		/* round to the nearest frequency step */
 		if (emc_target < (emc_freq_lower + emc_freq_upper) / 2)
@@ -645,7 +647,7 @@ static int gk20a_tegra_get_clocks(struct device *dev)
 {
 	struct gk20a_platform *platform = dev_get_drvdata(dev);
 	char devname[16];
-	int i;
+	unsigned int i;
 	int ret = 0;

 	BUG_ON(GK20A_CLKS_MAX < ARRAY_SIZE(tegra_gk20a_clocks));


@@ -2709,7 +2709,7 @@ static bool pmu_queue_has_room(struct pmu_gk20a *pmu,
 {
 	u32 head, tail;
 	bool rewind = false;
-	int free;
+	unsigned int free;

 	size = ALIGN(size, QUEUE_ALIGNMENT);
@@ -2955,7 +2955,8 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	struct device *d = dev_from_gk20a(g);
-	int i, err = 0;
+	unsigned int i;
+	int err = 0;
 	u8 *ptr;

 	gk20a_dbg_fn("");
@@ -4128,7 +4129,7 @@ static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu)
 void pmu_dump_falcon_stats(struct pmu_gk20a *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
-	int i;
+	unsigned int i;

 	gk20a_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d",
 		gk20a_readl(g, pwr_falcon_os_r()));


@@ -35,7 +35,7 @@ static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
 	if (key < prange->base)
 		return -1;
 	else if (prange->base <= key && key < (prange->base +
-					       (prange->count * 4)))
+					       (prange->count * 4U)))
 		return 0;
 	return 1;
 }
@@ -379,7 +379,8 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 		      struct nvgpu_dbg_gpu_reg_op *ops,
 		      u64 num_ops)
 {
-	int err = 0, i;
+	int err = 0;
+	unsigned int i;
 	struct channel_gk20a *ch = NULL;
 	struct gk20a *g = dbg_s->g;
 	/*struct gr_gk20a *gr = &g->gr;*/
@@ -799,7 +800,8 @@ static int gk20a_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
 	 * it was already swapped out in/out once or not, etc.
 	 */
 	struct nvgpu_dbg_gpu_reg_op ops[4];
-	int i;
+	unsigned int i;
+
 	for (i = 0; i < ARRAY_SIZE(ops); i++) {
 		ops[i].op = REGOP(WRITE_32);
 		ops[i].type = REGOP(TYPE_GR_CTX);


@@ -140,7 +140,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
 	struct fifo_gk20a *f = &sched->g->fifo;
 	struct tsg_gk20a *tsg;
 	u64 *bitmap;
-	int tsgid;
+	unsigned int tsgid;
 	/* pid at user level corresponds to kernel tgid */
 	pid_t tgid = (pid_t)arg->pid;
 	int err = 0;
@@ -492,7 +492,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
 	struct gk20a *g = sched->g;
 	struct fifo_gk20a *f = &g->fifo;
 	struct tsg_gk20a *tsg;
-	int tsgid;
+	unsigned int tsgid;

 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);


@@ -222,7 +222,7 @@ static inline bool gk20a_semaphore_is_released(struct gk20a_semaphore *s)
 	 * the value of the semaphore then the semaphore has been signaled
 	 * (a.k.a. released).
 	 */
-	return sema_val >= atomic_read(&s->value);
+	return (int)sema_val >= atomic_read(&s->value);
 }

 static inline bool gk20a_semaphore_is_acquired(struct gk20a_semaphore *s)
@@ -240,12 +240,12 @@ static inline u32 gk20a_semaphore_read(struct gk20a_semaphore *s)
 static inline u32 gk20a_semaphore_get_value(struct gk20a_semaphore *s)
 {
-	return atomic_read(&s->value);
+	return (u32)atomic_read(&s->value);
 }

 static inline u32 gk20a_semaphore_next_value(struct gk20a_semaphore *s)
 {
-	return atomic_read(&s->hw_sema->next_value);
+	return (u32)atomic_read(&s->hw_sema->next_value);
 }

 /*
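
atomic_read() returns a plain int, so returning its result from a
function declared to return u32 is an implicit signed-to-unsigned
conversion; the casts above make that conversion explicit. The shape of
the fix (hypothetical helper):

	static inline u32 counter_value(atomic_t *v)
	{
		/* atomic_read() returns int; the cast documents that the
		 * counter is deliberately treated as unsigned. */
		return (u32)atomic_read(v);
	}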


@@ -445,7 +445,7 @@ static int gk20a_sync_fill_driver_data(struct sync_pt *sync_pt,
 {
 	struct gk20a_sync_pt_info info;

-	if (size < sizeof(info))
+	if (size < (int)sizeof(info))
 		return -ENOMEM;

 	info.hw_op_ns = ktime_to_ns(gk20a_sync_pt_duration(sync_pt));


@@ -65,7 +65,7 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct fifo_runlist_info_gk20a *runlist;
-	int i;
+	unsigned int i;

 	for (i = 0; i < f->max_runlists; ++i) {
 		runlist = &f->runlist_info[i];
@@ -112,7 +112,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 	ch->tsgid = tsg->tsgid;

 	/* all the channel part of TSG should need to be same runlist_id */
-	if (tsg->runlist_id == ~0)
+	if (tsg->runlist_id == FIFO_INVAL_TSG_ID)
 		tsg->runlist_id = ch->runlist_id;
 	else if (tsg->runlist_id != ch->runlist_id) {
 		gk20a_err(dev_from_gk20a(tsg->g),
@@ -154,7 +154,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 {
 	struct tsg_gk20a *tsg = NULL;

-	if (tsgid < 0 || tsgid >= g->fifo.num_channels)
+	if (tsgid >= g->fifo.num_channels)
 		return -EINVAL;

 	tsg = &g->fifo.tsg[tsgid];
@@ -198,7 +198,7 @@ static int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
 }

 static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
-				int event_id,
+				unsigned int event_id,
 				struct gk20a_event_id_data **event_id_data)
 {
 	struct gk20a_event_id_data *local_event_id_data;
@@ -383,7 +383,7 @@ static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
 static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f)
 {
 	struct tsg_gk20a *tsg = NULL;
-	int tsgid;
+	unsigned int tsgid;

 	mutex_lock(&f->tsg_inuse_mutex);
 	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
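
The gk20a_init_tsg_support change drops a test that became
tautological: "tsgid < 0" can never be true once tsgid is unsigned
(gcc's -Wtype-limits, also part of -Wextra, flags exactly this), so the
upper bound alone covers the whole valid range. Sketched standalone:

	static int validate_tsgid(unsigned int tsgid, unsigned int num_channels)
	{
		/* No lower-bound test needed: an unsigned tsgid is >= 0
		 * by construction. */
		if (tsgid >= num_channels)
			return -EINVAL;
		return 0;
	}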


@@ -43,9 +43,9 @@ struct tsg_gk20a {
 	int num_active_channels;
 	struct mutex ch_list_lock;

-	int timeslice_us;
-	int timeslice_timeout;
-	int timeslice_scale;
+	unsigned int timeslice_us;
+	unsigned int timeslice_timeout;
+	unsigned int timeslice_scale;

 	struct gr_ctx_desc *tsg_gr_ctx;


@@ -212,7 +212,8 @@ static int gm206_bootstrap_hs_flcn(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm = &mm->pmu.vm;
-	int i, err = 0;
+	unsigned int i;
+	int err = 0;
 	u64 *acr_dmem;
 	u32 img_size_in_bytes = 0;
 	u32 status;


@@ -830,7 +830,7 @@ static int gm206_bios_preos(struct gk20a *g)
 static int gm206_bios_init(struct gk20a *g)
 {
-	int i;
+	unsigned int i;
 	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 	struct dentry *d;
 	const struct firmware *bios_fw;


@@ -1185,7 +1185,7 @@ int acr_ucode_patch_sig(struct gk20a *g,
 		unsigned int *p_patch_loc,
 		unsigned int *p_patch_ind)
 {
-	int i, *p_sig;
+	unsigned int i, *p_sig;

 	gm20b_dbg_pmu("");

 	if (!pmu_is_debug_mode_en(g)) {


@@ -101,12 +101,12 @@ static bool gm20b_fb_set_use_full_comp_tag_line(struct gk20a *g)
 	return true;
 }

-static int gm20b_fb_compression_page_size(struct gk20a *g)
+static unsigned int gm20b_fb_compression_page_size(struct gk20a *g)
 {
 	return SZ_128K;
 }

-static int gm20b_fb_compressible_page_size(struct gk20a *g)
+static unsigned int gm20b_fb_compressible_page_size(struct gk20a *g)
 {
 	return SZ_64K;
 }


@@ -80,7 +80,7 @@ static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
 		} else {
 			u32 mmu_id = gm20b_engine_id_to_mmu_id(g,
 							engine_id);
-			if (mmu_id != ~0)
+			if (mmu_id != (u32)~0)
 				gk20a_writel(g, fifo_trigger_mmu_fault_r(mmu_id),
 					fifo_trigger_mmu_fault_enable_f(1));
 		}


@@ -548,11 +548,11 @@ static void gr_gm20b_load_tpc_mask(struct gk20a *g)
 	fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0);
 	if (g->tpc_fs_mask_user && g->tpc_fs_mask_user != fuse_tpc_mask &&
-		fuse_tpc_mask == (0x1 << g->gr.max_tpc_count) - 1) {
+		fuse_tpc_mask == (0x1U << g->gr.max_tpc_count) - 1U) {
 		u32 val = g->tpc_fs_mask_user;
-		val &= (0x1 << g->gr.max_tpc_count) - 1;
+		val &= (0x1U << g->gr.max_tpc_count) - 1U;
 		/* skip tpc to disable the other tpc cause channel timeout */
-		val = (0x1 << hweight32(val)) - 1;
+		val = (0x1U << hweight32(val)) - 1U;
 		gk20a_writel(g, gr_fe_tpc_fs_r(), val);
 	} else {
 		gk20a_writel(g, gr_fe_tpc_fs_r(), pes_tpc_mask);
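
The U suffixes in the TPC-mask arithmetic matter beyond the warning:
for a 32-bit int, "0x1 << 31" shifts into the sign bit, which is
undefined behavior in C, while "0x1U << 31" is well defined. A sketch
(hypothetical helper):

	static u32 full_tpc_mask(u32 max_tpc_count)
	{
		/* With max_tpc_count == 31 the signed form would be UB;
		 * the unsigned literal keeps the expression in u32. */
		return (0x1U << max_tpc_count) - 1U;
	}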


@@ -198,7 +198,7 @@ void gm20b_ltc_init_fs_state(struct gk20a *g)
 void gm20b_ltc_isr(struct gk20a *g)
 {
 	u32 mc_intr, ltc_intr;
-	int ltc, slice;
+	unsigned int ltc, slice;
 	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
 	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
@@ -227,8 +227,8 @@ void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g)
 	u32 data;
 	bool done[g->ltc_count];
 	s32 retry = 100;
-	int i;
-	int num_done = 0;
+	unsigned int i;
+	unsigned int num_done = 0;
 	u32 ltc_d = ltc_ltc1_ltss_g_elpg_r() - ltc_ltc0_ltss_g_elpg_r();

 	gk20a_dbg_fn("");
@@ -289,7 +289,7 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
 void gm20b_flush_ltc(struct gk20a *g)
 {
 	unsigned long timeout;
-	int ltc;
+	unsigned int ltc;
 	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);

 #define __timeout_init() \


@@ -30,9 +30,9 @@ static int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_reg_ops_params *p = &msg.params.reg_ops;
 	void *oob;
-	size_t oob_size;
+	size_t oob_size, ops_size;
 	void *handle = NULL;
-	int ops_size, err = 0;
+	int err = 0;

 	gk20a_dbg_fn("");

 	BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op));


@@ -184,7 +184,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 {
 	struct fifo_runlist_info_gk20a *runlist;
 	struct device *d = dev_from_gk20a(g);
-	s32 runlist_id = -1;
+	unsigned int runlist_id = -1;
 	u32 i;
 	u64 runlist_size;
@@ -238,7 +238,8 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 	struct fifo_gk20a *f = &g->fifo;
 	struct device *d = dev_from_gk20a(g);
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
-	int chid, err = 0;
+	unsigned int chid;
+	int err = 0;

 	gk20a_dbg_fn("");
@@ -486,7 +487,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	/* valid channel, add/remove it from active list.
 	   Otherwise, keep active list untouched for suspend/resume. */
-	if (hw_chid != ~0) {
+	if (hw_chid != (u32)~0) {
 		if (add) {
 			if (test_and_set_bit(hw_chid,
 				runlist->active_channels) == 1)
@@ -498,7 +499,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 		}
 	}

-	if (hw_chid != ~0 || /* add/remove a valid channel */
+	if (hw_chid != (u32)~0 || /* add/remove a valid channel */
 	    add /* resume to add all channels back */) {
 		u32 chid;
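
One subtlety in the vgpu init_runlist hunk above: "unsigned int
runlist_id = -1;" is legal and well defined, because converting -1 to
an unsigned type wraps modulo 2^N (C99 6.3.1.3), yielding UINT_MAX. It
is the signed/unsigned comparisons, not such assignments, that draw the
warning. Illustrative sketch:

	static bool runlist_unset(void)
	{
		unsigned int runlist_id = -1;	/* converts to UINT_MAX */

		return runlist_id == (u32)~0;	/* true: both 0xffffffff */
	}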


@@ -928,7 +928,7 @@ struct nvgpu_dbg_gpu_suspend_resume_contexts_args {
  */
 #define NVGPU_IOCTL_MAGIC 'H'

-#define NVGPU_NO_TIMEOUT (-1)
+#define NVGPU_NO_TIMEOUT ((u32)~0)

 #define NVGPU_PRIORITY_LOW 50
 #define NVGPU_PRIORITY_MEDIUM 100
 #define NVGPU_PRIORITY_HIGH 150
#define NVGPU_PRIORITY_HIGH 150 #define NVGPU_PRIORITY_HIGH 150