From ee6ef2a71919cddbd0a3f4ff066e178096f8017a Mon Sep 17 00:00:00 2001
From: Nicolas Benech
Date: Wed, 13 Feb 2019 18:22:45 -0500
Subject: [PATCH] gpu: nvgpu: resolve MISRA 17.7 for WARN_ON

MISRA Rule-17.7 requires the return value of all functions to be used.
The fix is either to use the return value or to change the function to
return void. This patch ensures that WARN and WARN_ON always return
void, and introduces a new nvgpu_do_assert construct that triggers the
equivalent of WARN_ON(true) so that the stack can be dumped (depending
on OS support).

JIRA NVGPU-677

Change-Id: Ie2312c5588ceb5b1db825d15a096149b63b69af4
Signed-off-by: Nicolas Benech
Reviewed-on: https://git-master.nvidia.com/r/2018706
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
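Notes:

The conversion pattern, shown here on a hypothetical caller (the
function body and the err/g variables below are illustrative only,
not taken from any file in this patch):

	/*
	 * Before: MISRA 17.7 is violated because WARN_ON()'s return
	 * value is consumed only by the branch condition and then
	 * discarded.
	 */
	if (WARN_ON(err != 0)) {
		return err;
	}

	/*
	 * After: test the condition directly. nvgpu_do_assert()
	 * triggers the equivalent of WARN_ON(true), and
	 * nvgpu_do_assert_print() logs through nvgpu_err() first.
	 */
	if (err != 0) {
		nvgpu_do_assert_print(g, "unexpected error %d", err);
		return err;
	}
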
 drivers/gpu/nvgpu/common/clk_arb/clk_arb.c    |  4 ++--
 drivers/gpu/nvgpu/common/fb/fb_gv11b.c        | 24 ++++++++++---------
 drivers/gpu/nvgpu/common/fifo/channel.c       |  4 ++--
 .../common/mm/allocators/bitmap_allocator.c   | 12 ++++------
 .../common/mm/allocators/page_allocator.c     |  6 ++---
 drivers/gpu/nvgpu/common/mm/gmmu/page_table.c |  3 ++-
 drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c   | 11 +++++----
 drivers/gpu/nvgpu/common/mm/nvgpu_mem.c       | 12 +++++-----
 drivers/gpu/nvgpu/common/mm/vm.c              | 18 +++++++++-----
 .../common/sync/channel_sync_semaphore.c      |  3 ++-
 .../nvgpu/common/vgpu/gp10b/vgpu_mm_gp10b.c   |  8 ++++++-
 drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c    |  5 ++--
 drivers/gpu/nvgpu/common/vgpu/vgpu.c          |  4 +++-
 drivers/gpu/nvgpu/include/nvgpu/bug.h         | 17 ++++++++++++-
 drivers/gpu/nvgpu/include/nvgpu/posix/bug.h   |  4 ++--
 drivers/gpu/nvgpu/os/linux/dmabuf.c           | 22 ++++++++++-------
 drivers/gpu/nvgpu/os/linux/kmem.c             |  6 +++--
 .../gpu/nvgpu/os/linux/sync_sema_android.c    | 13 ++++++----
 18 files changed, 110 insertions(+), 66 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c b/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c
index d693b227c..287a1da88 100644
--- a/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c
+++ b/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c
@@ -534,8 +534,8 @@ void nvgpu_clk_arb_worker_enqueue(struct gk20a *g,
 	/*
 	 * Warn if worker thread cannot run
 	 */
-	if (WARN_ON(__nvgpu_clk_arb_worker_start(g) != 0)) {
-		nvgpu_warn(g, "clk arb worker cannot run!");
+	if (__nvgpu_clk_arb_worker_start(g) != 0) {
+		nvgpu_do_assert_print(g, "clk arb worker cannot run!");
 		return;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
index f22d40f52..c3d241a0a 100644
--- a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
@@ -709,16 +709,16 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 
 static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
 {
-	if (WARN_ON(mmfault->fault_type >=
-			ARRAY_SIZE(fault_type_descs_gv11b))) {
+	if (mmfault->fault_type >= ARRAY_SIZE(fault_type_descs_gv11b)) {
+		nvgpu_do_assert();
 		mmfault->fault_type_desc = invalid_str;
 	} else {
 		mmfault->fault_type_desc =
 			fault_type_descs_gv11b[mmfault->fault_type];
 	}
 
-	if (WARN_ON(mmfault->client_type >=
-			ARRAY_SIZE(fault_client_type_descs_gv11b))) {
+	if (mmfault->client_type >= ARRAY_SIZE(fault_client_type_descs_gv11b)) {
+		nvgpu_do_assert();
 		mmfault->client_type_desc = invalid_str;
 	} else {
 		mmfault->client_type_desc =
@@ -726,20 +726,22 @@ static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
 	}
 
 	mmfault->client_id_desc = invalid_str;
-	if (mmfault->client_type ==
-			gmmu_fault_client_type_hub_v()) {
-
-		if (!(WARN_ON(mmfault->client_id >=
-				ARRAY_SIZE(hub_client_descs_gv11b)))) {
+	if (mmfault->client_type == gmmu_fault_client_type_hub_v()) {
+		if (!(mmfault->client_id >=
+				ARRAY_SIZE(hub_client_descs_gv11b))) {
 			mmfault->client_id_desc =
 				hub_client_descs_gv11b[mmfault->client_id];
+		} else {
+			nvgpu_do_assert();
 		}
 	} else if (mmfault->client_type ==
 			gmmu_fault_client_type_gpc_v()) {
-		if (!(WARN_ON(mmfault->client_id >=
-				ARRAY_SIZE(gpc_client_descs_gv11b)))) {
+		if (!(mmfault->client_id >=
+				ARRAY_SIZE(gpc_client_descs_gv11b))) {
 			mmfault->client_id_desc =
 				gpc_client_descs_gv11b[mmfault->client_id];
+		} else {
+			nvgpu_do_assert();
 		}
 	}
 
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index 2d6f7386c..d84f1f531 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -1947,8 +1947,8 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 	/*
 	 * Warn if worker thread cannot run
 	 */
-	if (WARN_ON(__nvgpu_channel_worker_start(g) != 0)) {
-		nvgpu_warn(g, "channel worker cannot run!");
+	if (__nvgpu_channel_worker_start(g) != 0) {
+		nvgpu_do_assert_print(g, "channel worker cannot run!");
 		return;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/bitmap_allocator.c
index 62f45e12d..660ce093c 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/bitmap_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/bitmap_allocator.c
@@ -121,7 +121,8 @@ static void nvgpu_bitmap_free_fixed(struct nvgpu_allocator *na,
 	u64 blks, offs;
 
 	offs = base >> a->blk_shift;
-	if (WARN_ON(offs * a->blk_size != base)) {
+	if (offs * a->blk_size != base) {
+		nvgpu_do_assert();
 		return;
 	}
 
@@ -396,15 +397,12 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	bool is_base_aligned = (base & (blk_size - 1ULL)) == 0ULL;
 	bool is_length_aligned = (length & (blk_size - 1ULL)) == 0ULL;
 
-	if (WARN_ON(!is_blk_size_pwr_2)) {
+	if (!is_blk_size_pwr_2) {
+		nvgpu_do_assert();
 		return -EINVAL;
 	}
 
-	/*
-	 * blk_size must be a power-of-2; base and length also need to be
-	 * aligned to blk_size.
-	 */
-	if (!is_blk_size_pwr_2 || !is_base_aligned || !is_length_aligned) {
+	if (!is_base_aligned || !is_length_aligned) {
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
index 8d26949de..feb74c5df 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION.  All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -375,7 +375,7 @@ static int do_slab_alloc(struct nvgpu_page_allocator *a,
 			slab_page->nr_objects, 0, 1, 0);
 
 	if (offs >= slab_page->nr_objects) {
-		(void) WARN(1, "Empty/partial slab with no free objects?");
+		WARN(true, "Empty/partial slab with no free objects?");
 
 		/* Add the buggy page to the full list... This isn't ideal. */
 		add_slab_page_to_full(slab, slab_page);
@@ -774,7 +774,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages_fixed(
 	alloc->sgt.ops = &page_alloc_sgl_ops;
 	alloc->base = nvgpu_alloc_fixed(&a->source_allocator, base, length, 0);
 	if (alloc->base == 0ULL) {
-		(void) WARN(1, "nvgpu: failed to fixed alloc pages @ 0x%010llx",
+		WARN(true, "nvgpu: failed to fixed alloc pages @ 0x%010llx",
 			base);
 		goto fail;
 	}
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index cf0fd6a70..f03a4a245 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
@@ -198,7 +198,8 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
 
 	err = nvgpu_pd_alloc(vm, &vm->pdb, pdb_size);
-	if (WARN_ON(err != 0)) {
+	if (err != 0) {
+		nvgpu_do_assert();
 		return err;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c b/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c
index 838fc7d6e..7cadc947c 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION.  All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -240,8 +240,8 @@ void nvgpu_pd_cache_fini(struct gk20a *g)
 	}
 
 	for (i = 0U; i < NVGPU_PD_CACHE_COUNT; i++) {
-		(void) WARN_ON(!nvgpu_list_empty(&cache->full[i]));
-		(void) WARN_ON(!nvgpu_list_empty(&cache->partial[i]));
+		nvgpu_assert(nvgpu_list_empty(&cache->full[i]));
+		nvgpu_assert(nvgpu_list_empty(&cache->partial[i]));
 	}
 
 	nvgpu_kfree(g, g->mm.pd_cache);
@@ -465,7 +465,8 @@ int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 		return 0;
 	}
 
-	if (WARN_ON(g->mm.pd_cache == NULL)) {
+	if (g->mm.pd_cache == NULL) {
+		nvgpu_do_assert();
 		return -ENOMEM;
 	}
 
@@ -553,7 +554,7 @@ static void nvgpu_pd_cache_free(struct gk20a *g, struct nvgpu_pd_cache *cache,
 
 	pentry = nvgpu_pd_cache_look_up(g, cache, pd);
 	if (pentry == NULL) {
-		(void) WARN(true, "Attempting to free non-existent pd");
+		nvgpu_do_assert_print(g, "Attempting to free non-existent pd");
 		return;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 9c7498666..2813f754b 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -54,7 +54,7 @@ u32 nvgpu_aperture_mask_raw(struct gk20a *g, enum nvgpu_aperture aperture,
 	case APERTURE_VIDMEM:
 		return vidmem_mask;
 	case APERTURE_INVALID:
-		(void)WARN(true, "Bad aperture");
+		nvgpu_do_assert_print(g, "Bad aperture");
 	}
 	return 0;
 }
@@ -105,7 +105,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 		nvgpu_pramin_rd_n(g, mem, w * (u32)sizeof(u32),
 				(u32)sizeof(u32), &data);
 	} else {
-		(void)WARN(true, "Accessing unallocated nvgpu_mem");
+		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
 
 	return data;
@@ -139,7 +139,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_rd_n(g, mem, offset, size, dest);
 	} else {
-		(void)WARN(true, "Accessing unallocated nvgpu_mem");
+		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
 }
 
@@ -157,7 +157,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 			nvgpu_wmb();
 		}
 	} else {
-		(void)WARN(true, "Accessing unallocated nvgpu_mem");
+		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
 }
 
@@ -184,7 +184,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 			nvgpu_wmb();
 		}
 	} else {
-		(void)WARN(true, "Accessing unallocated nvgpu_mem");
+		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
 }
 
@@ -210,7 +210,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 			nvgpu_wmb();
 		}
 	} else {
-		(void)WARN(true, "Accessing unallocated nvgpu_mem");
+		nvgpu_do_assert_print(g, "Accessing unallocated nvgpu_mem");
 	}
 }
 
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index b6ec67381..7b22a9cc9 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -285,19 +285,23 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 		     bool unified_va,
 		     const char *name)
 {
-	int err = 0;
 	char alloc_name[32];
 	u64 kernel_vma_flags;
 	u64 user_vma_start, user_vma_limit;
 	u64 user_lp_vma_start, user_lp_vma_limit;
 	u64 kernel_vma_start, kernel_vma_limit;
 	struct gk20a *g = gk20a_from_mm(mm);
+	int err = 0;
 
-	if (WARN_ON(kernel_reserved + low_hole > aperture_size)) {
+	if (kernel_reserved + low_hole > aperture_size) {
+		nvgpu_do_assert_print(g,
+			"Overlap between user and kernel spaces");
 		return -ENOMEM;
 	}
 
-	if (WARN_ON(vm->guest_managed && kernel_reserved != 0U)) {
+	if (vm->guest_managed && kernel_reserved != 0U) {
+		nvgpu_do_assert_print(g,
+			"Cannot use guest managed VM with kernel space");
 		return -EINVAL;
 	}
 
@@ -382,9 +386,11 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 	nvgpu_log_info(g, "kernel_vma [0x%llx,0x%llx)",
 		kernel_vma_start, kernel_vma_limit);
 
-	if (WARN_ON(user_vma_start > user_vma_limit) ||
-	    WARN_ON(user_lp_vma_start > user_lp_vma_limit) ||
-	    WARN_ON(!vm->guest_managed && kernel_vma_start >= kernel_vma_limit)) {
+	if ((user_vma_start > user_vma_limit) ||
+	    (user_lp_vma_start > user_lp_vma_limit) ||
+	    (!vm->guest_managed && kernel_vma_start >= kernel_vma_limit)) {
+		nvgpu_err(g, "Invalid vm configuration");
+		nvgpu_do_assert();
 		err = -EINVAL;
 		goto clean_up_page_tables;
 	}
diff --git a/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c b/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c
index f7dad2e2b..74988d73a 100644
--- a/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c
+++ b/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c
@@ -342,7 +342,8 @@ nvgpu_channel_sync_semaphore_create(
 	int asid = -1;
 	int err;
 
-	if (WARN_ON(c->vm == NULL)) {
+	if (c->vm == NULL) {
+		nvgpu_do_assert();
 		return NULL;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_mm_gp10b.c
index 751fa1e80..54defb479 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -85,7 +85,13 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 
 	/* FIXME: add support for sparse mappings */
 
-	if (WARN_ON(!sgt) || WARN_ON(nvgpu_iommuable(g))) {
+	if (!sgt) {
+		nvgpu_do_assert_print(g, "NULL SGT");
+		return 0;
+	}
+
+	if (nvgpu_iommuable(g)) {
+		nvgpu_do_assert_print(g, "MM should not be IOMMU-able");
 		return 0;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
index 67f79cd90..fe051359d 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
@@ -367,8 +367,9 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	config->pe_count_per_gpc =
 		nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC);
 
-	if (WARN(config->pe_count_per_gpc > GK20A_GR_MAX_PES_PER_GPC,
-		"too many pes per gpc %u\n", config->pe_count_per_gpc)) {
+	if (config->pe_count_per_gpc > GK20A_GR_MAX_PES_PER_GPC) {
+		nvgpu_do_assert_print(g, "too many pes per gpc %u\n",
+			config->pe_count_per_gpc);
 		goto cleanup;
 	}
 	if (config->pe_count_per_gpc > TEGRA_VGPU_MAX_PES_COUNT_PER_GPC) {
diff --git a/drivers/gpu/nvgpu/common/vgpu/vgpu.c b/drivers/gpu/nvgpu/common/vgpu/vgpu.c
index 76f1b87bb..a167d1dc7 100644
--- a/drivers/gpu/nvgpu/common/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/vgpu.c
@@ -166,7 +166,9 @@ int vgpu_intr_thread(void *dev_id)
 		if (err == -ETIME) {
 			continue;
 		}
-		if (WARN_ON(err)) {
+		if (err != 0) {
+			nvgpu_do_assert_print(g,
+				"Unexpected vgpu_ivc_recv err=%d", err);
 			continue;
 		}
 
diff --git a/drivers/gpu/nvgpu/include/nvgpu/bug.h b/drivers/gpu/nvgpu/include/nvgpu/bug.h
index 61fb40085..0fa2d76f2 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/bug.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/bug.h
@@ -28,6 +28,8 @@
 #include
 #endif
 
+#include <nvgpu/log.h>
+
 /*
  * Define an assert macro that code within nvgpu can use.
  *
@@ -42,7 +44,7 @@
  * As a result this macro varies depending on platform.
  */
 #if defined(__KERNEL__)
-#define nvgpu_assert(cond)	WARN_ON(!(cond))
+#define nvgpu_assert(cond)	((void) WARN_ON(!(cond)))
 #else
 /*
  * A static inline for POSIX/QNX/etc so that we can hide the branch in BUG_ON()
@@ -60,4 +62,17 @@ static inline void nvgpu_assert(bool cond)
 }
 #endif
 
+/*
+ * Define simple macros to force the consequences of a failed assert
+ * (presumably done in a previous if statement).
+ * The exact behavior will be OS dependent. See above.
+ */
+#define nvgpu_do_assert()	nvgpu_assert(false)
+
+#define nvgpu_do_assert_print(g, fmt, ...)		\
+	do {						\
+		nvgpu_err(g, fmt, ##__VA_ARGS__);	\
+		nvgpu_do_assert();			\
+	} while (false)
+
 #endif /* NVGPU_BUG_H */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/posix/bug.h b/drivers/gpu/nvgpu/include/nvgpu/posix/bug.h
index 511263b8c..272d13fe5 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/posix/bug.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/posix/bug.h
@@ -34,8 +34,8 @@
 	}						\
 } while (false)
 
-#define WARN(cond, msg, arg...)		__warn(cond, msg, ##arg)
-#define WARN_ON(cond)			__warn(cond, "")
+#define WARN(cond, msg, arg...)		((void) __warn(cond, msg, ##arg))
+#define WARN_ON(cond)			((void) __warn(cond, ""))
 
 #define WARN_ONCE(cond, msg, arg...)			\
 	({static bool warn_once_warned = false;		\
diff --git a/drivers/gpu/nvgpu/os/linux/dmabuf.c b/drivers/gpu/nvgpu/os/linux/dmabuf.c
index 10156b340..c7acd8873 100644
--- a/drivers/gpu/nvgpu/os/linux/dmabuf.c
+++ b/drivers/gpu/nvgpu/os/linux/dmabuf.c
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017-2018, NVIDIA CORPORATION.  All rights reserved.
+* Copyright (c) 2017-2019, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -23,6 +23,7 @@
 #include
 #include
+#include <nvgpu/bug.h>
 
 #include "gk20a/fence_gk20a.h"
 
@@ -69,10 +70,10 @@ enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 	if (buf_owner == NULL) {
 		/* Not nvgpu-allocated, assume system memory */
 		return APERTURE_SYSMEM;
-	} else if (WARN_ON(buf_owner == g && unified_memory)) {
+	} else if ((buf_owner == g) && unified_memory) {
 		/* Looks like our video memory, but this gpu doesn't support
 		 * it. Warn about a bug and bail out */
-		nvgpu_warn(g,
+		nvgpu_do_assert_print(g,
 			"dmabuf is our vidmem but we don't have local vidmem");
 		return APERTURE_INVALID;
 	} else if (buf_owner != g) {
@@ -90,7 +91,8 @@ struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf,
 	struct gk20a_dmabuf_priv *priv;
 
 	priv = dma_buf_get_drvdata(dmabuf, dev);
-	if (WARN_ON(!priv)) {
+	if (!priv) {
+		nvgpu_do_assert();
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -129,10 +131,10 @@ void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
 		return;
 
 	nvgpu_mutex_acquire(&priv->lock);
-	WARN_ON(priv->sgt != sgt);
-	WARN_ON(priv->attach != attachment);
+	nvgpu_assert(priv->sgt == sgt);
+	nvgpu_assert(priv->attach == attachment);
 	priv->pin_count--;
-	WARN_ON(priv->pin_count < 0);
+	nvgpu_assert(priv->pin_count >= 0);
 	dma_addr = sg_dma_address(priv->sgt->sgl);
 	if (priv->pin_count == 0) {
 		dma_buf_unmap_attachment(priv->attach, priv->sgt,
@@ -183,7 +185,8 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
 	struct gk20a_buffer_state *s;
 	struct device *dev = dev_from_gk20a(g);
 
-	if (WARN_ON(offset >= (u64)dmabuf->size)) {
+	if (offset >= (u64)dmabuf->size) {
+		nvgpu_do_assert();
 		return -EINVAL;
 	}
 
@@ -192,7 +195,8 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
 		return err;
 
 	priv = dma_buf_get_drvdata(dmabuf, dev);
-	if (WARN_ON(!priv)) {
+	if (!priv) {
+		nvgpu_do_assert();
 		return -ENOSYS;
 	}
 
diff --git a/drivers/gpu/nvgpu/os/linux/kmem.c b/drivers/gpu/nvgpu/os/linux/kmem.c
index 7c3549f0f..ea455929e 100644
--- a/drivers/gpu/nvgpu/os/linux/kmem.c
+++ b/drivers/gpu/nvgpu/os/linux/kmem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -295,8 +295,10 @@ static int __nvgpu_free_kmem_alloc(struct nvgpu_mem_alloc_tracker *tracker,
 	nvgpu_lock_tracker(tracker);
 	alloc = nvgpu_rem_alloc(tracker, addr);
-	if (WARN(!alloc, "Possible double-free detected: 0x%llx!", addr)) {
+	if (!alloc) {
 		nvgpu_unlock_tracker(tracker);
+		nvgpu_do_assert_print(g,
+			"Possible double-free detected: 0x%llx!", addr);
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/nvgpu/os/linux/sync_sema_android.c b/drivers/gpu/nvgpu/os/linux/sync_sema_android.c
index f4f2c0957..b77e70a95 100644
--- a/drivers/gpu/nvgpu/os/linux/sync_sema_android.c
+++ b/drivers/gpu/nvgpu/os/linux/sync_sema_android.c
@@ -1,7 +1,7 @@
 /*
 * Semaphore Sync Framework Integration
 *
- * Copyright (c) 2017-2018, NVIDIA Corporation.  All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA Corporation.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -119,8 +119,10 @@ static struct gk20a_sync_pt *to_gk20a_sync_pt(struct sync_pt *pt)
 }
 
 static struct gk20a_sync_timeline *to_gk20a_timeline(struct sync_timeline *obj)
 {
-	if (WARN_ON(obj->ops != &gk20a_sync_timeline_ops))
+	if (obj->ops != &gk20a_sync_timeline_ops) {
+		nvgpu_do_assert();
 		return NULL;
+	}
 	return (struct gk20a_sync_timeline *)obj;
 }
 
@@ -241,12 +243,15 @@ static int gk20a_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
 	struct gk20a_sync_pt *pt_a = to_gk20a_sync_pt(a);
 	struct gk20a_sync_pt *pt_b = to_gk20a_sync_pt(b);
 
-	if (WARN_ON(pt_a->obj != pt_b->obj))
+	if (pt_a->obj != pt_b->obj) {
+		nvgpu_do_assert();
 		return 0;
+	}
 
 	/* Early out */
-	if (a == b)
+	if (a == b) {
 		return 0;
+	}
 
 	a_expired = gk20a_sync_pt_has_signaled(a);
 	b_expired = gk20a_sync_pt_has_signaled(b);
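
Note on the posix/bug.h change: WARN() and WARN_ON() now expand to void
expressions on POSIX builds, so they can no longer be used as a branch
condition there. Callers on all OSes are therefore converted to the
pattern below (sketched on a hypothetical caller; the priv variable is
illustrative only):

	if (priv == NULL) {
		nvgpu_do_assert();	/* equivalent of WARN_ON(true) */
		return -EINVAL;		/* the caller handles the failure */
	}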