gpu: nvgpu: fix MISRA 11.2 nvgpu_sgl

MISRA rule 11.2 doesn't allow converting a pointer to or from an
incomplete type. Such conversions may produce an incorrectly aligned
pointer and may further result in undefined behavior.

This patch addresses rule 11.2 violations involving pointers to and
from struct nvgpu_sgl by replacing struct nvgpu_sgl pointers with
void pointers.

Jira NVGPU-3736

Change-Id: I8fd5766eacace596f2761b308bce79f22f2cb207
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2267876
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Vedashree Vidwans
Date: 2019-12-23 16:05:45 -08:00
Committed by: Alex Waterman
Parent: 6b62e0f79a
Commit: a615604411
20 changed files with 68 additions and 88 deletions

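Before the per-file changes, the rule itself is worth a standalone sketch. Everything below is illustrative only; opaque_handle, impl, bad_next and good_next are hypothetical stand-ins for nvgpu_sgl and nvgpu_mem_sgl, not code from this patch.

struct opaque_handle;            /* incomplete type: declared, never defined */

struct impl {
	struct impl *next;       /* the concrete implementation type */
};

/* Violates MISRA C:2012 rule 11.2: converts between a pointer to an
 * incomplete type and a pointer to another type. */
static struct opaque_handle *bad_next(void *sgl)
{
	struct impl *s = (struct impl *)sgl;

	return (struct opaque_handle *)s->next;
}

/* Compliant with 11.2: conversions to and from void * are governed by
 * the separate, advisory rule 11.5 instead. */
static void *good_next(void *sgl)
{
	struct impl *s = (struct impl *)sgl;

	return (void *)s->next;
}

This is exactly the trade the patch makes throughout: the fake forward-declared nvgpu_sgl type is dropped in favor of void *, giving up a little compiler help in exchange for rule 11.2 compliance.
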
View File

@@ -160,11 +160,11 @@ static void nvgpu_page_release_co(struct nvgpu_allocator *a,
nvgpu_alloc_release_carveout(&va->source_allocator, co);
}
-static struct nvgpu_sgl *nvgpu_page_alloc_sgl_next(void *sgl)
+static void *nvgpu_page_alloc_sgl_next(void *sgl)
{
struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
-return (struct nvgpu_sgl *)sgl_impl->next;
+return (void *)sgl_impl->next;
}
static u64 nvgpu_page_alloc_sgl_phys(struct gk20a *g, void *sgl)
@@ -175,7 +175,7 @@ static u64 nvgpu_page_alloc_sgl_phys(struct gk20a *g, void *sgl)
}
static u64 nvgpu_page_alloc_sgl_ipa_to_pa(struct gk20a *g,
-struct nvgpu_sgl *sgl, u64 ipa, u64 *pa_len)
+void *sgl, u64 ipa, u64 *pa_len)
{
return ipa;
}
@@ -244,7 +244,7 @@ static void nvgpu_page_alloc_free_pages(struct nvgpu_page_allocator *a,
struct nvgpu_page_alloc *alloc,
bool free_buddy_alloc)
{
-struct nvgpu_sgl *sgl = alloc->sgt.sgl;
+void *sgl = alloc->sgt.sgl;
struct gk20a *g = a->owner->g;
if (free_buddy_alloc) {
@@ -460,7 +460,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_slab(
goto fail;
}
-alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
+alloc->sgt.sgl = (void *)sgl;
err = do_slab_alloc(a, slab, alloc);
if (err != 0) {
goto fail;
@@ -626,7 +626,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
if (prev_sgl != NULL) {
prev_sgl->next = sgl;
} else {
-alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
+alloc->sgt.sgl = (void *)sgl;
}
prev_sgl = sgl;
@@ -660,7 +660,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages(
{
struct gk20a *g = a->owner->g;
struct nvgpu_page_alloc *alloc = NULL;
-struct nvgpu_sgl *sgl;
+void *sgl;
u64 pages;
u32 i = 0;
@@ -807,7 +807,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages_fixed(
alloc->nr_chunks = 1;
alloc->length = length;
-alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
+alloc->sgt.sgl = (void *)sgl;
sgl->phys = alloc->base;
sgl->dma = alloc->base;
@@ -834,7 +834,7 @@ static u64 nvgpu_page_palloc_fixed(struct nvgpu_allocator *na,
{
struct nvgpu_page_allocator *a = page_allocator(na);
struct nvgpu_page_alloc *alloc = NULL;
-struct nvgpu_sgl *sgl;
+void *sgl;
struct gk20a *g = a->owner->g;
u64 aligned_len, pages;
u32 i = 0;

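All of the page allocator callbacks above share one shape: accept the opaque void *sgl, cast once to the concrete struct nvgpu_mem_sgl, then work on real fields. A minimal sketch of the pattern, assuming the length field used elsewhere in this patch (the function name is illustrative, not from the source):

static u64 sgl_length_example(void *sgl)
{
	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;

	return sgl_impl->length;
}

Keeping the cast at one well-defined point per callback is what makes the void * interface tolerable to audit.
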
View File

@@ -547,7 +547,7 @@ static int nvgpu_set_pd_level(struct vm_gk20a *vm,
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 17_2))
static int nvgpu_gmmu_do_update_page_table_sgl(struct vm_gk20a *vm,
-struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl,
+struct nvgpu_sgt *sgt, void *sgl,
u64 *space_to_skip_ptr,
u64 *virt_addr_ptr, u64 *length_ptr,
u64 phys_addr_val, u64 ipa_addr_val,
@@ -647,7 +647,7 @@ static int nvgpu_gmmu_do_update_page_table_no_iommu(struct vm_gk20a *vm,
struct nvgpu_gmmu_attrs *attrs)
{
struct gk20a *g = gk20a_from_vm(vm);
-struct nvgpu_sgl *sgl;
+void *sgl;
u64 space_to_skip = space_to_skip_val;
u64 virt_addr = virt_addr_val;
u64 length = length_val;

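Once sgl is a plain void * cursor, code like the no-IOMMU path above can walk the list entirely through the public accessors. A hypothetical helper showing the iteration pattern (map_all_chunks is not part of the patch; the loop body is a placeholder):

static void map_all_chunks(struct gk20a *g, struct nvgpu_sgt *sgt)
{
	void *sgl;

	for (sgl = sgt->sgl; sgl != NULL;
	     sgl = nvgpu_sgt_get_next(sgt, sgl)) {
		u64 phys = nvgpu_sgt_get_phys(g, sgt, sgl);
		u64 len = nvgpu_sgt_get_length(sgt, sgl);

		/* program page tables covering [phys, phys + len) */
		(void)phys;
		(void)len;
	}
}
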
View File

@@ -311,11 +311,11 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
}
}
-static struct nvgpu_sgl *nvgpu_mem_phys_sgl_next(void *sgl)
+static void *nvgpu_mem_phys_sgl_next(void *sgl)
{
struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
-return (struct nvgpu_sgl *)(void *)sgl_impl->next;
+return (void *)(void *)sgl_impl->next;
}
/*
@@ -337,7 +337,7 @@ static u64 nvgpu_mem_phys_sgl_phys(struct gk20a *g, void *sgl)
}
static u64 nvgpu_mem_phys_sgl_ipa_to_pa(struct gk20a *g,
-struct nvgpu_sgl *sgl, u64 ipa, u64 *pa_len)
+void *sgl, u64 ipa, u64 *pa_len)
{
return ipa;
}
@@ -409,7 +409,7 @@ int nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
sgl->next = NULL;
sgl->phys = src_phys;
sgl->length = dest->size;
-sgt->sgl = (struct nvgpu_sgl *)(void *)sgl;
+sgt->sgl = (void *)sgl;
sgt->ops = &nvgpu_mem_phys_ops;
return ret;

View File

@@ -27,43 +27,39 @@
#include <nvgpu/nvgpu_sgt_os.h>
#include <nvgpu/log.h>
-struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl)
+void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl)
{
return sgt->ops->sgl_next(sgl);
}
-u64 nvgpu_sgt_get_phys(struct gk20a *g, struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl)
+u64 nvgpu_sgt_get_phys(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl)
{
return sgt->ops->sgl_phys(g, sgl);
}
-u64 nvgpu_sgt_get_ipa(struct gk20a *g, struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl)
+u64 nvgpu_sgt_get_ipa(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl)
{
return sgt->ops->sgl_ipa(g, sgl);
}
u64 nvgpu_sgt_ipa_to_pa(struct gk20a *g, struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl, u64 ipa, u64 *pa_len)
+void *sgl, u64 ipa, u64 *pa_len)
{
return sgt->ops->sgl_ipa_to_pa(g, sgl, ipa, pa_len);
}
-u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
+u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, void *sgl)
{
return sgt->ops->sgl_dma(sgl);
}
-u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
+u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, void *sgl)
{
return sgt->ops->sgl_length(sgl);
}
-u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl,
-struct nvgpu_gmmu_attrs *attrs)
+u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl,
+struct nvgpu_gmmu_attrs *attrs)
{
return sgt->ops->sgl_gpu_addr(g, sgl, attrs);
}
@@ -93,7 +89,7 @@ void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
{
u64 align = 0, chunk_align = 0;
-struct nvgpu_sgl *sgl;
+void *sgl;
/*
* If this SGT is iommuable and we want to use the IOMMU address then

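These wrappers are pure dispatch through sgt->ops, so callers never touch the concrete list type at all. For example, a hypothetical helper (not in the patch) that sums the chunk lengths of a table:

static u64 sgt_total_length(struct nvgpu_sgt *sgt)
{
	u64 total = 0;
	void *sgl;

	for (sgl = sgt->sgl; sgl != NULL;
	     sgl = nvgpu_sgt_get_next(sgt, sgl)) {
		total += nvgpu_sgt_get_length(sgt, sgl);
	}

	return total;
}
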
View File

@@ -453,7 +453,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
struct nvgpu_fence_type *fence_out = NULL;
struct nvgpu_fence_type *last_fence = NULL;
struct nvgpu_page_alloc *alloc = NULL;
-struct nvgpu_sgl *sgl = NULL;
+void *sgl = NULL;
int err = 0;
if (g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID) {

View File

@@ -45,7 +45,7 @@ static void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
{
struct nvgpu_page_alloc *alloc = NULL;
struct nvgpu_sgt *sgt;
-struct nvgpu_sgl *sgl;
+void *sgl;
u64 byteoff, start_reg, until_end, n;
/*

View File

@@ -272,7 +272,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
void *handle = NULL;
size_t oob_size;
u8 prot;
-struct nvgpu_sgl *sgl;
+void *sgl;
nvgpu_log_fn(g, " ");

View File

@@ -35,7 +35,7 @@
#ifdef CONFIG_NVGPU_DGPU
u32 gk20a_bus_set_bar0_window(struct gk20a *g, struct nvgpu_mem *mem,
-struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl, u32 w)
+struct nvgpu_sgt *sgt, void *sgl, u32 w)
{
u64 bufbase = nvgpu_sgt_get_phys(g, sgt, sgl);
u64 addr = bufbase + w * sizeof(u32);

View File

@@ -27,15 +27,12 @@
struct gk20a;
struct nvgpu_mem;
struct nvgpu_sgt;
-struct nvgpu_sgl;
void gk20a_bus_isr(struct gk20a *g);
int gk20a_bus_init_hw(struct gk20a *g);
#ifdef CONFIG_NVGPU_DGPU
u32 gk20a_bus_set_bar0_window(struct gk20a *g, struct nvgpu_mem *mem,
-struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl,
-u32 w);
+struct nvgpu_sgt *sgt, void *sgl, u32 w);
#endif
#endif /* BUS_GK20A_H */

View File

@@ -97,7 +97,6 @@ struct boardobjgrp;
struct boardobjgrp_pmu_cmd;
struct boardobjgrpmask;
struct nvgpu_sgt;
-struct nvgpu_sgl;
struct nvgpu_channel_hw_state;
struct nvgpu_mem;
struct gk20a_cs_snapshot_client;

View File

@@ -122,7 +122,7 @@ struct gops_bus {
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_DGPU
u32 (*set_bar0_window)(struct gk20a *g, struct nvgpu_mem *mem,
-struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl, u32 w);
+struct nvgpu_sgt *sgt, void *sgl, u32 w);
#endif
u32 (*read_sw_scratch)(struct gk20a *g, u32 index);
void (*write_sw_scratch)(struct gk20a *g, u32 index, u32 val);

View File

@@ -37,13 +37,6 @@ struct nvgpu_gmmu_attrs;
struct nvgpu_sgt;
-/**
- * Forward declared opaque placeholder type that does not really exist, but
- * helps the compiler help us about getting types right. In reality,
- * implementors of nvgpu_sgt_ops will have some concrete type in place of this.
- */
-struct nvgpu_sgl;
/**
* This structure holds the necessary operations required for
* interacting with the underlying scatter gather list entries.
@@ -53,7 +46,7 @@ struct nvgpu_sgt_ops {
* Used to get the next scatter gather list entry in the
* scatter gather list entries.
*/
-struct nvgpu_sgl *(*sgl_next)(void *sgl);
+void *(*sgl_next)(void *sgl);
/**
* Used to get the physical address associated with the
* scatter gather list entry.
@@ -68,8 +61,8 @@ struct nvgpu_sgt_ops {
* Used to get the physical address from the intermediate
* physical address.
*/
-u64 (*sgl_ipa_to_pa)(struct gk20a *g, struct nvgpu_sgl *sgl,
-u64 ipa, u64 *pa_len);
+u64 (*sgl_ipa_to_pa)(struct gk20a *g, void *sgl,
+u64 ipa, u64 *pa_len);
/**
* Used to get the iommuable virtual address associated with the
* scatter gather list entry.
@@ -112,7 +105,7 @@ struct nvgpu_sgt {
/**
* The first node in the scatter gather list.
*/
-struct nvgpu_sgl *sgl;
+void *sgl;
};
/**
@@ -194,8 +187,7 @@ struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g,
* @return Pointer to a scatter gather list.
* @return NULL if there is no next scatter gather list is found.
*/
-struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl);
+void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl);
/**
* @brief Get the intermediate physical address from given scatter
@@ -216,8 +208,7 @@ struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
* @return Intermediate physical address associated with the
* given scatter gather list.
*/
-u64 nvgpu_sgt_get_ipa(struct gk20a *g, struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl);
+u64 nvgpu_sgt_get_ipa(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl);
/**
* @brief Get the physical address from the intermediate physical address
@@ -239,7 +230,7 @@ u64 nvgpu_sgt_get_ipa(struct gk20a *g, struct nvgpu_sgt *sgt,
* address.
*/
u64 nvgpu_sgt_ipa_to_pa(struct gk20a *g, struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl, u64 ipa, u64 *pa_len);
+void *sgl, u64 ipa, u64 *pa_len);
/**
* @brief Get the physical address associated with the scatter gather list.
@@ -258,8 +249,7 @@ u64 nvgpu_sgt_ipa_to_pa(struct gk20a *g, struct nvgpu_sgt *sgt,
*
* @return Physical address associated with the input sgl.
*/
-u64 nvgpu_sgt_get_phys(struct gk20a *g, struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl);
+u64 nvgpu_sgt_get_phys(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl);
/**
* @brief Get the io virtual address associated with the scatter
@@ -275,7 +265,7 @@ u64 nvgpu_sgt_get_phys(struct gk20a *g, struct nvgpu_sgt *sgt,
*
* @return Intermediate physical address.
*/
-u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
+u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, void *sgl);
/**
* @brief Get the length associated with given scatter gather list.
@@ -290,7 +280,7 @@ u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
*
* @return Length associated with the input sgl.
*/
-u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
+u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, void *sgl);
/**
* @brief Get the physical address/intermediate physical address
@@ -313,8 +303,7 @@ u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
* @return Address associated with the given sgl.
*/
u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
-struct nvgpu_sgl *sgl,
-struct nvgpu_gmmu_attrs *attrs);
+void *sgl, struct nvgpu_gmmu_attrs *attrs);
/**
* @brief Free the scatter gather table object.
@@ -369,9 +358,9 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt);
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(__NVGPU_POSIX__)
-struct nvgpu_sgl *nvgpu_mem_sgl_next(void *sgl);
+void *nvgpu_mem_sgl_next(void *sgl);
u64 nvgpu_mem_sgl_phys(struct gk20a *g, void *sgl);
-u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, struct nvgpu_sgl *sgl, u64 ipa,
+u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, void *sgl, u64 ipa,
u64 *pa_len);
u64 nvgpu_mem_sgl_dma(void *sgl);
u64 nvgpu_mem_sgl_length(void *sgl);

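With the new signatures, a backend can bind its helpers directly into the ops table with no casts at the binding sites. A sketch assuming the POSIX build (__NVGPU_POSIX__) and initializing only the fields visible in this patch; the real table may set more:

static const struct nvgpu_sgt_ops example_sgl_ops = {
	.sgl_next = nvgpu_mem_sgl_next,
	.sgl_phys = nvgpu_mem_sgl_phys,
	.sgl_ipa_to_pa = nvgpu_mem_sgl_ipa_to_pa,
	.sgl_dma = nvgpu_mem_sgl_dma,
	.sgl_length = nvgpu_mem_sgl_length,
};
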
View File

@@ -37,12 +37,12 @@
#include "platform_gk20a.h"
-static u64 __nvgpu_sgl_ipa(struct gk20a *g, struct nvgpu_sgl *sgl)
+static u64 __nvgpu_sgl_ipa(struct gk20a *g, void *sgl)
{
return sg_phys((struct scatterlist *)sgl);
}
-static u64 __nvgpu_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
+static u64 __nvgpu_sgl_phys(struct gk20a *g, void *sgl)
{
struct device *dev = dev_from_gk20a(g);
struct gk20a_platform *platform = gk20a_get_platform(dev);
@@ -64,11 +64,11 @@ u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl)
if (nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ||
!nvgpu_iommuable(g))
return g->ops.mm.gmmu.gpu_phys_addr(g, NULL,
-__nvgpu_sgl_phys(g, (struct nvgpu_sgl *)sgl));
+__nvgpu_sgl_phys(g, (void *)sgl));
if (sg_dma_address(sgl) == 0)
return g->ops.mm.gmmu.gpu_phys_addr(g, NULL,
-__nvgpu_sgl_phys(g, (struct nvgpu_sgl *)sgl));
+__nvgpu_sgl_phys(g, (void *)sgl));
if (sg_dma_address(sgl) == DMA_ERROR_CODE)
return 0;
@@ -134,7 +134,7 @@ u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
return nvgpu_mem_get_addr(g, mem);
#endif
-return __nvgpu_sgl_phys(g, (struct nvgpu_sgl *)mem->priv.sgt->sgl);
+return __nvgpu_sgl_phys(g, (void *)mem->priv.sgt->sgl);
}
/*
@@ -189,9 +189,9 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
return ret;
}
-static struct nvgpu_sgl *nvgpu_mem_linux_sgl_next(void *sgl)
+static void *nvgpu_mem_linux_sgl_next(void *sgl)
{
-return (struct nvgpu_sgl *)sg_next((struct scatterlist *)sgl);
+return (void *)sg_next((struct scatterlist *)sgl);
}
static u64 nvgpu_mem_linux_sgl_ipa(struct gk20a *g, void *sgl)
@@ -200,7 +200,7 @@ static u64 nvgpu_mem_linux_sgl_ipa(struct gk20a *g, void *sgl)
}
static u64 nvgpu_mem_linux_sgl_ipa_to_pa(struct gk20a *g,
-struct nvgpu_sgl *sgl, u64 ipa, u64 *pa_len)
+void *sgl, u64 ipa, u64 *pa_len)
{
struct device *dev = dev_from_gk20a(g);
struct gk20a_platform *platform = gk20a_get_platform(dev);
@@ -297,7 +297,7 @@ struct nvgpu_sgt *nvgpu_linux_sgt_create(struct gk20a *g, struct sg_table *sgt)
nvgpu_log(g, gpu_dbg_sgl, "Making Linux SGL!");
-nvgpu_sgt->sgl = (struct nvgpu_sgl *)linux_sgl;
+nvgpu_sgt->sgl = (void *)linux_sgl;
nvgpu_sgt->ops = &nvgpu_linux_sgt_ops;
return nvgpu_sgt;

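On Linux the opaque cursor is concretely a struct scatterlist *, so each op casts once and defers to the kernel scatterlist API. A hypothetical sgl_dma op in the same style as nvgpu_mem_linux_sgl_next above (not taken from the patch):

static u64 example_linux_sgl_dma(void *sgl)
{
	return (u64)sg_dma_address((struct scatterlist *)sgl);
}
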
View File

@@ -198,7 +198,7 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
mem->priv.sgt->ops = &nvgpu_sgt_posix_ops;
/* Allocate memory for sgl */
-mem->priv.sgt->sgl = (struct nvgpu_sgl *)
+mem->priv.sgt->sgl = (struct nvgpu_mem_sgl *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_mem_sgl));
if (mem->priv.sgt->sgl == NULL) {
nvgpu_err(g, "sgl allocation failed\n");

View File

@@ -45,11 +45,11 @@ u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
return (u64)(uintptr_t)mem->cpu_va;
}
-struct nvgpu_sgl *nvgpu_mem_sgl_next(void *sgl)
+void *nvgpu_mem_sgl_next(void *sgl)
{
struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
-return (struct nvgpu_sgl *) mem->next;
+return (void *) mem->next;
}
u64 nvgpu_mem_sgl_phys(struct gk20a *g, void *sgl)
@@ -59,8 +59,7 @@ u64 nvgpu_mem_sgl_phys(struct gk20a *g, void *sgl)
return (u64)(uintptr_t)mem->phys;
}
-u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, struct nvgpu_sgl *sgl,
-u64 ipa, u64 *pa_len)
+u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, void *sgl, u64 ipa, u64 *pa_len)
{
return nvgpu_mem_sgl_phys(g, sgl);
}
@@ -188,7 +187,7 @@ struct nvgpu_sgt *nvgpu_mem_sgt_posix_create_from_list(struct gk20a *g,
nvgpu_kfree(g, sgt);
return NULL;
}
-sgt->sgl = (struct nvgpu_sgl *)sgl;
+sgt->sgl = (void *)sgl;
sgt->ops = &nvgpu_sgt_posix_ops;
return sgt;
@@ -243,7 +242,7 @@ struct nvgpu_sgt *nvgpu_sgt_os_create_from_mem(struct gk20a *g,
sgl->length = mem->size;
sgl->phys = (u64) mem->cpu_va;
-sgt->sgl = (struct nvgpu_sgl *) sgl;
+sgt->sgl = (void *) sgl;
return sgt;
}

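Construction mirrors consumption: the concrete nvgpu_mem_sgl nodes are linked first, and only the head pointer is stored in the table's void *sgl field. A hypothetical two-chunk setup (attach_two_chunks is illustrative; C needs no cast when assigning an object pointer to void *, though the patch keeps the casts explicit):

static void attach_two_chunks(struct nvgpu_sgt *sgt,
			      struct nvgpu_mem_sgl *a,
			      struct nvgpu_mem_sgl *b)
{
	a->next = b;
	b->next = NULL;

	sgt->sgl = (void *)a;
	sgt->ops = &nvgpu_sgt_posix_ops;
}
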
View File

@@ -390,7 +390,7 @@ static int test_page_allocator_sgt_ops(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 addr;
-struct nvgpu_sgl *sgl = NULL;
+void *sgl = NULL;
struct nvgpu_page_alloc *alloc = NULL;
addr = na->ops->alloc(na, SZ_32K);

View File

@@ -265,7 +265,7 @@ static struct test_parameters test_sgt_iommu_sysmem = {
* This is used to test a corner case in __nvgpu_gmmu_do_update_page_table()
*/
static u64 nvgpu_mem_sgl_ipa_to_pa_by_half(struct gk20a *g,
-struct nvgpu_sgl *sgl, u64 ipa, u64 *pa_len)
+void *sgl, u64 ipa, u64 *pa_len)
{
*pa_len = test_iommu_sysmem_sgl_skip.offset_pages * SZ_4K / 2;

View File

@@ -537,9 +537,9 @@ int test_nvgpu_mem_phys_ops(struct unit_module *m,
u64 ret;
struct nvgpu_gmmu_attrs *attrs = NULL;
struct nvgpu_sgt *test_sgt = test_mem->phys_sgt;
-struct nvgpu_sgl *test_sgl = test_sgt->sgl;
-struct nvgpu_sgl *temp_sgl = test_sgt->ops->sgl_next(test_sgl);
+void *test_sgl = test_sgt->sgl;
+void *temp_sgl = test_sgt->ops->sgl_next(test_sgl);
if (temp_sgl != NULL) {
unit_return_fail(m,

View File

@@ -44,7 +44,7 @@ static u64 ops_sgl_ipa(struct gk20a *g, void *sgl)
return EXPECTED_U64;
}
-static u64 ops_sgl_ipa_to_pa(struct gk20a *g, struct nvgpu_sgl *sgl,
+static u64 ops_sgl_ipa_to_pa(struct gk20a *g, void *sgl,
u64 ipa, u64 *pa_len)
{
return EXPECTED_U64;
@@ -157,7 +157,7 @@ int test_nvgpu_sgt_get_next(struct unit_module *m, struct gk20a *g,
{
int ret = UNIT_SUCCESS;
struct nvgpu_sgt *sgt;
-struct nvgpu_sgl *api_ptr;
+void *api_ptr;
struct nvgpu_mem_sgl *sgl_ptr;
int i;
#define SGL_LEN 100

View File

@@ -240,7 +240,7 @@ static int create_alloc_and_sgt(struct unit_module *m, struct gk20a *g,
return -ENOMEM;
}
mem->vidmem_alloc->sgt.ops = sgt->ops;
-mem->vidmem_alloc->sgt.sgl = (struct nvgpu_sgl *) mem;
+mem->vidmem_alloc->sgt.sgl = (void *) mem;
free(sgt);
/* All PRAMIN accessed must have a VIDMEM aperture */
@@ -306,7 +306,7 @@ static int test_pramin_rd_n_single(struct unit_module *m, struct gk20a *g,
goto free_vidmem;
}
-mem.vidmem_alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
+mem.vidmem_alloc->sgt.sgl = (void *)sgl;
nvgpu_pramin_rd_n(g, &mem, 0, byte_cnt, (void *) dest);
@@ -394,7 +394,7 @@ static int test_pramin_wr_n_multi(struct unit_module *m, struct gk20a *g,
sgl2->next = sgl3;
sgl3->next = NULL;
-mem.vidmem_alloc->sgt.sgl = (struct nvgpu_sgl *) sgl1;
+mem.vidmem_alloc->sgt.sgl = (void *) sgl1;
nvgpu_pramin_wr_n(g, &mem, byte_offset, byte_cnt, (void *) src);
@@ -449,7 +449,7 @@ static int test_pramin_memset(struct unit_module *m, struct gk20a *g,
goto free_vidmem;
}
-mem.vidmem_alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
+mem.vidmem_alloc->sgt.sgl = (void *)sgl;
nvgpu_pramin_memset(g, &mem, 0, byte_cnt, MEMSET_PATTERN);