mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: select map access type from dmabuf permission and user request
Add an API to translate a dmabuf's fmode_t into a gk20a_mem_rw_flag so
that read-only vs. read-write mappings can be selected. By default, the
mapping permission of a dmabuf fd is the maximum access permission
associated with that particular fd.

Remove the bit flag MAP_ACCESS_NO_WRITE and add a 2-bit field for user
access requests: NVGPU_VM_MAP_ACCESS_DEFAULT, NVGPU_VM_MAP_ACCESS_READ_ONLY
and NVGPU_VM_MAP_ACCESS_READ_WRITE.

To unify map access type handling between Linux and QNX, move the
NVGPU_VM_MAP_ACCESS_* parameter check into the common function
nvgpu_vm_map.

Set the MAP_ACCESS_TYPE enabled flag in the common characteristics init
function, as it is supported on both Linux and QNX.

Bug 200717195
Bug 3250920

Change-Id: I1a249f7c52bda099390dd4f371b005e1a7cef62f
Signed-off-by: Lakshmanan M <lm@nvidia.com>
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2507150
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 19186c8a02 (parent 261bdb9cc2)
committed by mobile promotions
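
For reference, a minimal sketch of how userspace packs the new 2-bit access
request into the map-buffer flags, using the ACCESS_BITMASK_OFFSET/SIZE
values this change adds to the uapi header. The pack_map_access helper and
the printf harness are illustrative only, not part of the change:

#include <stdint.h>
#include <stdio.h>

/* Values taken from the uapi hunk below. */
#define NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_OFFSET 10U
#define NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_SIZE   2U

#define NVGPU_AS_MAP_BUFFER_ACCESS_DEFAULT    0U
#define NVGPU_AS_MAP_BUFFER_ACCESS_READ_ONLY  1U
#define NVGPU_AS_MAP_BUFFER_ACCESS_READ_WRITE 2U

/* Illustrative helper: pack an access request into the map flags. */
static uint32_t pack_map_access(uint32_t flags, uint32_t access)
{
	uint32_t mask = ((1U << NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_SIZE) - 1U)
			<< NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_OFFSET;

	flags &= ~mask;
	flags |= access << NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_OFFSET;
	return flags;
}

int main(void)
{
	uint32_t flags = 0;

	/* Request a read-only GPU mapping regardless of the dmabuf fd mode. */
	flags = pack_map_access(flags, NVGPU_AS_MAP_BUFFER_ACCESS_READ_ONLY);
	printf("map flags: 0x%x\n", flags); /* prints 0x400 */
	return 0;
}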
@@ -912,6 +912,7 @@ int nvgpu_init_gpu_characteristics(struct gk20a *g)
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL, true);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, true);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_ACCESS_TYPE, true);
 
 	/*
 	 * Fast submits are supported as long as the user doesn't request
@@ -1462,7 +1462,8 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
 		u64 map_addr,
 		u64 map_size,
 		u64 phys_offset,
-		enum gk20a_mem_rw_flag rw,
+		enum gk20a_mem_rw_flag buffer_rw_mode,
+		u32 map_access_requested,
 		u32 flags,
 		s16 compr_kind,
 		s16 incompr_kind,
@@ -1473,6 +1474,7 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct nvgpu_mapped_buf *mapped_buffer = NULL;
 	struct nvgpu_ctag_buffer_info binfo = { 0 };
+	enum gk20a_mem_rw_flag rw = buffer_rw_mode;
 	struct nvgpu_vm_area *vm_area = NULL;
 	int err = 0;
 	bool va_allocated = true;
@@ -1484,6 +1486,16 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
 	 */
 	s16 map_key_kind;
 
+	if ((map_access_requested == NVGPU_VM_MAP_ACCESS_READ_WRITE) &&
+			(buffer_rw_mode == gk20a_mem_flag_read_only)) {
+		nvgpu_err(g, "RW mapping requested for RO buffer");
+		return -EINVAL;
+	}
+
+	if (map_access_requested == NVGPU_VM_MAP_ACCESS_READ_ONLY) {
+		rw = gk20a_mem_flag_read_only;
+	}
+
 	*mapped_buffer_arg = NULL;
 
 	err = nvgpu_vm_map_check_attributes(vm, os_buf, &binfo, flags,
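
The access-resolution rule added to nvgpu_vm_map above, restated as a
standalone sketch for clarity: the requested access may only narrow, never
widen, the buffer's own mode. resolve_map_access, the test harness, and the
simplified enum values are local to this example (the kernel returns -EINVAL
where this returns -1):

#include <stdio.h>

/* Simplified from the nvgpu headers for this example. */
enum gk20a_mem_rw_flag {
	gk20a_mem_flag_none = 0,	/* no restriction: read-write */
	gk20a_mem_flag_read_only = 1,
};

#define NVGPU_VM_MAP_ACCESS_DEFAULT    0U
#define NVGPU_VM_MAP_ACCESS_READ_ONLY  1U
#define NVGPU_VM_MAP_ACCESS_READ_WRITE 2U

static int resolve_map_access(enum gk20a_mem_rw_flag buffer_rw_mode,
			      unsigned int map_access_requested,
			      enum gk20a_mem_rw_flag *rw)
{
	*rw = buffer_rw_mode;	/* DEFAULT: inherit the buffer's mode */

	/* A RW request can never widen a read-only buffer. */
	if (map_access_requested == NVGPU_VM_MAP_ACCESS_READ_WRITE &&
	    buffer_rw_mode == gk20a_mem_flag_read_only)
		return -1;

	/* A RO request narrows any buffer to read-only. */
	if (map_access_requested == NVGPU_VM_MAP_ACCESS_READ_ONLY)
		*rw = gk20a_mem_flag_read_only;

	return 0;
}

int main(void)
{
	enum gk20a_mem_rw_flag rw;

	/* RO request on a writable buffer narrows the mapping: prints "0 1". */
	printf("%d %d\n", resolve_map_access(gk20a_mem_flag_none,
			NVGPU_VM_MAP_ACCESS_READ_ONLY, &rw), (int)rw);

	/* RW request on an RO buffer is rejected: prints "-1". */
	printf("%d\n", resolve_map_access(gk20a_mem_flag_read_only,
			NVGPU_VM_MAP_ACCESS_READ_WRITE, &rw));
	return 0;
}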
@@ -56,6 +56,7 @@ struct nvgpu_mapped_buf_priv {
 int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		struct dma_buf *dmabuf,
 		u64 map_addr,
+		u32 map_access_requested,
 		u32 flags,
 		u32 page_size,
 		s16 compr_kind,
@@ -352,7 +352,10 @@ struct vm_gk20a {
 #define NVGPU_VM_MAP_DIRECT_KIND_CTRL BIT32(4)
 #define NVGPU_VM_MAP_L3_ALLOC BIT32(5)
 #define NVGPU_VM_MAP_PLATFORM_ATOMIC BIT32(6)
-#define NVGPU_VM_MAP_ACCESS_NO_WRITE BIT32(7)
+
+#define NVGPU_VM_MAP_ACCESS_DEFAULT 0U
+#define NVGPU_VM_MAP_ACCESS_READ_ONLY 1U
+#define NVGPU_VM_MAP_ACCESS_READ_WRITE 2U
 
 #define NVGPU_KIND_INVALID S16(-1)
 
@@ -604,6 +607,7 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
 		u64 map_size,
 		u64 phys_offset,
 		enum gk20a_mem_rw_flag rw,
+		u32 map_access_requested,
 		u32 flags,
 		s16 compr_kind,
 		s16 incompr_kind,
@@ -1107,6 +1107,7 @@ __releases(&l->cde_app->mutex)
 	/* map the destination buffer */
 	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map_linux */
 	err = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
+				 NVGPU_VM_MAP_ACCESS_DEFAULT,
 				 NVGPU_VM_MAP_CACHEABLE |
 				 NVGPU_VM_MAP_DIRECT_KIND_CTRL,
 				 gk20a_cde_mapping_page_size(cde_ctx->vm,
@@ -316,7 +316,6 @@ void gk20a_init_linux_characteristics(struct gk20a *g)
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_PARTIAL_MAPPINGS, true);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_DETERMINISTIC_OPTS, true);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_USERSPACE_MANAGED_AS, true);
-	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_ACCESS_TYPE, true);
 
 	if (!IS_ENABLED(CONFIG_NVGPU_SYNCFD_NONE)) {
 		nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNC_FENCE_FDS, true);
@@ -43,6 +43,9 @@
 static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
 {
 	u32 core_flags = 0;
+	u32 map_access_bitmask =
+		(BIT32(NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_SIZE) - 1U) <<
+		NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_OFFSET;
 
 	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
 		core_flags |= NVGPU_VM_MAP_FIXED_OFFSET;
@@ -58,8 +61,9 @@ static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
 		core_flags |= NVGPU_VM_MAP_DIRECT_KIND_CTRL;
 	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC)
 		core_flags |= NVGPU_VM_MAP_PLATFORM_ATOMIC;
-	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_NO_WRITE)
-		core_flags |= NVGPU_VM_MAP_ACCESS_NO_WRITE;
+
+	/* copy the map access bitfield from flags */
+	core_flags |= (flags & map_access_bitmask);
 
 	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS)
 		nvgpu_warn(g, "Ignoring deprecated flag: "
@@ -68,6 +72,20 @@ static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
 	return core_flags;
 }
 
+static int nvgpu_vm_translate_map_access(struct gk20a *g, u32 flags,
+		u32 *map_access)
+{
+	*map_access = (flags >> NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_OFFSET) &
+		(BIT32(NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_SIZE) - 1U);
+
+	if (*map_access > NVGPU_AS_MAP_BUFFER_ACCESS_READ_WRITE) {
+		nvgpu_err(g, "Invalid map access specified %u", *map_access);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct nvgpu_mapped_buf *nvgpu_vm_find_mapped_buf_reverse(
 		struct vm_gk20a *vm, struct dma_buf *dmabuf, s16 kind)
 {
@@ -183,9 +201,40 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
 	return mapped_buffer;
 }
 
+static int nvgpu_convert_fmode_to_gmmu_rw_attr(struct gk20a *g, fmode_t mode,
+		enum gk20a_mem_rw_flag *rw_attr)
+{
+	fmode_t fmode_rw_flag = mode & (FMODE_READ | FMODE_PREAD |
+			FMODE_WRITE | FMODE_PWRITE);
+	int ret = 0;
+
+	if (!fmode_rw_flag) {
+		return -EINVAL;
+	}
+
+	switch (fmode_rw_flag) {
+	case FMODE_READ:
+	case FMODE_PREAD:
+	case (FMODE_READ | FMODE_PREAD):
+		*rw_attr = gk20a_mem_flag_read_only;
+		break;
+	case FMODE_WRITE:
+	case FMODE_PWRITE:
+	case (FMODE_WRITE | FMODE_PWRITE):
+		ret = -EINVAL;
+		break;
+	default:
+		*rw_attr = gk20a_mem_flag_none;
+		break;
+	}
+
+	return ret;
+}
+
 int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		struct dma_buf *dmabuf,
 		u64 map_addr,
+		u32 map_access_requested,
 		u32 flags,
 		u32 page_size,
 		s16 compr_kind,
@@ -195,7 +244,7 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		struct vm_gk20a_mapping_batch *batch,
 		u64 *gpu_va)
 {
-	enum gk20a_mem_rw_flag rw_flag = gk20a_mem_flag_none;
+	enum gk20a_mem_rw_flag buffer_rw_mode = gk20a_mem_flag_none;
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct device *dev = dev_from_gk20a(g);
 	struct nvgpu_os_buffer os_buf;
@@ -208,10 +257,12 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 	nvgpu_log(g, gpu_dbg_map, "dmabuf file mode: 0x%x mapping flags: 0x%x",
 			dmabuf->file->f_mode, flags);
 
-	if (!(dmabuf->file->f_mode & (FMODE_WRITE | FMODE_PWRITE)) &&
-			!(flags & NVGPU_VM_MAP_ACCESS_NO_WRITE)) {
-		nvgpu_err(g, "RW access requested for RO mapped buffer");
-		return -EINVAL;
+	err = nvgpu_convert_fmode_to_gmmu_rw_attr(g, dmabuf->file->f_mode,
+			&buffer_rw_mode);
+	if (err != 0) {
+		nvgpu_err(g, "dmabuf file mode 0x%x not supported for GMMU map",
+				dmabuf->file->f_mode);
+		return err;
 	}
 
 	sgt = nvgpu_mm_pin(dev, dmabuf, &attachment);
@@ -234,17 +285,14 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		goto clean_up;
 	}
 
-	if (flags & NVGPU_VM_MAP_ACCESS_NO_WRITE) {
-		rw_flag = gk20a_mem_flag_read_only;
-	}
-
 	err = nvgpu_vm_map(vm,
 		       &os_buf,
 		       nvgpu_sgt,
 		       map_addr,
 		       mapping_size,
 		       buffer_offset,
-		       rw_flag,
+		       buffer_rw_mode,
+		       map_access_requested,
 		       flags,
 		       compr_kind,
 		       incompr_kind,
@@ -284,6 +332,7 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct dma_buf *dmabuf;
+	u32 map_access;
 	u64 ret_va;
 	int err = 0;
 
@@ -333,7 +382,14 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 		return -EINVAL;
 	}
 
-	err = nvgpu_vm_map_linux(vm, dmabuf, *map_addr,
+	err = nvgpu_vm_translate_map_access(g, flags, &map_access);
+	if (err != 0) {
+		nvgpu_err(g, "map access translation failed");
+		dma_buf_put(dmabuf);
+		return -EINVAL;
+	}
+
+	err = nvgpu_vm_map_linux(vm, dmabuf, *map_addr, map_access,
 			nvgpu_vm_translate_linux_flags(g, flags),
 			page_size,
 			compr_kind, incompr_kind,
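
The fmode_t translation above derives the buffer's maximum access from how
the dmabuf fd was opened, and nvgpu_vm_map then combines that with the
user's requested access. A summary of the switch, for reference:

/*
 * nvgpu_convert_fmode_to_gmmu_rw_attr, summarized:
 *
 *   f_mode access bits set                resulting buffer_rw_mode
 *   ------------------------------------  --------------------------------
 *   none of READ/PREAD/WRITE/PWRITE       -EINVAL (fd grants no access)
 *   FMODE_READ and/or FMODE_PREAD only    gk20a_mem_flag_read_only
 *   FMODE_WRITE and/or FMODE_PWRITE only  -EINVAL (write-only unsupported)
 *   read bits mixed with write bits       gk20a_mem_flag_none (read-write)
 */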
@@ -110,7 +110,13 @@ struct nvgpu_as_bind_channel_args {
 #define NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC (1 << 7)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL (1 << 8)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC (1 << 9)
-#define NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_NO_WRITE (1 << 10)
+#define NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_OFFSET 10U
+#define NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_SIZE 2U
+
+#define NVGPU_AS_MAP_BUFFER_ACCESS_DEFAULT 0U
+#define NVGPU_AS_MAP_BUFFER_ACCESS_READ_ONLY 1U
+#define NVGPU_AS_MAP_BUFFER_ACCESS_READ_WRITE 2U
 
 /*
  * VM map buffer IOCTL
@@ -355,6 +355,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 			buf_size,
 			0,
 			gk20a_mem_flag_none,
+			NVGPU_VM_MAP_ACCESS_READ_WRITE,
 			NVGPU_VM_MAP_CACHEABLE,
 			NV_KIND_INVALID,
 			0,
@@ -377,6 +378,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 			buf_size,
 			0,
 			gk20a_mem_flag_none,
+			NVGPU_VM_MAP_ACCESS_READ_WRITE,
 			NVGPU_VM_MAP_CACHEABLE,
 			NV_KIND_INVALID,
 			0,
@@ -399,6 +401,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 			buf_size,
 			0,
 			gk20a_mem_flag_none,
+			NVGPU_VM_MAP_ACCESS_READ_WRITE,
 			NVGPU_VM_MAP_CACHEABLE,
 			NV_KIND_INVALID,
 			0,
@@ -420,6 +423,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 			SZ_1G,
 			0,
 			gk20a_mem_flag_none,
+			NVGPU_VM_MAP_ACCESS_READ_WRITE,
 			NVGPU_VM_MAP_CACHEABLE,
 			NV_KIND_INVALID,
 			0,
@@ -441,6 +445,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 			buf_size,
 			0,
 			gk20a_mem_flag_none,
+			NVGPU_VM_MAP_ACCESS_READ_WRITE,
 			NVGPU_VM_MAP_CACHEABLE,
 			NVGPU_KIND_INVALID,
 			NVGPU_KIND_INVALID,
@@ -463,6 +468,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 			buf_size,
 			0,
 			gk20a_mem_flag_none,
+			NVGPU_VM_MAP_ACCESS_READ_WRITE,
 			NVGPU_VM_MAP_CACHEABLE,
 			NV_KIND_INVALID,
 			0,
@@ -616,6 +622,7 @@ static int map_buffer(struct unit_module *m,
 			buf_size,
 			0,
 			gk20a_mem_flag_none,
+			NVGPU_VM_MAP_ACCESS_READ_WRITE,
 			flags,
 			compr_kind,
 			0,
@@ -641,6 +648,7 @@ static int map_buffer(struct unit_module *m,
 			buf_size,
 			0,
 			gk20a_mem_flag_none,
+			NVGPU_VM_MAP_ACCESS_READ_WRITE,
 			flags,
 			compr_kind,
 			0,