Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: remove vm->guest_managed
The GPU server has now moved to using the kernel VMA range as well, so
guest_managed is no longer used.

Jira GVSCI-10900

Change-Id: I838cad24194faf72fe5ef53053e5dacc9f6588c1
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2546189
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Committed by: mobile promotions
Parent commit: 2845f2b66e
This commit: 671dbbb145
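For orientation before the diff: a minimal, hypothetical caller-side sketch of the path this change simplifies. Only the nvgpu_vm_alloc_va() signature, its 0-on-failure return, and the GMMU_NR_PAGE_SIZES check are taken from the hunks below; the surrounding variable names and the -ENOMEM mapping are illustrative assumptions, not code from this commit.

/*
 * Illustrative sketch only (not part of this commit): with the
 * vm->guest_managed rejection removed, nvgpu_vm_alloc_va() returns 0
 * only for an invalid page-size index or an underlying allocation failure.
 */
u64 gpu_va = nvgpu_vm_alloc_va(vm, size, pgsz_idx);

if (gpu_va == 0ULL) {
        /* e.g. pgsz_idx >= GMMU_NR_PAGE_SIZES */
        return -ENOMEM;
}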
@@ -245,11 +245,6 @@ u64 nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
         vma = vm->vma[pgsz_idx];
 
-        if (vm->guest_managed) {
-                nvgpu_err(g, "Illegal GPU allocation on behalf of guest OS");
-                return 0;
-        }
-
         if (pgsz_idx >= GMMU_NR_PAGE_SIZES) {
                 nvgpu_err(g, "(%s) invalid page size requested", vma->name);
                 return 0;
@@ -599,8 +594,7 @@ static int nvgpu_vm_init_check_vma_limits(struct gk20a *g, struct vm_gk20a *vm,
 {
         if ((user_vma_start > user_vma_limit) ||
             (user_lp_vma_start > user_lp_vma_limit) ||
-            (!vm->guest_managed &&
-             (kernel_vma_start >= kernel_vma_limit))) {
+            (kernel_vma_start >= kernel_vma_limit)) {
                 nvgpu_err(g, "Invalid vm configuration");
                 nvgpu_do_assert();
                 return -EINVAL;
@@ -739,12 +733,6 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
                 return -ENOMEM;
         }
 
-        if (vm->guest_managed && (kernel_reserved != 0U)) {
-                nvgpu_do_assert_print(g,
-                        "Cannot use guest managed VM with kernel space");
-                return -EINVAL;
-        }
-
         nvgpu_log_info(g, "Init space for %s: valimit=0x%llx, "
                 "LP size=0x%x lowhole=0x%llx",
                 name, aperture_size,
@@ -256,8 +256,6 @@ struct vm_gk20a {
          * It is not enabled for safety build.
          */
         bool enable_ctag;
-        /** Whether this address space is managed by guest or not. */
-        bool guest_managed;
 
         /** Page size used for mappings with this address space. */
         u32 big_page_size;
@@ -820,7 +820,6 @@ test_nvgpu_gmmu_map_unmap_adv.gmmu_map_unmap_no_iommu_sysmem_adv_big_pages_offse
 test_nvgpu_gmmu_map_unmap_adv.gmmu_map_unmap_tlb_invalidate_fail=0
 test_nvgpu_gmmu_map_unmap_batched.gmmu_map_unmap_iommu_sysmem_adv_big_pages_batched=0
 test_nvgpu_gmmu_map_unmap_map_fail.map_fail_fi_null_sgt=0
-test_nvgpu_gmmu_map_unmap_map_fail.map_fail_fi_vm_alloc=0
 test_nvgpu_gmmu_map_unmap_map_fail.map_fail_pd_allocate=0
 test_nvgpu_gmmu_map_unmap_map_fail.map_fail_pd_allocate_child=0
 test_nvgpu_gmmu_map_unmap_map_fail.map_fail_tlb_invalidate=0
@@ -64,10 +64,9 @@
 
 /* Some special failure cases */
 #define SPECIAL_MAP_FAIL_FI_NULL_SGT 0
-#define SPECIAL_MAP_FAIL_VM_ALLOC 1
-#define SPECIAL_MAP_FAIL_PD_ALLOCATE 2
-#define SPECIAL_MAP_FAIL_PD_ALLOCATE_CHILD 3
-#define SPECIAL_MAP_FAIL_TLB_INVALIDATE 4
+#define SPECIAL_MAP_FAIL_PD_ALLOCATE 1
+#define SPECIAL_MAP_FAIL_PD_ALLOCATE_CHILD 2
+#define SPECIAL_MAP_FAIL_TLB_INVALIDATE 3
 
 /* Consts for requirements C1/C2 testing */
 #define REQ_C1_NUM_MEMS 3
@@ -604,11 +603,6 @@ int test_nvgpu_gmmu_map_unmap_map_fail(struct unit_module *m, struct gk20a *g,
                 nvgpu_posix_enable_fault_injection(kmem_fi, true, 3);
         }
 
-        if (scenario == SPECIAL_MAP_FAIL_VM_ALLOC) {
-                /* Special case: cause __nvgpu_vm_alloc_va to fail */
-                g->mm.pmu.vm->guest_managed = true;
-        }
-
         if (scenario == SPECIAL_MAP_FAIL_TLB_INVALIDATE) {
                 g->ops.fb.tlb_invalidate = hal_fb_tlb_invalidate_fail;
         }
@@ -623,7 +617,6 @@ int test_nvgpu_gmmu_map_unmap_map_fail(struct unit_module *m, struct gk20a *g,
         }
 
         nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
-        g->mm.pmu.vm->guest_managed = false;
 
         if (mem.gpu_va != 0) {
                 unit_return_fail(m, "map did not fail as expected\n");
@@ -1294,10 +1287,6 @@ struct unit_module_test nvgpu_gmmu_tests[] = {
                 test_nvgpu_gmmu_map_unmap_map_fail,
                 (void *) SPECIAL_MAP_FAIL_FI_NULL_SGT,
                 0),
-        UNIT_TEST(map_fail_fi_vm_alloc,
-                test_nvgpu_gmmu_map_unmap_map_fail,
-                (void *) SPECIAL_MAP_FAIL_VM_ALLOC,
-                0),
         UNIT_TEST(map_fail_tlb_invalidate,
                 test_nvgpu_gmmu_map_unmap_map_fail,
                 (void *) SPECIAL_MAP_FAIL_TLB_INVALIDATE,
@@ -243,16 +243,6 @@ int test_nvgpu_vm_alloc_va(struct unit_module *m, struct gk20a *g,
                 nvgpu_kmem_get_fault_injection();
         u64 addr;
 
-        /* Error handling: VM cannot allocate VA */
-        vm->guest_managed = true;
-        addr = nvgpu_vm_alloc_va(vm, SZ_1K, 0);
-        vm->guest_managed = false;
-        if (addr != 0) {
-                unit_err(m, "nvgpu_vm_alloc_va did not fail as expected (1).\n");
-                ret = UNIT_FAIL;
-                goto exit;
-        }
-
         /* Error handling: invalid page size */
         addr = nvgpu_vm_alloc_va(vm, SZ_1K, GMMU_NR_PAGE_SIZES);
         if (addr != 0) {
@@ -969,19 +959,6 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 
         /* Make nvgpu_vm_do_init fail with invalid parameters */
         vm = nvgpu_kzalloc(g, sizeof(*vm));
-        vm->guest_managed = true;
-        if (!EXPECT_BUG(
-                nvgpu_vm_do_init(&g->mm, vm,
-                        g->ops.mm.gmmu.get_default_big_page_size(),
-                        low_hole, user_vma, kernel_reserved,
-                        nvgpu_gmmu_va_small_page_limit(),
-                        big_pages, false, true, __func__)
-                )) {
-                unit_err(m, "BUG() was not called but it was expected (3).\n");
-                ret = UNIT_FAIL;
-                goto exit;
-        }
-        vm->guest_managed = false;
 
         /* vGPU with userspace managed */
         g->is_virtual = true;
@@ -1070,21 +1047,6 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
                 goto exit;
         }
 
-        /* Invalid low_hole and kernel_reserved to cause an invalid config */
-        vm->guest_managed = true;
-        ret = nvgpu_vm_do_init(&g->mm, vm,
-                g->ops.mm.gmmu.get_default_big_page_size(),
-                nvgpu_gmmu_va_small_page_limit(),
-                ((u64)SZ_1G * 200U), 0,
-                nvgpu_gmmu_va_small_page_limit(), big_pages,
-                false, false, __func__);
-        vm->guest_managed = false;
-        if (ret != -EINVAL) {
-                unit_err(m, "nvgpu_vm_do_init didn't fail as expected (11).\n");
-                ret = UNIT_FAIL;
-                goto exit;
-        }
-
         /* Cause nvgpu_vm_init_vma_allocators to fail for long vm name */
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
@@ -1157,7 +1119,6 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         ret = UNIT_SUCCESS;
 
 exit:
-        vm->guest_managed = false;
         if (vm != NULL) {
                 nvgpu_vm_put(vm);
         }
@@ -315,12 +315,11 @@ done:
 
 #define F_SYNC_GET_RO_MAP_PRE_ALLOCATED 0
 #define F_SYNC_GET_RO_MAP 1
-#define F_SYNC_GET_RO_MAP_FAIL 2
+#define F_SYNC_GET_RO_MAP_MAX 1
 
 static const char *f_sync_get_ro_map[] = {
         "sync_get_ro_map_preallocated",
         "sync_get_ro_map",
-        "sync_get_ro_map_fail",
 };
 
 static void syncpt_ro_map_gpu_va_clear(struct gk20a *g, struct nvgpu_channel *ch)
@@ -347,7 +346,7 @@ int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
         int err = 0;
         int ret = UNIT_FAIL;
 
-        for (branches = 0U; branches <= F_SYNC_GET_RO_MAP_FAIL; branches++) {
+        for (branches = 0U; branches <= F_SYNC_GET_RO_MAP_MAX; branches++) {
                 if (branches == F_SYNC_GET_RO_MAP_PRE_ALLOCATED) {
                         ch->vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map(ch->vm,
                                 &g->syncpt_mem, g->syncpt_unit_size,
@@ -358,10 +357,6 @@ int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
                         }
                 } else if (branches == F_SYNC_GET_RO_MAP) {
                         ch->vm->syncpt_ro_map_gpu_va = 0U;
-                } else if (branches == F_SYNC_GET_RO_MAP_FAIL) {
-                        ch->vm->syncpt_ro_map_gpu_va = 0U;
-                        /* fail Read-Only nvgpu_gmmu_map of g->syncpt_mem */
-                        ch->vm->guest_managed = true;
                 }
 
                 unit_info(m, "%s branch: %s\n", __func__, f_sync_get_ro_map[branches]);
@@ -369,7 +364,7 @@ int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
                 err = g->ops.sync.syncpt.get_sync_ro_map(ch->vm,
                                 &base_gpuva, &sync_size, &num_syncpoints);
 
-                if (branches < F_SYNC_GET_RO_MAP_FAIL) {
+                if (branches < F_SYNC_GET_RO_MAP_MAX) {
                         if(err != 0) {
                                 unit_return_fail(m,
                                         "unexpected failure in get_sync_ro_map");
@@ -382,40 +377,25 @@ int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
 
                         unit_info(m, "Syncpt Shim GPU VA: %llu\n", base_gpuva);
 
-                } else {
-                        if (err == 0) {
-                                unit_return_fail(m,
-                                        "expected failure in get_sync_ro_map");
-                        } else {
-                                ret = UNIT_SUCCESS;
-                        }
                 }
 
                 syncpt_ro_map_gpu_va_clear(g, ch);
 
-                if (ch->vm->guest_managed == true) {
-                        ch->vm->guest_managed = false;
-                }
-
                 base_gpuva = 0U;
                 sync_size = 0U;
         }
 done:
         syncpt_ro_map_gpu_va_clear(g, ch);
 
-        if (ch->vm->guest_managed == true) {
-                ch->vm->guest_managed = false;
-        }
         return ret;
 }
 
 #define F_SYNC_SYNCPT_ALLOC_FAILED 1
 #define F_SYNC_STRADD_FAIL 3
 #define F_SYNC_NVHOST_CLIENT_MANAGED_FAIL 4
-#define F_SYNC_RO_MAP_GPU_VA_MAP_FAIL 5
-#define F_SYNC_MEM_CREATE_PHYS_FAIL 6
-#define F_SYNC_BUF_MAP_FAIL 7
-#define F_SYNC_FAIL_LAST 8
+#define F_SYNC_MEM_CREATE_PHYS_FAIL 5
+#define F_SYNC_BUF_MAP_FAIL 6
+#define F_SYNC_FAIL_LAST 7
 
 static const char *f_syncpt_open[] = {
         "global_disable_syncpt",
@@ -423,7 +403,6 @@ static const char *f_syncpt_open[] = {
         "syncpt_user_managed_false",
         "syncpt_stradd_fail",
         "syncpt_get_client_managed_fail",
-        "syncpt_ro_map_gpu_va_fail",
         "syncpt_create_phys_mem_fail",
         "syncpt_buf_map_fail",
 };
@@ -439,10 +418,6 @@ static void clear_test_params(struct gk20a *g,
                 bool *fault_injection_enabled, u32 branch,
                 struct nvgpu_posix_fault_inj *kmem_fi)
 {
-        if (ch->vm->guest_managed) {
-                ch->vm->guest_managed = false;
-        }
-
         if (*fault_injection_enabled) {
                 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
                 *fault_injection_enabled = false;
@@ -486,9 +461,6 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
                 g->name = FAIL_G_NAME_STR;
         } else if (branches == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
                 g->nvhost->syncpt_id = 20U; /* arbitary id */
-        } else if (branches == F_SYNC_RO_MAP_GPU_VA_MAP_FAIL) {
-                /* fail Read-Only nvgpu_gmmu_map of g->syncpt_mem */
-                ch->vm->guest_managed = true;
         } else if (branches == F_SYNC_MEM_CREATE_PHYS_FAIL) {
                 /*
                  * bypass map of g->syncpt_mem and fail at