gpu: nvgpu: Create unique share id

This fixes a race in address space identifier creation by
generating the id with an atomic increment.

Bug 3684734

Change-Id: I864e8f61257569e35f926822c2a5260532d41360
Signed-off-by: Dinesh T <dt@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2742206
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Richard Zhao <rizhao@nvidia.com>
GVS: Gerrit_Virtual_Submit
Dinesh T authored on 2022-07-08 20:28:27 +00:00; committed by mobile promotions
parent eae4593343
commit b1d7c77d8e
8 changed files with 33 additions and 117 deletions
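The race being fixed: generate_as_share_id() performed a plain read-modify-write on as->last_share_id, so two concurrent allocations could read the same value and hand out duplicate address-space ids. A minimal standalone sketch of the before/after pattern, using C11 atomics in place of the nvgpu wrappers (illustrative only, not the driver's code):

#include <stdatomic.h>

/* Old scheme (racy): two threads can both load the same last_share_id,
 * both store value + 1, and both return the same "unique" id. */
static int last_share_id;

static int generate_share_id_racy(void)
{
        last_share_id = last_share_id + 1; /* load, add, store: divisible */
        return last_share_id;
}

/* New scheme: the increment and the read of its result form one
 * indivisible operation, so concurrent callers observe distinct values. */
static atomic_int unique_share_id;

static int generate_share_id_atomic(void)
{
        /* atomic_fetch_add returns the pre-increment value; the +1 mirrors
         * the post-increment semantics of nvgpu_atomic_inc_return(). */
        return atomic_fetch_add(&unique_share_id, 1) + 1;
}

On top of this, the driver's version below treats a post-increment result of 0, which the counter can only yield after wrapping around, as an overflow and logs an error.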

View File

@@ -1,7 +1,7 @@
 /*
  * GK20A Address Spaces
  *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,32 +32,28 @@
 #define VM_NAME_PREFIX "as_"

-/* dumb allocator... */
-static int generate_as_share_id(struct gk20a_as *as)
+static nvgpu_atomic_t unique_share_id = NVGPU_ATOMIC_INIT(0);
+
+/*
+ * Generate unique id by doing atomic increment on unique_share_id.
+ */
+static int generate_unique_share_id(void)
 {
-        struct gk20a *g = gk20a_from_as(as);
+        int ret = 0;

-        nvgpu_log_fn(g, " ");
-        as->last_share_id = nvgpu_safe_add_s32(as->last_share_id, 1);
-        return as->last_share_id;
+        ret = nvgpu_atomic_inc_return(&unique_share_id);
+        if (ret == 0) {
+                nvgpu_err(NULL, "incrementing share_id overflow");
+        }
+        return ret;
 }

-/* still dumb */
-static void release_as_share_id(struct gk20a_as_share *as_share)
-{
-        struct gk20a *g = gk20a_from_as(as_share->as);
-
-        nvgpu_log_fn(g, " ");
-        return;
-}
-
 /* address space interfaces for the gk20a module */
 static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
                                 u32 big_page_size, u32 flags,
                                 u64 va_range_start, u64 va_range_end,
                                 u64 va_range_split)
 {
-        struct gk20a_as *as = as_share->as;
-        struct gk20a *g = gk20a_from_as(as);
+        struct gk20a *g = as_share->g;
         struct mm_gk20a *mm = &g->mm;
         struct vm_gk20a *vm;
         char name[NVGPU_VM_NAME_LEN] = VM_NAME_PREFIX;
@@ -171,8 +167,8 @@ int gk20a_as_alloc_share(struct gk20a *g,
                 return -ENOMEM;
         }

-        as_share->as = &g->as;
-        as_share->id = generate_as_share_id(as_share->as);
+        as_share->g = g;
+        as_share->id = generate_unique_share_id();

         /* this will set as_share->vm. */
         err = gk20a_busy(g);
@@ -232,14 +228,8 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
         gk20a_idle(g);

 release_fail:
-        release_as_share_id(as_share);
         nvgpu_put(g);
         nvgpu_kfree(g, as_share);

         return err;
 }
-
-struct gk20a *gk20a_from_as(struct gk20a_as *as)
-{
-        return (struct gk20a *)((uintptr_t)as - offsetof(struct gk20a, as));
-}
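The deleted gk20a_from_as() above recovered the enclosing gk20a from a pointer to its embedded as member by subtracting the member's offset, the classic container_of idiom; once gk20a_as_share stores g directly, that back-computation has no remaining callers. A self-contained sketch of the idiom, with simplified hypothetical types rather than the driver's structs:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct as  { int last_share_id; };
struct gpu { int probed; struct as as; }; /* 'as' embedded by value */

/* Subtracting offsetof(struct gpu, as) from the member's address walks
 * back to the start of the enclosing struct gpu. */
static struct gpu *gpu_from_as(struct as *as)
{
        return (struct gpu *)((uintptr_t)as - offsetof(struct gpu, as));
}

int main(void)
{
        struct gpu g = { .probed = 1 };

        assert(gpu_from_as(&g.as) == &g); /* recovers the container */
        return 0;
}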

View File

@@ -1,7 +1,7 @@
 /*
  * GK20A Address Spaces
  *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,16 +29,6 @@
 struct vm_gk20a;
 struct gk20a;

-/**
- * Basic structure to identify an address space (AS).
- */
-struct gk20a_as {
-        /**
-         * Incrementing id to identify the AS, dummy allocator for now.
-         */
-        int last_share_id;
-};
-
 /**
  * Basic structure to share an AS.
  */
@@ -46,7 +36,7 @@ struct gk20a_as_share {
         /**
          * The AS to share.
          */
-        struct gk20a_as *as;
+        struct gk20a *g;

         /**
          * The VM used by the AS.
@@ -144,15 +134,4 @@ int gk20a_as_alloc_share(struct gk20a *g, u32 big_page_size,
                         u64 va_range_end, u64 va_range_split,
                         struct gk20a_as_share **out);

-/**
- * @brief Retrieve the instance of gk20a from a gk20a_as instance.
- *
- * @param as [in] The address space
- *
- * Given an instance of gk20a_as, retrieve a pointer to the underlying gk20a
- * instance.
- *
- * @return pointer to the underlying GPU (gk20a).
- */
-struct gk20a *gk20a_from_as(struct gk20a_as *as);
-
 #endif /* NVGPU_AS_H */

View File

@@ -693,9 +693,6 @@ struct gk20a {
         u32 pg_ms_gating_cnt;
 #endif

-        /** GPU address-space identifier. */
-        struct gk20a_as as;
-
         /** The HAL function pointers */
         struct gpu_ops ops;

View File

@@ -1,7 +1,7 @@
 /*
  * GK20A Address Spaces
  *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -457,7 +457,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
         int err = 0;
         struct gk20a_as_share *as_share = filp->private_data;
-        struct gk20a *g = gk20a_from_as(as_share->as);
+        struct gk20a *g = as_share->g;

         bool always_copy_to_user = false;
         u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];

View File

@@ -17,7 +17,6 @@ gk20a_fifo_intr_1_isr
 gk20a_fifo_intr_handle_chsw_error
 gk20a_fifo_intr_handle_runlist_event
 gk20a_fifo_pbdma_isr
-gk20a_from_as
 gk20a_get_pde_pgsz
 gk20a_get_pte_pgsz
 gk20a_mm_fb_flush

View File

@@ -333,18 +333,17 @@ test_pause_resume_mask.pause_resume_mask=0
 test_unit_config.unit_config=2

 [mm.as]
-test_as_alloc_share.as_alloc_share_0k_um=0
-test_as_alloc_share.as_alloc_share_64k_um=0
+test_as_alloc_share.as_alloc_share_0k_um=2
+test_as_alloc_share.as_alloc_share_64k_um=2
 test_as_alloc_share.as_alloc_share_64k_um_as_fail=0
 test_as_alloc_share.as_alloc_share_64k_um_busy_fail_1=0
-test_as_alloc_share.as_alloc_share_64k_um_busy_fail_2=0
+test_as_alloc_share.as_alloc_share_64k_um_busy_fail_2=2
 test_as_alloc_share.as_alloc_share_64k_um_vm_fail=0
 test_as_alloc_share.as_alloc_share_einval_um=0
 test_as_alloc_share.as_alloc_share_notp2_um=0
-test_as_alloc_share.as_alloc_share_uva=0
-test_as_alloc_share.as_alloc_share_uva_enabled=0
+test_as_alloc_share.as_alloc_share_uva=2
+test_as_alloc_share.as_alloc_share_uva_enabled=2
 test_init_mm.init=0
-test_gk20a_from_as.gk20a_from_as=0

 [mm.dma]
 test_mm_dma_alloc.alloc=0

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -268,33 +268,6 @@ int test_as_alloc_share(struct unit_module *m, struct gk20a *g, void *args)
         return UNIT_SUCCESS;
 }

-int test_gk20a_from_as(struct unit_module *m, struct gk20a *g, void *args)
-{
-        int ret = UNIT_FAIL;
-        struct gk20a_as_share *out;
-        int err;
-
-        err = gk20a_as_alloc_share(g, SZ_64K, NVGPU_AS_ALLOC_USERSPACE_MANAGED,
-                                   (SZ_64K << 10), (1ULL << 37),
-                                   nvgpu_gmmu_va_small_page_limit(), &out);
-        if (err != 0) {
-                unit_return_fail(m, "gk20a_as_alloc_share failed err=%d\n",
-                                 err);
-        }
-
-        if (g != gk20a_from_as(out->as)) {
-                unit_err(m, "ptr mismatch in gk20a_from_as\n");
-                goto exit;
-        }
-
-        ret = UNIT_SUCCESS;
-
-exit:
-        gk20a_as_release_share(out);
-        return ret;
-}
-
 struct unit_module_test nvgpu_mm_as_tests[] = {
         UNIT_TEST(init, test_init_mm, NULL, 0),
         UNIT_TEST(as_alloc_share_64k_um_as_fail, test_as_alloc_share,
@@ -304,20 +277,19 @@ struct unit_module_test nvgpu_mm_as_tests[] = {
         UNIT_TEST(as_alloc_share_64k_um_busy_fail_1, test_as_alloc_share,
                   (void *) &test_64k_user_managed_busy_fail_1, 0),
         UNIT_TEST(as_alloc_share_64k_um_busy_fail_2, test_as_alloc_share,
-                  (void *) &test_64k_user_managed_busy_fail_2, 0),
+                  (void *) &test_64k_user_managed_busy_fail_2, 2),
         UNIT_TEST(as_alloc_share_64k_um, test_as_alloc_share,
-                  (void *) &test_64k_user_managed, 0),
+                  (void *) &test_64k_user_managed, 2),
         UNIT_TEST(as_alloc_share_0k_um, test_as_alloc_share,
-                  (void *) &test_0k_user_managed, 0),
+                  (void *) &test_0k_user_managed, 2),
         UNIT_TEST(as_alloc_share_einval_um, test_as_alloc_share,
                   (void *) &test_einval_user_managed, 0),
         UNIT_TEST(as_alloc_share_notp2_um, test_as_alloc_share,
                   (void *) &test_notp2_user_managed, 0),
         UNIT_TEST(as_alloc_share_uva, test_as_alloc_share,
-                  (void *) &test_64k_unified_va, 0),
+                  (void *) &test_64k_unified_va, 2),
         UNIT_TEST(as_alloc_share_uva_enabled, test_as_alloc_share,
-                  (void *) &test_64k_unified_va_enabled, 0),
-        UNIT_TEST(gk20a_from_as, test_gk20a_from_as, NULL, 0),
+                  (void *) &test_64k_unified_va_enabled, 2),
 };

 UNIT_MODULE(mm.as, nvgpu_mm_as_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -119,7 +119,7 @@ int test_init_mm(struct unit_module *m, struct gk20a *g, void *args);
  * Test Type: Feature
  *
  * Targets: gk20a_as_alloc_share, gk20a_as_release_share,
- * gk20a_vm_release_share, gk20a_from_as
+ * gk20a_vm_release_share
  *
  * Input:
  * - The test_init_mm must have been executed
@@ -153,24 +153,4 @@ int test_init_mm(struct unit_module *m, struct gk20a *g, void *args);
  */
 int test_as_alloc_share(struct unit_module *m, struct gk20a *g, void *args);

-/**
- * Test specification for: test_gk20a_from_as
- *
- * Description: Simple test to check gk20a_from_as.
- *
- * Test Type: Feature
- *
- * Targets: gk20a_from_as
- *
- * Input: None
- *
- * Steps:
- * - Call gk20a_from_as with an 'as' pointer and ensure it returns a
- *   pointer on g.
- *
- * Output: Returns PASS if the steps above were executed successfully. FAIL
- * otherwise.
- */
-int test_gk20a_from_as(struct unit_module *m, struct gk20a *g, void *args);
-
 #endif /* UNIT_MM_AS_H */