Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: use inplace allocation in sync framework
This change is the first of a series of changes to support the usage of
pre-allocated job tracking resources in the submit path. With this change,
we still maintain a dynamically-allocated joblist, but make the necessary
changes in the channel_sync & fence framework to use in-place allocations.
Specifically, we:

1) Update channel sync framework routines to take in pre-allocated
   priv_cmd_entry(s) & gk20a_fence(s) rather than dynamically allocating
   them themselves
2) Move allocation of priv_cmd_entry(s) & gk20a_fence(s) to
   gk20a_submit_prepare_syncs
3) Modify the fence framework to have separate allocation and init APIs.
   We expose allocation as a separate API, so the client can allocate the
   object before passing it into the channel sync framework.
4) Fix clean_up logic in the channel sync framework

Bug 1795076

Change-Id: I96db457683cd207fd029c31c45f548f98055e844
Signed-off-by: Sachit Kadle <skadle@nvidia.com>
Reviewed-on: http://git-master/r/1206725
(cherry picked from commit 9d196fd10db6c2f934c2a53b1fc0500eb4626624)
Reviewed-on: http://git-master/r/1223933
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
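A minimal sketch of the calling-convention change in points 1 and 2 above,
for illustration only. The job field names (wait_cmd, post_fence), the
kzalloc usage, and the gk20a_alloc_fence() call shown here are assumptions
based on the commit message, not the exact nvgpu submit-path code:

    /* Old convention: each sync op allocated its own outputs and
     * returned them to the caller through double pointers. */
    err = c->sync->incr(c->sync, &job->wait_cmd, &job->post_fence, true);

    /* New convention (sketch): gk20a_submit_prepare_syncs pre-allocates
     * the job-tracking objects, and the sync op initializes them in
     * place instead of allocating. */
    job->wait_cmd = kzalloc(sizeof(*job->wait_cmd), GFP_KERNEL); /* illustrative */
    job->post_fence = gk20a_alloc_fence(c);   /* bare allocation, no init (point 3) */
    if (!job->wait_cmd || !job->post_fence)
            return -ENOMEM;                   /* caller owns cleanup of what it allocated */
    err = c->sync->incr(c->sync, job->wait_cmd, job->post_fence, true);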
Committed by: mobile promotions
Parent: 3c2656c8c6
Commit: 63e8592e06
@@ -36,8 +36,8 @@ struct gk20a_channel_sync {
 	 * cmdbuf is executed.
 	 */
 	int (*wait_syncpt)(struct gk20a_channel_sync *s, u32 id, u32 thresh,
-			   struct priv_cmd_entry **entry,
-			   struct gk20a_fence **fence);
+			   struct priv_cmd_entry *entry,
+			   struct gk20a_fence *fence);
 
 	/* Generate a gpu wait cmdbuf from sync fd.
 	 * Returns
@@ -46,8 +46,8 @@ struct gk20a_channel_sync {
 	 * cmdbuf is executed.
 	 */
 	int (*wait_fd)(struct gk20a_channel_sync *s, int fd,
-		       struct priv_cmd_entry **entry,
-		       struct gk20a_fence **fence);
+		       struct priv_cmd_entry *entry,
+		       struct gk20a_fence *fence);
 
 	/* Increment syncpoint/semaphore.
 	 * Returns
@@ -55,8 +55,8 @@ struct gk20a_channel_sync {
 	 * - a fence that can be passed to wait_cpu() and is_expired().
 	 */
 	int (*incr)(struct gk20a_channel_sync *s,
-		    struct priv_cmd_entry **entry,
-		    struct gk20a_fence **fence,
+		    struct priv_cmd_entry *entry,
+		    struct gk20a_fence *fence,
 		    bool need_sync_fence);
 
 	/* Increment syncpoint/semaphore, preceded by a wfi.
@@ -65,8 +65,8 @@ struct gk20a_channel_sync {
 	 * - a fence that can be passed to wait_cpu() and is_expired().
 	 */
 	int (*incr_wfi)(struct gk20a_channel_sync *s,
-			struct priv_cmd_entry **entry,
-			struct gk20a_fence **fence);
+			struct priv_cmd_entry *entry,
+			struct gk20a_fence *fence);
 
 	/* Increment syncpoint/semaphore, so that the returned fence represents
 	 * work completion (may need wfi) and can be returned to user space.
@@ -77,8 +77,8 @@ struct gk20a_channel_sync {
 	 */
 	int (*incr_user)(struct gk20a_channel_sync *s,
 			 int wait_fence_fd,
-			 struct priv_cmd_entry **entry,
-			 struct gk20a_fence **fence,
+			 struct priv_cmd_entry *entry,
+			 struct gk20a_fence *fence,
 			 bool wfi,
 			 bool need_sync_fence);
 
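Points 3 and 4 of the message are visible at the call sites rather than in
this header. A hedged sketch of the separate allocate/init flow:
gk20a_fence_put() is the framework's existing refcounted release, while the
error-path ownership shown is an assumption drawn from the clean_up fix the
message describes, not the verbatim driver code:

    struct gk20a_fence *f = gk20a_alloc_fence(c); /* allocation only; no sync state bound yet */
    if (!f)
            return -ENOMEM;

    err = c->sync->incr_user(c->sync, wait_fence_fd, wait_cmd, f, wfi,
                             need_sync_fence);    /* op initializes f in place */
    if (err)
            gk20a_fence_put(f);                   /* caller releases what it allocated (point 4) */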