gpu: nvgpu: support user fence updates

Add support for user fence updates, i.e. syncpoint increments added by user
space directly in the pushbuffer.

Add a submit IOCTL flag, NVGPU_SUBMIT_GPFIFO_FLAGS_USER_FENCE_UPDATE, to
indicate that user space has added increments in the pushbuffer. If so, the
number of increments is received from user space in fence.value.

If user space is adding increments in the pushbuffer, then we don't need to do
any job tracking in the kernel. So fail the submit if need_job_tracking
evaluates to true while FLAGS_USER_FENCE_UPDATE is set.
User space is responsible for ensuring that all prerequisites for a fast submit
are met, so that kernel job tracking is not required.

Since user space adds increments in the pushbuffer, just handle the threshold
book keeping in kernel.

Bug 200326065
Jira NVGPU-179

Change-Id: Ic0f0b1aa69e3389a4c3305fb6a559c5113719e0f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1661854
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak Nibade
2018-02-20 23:49:37 -08:00
committed by mobile promotions
parent 8d5536271f
commit 0c46f8a5e1
4 changed files with 41 additions and 1 deletions

View File

@@ -720,7 +720,8 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
return -EINVAL; return -EINVAL;
if ((flags & (NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT | if ((flags & (NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT |
NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET)) && NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET |
NVGPU_SUBMIT_GPFIFO_FLAGS_USER_FENCE_UPDATE)) &&
!fence) !fence)
return -EINVAL; return -EINVAL;
@@ -757,6 +758,16 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
(g->can_railgate && !c->deterministic) || (g->can_railgate && !c->deterministic) ||
!skip_buffer_refcounting; !skip_buffer_refcounting;
/*
* If User is adding increments to the pushbuffer and doing all job
* tracking, then no need for kernel tracking here
* User should ensure that all pre-requisites for fast submit are met
* Fail the submit if that's not the case
*/
if (need_job_tracking &&
(flags & NVGPU_SUBMIT_GPFIFO_FLAGS_USER_FENCE_UPDATE))
return -EINVAL;
if (need_job_tracking) { if (need_job_tracking) {
bool need_sync_framework = false; bool need_sync_framework = false;
@@ -868,6 +879,15 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
goto clean_up; goto clean_up;
} }
if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_USER_FENCE_UPDATE) {
/*
* User space adds increments in the pushbuffer, so just
* handle the threshold book keeping in kernel by adding
* number of syncpoint increments to threshold
*/
c->sync->add_user_incrs(c->sync, fence->value);
}
if (need_job_tracking) { if (need_job_tracking) {
err = channel_gk20a_alloc_job(c, &job); err = channel_gk20a_alloc_job(c, &job);
if (err) if (err)

View File

@@ -301,6 +301,13 @@ static u64 gk20a_channel_syncpt_address(struct gk20a_channel_sync *s)
return sp->syncpt_buf.gpu_va; return sp->syncpt_buf.gpu_va;
} }
/*
 * Account for syncpoint increments that user space has placed in the
 * pushbuffer itself: bump the syncpoint's max/threshold by @val via nvhost
 * so kernel-side bookkeeping stays in sync. Returns the new max value.
 */
static u32 gk20a_channel_add_user_incrs(struct gk20a_channel_sync *s, u32 val)
{
	struct gk20a_channel_syncpt *sp;

	/* Recover the syncpt-backed sync object from the generic ops struct */
	sp = container_of(s, struct gk20a_channel_syncpt, ops);

	return nvgpu_nvhost_syncpt_incr_max_ext(sp->nvhost_dev, sp->id, val);
}
static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s) static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
{ {
struct gk20a_channel_syncpt *sp = struct gk20a_channel_syncpt *sp =
@@ -353,6 +360,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
sp->ops.signal_timeline = gk20a_channel_syncpt_signal_timeline; sp->ops.signal_timeline = gk20a_channel_syncpt_signal_timeline;
sp->ops.syncpt_id = gk20a_channel_syncpt_id; sp->ops.syncpt_id = gk20a_channel_syncpt_id;
sp->ops.syncpt_address = gk20a_channel_syncpt_address; sp->ops.syncpt_address = gk20a_channel_syncpt_address;
sp->ops.add_user_incrs = gk20a_channel_add_user_incrs;
sp->ops.destroy = gk20a_channel_syncpt_destroy; sp->ops.destroy = gk20a_channel_syncpt_destroy;
return &sp->ops; return &sp->ops;
@@ -878,6 +886,12 @@ static u64 gk20a_channel_semaphore_syncpt_address(struct gk20a_channel_sync *s)
return 0; return 0;
} }
/*
 * User fence updates are only meaningful for syncpoint-backed sync;
 * semaphore-backed channels have no syncpoint threshold to adjust, so this
 * is a no-op stub returning 0 (same convention as the syncpt_address stub
 * above, which also returns 0 when unsupported).
 */
static u32 gk20a_channel_semaphore_add_user_incrs(struct gk20a_channel_sync *s,
		u32 val)
{
	return 0;
}
static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s) static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
{ {
struct gk20a_channel_semaphore *sema = struct gk20a_channel_semaphore *sema =
@@ -930,6 +944,7 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c)
sema->ops.signal_timeline = gk20a_channel_semaphore_signal_timeline; sema->ops.signal_timeline = gk20a_channel_semaphore_signal_timeline;
sema->ops.syncpt_id = gk20a_channel_semaphore_syncpt_id; sema->ops.syncpt_id = gk20a_channel_semaphore_syncpt_id;
sema->ops.syncpt_address = gk20a_channel_semaphore_syncpt_address; sema->ops.syncpt_address = gk20a_channel_semaphore_syncpt_address;
sema->ops.add_user_incrs = gk20a_channel_semaphore_add_user_incrs;
sema->ops.destroy = gk20a_channel_semaphore_destroy; sema->ops.destroy = gk20a_channel_semaphore_destroy;
return &sema->ops; return &sema->ops;

View File

@@ -105,6 +105,9 @@ struct gk20a_channel_sync {
/* Returns the sync point address of sync point or 0 if not supported */ /* Returns the sync point address of sync point or 0 if not supported */
u64 (*syncpt_address)(struct gk20a_channel_sync *s); u64 (*syncpt_address)(struct gk20a_channel_sync *s);
/* Handle user added increments in the push buffer */
u32 (*add_user_incrs)(struct gk20a_channel_sync *s, u32 val);
/* Free the resources allocated by gk20a_channel_sync_create. */ /* Free the resources allocated by gk20a_channel_sync_create. */
void (*destroy)(struct gk20a_channel_sync *s); void (*destroy)(struct gk20a_channel_sync *s);
}; };

View File

@@ -1478,6 +1478,8 @@ struct nvgpu_fence {
#define NVGPU_SUBMIT_GPFIFO_FLAGS_SKIP_BUFFER_REFCOUNTING (1 << 5) #define NVGPU_SUBMIT_GPFIFO_FLAGS_SKIP_BUFFER_REFCOUNTING (1 << 5)
/* expire current timeslice and reschedule runlist from front */ /* expire current timeslice and reschedule runlist from front */
#define NVGPU_SUBMIT_GPFIFO_FLAGS_RESCHEDULE_RUNLIST (1 << 6) #define NVGPU_SUBMIT_GPFIFO_FLAGS_RESCHEDULE_RUNLIST (1 << 6)
/* user space has added syncpoint increments in the pushbuffer */
#define NVGPU_SUBMIT_GPFIFO_FLAGS_USER_FENCE_UPDATE (1 << 7)
struct nvgpu_submit_gpfifo_args { struct nvgpu_submit_gpfifo_args {
__u64 gpfifo; __u64 gpfifo;