gpu: nvgpu: clarify fence api assumptions

Adjust documentation and validity checks in the fence functions for
simplicity.

Now that the cde code uses user fences cleanly, the do-nothing-on-NULL
behaviour can hide unintended usage in new code that calls
nvgpu_fence_get() and nvgpu_fence_put(). It does not make sense to call
these with a NULL fence, so delete the checks.
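
For illustration, a minimal caller-side sketch of the tightened contract
(the helper names below are hypothetical, not part of the driver):

        /* Hypothetical callers: the fence passed to nvgpu_fence_get() and
         * nvgpu_fence_put() must always be valid now; a NULL pointer is a
         * bug at the call site rather than a silently ignored case. */
        static struct nvgpu_fence_type *example_share(struct nvgpu_fence_type *f)
        {
                return nvgpu_fence_get(f);      /* take an extra reference */
        }

        static void example_release(struct nvgpu_fence_type *f)
        {
                nvgpu_fence_put(f);             /* drop the reference */
        }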

Extend the documentation of the os fence lifetime in
nvgpu_fence_extract_user() to explain why the dup call is needed.
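
As a sketch of the lifetime being documented (treating os_fence as a
member of struct nvgpu_user_fence is an assumption based on the
surrounding code, not something this diff shows):

        /* Hypothetical flow: the nvgpu_fence_type keeps its own os_fence
         * reference so job completion can signal it, while the extracted
         * user fence holds the dup'd reference until it is attached to an
         * fd or dropped, possibly after the job has already completed. */
        static void example_extract_and_release(struct nvgpu_fence_type *f)
        {
                struct nvgpu_user_fence uf = nvgpu_fence_extract_user(f);

                /* ... f may be put and its job may complete here ... */

                if (nvgpu_os_fence_is_initialized(&uf.os_fence)) {
                        uf.os_fence.ops->drop_ref(&uf.os_fence);
                }
        }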

Make nvgpu_fence_from_semaphore() and nvgpu_fence_from_syncpt() return
void. These functions fill a previously allocated object; the only
failure they could report was a NULL object pointer, which never happens
and would not be acceptable from callers anyway, so delete the NULL
checks and adjust the return types accordingly.
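
Roughly, the caller-side effect (a simplified before/after mirroring the
channel_sync hunks below; 'err' and the label are illustrative only):

        /* before: an error path that could never actually trigger */
        err = nvgpu_fence_from_syncpt(fence, nvhost_dev, id, value, os_fence);
        if (err != 0) {
                goto clean_up;
        }

        /* after: the call cannot fail, so no cleanup path is needed */
        nvgpu_fence_from_syncpt(fence, nvhost_dev, id, value, os_fence);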

Jira NVGPU-5248

Change-Id: I9f82365d50ab5600374c8f7dd513691eac14a2f1
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2359624
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Konsta Hölttä
Date:         2020-06-17 16:13:17 +03:00
Committed-by: Alex Waterman
Parent:       39d1af0f65
Commit:       223d8522a1
4 changed files with 17 additions and 61 deletions

@@ -63,16 +63,12 @@ static void nvgpu_fence_free(struct nvgpu_ref *ref)
 void nvgpu_fence_put(struct nvgpu_fence_type *f)
 {
-        if (f != NULL) {
-                nvgpu_ref_put(&f->ref, nvgpu_fence_free);
-        }
+        nvgpu_ref_put(&f->ref, nvgpu_fence_free);
 }
 
 struct nvgpu_fence_type *nvgpu_fence_get(struct nvgpu_fence_type *f)
 {
-        if (f != NULL) {
-                nvgpu_ref_get(&f->ref);
-        }
+        nvgpu_ref_get(&f->ref);
         return f;
 }
@@ -89,8 +85,11 @@ struct nvgpu_user_fence nvgpu_fence_extract_user(struct nvgpu_fence_type *f)
         };
 
         /*
-         * Keep our ref to the os fence for now so that the user fence can be
-         * extracted multiple times (for cde).
+         * The os fence member has to live so it can be signaled when the job
+         * completes. The returned user fence may live longer than that before
+         * being safely attached to an fd if the job completes before a
+         * submission ioctl finishes, or if it's stored for cde job state
+         * tracking.
          */
         if (nvgpu_os_fence_is_initialized(&f->os_fence)) {
                 f->os_fence.ops->dup(&f->os_fence);
@@ -188,9 +187,6 @@ void nvgpu_fence_init(struct nvgpu_fence_type *f,
                 const struct nvgpu_fence_ops *ops,
                 struct nvgpu_os_fence os_fence)
 {
-        if (f == NULL) {
-                return;
-        }
         f->ops = ops;
         f->syncpt_id = NVGPU_INVALID_SYNCPT_ID;
 #ifdef CONFIG_NVGPU_SW_SEMAPHORE
@@ -225,24 +221,16 @@ static const struct nvgpu_fence_ops nvgpu_semaphore_fence_ops = {
 };
 
 /* This function takes ownership of the semaphore as well as the os_fence */
-int nvgpu_fence_from_semaphore(
-                struct nvgpu_fence_type *fence_out,
+void nvgpu_fence_from_semaphore(
+                struct nvgpu_fence_type *f,
                 struct nvgpu_semaphore *semaphore,
                 struct nvgpu_cond *semaphore_wq,
                 struct nvgpu_os_fence os_fence)
 {
-        struct nvgpu_fence_type *f = fence_out;
-
         nvgpu_fence_init(f, &nvgpu_semaphore_fence_ops, os_fence);
-        if (f == NULL) {
-                return -EINVAL;
-        }
 
         f->semaphore = semaphore;
         f->semaphore_wq = semaphore_wq;
-
-        return 0;
 }
 #endif
@@ -286,30 +274,15 @@ static const struct nvgpu_fence_ops nvgpu_fence_syncpt_ops = {
 };
 
 /* This function takes the ownership of the os_fence */
-int nvgpu_fence_from_syncpt(
-                struct nvgpu_fence_type *fence_out,
+void nvgpu_fence_from_syncpt(
+                struct nvgpu_fence_type *f,
                 struct nvgpu_nvhost_dev *nvhost_dev,
                 u32 id, u32 value, struct nvgpu_os_fence os_fence)
 {
-        struct nvgpu_fence_type *f = fence_out;
-
         nvgpu_fence_init(f, &nvgpu_fence_syncpt_ops, os_fence);
-        if (!f) {
-                return -EINVAL;
-        }
 
         f->nvhost_dev = nvhost_dev;
         f->syncpt_id = id;
         f->syncpt_value = value;
-
-        return 0;
-}
-#else
-int nvgpu_fence_from_syncpt(
-                struct nvgpu_fence_type *fence_out,
-                struct nvgpu_nvhost_dev *nvhost_dev,
-                u32 id, u32 value, struct nvgpu_os_fence os_fence)
-{
-        return -EINVAL;
 }
 #endif

@@ -213,19 +213,10 @@ static int channel_sync_semaphore_incr_common(
                 }
         }
 
-        err = nvgpu_fence_from_semaphore(fence, semaphore, &c->semaphore_wq,
-                os_fence);
-        if (err != 0) {
-                goto clean_up_os_fence;
-        }
+        nvgpu_fence_from_semaphore(fence, semaphore, &c->semaphore_wq, os_fence);
 
         return 0;
 
-clean_up_os_fence:
-        if (nvgpu_os_fence_is_initialized(&os_fence)) {
-                os_fence.ops->drop_ref(&os_fence);
-        }
 clean_up_cmdbuf:
         nvgpu_priv_cmdbuf_rollback(c->priv_cmd_q, *incr_cmd);
 clean_up_sema:

@@ -216,15 +216,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
                 }
         }
 
-        err = nvgpu_fence_from_syncpt(fence, sp->nvhost,
-                sp->id, thresh, os_fence);
-        if (err != 0) {
-                if (nvgpu_os_fence_is_initialized(&os_fence) != 0) {
-                        os_fence.ops->drop_ref(&os_fence);
-                }
-                goto clean_up_priv_cmd;
-        }
+        nvgpu_fence_from_syncpt(fence, sp->nvhost, sp->id, thresh, os_fence);
 
         return 0;

@@ -67,15 +67,15 @@ struct nvgpu_fence_ops {
 #ifdef CONFIG_NVGPU_SW_SEMAPHORE
 /* Fences can be created from semaphores or syncpoint (id, value) pairs */
-int nvgpu_fence_from_semaphore(
-                struct nvgpu_fence_type *fence_out,
+void nvgpu_fence_from_semaphore(
+                struct nvgpu_fence_type *f,
                 struct nvgpu_semaphore *semaphore,
                 struct nvgpu_cond *semaphore_wq,
                 struct nvgpu_os_fence os_fence);
 #endif
 
-int nvgpu_fence_from_syncpt(
-                struct nvgpu_fence_type *fence_out,
+void nvgpu_fence_from_syncpt(
+                struct nvgpu_fence_type *f,
                 struct nvgpu_nvhost_dev *nvhost_dev,
                 u32 id, u32 value,
                 struct nvgpu_os_fence os_fence);