gpu: nvgpu: silence coverity on fence code

- rename the fence destroy callback from "free" to "release", since it does
  not necessarily deallocate memory
- rename local nvhost_dev variables to nvhost_device, because nvhost_dev is
  also a struct tag and reusing it as a variable name is confusing
- check the return value of nvgpu_nvhost_syncpt_read_ext_check as an error
  code (int) instead of treating it as a boolean

Also, if the syncpt read fails when checking for fence expiration,
behave as if the wait isn't expired. Possibly getting stuck is safer
than possibly continuing too early.

Jira NVGPU-5617

Change-Id: Ied529e25f8c43f1c78fd9eac73b9cd6c3550ead5
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2398399
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Konsta Hölttä
2020-08-13 10:55:42 +03:00
committed by Alex Waterman
parent 010f818596
commit a439d3767d
8 changed files with 27 additions and 22 deletions

View File

@@ -33,7 +33,7 @@ static struct nvgpu_fence_type *nvgpu_fence_from_ref(struct nvgpu_ref *ref)
offsetof(struct nvgpu_fence_type, priv.ref));
}
static void nvgpu_fence_free(struct nvgpu_ref *ref)
static void nvgpu_fence_release(struct nvgpu_ref *ref)
{
struct nvgpu_fence_type *f = nvgpu_fence_from_ref(ref);
struct nvgpu_fence_type_priv *pf = &f->priv;
@@ -42,14 +42,14 @@ static void nvgpu_fence_free(struct nvgpu_ref *ref)
pf->os_fence.ops->drop_ref(&pf->os_fence);
}
pf->ops->free(f);
pf->ops->release(f);
}
void nvgpu_fence_put(struct nvgpu_fence_type *f)
{
struct nvgpu_fence_type_priv *pf = &f->priv;
nvgpu_ref_put(&pf->ref, nvgpu_fence_free);
nvgpu_ref_put(&pf->ref, nvgpu_fence_release);
}
struct nvgpu_fence_type *nvgpu_fence_get(struct nvgpu_fence_type *f)

View File

@@ -29,7 +29,7 @@ struct nvgpu_fence_type;
struct nvgpu_fence_ops {
int (*wait)(struct nvgpu_fence_type *f, u32 timeout);
bool (*is_expired)(struct nvgpu_fence_type *f);
void (*free)(struct nvgpu_fence_type *f);
void (*release)(struct nvgpu_fence_type *f);
};
void nvgpu_fence_init(struct nvgpu_fence_type *f,

View File

@@ -47,7 +47,7 @@ static bool nvgpu_fence_semaphore_is_expired(struct nvgpu_fence_type *f)
return !nvgpu_semaphore_is_acquired(pf->semaphore);
}
static void nvgpu_fence_semaphore_free(struct nvgpu_fence_type *f)
static void nvgpu_fence_semaphore_release(struct nvgpu_fence_type *f)
{
struct nvgpu_fence_type_priv *pf = &f->priv;
@@ -59,7 +59,7 @@ static void nvgpu_fence_semaphore_free(struct nvgpu_fence_type *f)
static const struct nvgpu_fence_ops nvgpu_fence_semaphore_ops = {
.wait = nvgpu_fence_semaphore_wait,
.is_expired = nvgpu_fence_semaphore_is_expired,
.free = nvgpu_fence_semaphore_free,
.release = nvgpu_fence_semaphore_release,
};
/* This function takes ownership of the semaphore as well as the os_fence */

View File

@@ -30,7 +30,7 @@ static int nvgpu_fence_syncpt_wait(struct nvgpu_fence_type *f, u32 timeout)
struct nvgpu_fence_type_priv *pf = &f->priv;
return nvgpu_nvhost_syncpt_wait_timeout_ext(
pf->nvhost_dev, pf->syncpt_id, pf->syncpt_value,
pf->nvhost_device, pf->syncpt_id, pf->syncpt_value,
timeout, NVGPU_NVHOST_DEFAULT_WAITER);
}
@@ -43,42 +43,47 @@ static bool nvgpu_fence_syncpt_is_expired(struct nvgpu_fence_type *f)
* syncpt value to be updated. For this case, we force a read
* of the value from HW, and then check for expiration.
*/
if (!nvgpu_nvhost_syncpt_is_expired_ext(pf->nvhost_dev, pf->syncpt_id,
pf->syncpt_value)) {
if (!nvgpu_nvhost_syncpt_is_expired_ext(pf->nvhost_device,
pf->syncpt_id, pf->syncpt_value)) {
int err;
u32 val;
if (!nvgpu_nvhost_syncpt_read_ext_check(pf->nvhost_dev,
pf->syncpt_id, &val)) {
err = nvgpu_nvhost_syncpt_read_ext_check(pf->nvhost_device,
pf->syncpt_id, &val);
WARN(err != 0, "syncpt read failed??");
if (err == 0) {
return nvgpu_nvhost_syncpt_is_expired_ext(
pf->nvhost_dev,
pf->nvhost_device,
pf->syncpt_id, pf->syncpt_value);
} else {
return false;
}
}
return true;
}
static void nvgpu_fence_syncpt_free(struct nvgpu_fence_type *f)
static void nvgpu_fence_syncpt_release(struct nvgpu_fence_type *f)
{
}
static const struct nvgpu_fence_ops nvgpu_fence_syncpt_ops = {
.wait = nvgpu_fence_syncpt_wait,
.is_expired = nvgpu_fence_syncpt_is_expired,
.free = nvgpu_fence_syncpt_free,
.release = nvgpu_fence_syncpt_release,
};
/* This function takes the ownership of the os_fence */
void nvgpu_fence_from_syncpt(
struct nvgpu_fence_type *f,
struct nvgpu_nvhost_dev *nvhost_dev,
struct nvgpu_nvhost_dev *nvhost_device,
u32 id, u32 value, struct nvgpu_os_fence os_fence)
{
struct nvgpu_fence_type_priv *pf = &f->priv;
nvgpu_fence_init(f, &nvgpu_fence_syncpt_ops, os_fence);
pf->nvhost_dev = nvhost_dev;
pf->nvhost_device = nvhost_device;
pf->syncpt_id = id;
pf->syncpt_value = value;
}

View File

@@ -51,7 +51,7 @@ struct nvgpu_fence_type_priv {
#ifdef CONFIG_TEGRA_GK20A_NVHOST
/* Valid for fences created from syncpoints: */
struct nvgpu_nvhost_dev *nvhost_dev;
struct nvgpu_nvhost_dev *nvhost_device;
u32 syncpt_id;
u32 syncpt_value;
#endif

View File

@@ -32,7 +32,7 @@ struct nvgpu_nvhost_dev;
void nvgpu_fence_from_syncpt(
struct nvgpu_fence_type *f,
struct nvgpu_nvhost_dev *nvhost_dev,
struct nvgpu_nvhost_dev *nvhost_device,
u32 id, u32 value,
struct nvgpu_os_fence os_fence);

View File

@@ -38,7 +38,7 @@ struct nvgpu_os_fence_syncpt {
#if defined(CONFIG_TEGRA_GK20A_NVHOST) && !defined(CONFIG_NVGPU_SYNCFD_NONE)
int nvgpu_os_fence_syncpt_create(struct nvgpu_os_fence *fence_out,
struct nvgpu_channel *c, struct nvgpu_nvhost_dev *nvhost_dev,
struct nvgpu_channel *c, struct nvgpu_nvhost_dev *nvhost_device,
u32 id, u32 thresh);
/*
@@ -71,7 +71,7 @@ u32 nvgpu_os_fence_syncpt_get_num_syncpoints(
static inline int nvgpu_os_fence_syncpt_create(
struct nvgpu_os_fence *fence_out, struct nvgpu_channel *c,
struct nvgpu_nvhost_dev *nvhost_dev,
struct nvgpu_nvhost_dev *nvhost_device,
u32 id, u32 thresh)
{
return -ENOSYS;

View File

@@ -56,7 +56,7 @@ static const struct nvgpu_os_fence_ops syncpt_ops = {
};
int nvgpu_os_fence_syncpt_create(struct nvgpu_os_fence *fence_out,
struct nvgpu_channel *c, struct nvgpu_nvhost_dev *nvhost_dev,
struct nvgpu_channel *c, struct nvgpu_nvhost_dev *nvhost_device,
u32 id, u32 thresh)
{
struct nvhost_ctrl_sync_fence_info pt = {
@@ -64,7 +64,7 @@ int nvgpu_os_fence_syncpt_create(struct nvgpu_os_fence *fence_out,
.thresh = thresh,
};
struct nvhost_fence *fence = nvhost_fence_create(
nvhost_dev->host1x_pdev, &pt, 1, "fence");
nvhost_device->host1x_pdev, &pt, 1, "fence");
if (IS_ERR(fence)) {
nvgpu_err(c->g, "error %d during construction of fence.",