Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-24 10:11:26 +03:00)
DCE-KMD: Use OS abstraction for atomic operations
- Replace the Linux-specific implementation with the OS abstraction added to display/drivers

JIRA TDS-16052

Change-Id: I089dd75954cb8cfa533a697dddc2ae9c501c26a0
Signed-off-by: anupamg <anupamg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3171169
Reviewed-by: Mahesh Kumar <mahkumar@nvidia.com>
Reviewed-by: Arun Swain <arswain@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
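The <atomic.h> header pulled in by this change comes from the display/drivers OS-abstraction layer and is not part of the diff, so its contents are not shown here. As orientation only, below is a minimal sketch of how such wrappers could be backed by the regular kernel atomics on Linux; the os_atomic_* names are taken from the call sites in the diff, while the typedef and the mapping onto <linux/atomic.h> are assumptions, not the actual header.

/*
 * Hypothetical sketch only -- not the real <atomic.h> from display/drivers.
 * Assumes the OS-abstraction type and helpers are thin wrappers around the
 * Linux kernel atomic API.
 */
#include <linux/atomic.h>

typedef atomic_t os_atomic_t;

static inline void os_atomic_set(os_atomic_t *v, int i)
{
        atomic_set(v, i);               /* plain atomic store of i into *v */
}

static inline int os_atomic_read(const os_atomic_t *v)
{
        return atomic_read(v);          /* plain atomic load of *v */
}

/*
 * Add "a" to *v unless *v already equals "u"; returns non-zero when the add
 * was performed, which is how dce_client_schedule_event_work() claims a free
 * dce_async_work slot in the diff below.
 */
static inline int os_atomic_add_unless(os_atomic_t *v, int a, int u)
{
        return atomic_add_unless(v, a, u);
}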
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2019-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
 
 #include <dce.h>
@@ -114,7 +114,7 @@ static void dce_client_async_event_work(struct work_struct *data)
         cl = d->d_clients[DCE_CLIENT_IPC_TYPE_RM_EVENT];
 
         dce_client_process_event_ipc(d, cl);
-        atomic_set(&work->in_use, 0);
+        os_atomic_set(&work->in_use, 0);
 }
 
 int tegra_dce_register_ipc_client(u32 type,
@@ -171,7 +171,7 @@ int tegra_dce_register_ipc_client(u32 type,
         cl->handle = handle;
         cl->int_type = int_type;
         cl->callback_fn = callback_fn;
-        atomic_set(&cl->complete, 0);
+        os_atomic_set(&cl->complete, 0);
 
         ret = dce_cond_init(&cl->recv_wait);
         if (ret) {
@@ -249,7 +249,7 @@ int dce_client_init(struct tegra_dce *d)
                 INIT_WORK(&d_work->async_event_work,
                           dce_client_async_event_work);
                 d_work->d = d;
-                atomic_set(&d_work->in_use, 0);
+                os_atomic_set(&d_work->in_use, 0);
         }
 
         return ret;
@@ -284,11 +284,11 @@ int dce_client_ipc_wait(struct tegra_dce *d, u32 int_type)
 
 retry_wait:
         DCE_COND_WAIT_INTERRUPTIBLE(&cl->recv_wait,
-                                    atomic_read(&cl->complete) == 1);
-        if (atomic_read(&cl->complete) != 1)
+                                    os_atomic_read(&cl->complete) == 1);
+        if (os_atomic_read(&cl->complete) != 1)
                 goto retry_wait;
 
-        atomic_set(&cl->complete, 0);
+        os_atomic_set(&cl->complete, 0);
 
         return 0;
 }
@@ -341,7 +341,7 @@ static void dce_client_schedule_event_work(struct tegra_dce *d)
         for (i = 0; i < DCE_MAX_ASYNC_WORK; i++) {
                 struct dce_async_work *d_work = &async_work_info->work[i];
 
-                if (atomic_add_unless(&d_work->in_use, 1, 1) > 0) {
+                if (os_atomic_add_unless(&d_work->in_use, 1, 1) > 0) {
                         queue_work(async_work_info->async_event_wq,
                                    &d_work->async_event_work);
                         break;
@@ -374,6 +374,6 @@ void dce_client_ipc_wakeup(struct tegra_dce *d, u32 ch_type)
         if (type == DCE_CLIENT_IPC_TYPE_RM_EVENT)
                 return dce_client_schedule_event_work(d);
 
-        atomic_set(&cl->complete, 1);
+        os_atomic_set(&cl->complete, 1);
         dce_cond_signal_interruptible(&cl->recv_wait);
 }
 
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
 
 #include <dce.h>
@@ -35,9 +35,9 @@ int dce_wait_interruptible(struct tegra_dce *d, u32 msg_id)
          * Will be "1" and we immediately exit from the wait.
          */
         DCE_COND_WAIT_INTERRUPTIBLE(&wait->cond_wait,
-                                    atomic_read(&wait->complete) == 1);
+                                    os_atomic_read(&wait->complete) == 1);
 
-        if (atomic_read(&wait->complete) != 1)
+        if (os_atomic_read(&wait->complete) != 1)
                 return -EINTR;
 
         /*
@@ -45,7 +45,7 @@ int dce_wait_interruptible(struct tegra_dce *d, u32 msg_id)
          * So that when the next dce_wait_interruptible is called, it doesn't see old
          * wait->complete state.
          */
-        atomic_set(&wait->complete, 0);
+        os_atomic_set(&wait->complete, 0);
         return 0;
 }
 
@@ -73,7 +73,7 @@ void dce_wakeup_interruptible(struct tegra_dce *d, u32 msg_id)
          * "dce_cond_signal_interruptible", it'll see the complete variable
          * as "1" and exit the wait immediately.
          */
-        atomic_set(&wait->complete, 1);
+        os_atomic_set(&wait->complete, 1);
         dce_cond_signal_interruptible(&wait->cond_wait);
 }
 
@@ -95,7 +95,7 @@ void dce_cond_wait_reset(struct tegra_dce *d, u32 msg_id)
         }
 
         wait = &d->ipc_waits[msg_id];
-        atomic_set(&wait->complete, 0);
+        os_atomic_set(&wait->complete, 0);
 }
 
 /**
@@ -137,7 +137,7 @@ int dce_work_cond_sw_resource_init(struct tegra_dce *d)
                         goto init_error;
                 }
 
-                atomic_set(&wait->complete, 0);
+                os_atomic_set(&wait->complete, 0);
         }
 
         return 0;
@@ -168,7 +168,7 @@ void dce_work_cond_sw_resource_deinit(struct tegra_dce *d)
                 struct dce_wait_cond *wait = &d->ipc_waits[i];
 
                 dce_cond_destroy(&wait->cond_wait);
-                atomic_set(&wait->complete, 0);
+                os_atomic_set(&wait->complete, 0);
         }
 
         dce_cond_destroy(&d->dce_bootstrap_done);
 
@@ -1,12 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
 
 #ifndef DCE_CLIENT_IPC_INTERNAL_H
 #define DCE_CLIENT_IPC_INTERNAL_H
 
 #include <linux/platform/tegra/dce/dce-client-ipc.h>
+#include <atomic.h>
 
 /**
  * struct tegra_dce_client_ipc - Data Structure to hold client specific ipc
@@ -33,7 +34,7 @@ struct tegra_dce_client_ipc {
         uint32_t int_type;
         struct tegra_dce *d;
         struct dce_cond recv_wait;
-        atomic_t complete;
+        os_atomic_t complete;
         tegra_dce_client_ipc_callback_t callback_fn;
 };
 
@@ -41,7 +42,7 @@ struct tegra_dce_client_ipc {
 struct dce_async_work {
         struct tegra_dce *d;
         struct work_struct async_event_work;
-        atomic_t in_use;
+        os_atomic_t in_use;
 };
 
 /**
 
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
 
 #ifndef DCE_WORKER_H
@@ -9,6 +9,7 @@
 #include <dce-cond.h>
 #include <dce-lock.h>
 #include <dce-thread.h>
+#include <atomic.h>
 
 struct tegra_dce;
 
@@ -20,7 +21,7 @@ struct tegra_dce;
 #define DCE_MAX_WAIT 5
 
 struct dce_wait_cond {
-        atomic_t complete;
+        os_atomic_t complete;
         struct dce_cond cond_wait;
 };
 
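The one non-obvious call in the change is the slot claim in dce_client_schedule_event_work(): os_atomic_add_unless(&d_work->in_use, 1, 1) bumps in_use from 0 to 1 only if it is not already 1, so at most one caller owns a given work item until dce_client_async_event_work() releases it with os_atomic_set(&work->in_use, 0). Below is a stand-alone illustration of that claim/release idiom, written with C11 atomics rather than the driver's wrappers so it can be compiled and run on its own; all names in it are made up for the example.

/*
 * Illustration of the "add 1 unless already 1" claim/release idiom using
 * C11 atomics; not part of the driver.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_use;

static int try_claim(void)
{
        int expected = 0;

        /* Succeeds (returns non-zero) only if in_use was 0, i.e. the slot
         * was free; for a flag that only ever holds 0 or 1 this matches
         * add_unless(&in_use, 1, 1) > 0.
         */
        return atomic_compare_exchange_strong(&in_use, &expected, 1);
}

static void release(void)
{
        atomic_store(&in_use, 0);       /* mirrors os_atomic_set(&in_use, 0) */
}

int main(void)
{
        printf("first claim:   %d\n", try_claim());     /* 1: slot was free */
        printf("second claim:  %d\n", try_claim());     /* 0: already in use */
        release();
        printf("after release: %d\n", try_claim());     /* 1: free again */
        return 0;
}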