nvdla: Replace deprecated nvhost_syncpt_* APIs
This commit replaces the deprecated nvhost_syncpt_* functions with the
equivalent host1x_syncpt_* APIs throughout the NVDLA driver.

The following replacements were made:
- nvhost_get_syncpt_host_managed() → host1x_syncpt_alloc() + host1x_syncpt_id()
- nvhost_syncpt_put_ref_ext() → host1x_syncpt_put()
- nvhost_syncpt_incr_max_ext() → host1x_syncpt_incr_max()
- nvhost_syncpt_read_maxval() → host1x_syncpt_read_max()
- nvhost_syncpt_is_expired_ext() → host1x_syncpt_wait()
- nvhost_syncpt_set_min_update() → host1x_syncpt_read() + host1x_syncpt_incr()

Each replacement includes:
- Obtaining syncpoint handles when necessary with host1x_syncpt_get_by_id_noref()
- Proper error checking for null handles and return values
- Maintaining the emulation wrapper in nvdla_sync_syncpt_emu.c

Bug 4922416
Jira HOSTX-5963

Change-Id: I8ce4299f89a8938c07895656689a59cc20a097ec
Signed-off-by: Mainak Sen <msen@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3321364
Reviewed-by: Arvind M <am@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Mitch Harwell <mharwell@nvidia.com>
Reviewed-by: Raghavendra Vishnu Kumar <rvk@nvidia.com>
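For reference, the core pattern behind these replacements is sketched below. This is a minimal illustration and not part of the change itself: the *_example helpers are hypothetical names, and the sketch assumes the driver's existing headers (linux/host1x.h and the nvhost wrapper) are in scope and that pdata->host1x holds the struct host1x handle, as it does in the hunks that follow.

/*
 * Minimal sketch (illustrative only) of the allocate/look-up/release
 * pattern used in this change.
 */
static u32 nvdla_syncpt_alloc_example(struct platform_device *pdev)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
        struct host1x_syncpt *sp;

        /* Replaces nvhost_get_syncpt_host_managed(): flags = 0, host1x managed */
        sp = host1x_syncpt_alloc(pdata->host1x, 0U, dev_name(&pdev->dev));
        if (!sp)
                return 0;

        /* Only the numeric ID is stored; later calls look the handle up again */
        return host1x_syncpt_id(sp);
}

static void nvdla_syncpt_release_example(struct platform_device *pdev, u32 id)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
        struct host1x_syncpt *sp;

        /* Replaces nvhost_syncpt_put_ref_ext(): resolve the ID, then drop the ref */
        sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
        if (sp)
                host1x_syncpt_put(sp);
}

Keeping only the numeric syncpoint ID in the driver structures and re-resolving the handle with host1x_syncpt_get_by_id_noref() on each call is the design choice the hunks below apply at every former nvhost_syncpt_*_ext() call site.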
@@ -12,17 +12,89 @@
 #include <linux/vmalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/debugfs.h>
+#include <linux/dma-fence.h>
+#include <linux/workqueue.h>
 
 #include "port/nvdla_host_wrapper.h"
 
-#if IS_ENABLED(CONFIG_TEGRA_NVDLA_CHANNEL)
-#include "nvhost_job.h"
-#endif
 
 #include "dla_channel.h"
 #include "dla_queue.h"
 #include "nvdla_debug.h"
 
+#if IS_ENABLED(CONFIG_TEGRA_NVDLA_CHANNEL)
+#include "nvhost_job.h"
+
+/* host1x_cb structures and functions needed for handling callbacks */
+struct nvdla_host1x_cb {
+        struct dma_fence_cb cb;
+        struct work_struct work;
+        void (*notifier)(void *data);
+        void *notifier_data;
+};
+
+static void nvdla_host1x_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+        struct nvdla_host1x_cb *host1x_cb;
+
+        host1x_cb = container_of(cb, struct nvdla_host1x_cb, cb);
+        schedule_work(&host1x_cb->work);
+        dma_fence_put(f);
+}
+
+static void nvdla_intr_do_work(struct work_struct *work)
+{
+        struct nvdla_host1x_cb *host1x_cb;
+
+        host1x_cb = container_of(work, struct nvdla_host1x_cb, work);
+        host1x_cb->notifier(host1x_cb->notifier_data);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0))
+        kfree_rcu_mightsleep(host1x_cb);
+#else
+        kfree_rcu(host1x_cb);
+#endif
+}
+
+static int nvdla_intr_register_notifier(struct platform_device *pdev,
+                                        u32 id, u32 thresh,
+                                        void (*callback)(void *data),
+                                        void *private_data)
+{
+        struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+        struct dma_fence *fence;
+        struct nvdla_host1x_cb *cb;
+        struct host1x_syncpt *sp;
+        int err;
+
+        sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
+        if (!sp)
+                return -EINVAL;
+
+        fence = host1x_fence_create(sp, thresh, true);
+        if (IS_ERR(fence)) {
+                pr_err("error %d during construction of fence!",
+                       (int)PTR_ERR(fence));
+                return PTR_ERR(fence);
+        }
+
+        cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+        if (!cb) {
+                dma_fence_put(fence);
+                return -ENOMEM;
+        }
+
+        INIT_WORK(&cb->work, nvdla_intr_do_work);
+        cb->notifier = callback;
+        cb->notifier_data = private_data;
+
+        err = dma_fence_add_callback(fence, &cb->cb, nvdla_host1x_cb_func);
+        if (err < 0) {
+                dma_fence_put(fence);
+                kfree(cb);
+        }
+
+        return err;
+}
+#endif
 
 #define CMDBUF_SIZE 4096
 
 /**
@@ -553,7 +625,7 @@ int nvdla_queue_submit_to_host1x(struct nvdla_queue *queue,
         *task_syncpt_threshold = job->sp->fence;
 
         /* Register a callback function for releasing resources */
-        err = nvhost_intr_register_notifier(host1x_pdev,
+        err = nvdla_intr_register_notifier(host1x_pdev,
                                              queue->syncpt_id,
                                              job->sp->fence,
                                              queue_task_update, task);
@@ -31,6 +31,14 @@ struct nvdla_sync_context {
         dma_addr_t address;
 };
 
+static dma_addr_t nvdla_syncpt_address(struct platform_device *pdev, u32 syncpt_id)
+{
+        struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+        struct nvhost_syncpt_interface *syncpt_if = pdata->syncpt_unit_interface;
+
+        return syncpt_if->base + syncpt_if->page_size * syncpt_id;
+}
+
 struct nvdla_sync_device *nvdla_sync_device_create_syncpoint(
                                         struct platform_device *pdev)
 {
@@ -52,7 +60,6 @@ struct nvdla_sync_device *nvdla_sync_device_create_syncpoint(
                 goto fail;
         }
 
-        /* Replace nvhost_syncpt_unit_interface_init with host1x_syncpt_get_shim_info */
         pdata = platform_get_drvdata(pdev);
         syncpt_if = devm_kzalloc(&pdev->dev, sizeof(*syncpt_if), GFP_KERNEL);
         if (!syncpt_if) {
@@ -142,7 +149,7 @@ dma_addr_t nvdla_sync_get_address_by_syncptid(
         dma_addr_t address = 0ULL;
 
         if (device != NULL)
-                address = nvhost_syncpt_address(device->pdev, syncptid);
+                address = nvdla_syncpt_address(device->pdev, syncptid);
 
         return address;
 }
@@ -150,6 +157,8 @@ dma_addr_t nvdla_sync_get_address_by_syncptid(
 struct nvdla_sync_context *nvdla_sync_create(struct nvdla_sync_device *device)
 {
         struct nvdla_sync_context *context = NULL;
+        struct host1x_syncpt *sp = NULL;
+        struct nvhost_device_data *pdata = NULL;
 
         if ((device == NULL) || (device->pdev == NULL))
                 goto fail;
@@ -162,15 +171,23 @@ struct nvdla_sync_context *nvdla_sync_create(struct nvdla_sync_device *device)
                 goto fail;
         }
 
-        context->syncptid =
-                nvhost_get_syncpt_host_managed(device->pdev, 0U, NULL);
-        if (context->syncptid == 0) {
-                nvdla_dbg_err(device->pdev, "Failed to get syncpoint ID\n");
+        pdata = platform_get_drvdata(device->pdev);
+        sp = host1x_syncpt_alloc(pdata->host1x,
+                                 0U, /* Not client managed */
+                                 dev_name(&device->pdev->dev));
+        if (!sp) {
+                nvdla_dbg_err(device->pdev, "Failed to allocate syncpoint\n");
                 goto free_context;
         }
 
-        context->address =
-                nvhost_syncpt_address(device->pdev, context->syncptid);
+        context->syncptid = host1x_syncpt_id(sp);
+        if (context->syncptid == 0) {
+                nvdla_dbg_err(device->pdev, "Failed to get syncpoint ID\n");
+                host1x_syncpt_put(sp); /* Release the allocated syncpoint */
+                goto free_context;
+        }
+
+        context->address = nvdla_syncpt_address(device->pdev, context->syncptid);
         context->device = device;
 
         return context;
@@ -183,14 +200,21 @@ fail:
 
 void nvdla_sync_destroy(struct nvdla_sync_context *context)
 {
+        struct host1x_syncpt *sp = NULL;
+        struct nvhost_device_data *pdata;
+
         if (context == NULL)
                 goto done;
 
         if ((context->device == NULL) || (context->device->pdev == NULL))
                 goto free_context;
 
-        /* Release the syncpoint ID */
-        nvhost_syncpt_put_ref_ext(context->device->pdev, context->syncptid);
+        pdata = platform_get_drvdata(context->device->pdev);
+        sp = host1x_syncpt_get_by_id_noref(pdata->host1x, context->syncptid);
+        if (WARN_ON(!sp))
+                goto free_context;
+
+        host1x_syncpt_put(sp);
 
 free_context:
         context->device = NULL;
@@ -213,13 +237,18 @@ uint32_t nvdla_sync_increment_max_value(struct nvdla_sync_context *context,
                                         uint32_t increment)
 {
         uint32_t maxval = 0U;
+        struct nvhost_device_data *pdata;
+        struct host1x_syncpt *sp;
 
         if ((context == NULL) || (context->device == NULL))
                 goto fail;
 
-        maxval = nvhost_syncpt_incr_max_ext(context->device->pdev,
-                                            context->syncptid,
-                                            increment);
+        pdata = platform_get_drvdata(context->device->pdev);
+        sp = host1x_syncpt_get_by_id_noref(pdata->host1x, context->syncptid);
+        if (WARN_ON(!sp))
+                goto fail;
+
+        maxval = host1x_syncpt_incr_max(sp, increment);
 
 fail:
         return maxval;
@@ -228,12 +257,18 @@ fail:
 uint32_t nvdla_sync_get_max_value(struct nvdla_sync_context *context)
 {
         int32_t maxval = 0U;
+        struct nvhost_device_data *pdata;
+        struct host1x_syncpt *sp;
 
         if ((context == NULL) || (context->device == NULL))
                 goto fail;
 
-        maxval = nvhost_syncpt_read_maxval(context->device->pdev,
-                                           context->syncptid);
+        pdata = platform_get_drvdata(context->device->pdev);
+        sp = host1x_syncpt_get_by_id_noref(pdata->host1x, context->syncptid);
+        if (WARN_ON(!sp))
+                goto fail;
+
+        maxval = host1x_syncpt_read_max(sp);
 
 fail:
         return maxval;
@@ -246,6 +281,8 @@ int32_t nvdla_sync_wait(struct nvdla_sync_context *context,
         int32_t err = 0;
         int wait_complete;
         struct nvdla_sync_device *device;
+        struct nvhost_device_data *pdata;
+        struct host1x_syncpt *sp;
 
         if ((context == NULL) || (context->device == NULL)) {
                 err = -EINVAL;
@@ -254,10 +291,15 @@ int32_t nvdla_sync_wait(struct nvdla_sync_context *context,
 
         device = context->device;
         if (timeout == 0ULL) {
-                wait_complete = nvhost_syncpt_is_expired_ext(device->pdev,
-                                                             context->syncptid,
-                                                             threshold);
-                if (wait_complete == 0) {
+                pdata = platform_get_drvdata(device->pdev);
+                sp = host1x_syncpt_get_by_id_noref(pdata->host1x, context->syncptid);
+                if (WARN_ON(!sp)) {
+                        err = -EINVAL;
+                        goto fail;
+                }
+
+                wait_complete = (host1x_syncpt_wait(sp, threshold, 0, NULL) == 0);
+                if (!wait_complete) {
                         nvdla_dbg_err(device->pdev,
                                       "Wait on sp[%u] for threshold[%u] timedout\n",
                                       context->syncptid, threshold);
@@ -280,15 +322,28 @@ int32_t nvdla_sync_signal(struct nvdla_sync_context *context,
                                         uint32_t signal_value)
 {
         int err = 0;
+        struct nvhost_device_data *pdata;
+        struct host1x_syncpt *sp;
+        uint32_t cur;
 
         if ((context == NULL) || (context->device == NULL)) {
                 err = -EINVAL;
                 goto fail;
         }
 
-        nvhost_syncpt_set_min_update(context->device->pdev,
-                                     context->syncptid,
-                                     signal_value);
+        pdata = platform_get_drvdata(context->device->pdev);
+        sp = host1x_syncpt_get_by_id_noref(pdata->host1x, context->syncptid);
+        if (WARN_ON(!sp)) {
+                err = -EINVAL;
+                goto fail;
+        }
+
+        cur = host1x_syncpt_read(sp);
+        while (cur++ != signal_value)
+                host1x_syncpt_incr(sp);
+
+        /* Read back to ensure the value has been updated */
+        host1x_syncpt_read(sp);
 
 fail:
         return err;