From a432d2adf34b851741ca8ff3a317ee69f8b07c2f Mon Sep 17 00:00:00 2001
From: Mikko Perttunen
Date: Mon, 3 Oct 2022 14:08:28 +0300
Subject: [PATCH] gpu: nvgpu: linux/host1x: Execute fence callback in
 non-atomic context

Due to changes in the host1x driver, dma_fence callbacks will be
executed in interrupt context instead of workqueue context as before.
To allow for that, this patch effectively moves the workqueue step
into nvgpu so that the in-nvgpu fence callback still gets executed in
workqueue context.

Bug 3730564

Signed-off-by: Mikko Perttunen
Change-Id: I7bfa294aa3b4bea9888921b79175a8fc218d8e3f
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2785968
(cherry picked from commit 5c8e511e48ca88b10fbd12c99799fdd03e825e99)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2823241
Tested-by: Jonathan Hunter
Reviewed-by: Jonathan Hunter
GVS: Gerrit_Virtual_Submit
---
 drivers/gpu/nvgpu/os/linux/nvhost_host1x.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/nvgpu/os/linux/nvhost_host1x.c b/drivers/gpu/nvgpu/os/linux/nvhost_host1x.c
index 29881ecad..6e718d33a 100644
--- a/drivers/gpu/nvgpu/os/linux/nvhost_host1x.c
+++ b/drivers/gpu/nvgpu/os/linux/nvhost_host1x.c
@@ -124,18 +124,25 @@ bool nvgpu_nvhost_syncpt_is_expired_ext(struct nvgpu_nvhost_dev *nvhost_dev,
 
 struct nvgpu_host1x_cb {
 	struct dma_fence_cb cb;
+	struct work_struct work;
 	void (*notifier)(void *, int);
 	void *notifier_data;
 };
 
+static void nvgpu_host1x_work_func(struct work_struct *work)
+{
+	struct nvgpu_host1x_cb *host1x_cb = container_of(work, struct nvgpu_host1x_cb, work);
+
+	host1x_cb->notifier(host1x_cb->notifier_data, 0);
+	kfree_rcu(host1x_cb);
+}
+
 static void nvgpu_host1x_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
 {
-	struct nvgpu_host1x_cb *host1x_cb;
+	struct nvgpu_host1x_cb *host1x_cb = container_of(cb, struct nvgpu_host1x_cb, cb);
 
-	host1x_cb = container_of(cb, struct nvgpu_host1x_cb, cb);
-	host1x_cb->notifier(host1x_cb->notifier_data, 0);
+	schedule_work(&host1x_cb->work);
 	dma_fence_put(f);
-	kfree(host1x_cb);
 }
 
 int nvgpu_nvhost_intr_register_notifier(struct nvgpu_nvhost_dev *nvhost_dev,
@@ -157,7 +164,7 @@ int nvgpu_nvhost_intr_register_notifier(struct nvgpu_nvhost_dev *nvhost_dev,
 	if (!sp)
 		return -EINVAL;
 
-	fence = host1x_fence_create(sp, thresh);
+	fence = host1x_fence_create(sp, thresh, true);
 	if (IS_ERR(fence)) {
 		pr_err("error %d during construction of fence!",
 			(int)PTR_ERR(fence));
@@ -171,6 +178,8 @@ int nvgpu_nvhost_intr_register_notifier(struct nvgpu_nvhost_dev *nvhost_dev,
 	cb->notifier = notifier;
 	cb->notifier_data = notifier_data;
 
+	INIT_WORK(&cb->work, nvgpu_host1x_work_func);
+
 	err = dma_fence_add_callback(fence, &cb->cb, nvgpu_host1x_cb_func);
 	if (err < 0) {
 		dma_fence_put(fence);
@@ -378,7 +387,7 @@ struct nvhost_fence *nvgpu_nvhost_fence_create(struct platform_device *pdev,
 	if (WARN_ON(!sp))
 		return ERR_PTR(-EINVAL);
 
-	return (struct nvhost_fence *)host1x_fence_create(sp, pts->thresh);
+	return (struct nvhost_fence *)host1x_fence_create(sp, pts->thresh, true);
 }
 
 struct nvhost_fence *nvgpu_nvhost_fence_get(int fd)
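
Note (not part of the patch): the snippet below is a self-contained sketch of the
"defer a dma_fence callback to a workqueue" pattern that the change above applies.
The fence callback, which host1x may now invoke from interrupt context, only queues
a work item; the work handler then runs the potentially sleeping notifier from
process context. Identifiers such as deferred_cb are hypothetical, and fence
reference counting is left to the caller for brevity.

/*
 * Sketch of the deferral pattern, assuming the standard Linux dma_fence,
 * workqueue and slab APIs. Hypothetical names; illustration only.
 */
#include <linux/dma-fence.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct deferred_cb {
	struct dma_fence_cb cb;		/* registered via dma_fence_add_callback() */
	struct work_struct work;	/* runs the notifier in process context */
	void (*notifier)(void *data, int status);
	void *notifier_data;
};

/* Workqueue context: sleeping is allowed here. */
static void deferred_cb_work(struct work_struct *work)
{
	struct deferred_cb *dcb = container_of(work, struct deferred_cb, work);

	dcb->notifier(dcb->notifier_data, 0);
	kfree(dcb);
}

/* Fence-signal context: may be hard IRQ, so do no more than queue the work. */
static void deferred_cb_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct deferred_cb *dcb = container_of(cb, struct deferred_cb, cb);

	schedule_work(&dcb->work);
}

static int deferred_cb_register(struct dma_fence *fence,
				void (*notifier)(void *, int), void *data)
{
	struct deferred_cb *dcb = kzalloc(sizeof(*dcb), GFP_KERNEL);
	int err;

	if (!dcb)
		return -ENOMEM;

	dcb->notifier = notifier;
	dcb->notifier_data = data;
	INIT_WORK(&dcb->work, deferred_cb_work);

	err = dma_fence_add_callback(fence, &dcb->cb, deferred_cb_fence_cb);
	if (err < 0) {
		/* e.g. -ENOENT: fence already signalled, callback not added. */
		kfree(dcb);
	}
	return err;
}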