Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
Synced 2025-12-22 17:25:35 +03:00
gpu: host1x: Add option for disabling context stealing
Add a kernel module parameter that allows disabling context stealing in
cases where reliability and consistency are preferred.

Change-Id: Iffa3f098814bcdd3ae760a05cb013c3c0c804ad5
Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3284406
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Santosh BS <santoshb@nvidia.com>
This commit is contained in:
committed by Jon Hunter
parent 48e383a1b9
commit 5362309e7f
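For context, the new knob makes host1x_memory_context_alloc() claim a hardware context up front (via host1x_memory_context_active()) and fail with -EBUSY instead of stealing or waiting when no context is free. Below is a minimal, hypothetical sketch of how a host1x client might allocate a context with the parameter enabled, e.g. by booting with host1x.static_context_alloc=1; the module name, the linux/host1x-next.h header, and the exact host1x_memory_context_alloc() signature are assumptions based on this diff and the mainline host1x API, not part of the commit.

/*
 * Hypothetical usage sketch (not part of this commit). The header name,
 * module name and function signature are assumed from the mainline host1x
 * API and may differ in the linux-nv-oot tree.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/host1x-next.h>
#include <linux/pid.h>

static int example_engine_open(struct host1x *host1x, struct device *dev,
			       struct pid *pid)
{
	struct host1x_memory_context *ctx;

	ctx = host1x_memory_context_alloc(host1x, dev, pid);
	if (IS_ERR(ctx)) {
		/*
		 * With static_context_alloc=1 the hardware context is
		 * claimed right here, so -EBUSY means every context is
		 * already owned; there will be no stealing or waiting
		 * later at submit time.
		 */
		return PTR_ERR(ctx);
	}

	/* ... map buffers and submit work; the context stays pinned ... */

	host1x_memory_context_put(ctx);
	return 0;
}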
@@ -1,12 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021-2024, NVIDIA Corporation.
+ * Copyright (c) 2021-2025, NVIDIA Corporation.
  */
 
 #include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/moduleparam.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pid.h>
@@ -15,6 +16,10 @@
 #include "context.h"
 #include "dev.h"
 
+static bool static_context_alloc;
+module_param(static_context_alloc, bool, 0644);
+MODULE_PARM_DESC(static_context_alloc, "If enabled, memory contexts are allocated immediately on channel open and cannot be relinquished while a channel is open");
+
 static void host1x_memory_context_release(struct device *dev)
 {
 	/* context device is freed in host1x_memory_context_list_free() */
@@ -160,7 +165,7 @@ static struct host1x_hw_memory_context *host1x_memory_context_alloc_hw_locked(st
 
 	/* Steal */
 
-	if (!can_steal)
+	if (!can_steal || static_context_alloc)
 		return ERR_PTR(-EBUSY);
 
 	list_for_each_entry(ctx, &can_steal->owners, entry) {
@@ -200,6 +205,7 @@ struct host1x_memory_context *host1x_memory_context_alloc(
 {
 	struct host1x_memory_context_list *cdl = &host1x->context_list;
 	struct host1x_memory_context *ctx;
+	int err;
 
 	if (!cdl->len)
 		return ERR_PTR(-EOPNOTSUPP);
@@ -215,6 +221,16 @@ struct host1x_memory_context *host1x_memory_context_alloc(
 	refcount_set(&ctx->ref, 1);
 	INIT_LIST_HEAD(&ctx->mappings);
 
+	if (static_context_alloc) {
+		err = host1x_memory_context_active(ctx);
+		if (err) {
+			kfree(ctx);
+			return ERR_PTR(err);
+		}
+
+		ctx->static_alloc = true;
+	}
+
 	return ctx;
 }
 EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);
@@ -241,6 +257,11 @@ retry:
 	hw = host1x_memory_context_alloc_hw_locked(ctx->host, ctx->dev, ctx->pid);
 	if (PTR_ERR(hw) == -EBUSY) {
 		/* All contexts busy. Wait for free context. */
+		if (static_context_alloc) {
+			dev_warn(ctx->dev, "%s: all memory contexts are busy\n", current->comm);
+			err = -EBUSY;
+			goto unlock;
+		}
 		if (!retrying)
 			dev_warn(ctx->dev, "%s: all memory contexts are busy, waiting\n",
 				 current->comm);
@@ -355,13 +376,11 @@ void host1x_memory_context_unmap(struct host1x_context_mapping *m)
 }
 EXPORT_SYMBOL_GPL(host1x_memory_context_unmap);
 
-void host1x_memory_context_inactive(struct host1x_memory_context *ctx)
+static void host1x_memory_context_inactive_locked(struct host1x_memory_context *ctx)
 {
 	struct host1x_memory_context_list *cdl = &ctx->host->context_list;
 	struct hw_alloc_waiter *waiter;
 
-	mutex_lock(&cdl->lock);
-
 	if (--ctx->hw->active == 0) {
 		/* Hardware context becomes eligible for stealing */
 		list_for_each_entry(waiter, &cdl->waiters, entry) {
@@ -376,6 +395,15 @@ void host1x_memory_context_inactive(struct host1x_memory_context *ctx)
			 */
		}
	}
+}
+
+void host1x_memory_context_inactive(struct host1x_memory_context *ctx)
+{
+	struct host1x_memory_context_list *cdl = &ctx->host->context_list;
+
+	mutex_lock(&cdl->lock);
+
+	host1x_memory_context_inactive_locked(ctx);
 
 	mutex_unlock(&cdl->lock);
 }
@@ -392,6 +420,9 @@ void host1x_memory_context_put(struct host1x_memory_context *ctx)
 	struct host1x_memory_context_list *cdl = &ctx->host->context_list;
 
 	if (refcount_dec_and_mutex_lock(&ctx->ref, &cdl->lock)) {
+		if (ctx->static_alloc)
+			host1x_memory_context_inactive_locked(ctx);
+
 		if (ctx->hw) {
 			list_del(&ctx->entry);
 
@@ -516,6 +516,7 @@ struct host1x_memory_context {
 	struct pid *pid;
 
 	refcount_t ref;
+	bool static_alloc;
 
 	struct host1x_hw_memory_context *hw;
 	struct device *context_dev; /* Context device */