gpu: host1x: Add option for disabling context stealing

Add kernel module parameter to allow disabling context stealing in
cases where reliability and consistency is preferred.

Change-Id: Iffa3f098814bcdd3ae760a05cb013c3c0c804ad5
Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3284406
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Santosh BS <santoshb@nvidia.com>
This commit is contained in:
Mikko Perttunen
2025-01-16 02:32:21 +00:00
committed by Jon Hunter
parent 48e383a1b9
commit 5362309e7f
2 changed files with 37 additions and 5 deletions

View File

@@ -1,12 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2024, NVIDIA Corporation.
* Copyright (c) 2021-2025, NVIDIA Corporation.
*/
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pid.h>
@@ -15,6 +16,10 @@
#include "context.h"
#include "dev.h"
static bool static_context_alloc;
module_param(static_context_alloc, bool, 0644);
MODULE_PARM_DESC(static_context_alloc, "If enabled, memory contexts are allocated immediately on channel open and cannot be relinquished while a channel is open");
static void host1x_memory_context_release(struct device *dev)
{
/* context device is freed in host1x_memory_context_list_free() */
@@ -160,7 +165,7 @@ static struct host1x_hw_memory_context *host1x_memory_context_alloc_hw_locked(st
/* Steal */
if (!can_steal)
if (!can_steal || static_context_alloc)
return ERR_PTR(-EBUSY);
list_for_each_entry(ctx, &can_steal->owners, entry) {
@@ -200,6 +205,7 @@ struct host1x_memory_context *host1x_memory_context_alloc(
{
struct host1x_memory_context_list *cdl = &host1x->context_list;
struct host1x_memory_context *ctx;
int err;
if (!cdl->len)
return ERR_PTR(-EOPNOTSUPP);
@@ -215,6 +221,16 @@ struct host1x_memory_context *host1x_memory_context_alloc(
refcount_set(&ctx->ref, 1);
INIT_LIST_HEAD(&ctx->mappings);
if (static_context_alloc) {
err = host1x_memory_context_active(ctx);
if (err) {
kfree(ctx);
return ERR_PTR(err);
}
ctx->static_alloc = true;
}
return ctx;
}
EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);
@@ -241,6 +257,11 @@ retry:
hw = host1x_memory_context_alloc_hw_locked(ctx->host, ctx->dev, ctx->pid);
if (PTR_ERR(hw) == -EBUSY) {
/* All contexts busy. Wait for free context. */
if (static_context_alloc) {
dev_warn(ctx->dev, "%s: all memory contexts are busy\n", current->comm);
err = -EBUSY;
goto unlock;
}
if (!retrying)
dev_warn(ctx->dev, "%s: all memory contexts are busy, waiting\n",
current->comm);
@@ -355,13 +376,11 @@ void host1x_memory_context_unmap(struct host1x_context_mapping *m)
}
EXPORT_SYMBOL_GPL(host1x_memory_context_unmap);
void host1x_memory_context_inactive(struct host1x_memory_context *ctx)
static void host1x_memory_context_inactive_locked(struct host1x_memory_context *ctx)
{
struct host1x_memory_context_list *cdl = &ctx->host->context_list;
struct hw_alloc_waiter *waiter;
mutex_lock(&cdl->lock);
if (--ctx->hw->active == 0) {
/* Hardware context becomes eligible for stealing */
list_for_each_entry(waiter, &cdl->waiters, entry) {
@@ -376,6 +395,15 @@ void host1x_memory_context_inactive(struct host1x_memory_context *ctx)
*/
}
}
}
/*
 * Mark a memory context as no longer active.
 *
 * Public locking wrapper: acquires the host's context-list lock and
 * delegates to host1x_memory_context_inactive_locked(), which decrements
 * the hardware context's active count and, when it reaches zero, notifies
 * waiters that the hardware context is eligible for reuse/stealing.
 */
void host1x_memory_context_inactive(struct host1x_memory_context *ctx)
{
struct host1x_memory_context_list *cdl = &ctx->host->context_list;
mutex_lock(&cdl->lock);
host1x_memory_context_inactive_locked(ctx);
mutex_unlock(&cdl->lock);
}
@@ -392,6 +420,9 @@ void host1x_memory_context_put(struct host1x_memory_context *ctx)
struct host1x_memory_context_list *cdl = &ctx->host->context_list;
if (refcount_dec_and_mutex_lock(&ctx->ref, &cdl->lock)) {
if (ctx->static_alloc)
host1x_memory_context_inactive_locked(ctx);
if (ctx->hw) {
list_del(&ctx->entry);

View File

@@ -516,6 +516,7 @@ struct host1x_memory_context {
struct pid *pid;
refcount_t ref;
bool static_alloc;
struct host1x_hw_memory_context *hw;
struct device *context_dev; /* Context device */