linux-nv-oot/drivers/gpu/host1x/hw/channel_hw.c
Mikko Perttunen 276418c013 gpu: host1x: Memory context stealing
Currently, each process holding an open TegraDRM channel reserves
for itself one of the limited number of hardware memory contexts.
Attempting to allocate a channel when all contexts are in use
results in failure.

While we cannot have more contexts than the hardware supports in
active use, idle channels don't necessarily need to have a backing
memory context. As such, in this patch we add another layer
that allows channels in active use to "steal" hardware memory
contexts away from idle processes.

This is implemented by keeping track of the memory mappings
on each abstracted memory context. If we need to steal a memory
context's backing hardware context, we unmap everything from it
and give the hardware context away. When the abstracted memory
context is needed again (re-activated), we attempt to allocate
or steal another hardware context and re-map the previously
unmapped buffers.
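
A rough sketch of the re-activation path, in C-style pseudocode
(the type and helper names below are illustrative only, not the
driver's actual API):

/* Illustrative sketch only: mem_ctx, hw_ctx and these helpers are
 * hypothetical stand-ins for the driver's context-handling code. */
static struct hw_ctx *mem_ctx_activate(struct mem_ctx *ctx)
{
	struct hw_ctx *hw;

	hw = hw_ctx_alloc();			/* try a free hardware context first */
	if (!hw)
		hw = hw_ctx_steal_from_idle();	/* unmap an idle owner, take its context */
	if (!hw)
		return ERR_PTR(-ENOMEM);	/* the submit fails in this case */

	mem_ctx_remap_buffers(ctx, hw);		/* replay the recorded mappings */
	ctx->hw = hw;
	return hw;
}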

Unfortunately, this means additional overhead and unpredictability
at submit time: a submit can now fail if we cannot re-allocate a
backing memory context. Future work includes a provision for
un-stealable backing hardware memory contexts for processes
requiring more determinism, as well as optimization and cosmetic
improvements.

Bug 4403250
Bug 4399310

Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Change-Id: I3d13e3476f1bff3c4757152254496cddaaafd76a
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3058905
Reviewed-by: Santosh BS <santoshb@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3227749
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Tested-by: Dane Liu <danel@nvidia.com>
Reviewed-by: Bibek Basu <bbasu@nvidia.com>

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Channel
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/host1x-next.h>
#include <linux/iommu.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "../channel.h"
#include "../context.h"
#include "../dev.h"
#include "../intr.h"
#include "../job.h"

#define TRACE_MAX_LENGTH 128U
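
/*
 * Dump the contents of a gather buffer to ftrace, if command buffer
 * tracing (host1x_debug_trace_cmdbuf) is enabled.
 */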
static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
			       u32 offset, u32 words)
{
	struct device *dev = cdma_to_channel(cdma)->dev;
	void *mem = NULL;

	if (host1x_debug_trace_cmdbuf)
		mem = host1x_bo_mmap(bo);

	if (mem) {
		u32 i;

		/*
		 * Write in batches of 128 as there seems to be a limit
		 * of how much you can output to ftrace at once.
		 */
		for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
			u32 num_words = min(words - i, TRACE_MAX_LENGTH);

			offset += i * sizeof(u32);

			trace_host1x_cdma_push_gather(dev_name(dev), bo,
						      num_words, offset,
						      mem);
		}

		host1x_bo_munmap(bo, mem);
	}
}
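
/*
 * Push a command stream wait until syncpoint "id" reaches "threshold".
 * HOST1X 2 and later use the 32-bit payload/threshold registers; older
 * hardware falls back to the legacy WAIT_SYNCPT method.
 */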
static void submit_wait(struct host1x_job *job, u32 id, u32 threshold)
{
	struct host1x_cdma *cdma = &job->channel->cdma;

#if HOST1X_HW >= 2
	host1x_cdma_push_wide(cdma,
		host1x_opcode_setclass(
			HOST1X_CLASS_HOST1X,
			HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32,
			/* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */
			BIT(0) | BIT(2)
		),
		threshold,
		id,
		HOST1X_OPCODE_NOP
	);
#else
	/* TODO add waitchk or use waitbases or other mitigation */
	host1x_cdma_push(cdma,
		host1x_opcode_setclass(
			HOST1X_CLASS_HOST1X,
			host1x_uclass_wait_syncpt_r(),
			BIT(0)
		),
		host1x_class_host_wait_syncpt(id, threshold)
	);
#endif
}
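
/*
 * Switch the channel to "next_class". On HOST1X 6 and later, also program
 * the engine's stream ID register: the memory context's stream ID if one
 * is attached, otherwise the engine's firmware fallback stream ID.
 */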
static void submit_setclass(struct host1x_job *job, u32 next_class)
{
	struct host1x_cdma *cdma = &job->channel->cdma;

#if HOST1X_HW >= 6
	u32 stream_id;

	/*
	 * If a memory context has been set, use it. Otherwise
	 * (if context isolation is disabled) use the engine's
	 * firmware stream ID.
	 */
	if (job->memory_context)
		stream_id = job->memory_context->hw->stream_id;
	else
		stream_id = job->engine_fallback_streamid;

	host1x_cdma_push_wide(cdma,
		host1x_opcode_setclass(next_class, 0, 0),
		host1x_opcode_setpayload(stream_id),
		host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
		HOST1X_OPCODE_NOP);
#else
	host1x_cdma_push(cdma,
		host1x_opcode_setclass(next_class, 0, 0),
		HOST1X_OPCODE_NOP
	);
#endif
}
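
/*
 * Walk the job's command list and push each entry into the CDMA push
 * buffer: syncpoint waits (absolute, or relative to the job's syncpoint
 * base), gathers pointing at command buffers, and direct register writes.
 */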
static void submit_gathers(struct host1x_job *job, struct host1x_job_cmd *cmds, u32 num_cmds,
			   u32 job_syncpt_base)
{
	struct host1x_cdma *cdma = &job->channel->cdma;
#if HOST1X_HW < 6
	struct device *dev = job->channel->dev;
#endif
	unsigned int i;
	u32 threshold;

	for (i = 0; i < num_cmds; i++) {
		struct host1x_job_cmd *cmd = &cmds[i];

		if (cmd->type == HOST1X_JOB_CMD_WAIT) {
			if (cmd->wait.relative)
				threshold = job_syncpt_base + cmd->wait.threshold;
			else
				threshold = cmd->wait.threshold;

			submit_wait(job, cmd->wait.id, threshold);
			submit_setclass(job, cmd->wait.next_class);
		} else if (cmd->type == HOST1X_JOB_CMD_GATHER) {
			struct host1x_job_gather *g = &cmd->gather;
			dma_addr_t addr = g->base + g->offset;
			u32 op2, op3;

			op2 = lower_32_bits(addr);
			op3 = upper_32_bits(addr);

			trace_write_gather(cdma, g->bo, g->offset, g->words);

			if (op3 != 0) {
#if HOST1X_HW >= 6
				u32 op1 = host1x_opcode_gather_wide(g->words);
				u32 op4 = HOST1X_OPCODE_NOP;

				host1x_cdma_push_wide(cdma, op1, op2, op3, op4);
#else
				dev_err(dev, "invalid gather for push buffer %pad\n",
					&addr);
				continue;
#endif
			} else {
				u32 op1 = host1x_opcode_gather(g->words);

				host1x_cdma_push(cdma, op1, op2);
			}
		} else if (cmd->type == HOST1X_JOB_CMD_REG_WRITE) {
			struct host1x_job_reg_write *d = &cmd->reg_write;

			host1x_cdma_push(cdma, host1x_opcode_incr(d->reg >> 2, 1), d->value);
		}
	}
}
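
/*
 * Load the syncpoint's wait base register with the current max value,
 * so that relative syncpoint waits can be expressed against it.
 */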
static inline void synchronize_syncpt_base(struct host1x_job *job)
{
	struct host1x_syncpt *sp = job->syncpt;
	unsigned int id;
	u32 value;

	value = host1x_syncpt_read_max(sp);
	id = sp->base->id;

	host1x_cdma_push(&job->channel->cdma,
			 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
						HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1),
			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) |
			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
}
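
/*
 * Program the SMMU stream ID used for the channel's own memory accesses,
 * taken from the host1x parent device's IOMMU fwspec; defaults to 0x7f
 * when no fwspec is available.
 */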
static void host1x_channel_set_streamid(struct host1x_channel *channel)
{
#if HOST1X_HW >= 6
	u32 sid = 0x7f;
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);

	if (spec)
		sid = spec->ids[0] & 0xffff;
#endif

	host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
#endif
}
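
/*
 * Enable the kernel gather filter for this channel, so that privileged
 * opcodes are filtered out of userspace-supplied gather buffers. HOST1X 6
 * and later keep per-channel enable bits in hypervisor registers; HOST1X
 * 4 and 5 use the channel's CHANNELCTRL register.
 */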
static void host1x_enable_gather_filter(struct host1x_channel *ch)
{
#if HOST1X_HW >= 6
	struct host1x *host = dev_get_drvdata(ch->dev->parent);
	u32 val;

	if (!host->hv_regs)
		return;

	val = host1x_hypervisor_readl(
		host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
	val |= BIT(ch->id % 32);
	host1x_hypervisor_writel(
		host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
#elif HOST1X_HW >= 4
	host1x_ch_writel(ch,
			 HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1),
			 HOST1X_CHANNEL_CHANNELCTRL);
#endif
}
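
/*
 * Write the whole job into the CDMA push buffer. On HOST1X 6 and later:
 * run any leading absolute prefence waits under the host1x class MLOCK
 * with an invalid stream ID, then acquire the engine's MLOCK, fence the
 * switch to the real stream ID around an engine idle point, push the
 * gathers, and release the MLOCK once the engine is idle again. On older
 * hardware: optionally serialize against the previous job, synchronize
 * the wait base, set the engine class, and push the gathers.
 */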
static void channel_program_cdma(struct host1x_job *job)
{
	struct host1x_cdma *cdma = &job->channel->cdma;
	struct host1x_syncpt *sp = job->syncpt;

#if HOST1X_HW >= 6
	u32 fence;
	int i = 0;

	if (job->num_cmds == 0)
		goto prefences_done;
	if (job->cmds[0].type != HOST1X_JOB_CMD_WAIT || job->cmds[0].wait.relative)
		goto prefences_done;

	/* Enter host1x class with invalid stream ID for prefence waits. */
	host1x_cdma_push_wide(cdma,
		host1x_opcode_acquire_mlock(1),
		host1x_opcode_setclass(1, 0, 0),
		host1x_opcode_setpayload(0),
		host1x_opcode_setstreamid(0x1fffff));

	for (i = 0; i < job->num_cmds; i++) {
		struct host1x_job_cmd *cmd = &job->cmds[i];

		if (cmd->type != HOST1X_JOB_CMD_WAIT || cmd->wait.relative)
			break;

		submit_wait(job, cmd->wait.id, cmd->wait.threshold);
	}

	host1x_cdma_push(cdma,
		HOST1X_OPCODE_NOP,
		host1x_opcode_release_mlock(1));

prefences_done:
	/* Enter engine class with invalid stream ID. */
	host1x_cdma_push_wide(cdma,
		host1x_opcode_acquire_mlock(job->class),
		host1x_opcode_setclass(job->class, 0, 0),
		host1x_opcode_setpayload(0),
		host1x_opcode_setstreamid(job->engine_streamid_offset / 4));

	/* Before switching stream ID to real stream ID, ensure engine is idle. */
	fence = host1x_syncpt_incr_max(sp, 1);
	host1x_cdma_push(&job->channel->cdma,
		host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1),
		HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) |
		HOST1X_UCLASS_INCR_SYNCPT_COND_F(4));
	submit_wait(job, job->syncpt->id, fence);
	submit_setclass(job, job->class);

	/* Submit work. */
	job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs);
	submit_gathers(job, job->cmds + i, job->num_cmds - i, job->syncpt_end - job->syncpt_incrs);

	/* Before releasing MLOCK, ensure engine is idle again. */
	fence = host1x_syncpt_incr_max(sp, 1);
	host1x_cdma_push(&job->channel->cdma,
		host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1),
		HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) |
		HOST1X_UCLASS_INCR_SYNCPT_COND_F(4));
	submit_wait(job, job->syncpt->id, fence);

	/* Release MLOCK. */
	host1x_cdma_push(cdma,
		HOST1X_OPCODE_NOP, host1x_opcode_release_mlock(job->class));
#else
	if (job->serialize) {
		/*
		 * Force serialization by inserting a host wait for the
		 * previous job to finish before this one can commence.
		 */
		host1x_cdma_push(cdma,
				 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
							host1x_uclass_wait_syncpt_r(), 1),
				 host1x_class_host_wait_syncpt(job->syncpt->id,
							       host1x_syncpt_read_max(sp)));
	}

	/* Synchronize base register to allow using it for relative waiting */
	if (sp->base)
		synchronize_syncpt_base(job);

	/* add a setclass for modules that require it */
	if (job->class)
		host1x_cdma_push(cdma,
				 host1x_opcode_setclass(job->class, 0, 0),
				 HOST1X_OPCODE_NOP);

	job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs);

	submit_gathers(job, job->cmds, job->num_cmds, job->syncpt_end - job->syncpt_incrs);
#endif
}
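
/*
 * DMA fence callback invoked once the job's syncpoint threshold is
 * reached; kicks off CDMA cleanup for the completed job.
 */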
static void job_complete_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct host1x_job *job = container_of(cb, struct host1x_job, fence_cb);

	/* Schedules CDMA update. */
	host1x_cdma_update(&job->channel->cdma);
	complete(&job->fence_cb_done);
}
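
/*
 * Submit a job to its channel: take the submit lock, program the
 * channel's stream ID and gather filter, assign the job's syncpoints to
 * the channel, program the push buffer, and arm a completion fence
 * before the hardware can finish the job.
 */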
static int channel_submit(struct host1x_job *job)
{
	struct host1x_channel *ch = job->channel;
	struct host1x_syncpt *sp = job->syncpt;
	u32 prev_max = 0;
	u32 syncval;
	int err;
	struct host1x *host = dev_get_drvdata(ch->dev->parent);

	trace_host1x_channel_submit(dev_name(ch->dev),
				    job->num_cmds, job->num_relocs,
				    job->syncpt->id, job->syncpt_incrs);

	/* before error checks, return current max */
	prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);

	/* get submit lock */
	err = mutex_lock_interruptible(&ch->submitlock);
	if (err)
		return err;

	host1x_channel_set_streamid(ch);
	host1x_enable_gather_filter(ch);
	host1x_hw_syncpt_assign_to_channel(host, sp, ch);
	if (job->secondary_syncpt)
		host1x_hw_syncpt_assign_to_channel(host, job->secondary_syncpt, ch);

	/* begin a CDMA submit */
	err = host1x_cdma_begin(&ch->cdma, job);
	if (err) {
		mutex_unlock(&ch->submitlock);
		return err;
	}

	channel_program_cdma(job);
	syncval = host1x_syncpt_read_max(sp);

	/*
	 * Create fence before submitting job to HW to avoid job completing
	 * before the fence is set up.
	 */
	job->fence = host1x_fence_create(sp, syncval, true);
	if (WARN(IS_ERR(job->fence), "Failed to create submit complete fence")) {
		job->fence = NULL;
	} else {
		err = dma_fence_add_callback(job->fence, &job->fence_cb,
					     job_complete_callback);
	}

	/* end CDMA submit & stash pinned hMems into sync queue */
	host1x_cdma_end(&ch->cdma, job);

	trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);

	mutex_unlock(&ch->submitlock);

	if (err == -ENOENT)
		host1x_cdma_update(&ch->cdma);
	else
		WARN(err, "Failed to set submit complete interrupt");

	return 0;
}
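
/*
 * Map the channel's register window; the per-channel register stride
 * differs between pre-HOST1X6 (0x4000) and HOST1X6+ (0x100) hardware.
 */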
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
			       unsigned int index)
{
#if HOST1X_HW < 6
	ch->regs = dev->regs + index * 0x4000;
#else
	ch->regs = dev->regs + index * 0x100;
#endif

	return 0;
}

static const struct host1x_channel_ops host1x_channel_ops = {
	.init = host1x_channel_init,
	.submit = channel_submit,
};