Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 09:11:26 +03:00)
pva: mirror from gitlab cv/pva-sys-sw
Gitlab commit 689815042dacd ("Fix syncpt related crash durin...")
Changes since last deployment:
- Fix syncpt related crash during SC7 resume
- Add SC7 simulation and test
- tests: docs: fix mat_add test specs
- compat: bugfix: prevent fence override
- Add AI code reviewer to CI
Change-Id: I781ac9379787adb9dbdde55cd3fdec5729b08a95
Signed-off-by: nanwa <nanwa@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3358344
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Mohnish Jain <mohnishj@nvidia.com>
@@ -96,7 +96,8 @@ static enum pva_error notify_fw_context_init(struct pva_kmd_context *ctx)
 				      ctx->ctx_resource_table.table_mem->iova,
 				      ctx->ctx_resource_table.n_entries);
 
-	syncpt_info = pva_kmd_queue_get_rw_syncpt_info(ctx, ctx->ccq_id);
+	syncpt_info = pva_kmd_queue_get_rw_syncpt_info(
+		ctx->pva, PVA_PRIV_CCQ_ID, ctx->ccq_id);
 	pva_kmd_set_cmd_init_queue(
 		queue_cmd, PVA_PRIV_CCQ_ID,
 		ctx->ccq_id, /* For privileged queues, queue ID == user CCQ ID*/
@@ -319,6 +319,60 @@ static int64_t get_fw_debug_log_level(struct pva_kmd_device *dev,
 		(uint64_t)formatted_len);
 }
 
+static int64_t write_simulate_sc7(struct pva_kmd_device *pva, void *file_data,
+				  const uint8_t *in_buffer, uint64_t offset,
+				  uint64_t size)
+{
+	uint8_t buf = 0;
+	enum pva_error err;
+	unsigned long ret;
+
+	if ((offset != 0) || (size < 1)) {
+		return -EINVAL;
+	}
+
+	ret = pva_kmd_copy_data_from_user(&buf, in_buffer, 1);
+	if (ret != 0) {
+		pva_kmd_log_err(
+			"SC7 simulation: failed to copy data from user");
+		return -EFAULT;
+	}
+
+	if (buf == '1') {
+		if (pva->debugfs_context.entered_sc7 == 0) {
+			err = pva_kmd_simulate_enter_sc7(pva);
+			if (err != PVA_SUCCESS) {
+				return -EFAULT;
+			}
+			pva->debugfs_context.entered_sc7 = 1;
+		}
+	} else if (buf == '0') {
+		if (pva->debugfs_context.entered_sc7 == 1) {
+			err = pva_kmd_simulate_exit_sc7(pva);
+			if (err != PVA_SUCCESS) {
+				return -EFAULT;
+			}
+			pva->debugfs_context.entered_sc7 = 0;
+		}
+	} else {
+		pva_kmd_log_err(
+			"SC7 simulation: invalid input; Must be 0 or 1");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static int64_t read_simulate_sc7(struct pva_kmd_device *pva, void *file_data,
+				 uint8_t *out_buffer, uint64_t offset,
+				 uint64_t size)
+{
+	char buf;
+	buf = pva->debugfs_context.entered_sc7 ? '1' : '0';
+
+	return read_from_buffer_to_user(out_buffer, size, offset, &buf, 1);
+}
+
 enum pva_error pva_kmd_debugfs_create_nodes(struct pva_kmd_device *pva)
 {
 	static const char *vpu_ocd_names[NUM_VPU_BLOCKS] = { "ocd_vpu0_v3",
@@ -420,6 +474,16 @@ enum pva_error pva_kmd_debugfs_create_nodes(struct pva_kmd_device *pva)
 	pva_kmd_device_init_profiler(pva);
 	pva_kmd_device_init_tegra_stats(pva);
 
+	pva->debugfs_context.simulate_sc7_fops.read = &read_simulate_sc7;
+	pva->debugfs_context.simulate_sc7_fops.write = &write_simulate_sc7;
+	pva->debugfs_context.simulate_sc7_fops.pdev = pva;
+	err = pva_kmd_debugfs_create_file(
+		pva, "simulate_sc7", &pva->debugfs_context.simulate_sc7_fops);
+	if (err != PVA_SUCCESS) {
+		pva_kmd_log_err("Failed to create simulate_sc7 debugfs file");
+		return err;
+	}
+
 	return PVA_SUCCESS;
 }
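The node registered above takes a single ASCII character: writing '1' enters simulated SC7, writing '0' exits it, and a read returns the current state. The userspace sketch below is illustrative only; the debugfs path is an assumption, since the actual directory depends on where the driver places its debugfs root.

/* Userspace sketch: toggle the simulate_sc7 node and read the state back.
 * SIMULATE_SC7_NODE is an assumed path, not taken from the driver. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define SIMULATE_SC7_NODE "/sys/kernel/debug/pva0/simulate_sc7" /* assumed */

int main(void)
{
	char state;
	int fd = open(SIMULATE_SC7_NODE, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* pwrite/pread keep the offset at 0, which the write handler requires. */
	if (pwrite(fd, "1", 1, 0) != 1)   /* enter simulated SC7 */
		perror("enter sc7");

	if (pread(fd, &state, 1, 0) == 1) /* '1' while suspended */
		printf("entered_sc7 = %c\n", state);

	if (pwrite(fd, "0", 1, 0) != 1)   /* exit simulated SC7 */
		perror("exit sc7");

	close(fd);
	return 0;
}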
@@ -24,6 +24,7 @@ struct pva_kmd_debugfs_context {
 	bool stats_enable;
 	bool vpu_debug;
 	bool vpu_print_enable;
+	bool entered_sc7;
 	char *allowlist_path;
 	uint32_t profiling_level;
 	struct pva_kmd_file_ops vpu_fops;
@@ -35,6 +36,7 @@ struct pva_kmd_debugfs_context {
 	struct pva_kmd_file_ops vpu_ocd_fops[NUM_VPU_BLOCKS];
 	struct pva_kmd_fw_profiling_config g_fw_profiling_config;
 	struct pva_kmd_file_ops fw_debug_log_level_fops;
+	struct pva_kmd_file_ops simulate_sc7_fops;
 };
 
 enum pva_error pva_kmd_debugfs_create_nodes(struct pva_kmd_device *dev);
@@ -479,7 +479,8 @@ pva_kmd_op_queue_create(struct pva_kmd_context *ctx, const void *input_buffer,
 		goto out;
 	}
 
-	syncpt_info = pva_kmd_queue_get_rw_syncpt_info(ctx, queue_id);
+	syncpt_info = pva_kmd_queue_get_rw_syncpt_info(ctx->pva, ctx->ccq_id,
+						       queue_id);
 	queue_out_args.error = err;
 	queue_out_args.queue_id = queue_id;
 	queue_out_args.syncpt_id = syncpt_info->syncpt_id;
@@ -138,7 +138,7 @@ enum pva_error pva_kmd_complete_resume(struct pva_kmd_device *pva)
 			"PVA: Resume priv queue for context %d\n",
 			ctx->ccq_id);
 		syncpt_info = pva_kmd_queue_get_rw_syncpt_info(
-			PVA_PRIV_CCQ_ID, ctx->ccq_id);
+			pva, PVA_PRIV_CCQ_ID, ctx->ccq_id);
 		pva_kmd_set_cmd_init_queue(
 			queue_cmd, PVA_PRIV_CCQ_ID,
 			ctx->ccq_id, /* For privileged queues, queue ID == user CCQ ID*/
@@ -168,7 +168,8 @@ enum pva_error pva_kmd_complete_resume(struct pva_kmd_device *pva)
 
 			syncpt_info =
 				pva_kmd_queue_get_rw_syncpt_info(
-					ctx, queue->queue_id);
+					pva, ctx->ccq_id,
+					queue->queue_id);
 			pva_kmd_set_cmd_init_queue(
 				queue_cmd, queue->ccq_id,
 				queue->queue_id,
@@ -209,4 +210,4 @@ cancel_builder:
 err_out:
 	pva_kmd_mutex_unlock(&pva->powercycle_lock);
 	return err;
 }
@@ -135,7 +135,8 @@ enum pva_error pva_kmd_queue_create(struct pva_kmd_context *ctx,
 		goto err_free_kmd_memory;
 	}
 
-	syncpt_info = pva_kmd_queue_get_rw_syncpt_info(ctx, queue->queue_id);
+	syncpt_info = pva_kmd_queue_get_rw_syncpt_info(ctx->pva, ctx->ccq_id,
+						       queue->queue_id);
 	pva_kmd_set_cmd_init_queue(&cmd, queue->ccq_id, queue->queue_id,
 				   queue->queue_memory->iova,
 				   queue->max_num_submit,
@@ -197,12 +198,13 @@ unlock:
 }
 
 const struct pva_syncpt_rw_info *
-pva_kmd_queue_get_rw_syncpt_info(struct pva_kmd_context *ctx, uint8_t queue_id)
+pva_kmd_queue_get_rw_syncpt_info(struct pva_kmd_device *pva, uint8_t ccq_id,
+				 uint8_t queue_id)
 {
 	uint8_t ctx_offset =
-		safe_mulu32(ctx->ccq_id, PVA_NUM_RW_SYNCPTS_PER_CONTEXT);
+		safe_mulu32(ccq_id, PVA_NUM_RW_SYNCPTS_PER_CONTEXT);
 	uint32_t syncpt_index = safe_addu32(ctx_offset, queue_id);
 
 	ASSERT(syncpt_index < PVA_NUM_RW_SYNCPTS);
-	return &ctx->pva->rw_syncpts[syncpt_index];
+	return &pva->rw_syncpts[syncpt_index];
 }
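After the refactor above, the lookup depends only on the device and the (ccq_id, queue_id) pair rather than on a struct pva_kmd_context, which is what lets the SC7 resume path query syncpoints for privileged queues directly. A standalone sketch of the slot arithmetic follows; the two limit macros carry placeholder values and PVA_NUM_CONTEXTS is an assumed name, not the driver's real configuration.

/* Standalone sketch of the r/w syncpt slot lookup after the refactor.
 * Constants are placeholders for illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PVA_NUM_RW_SYNCPTS_PER_CONTEXT 8u /* assumed value */
#define PVA_NUM_CONTEXTS               8u /* assumed name and value */
#define PVA_NUM_RW_SYNCPTS (PVA_NUM_CONTEXTS * PVA_NUM_RW_SYNCPTS_PER_CONTEXT)

static uint32_t rw_syncpt_index(uint8_t ccq_id, uint8_t queue_id)
{
	/* One contiguous block of syncpt slots per context (CCQ). */
	uint32_t ctx_offset = (uint32_t)ccq_id * PVA_NUM_RW_SYNCPTS_PER_CONTEXT;
	uint32_t syncpt_index = ctx_offset + queue_id;

	assert(syncpt_index < PVA_NUM_RW_SYNCPTS);
	return syncpt_index;
}

int main(void)
{
	/* Queue 3 of context 2 maps to slot 2*8 + 3 = 19. */
	printf("slot = %u\n", rw_syncpt_index(2, 3));
	return 0;
}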
@@ -35,6 +35,7 @@ pva_kmd_queue_submit(struct pva_kmd_queue *queue,
 uint32_t pva_kmd_queue_space(struct pva_kmd_queue *queue);
 
 const struct pva_syncpt_rw_info *
-pva_kmd_queue_get_rw_syncpt_info(struct pva_kmd_context *ctx, uint8_t queue_id);
+pva_kmd_queue_get_rw_syncpt_info(struct pva_kmd_device *pva, uint8_t ccq_id,
+				 uint8_t queue_id);
 
 #endif // PVA_KMD_QUEUE_H
@@ -19,4 +19,7 @@ unsigned long pva_kmd_copy_data_to_user(void *to, const void *from,
 					unsigned long size);
 unsigned long pva_kmd_strtol(const char *str, int base);
 
+enum pva_error pva_kmd_simulate_enter_sc7(struct pva_kmd_device *pva);
+enum pva_error pva_kmd_simulate_exit_sc7(struct pva_kmd_device *pva);
+
 #endif //PVA_KMD_SHIM_DEBUGFS_H
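With these prototypes exported from the shim header, driver-side test code can run a complete simulated SC7 cycle back to back. The fragment below is a hedged sketch, assuming the caller already holds a valid struct pva_kmd_device pointer; the wrapper name is hypothetical and not part of the driver.

/* Hypothetical driver-side helper: run one simulated SC7 suspend/resume
 * cycle through the new shim entry points. */
static enum pva_error pva_kmd_run_sc7_cycle(struct pva_kmd_device *pva)
{
	enum pva_error err;

	err = pva_kmd_simulate_enter_sc7(pva); /* prepare + suspend */
	if (err != PVA_SUCCESS)
		return err;

	return pva_kmd_simulate_exit_sc7(pva); /* resume + complete */
}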
@@ -667,6 +667,59 @@ done:
 	return;
 }
 
+enum pva_error pva_kmd_simulate_enter_sc7(struct pva_kmd_device *pva)
+{
+	struct pva_kmd_linux_device_data *device_data;
+	struct nvpva_device_data *device_props;
+	struct device *dev;
+	int ret;
+
+	device_data = pva_kmd_linux_device_get_data(pva);
+	device_props = device_data->pva_device_properties;
+	dev = &device_props->pdev->dev;
+
+	// The PM core increases the device usage count before calling prepare, so
+	// we need to emulate this behavior as well.
+	pm_runtime_get_noresume(dev);
+
+	ret = pva_kmd_linux_device_prepare_suspend(dev);
+	if (ret != 0) {
+		pva_kmd_log_err("SC7 simulation: prepare suspend failed");
+		return PVA_INTERNAL;
+	}
+	ret = pva_kmd_linux_device_suspend(dev);
+	if (ret != 0) {
+		pva_kmd_log_err("SC7 simulation: suspend failed");
+		return PVA_INTERNAL;
+	}
+
+	return PVA_SUCCESS;
+}
+
+enum pva_error pva_kmd_simulate_exit_sc7(struct pva_kmd_device *pva)
+{
+	struct pva_kmd_linux_device_data *device_data =
+		pva_kmd_linux_device_get_data(pva);
+	struct nvpva_device_data *device_props =
+		device_data->pva_device_properties;
+	struct device *dev = &device_props->pdev->dev;
+	int ret;
+
+	ret = pva_kmd_linux_device_resume(dev);
+	if (ret != 0) {
+		pva_kmd_log_err("SC7 simulation: resume failed");
+		return PVA_INTERNAL;
+	}
+
+	pva_kmd_linux_device_complete_resume(dev);
+
+	// The PM core decreases the device usage count after calling complete, so
+	// we need to emulate this behavior as well.
+	pm_runtime_put(dev);
+
+	return PVA_SUCCESS;
+}
+
 static const struct dev_pm_ops pva_kmd_linux_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(pva_kmd_linux_device_suspend,
 				pva_kmd_linux_device_resume)
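The comments in the two helpers above spell out the constraint the simulation mirrors: the PM core takes a runtime-PM reference before prepare and drops it after complete, so the simulated path brackets its suspend/resume calls with pm_runtime_get_noresume() and pm_runtime_put(). The toy model below only illustrates that bracketing with a plain counter; it does not use the real runtime-PM API.

/* Toy illustration of the usage-count bracketing described above.
 * A plain counter stands in for the kernel's runtime-PM reference count. */
#include <stdio.h>

static int usage_count;

static void enter_simulated_sc7(void)
{
	usage_count++; /* stands in for pm_runtime_get_noresume() */
	printf("suspended, usage_count=%d\n", usage_count);
}

static void exit_simulated_sc7(void)
{
	printf("resumed, usage_count=%d\n", usage_count);
	usage_count--; /* stands in for pm_runtime_put() */
}

int main(void)
{
	enter_simulated_sc7();
	exit_simulated_sc7();
	printf("balanced: usage_count=%d\n", usage_count);
	return 0;
}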
@@ -4,7 +4,6 @@
 #include "pva_kmd_linux_device.h"
 #define CREATE_TRACE_POINTS
 #include "trace/events/nvpva_ftrace.h"
-#include <linux/nvhost.h>
 
 static uint32_t get_job_id(uint32_t queue_id, uint64_t submit_id)
 {