gpu: nvgpu: Add magic value for zero data methods

Add a magic value as the data input for the FECS methods that do
not require any data input. This is applicable to GA10B.

Bug 3634227

Change-Id: I95f56413552c9a37b67d0833ff61428a798a8a10
Signed-off-by: Dinesh T <dt@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2852602
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Dinesh T
Date:      2023-02-02 21:13:22 +00:00
Committer: mobile promotions
Parent:    7d02506275
Commit:    7dbd29ceb6

15 changed files with 105 additions and 34 deletions
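For orientation, a minimal, self-contained sketch of the pattern this commit introduces follows. It is not nvgpu source: the struct, enum, and function names are simplified stand-ins, and only the 0xDEADCA11 magic value and the shape of the optional per-chip hook are taken from the diff below.

/*
 * Sketch only -- not nvgpu code. A per-chip hook stamps a recognizable
 * magic value into the method data when the FECS method carries no
 * payload, so the data seen in error prints is unambiguous.
 */
#include <stdio.h>
#include <stdint.h>

#define NULL_METHOD_DATA 0xDEADCA11U /* magic value from the patch */

/* hypothetical stand-ins for the NVGPU_GR_FALCON_METHOD_* IDs */
enum fecs_method {
        METHOD_DISCOVER_IMAGE_SIZE,  /* carries no data payload */
        METHOD_SET_WATCHDOG_TIMEOUT, /* carries a real payload */
};

struct method_op {
        uint32_t addr;
        uint32_t data;
};

/* per-chip hook: stamp the magic value into zero-data methods */
static void set_null_method_data(struct method_op *op, enum fecs_method m)
{
        if (m == METHOD_DISCOVER_IMAGE_SIZE) {
                op->data = NULL_METHOD_DATA;
        }
        /* methods with a real payload keep whatever the caller set */
}

/* common submit path: call the hook only when the chip wired it up */
static void submit(struct method_op op, enum fecs_method m,
                void (*hook)(struct method_op *, enum fecs_method))
{
        if (hook != NULL) {
                hook(&op, m); /* op is a by-value copy, as in the real code */
        }
        printf("method %d: addr=0x%08x data=0x%08x\n",
                        (int)m, (unsigned int)op.addr, (unsigned int)op.data);
}

int main(void)
{
        struct method_op op = { .addr = 0x10U, .data = 0U };

        submit(op, METHOD_DISCOVER_IMAGE_SIZE, set_null_method_data);
        submit(op, METHOD_SET_WATCHDOG_TIMEOUT, set_null_method_data);
        return 0;
}

Because the hook is an optional HAL op, only chips that define it (GA10B in this commit) pay for the stamping; every other chip leaves the pointer NULL and the common submit path skips it.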

==== File 1 of 15: ga100 gr falcon (ctrl_ctxsw) ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,7 +57,7 @@ int ga100_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
                op.mailbox.ok = gr_fecs_ctxsw_mailbox_value_pass_v();
                flags |= NVGPU_GR_FALCON_SUBMIT_METHOD_F_LOCKED;
-               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, flags);
+               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, flags, fecs_method);
                break;
        default:

==== File 2 of 15: ga10b gr falcon header ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,6 +32,11 @@ u32 ga10b_gr_falcon_get_fecs_ctxsw_mailbox_size(void);
 void ga10b_gr_falcon_fecs_ctxsw_clear_mailbox(struct gk20a *g,
                u32 reg_index, u32 clear_val);
 void ga10b_gr_falcon_dump_stats(struct gk20a *g);
+#ifndef CONFIG_NVGPU_HAL_NON_FUSA
+void ga10b_gr_falcon_set_null_fecs_method_data(struct gk20a *g,
+               struct nvgpu_fecs_method_op *op,
+               u32 fecs_method);
+#endif
 #ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT
 void ga10b_gr_falcon_gpccs_dmemc_write(struct gk20a *g, u32 port, u32 offs,
                u32 blk, u32 ainc);

==== File 3 of 15: ga10b gr falcon source ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,10 +25,12 @@
 #include <nvgpu/static_analysis.h>
 #include <nvgpu/gr/gr_utils.h>
 #include <nvgpu/gr/config.h>
+#include <nvgpu/gr/gr_falcon.h>
+#include <nvgpu/falcon.h>
 #include <nvgpu/soc.h>
 
 #include "gr_falcon_ga10b.h"
 #include "common/gr/gr_falcon_priv.h"
 
 #include <nvgpu/hw/ga10b/hw_gr_ga10b.h>
@@ -39,6 +41,8 @@
 #define NVGPU_GPCCS_ENCRYPT_DBG_UCODE_IMAGE "gpccs_encrypt_dbg.bin"
 #define NVGPU_GPCCS_ENCRYPT_PROD_UCODE_IMAGE "gpccs_encrypt_prod.bin"
 
+#define NVGPU_NULL_METHOD_DATA 0xDEADCA11U
+
 void ga10b_gr_falcon_get_fw_name(struct gk20a *g, const char **ucode_name, u32 falcon_id)
 {
        nvgpu_log_fn(g, " ");
@@ -137,6 +141,25 @@ static void ga10b_gr_falcon_gpccs_dump_stats(struct gk20a *g)
        }
 }
 
+#ifndef CONFIG_NVGPU_HAL_NON_FUSA
+void ga10b_gr_falcon_set_null_fecs_method_data(struct gk20a *g,
+               struct nvgpu_fecs_method_op *op,
+               u32 fecs_method)
+{
+       switch (fecs_method) {
+       case NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_IMAGE_SIZE:
+#ifdef CONFIG_NVGPU_GRAPHICS
+       case NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE:
+#endif
+               op->method.data = NVGPU_NULL_METHOD_DATA;
+               break;
+       default:
+               nvgpu_log(g, gpu_dbg_gpu_dbg, "fecs method: %d", fecs_method);
+               break;
+       }
+}
+#endif
+
 void ga10b_gr_falcon_dump_stats(struct gk20a *g)
 {
        ga10b_gr_falcon_fecs_dump_stats(g);

==== File 4 of 15: gm20b gr falcon header ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -50,7 +50,7 @@ void gm20b_gr_falcon_bind_instblk(struct gk20a *g,
 int gm20b_gr_falcon_wait_mem_scrubbing(struct gk20a *g);
 int gm20b_gr_falcon_wait_ctxsw_ready(struct gk20a *g);
 int gm20b_gr_falcon_submit_fecs_method_op(struct gk20a *g,
-               struct nvgpu_fecs_method_op op, u32 flags);
+               struct nvgpu_fecs_method_op op, u32 flags, u32 fecs_method);
 int gm20b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
                u32 data, u32 *ret_val);
 int gm20b_gr_falcon_ctrl_ctxsw_internal(struct gk20a *g, u32 fecs_method,
@@ -62,6 +62,10 @@ u32 gm20b_gr_falcon_get_fecs_current_ctx_data(struct gk20a *g,
                struct nvgpu_mem *inst_block);
 int gm20b_gr_falcon_init_ctx_state(struct gk20a *g,
                struct nvgpu_gr_falcon_query_sizes *sizes);
+#ifdef CONFIG_NVGPU_GRAPHICS
+int gm20b_gr_falcon_get_zcull_image_size(struct gk20a *g,
+               struct nvgpu_gr_falcon_query_sizes *sizes);
+#endif
 u32 gm20b_gr_falcon_read_status0_fecs_ctxsw(struct gk20a *g);
 u32 gm20b_gr_falcon_read_status1_fecs_ctxsw(struct gk20a *g);
 #ifdef CONFIG_NVGPU_GRAPHICS

==== File 5 of 15: gm20b gr falcon source ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -536,6 +536,28 @@ int gm20b_gr_falcon_wait_ctxsw_ready(struct gk20a *g)
        return 0;
 }
 
+#ifdef CONFIG_NVGPU_GRAPHICS
+int gm20b_gr_falcon_get_zcull_image_size(struct gk20a *g,
+               struct nvgpu_gr_falcon_query_sizes *sizes)
+{
+       int ret = 0;
+
+       if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+               ret = g->ops.gr.falcon.ctrl_ctxsw(g,
+                       NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
+                       0, &sizes->zcull_image_size);
+               if (ret != 0) {
+                       nvgpu_err(g,
+                               "query zcull ctx image size failed");
+                       return ret;
+               }
+       }
+
+       nvgpu_log(g, gpu_dbg_gr, "ZCULL image size = %u", sizes->zcull_image_size);
+
+       return ret;
+}
+#endif
+
 int gm20b_gr_falcon_init_ctx_state(struct gk20a *g,
                struct nvgpu_gr_falcon_query_sizes *sizes)
 {
@@ -573,21 +595,14 @@ defined(CONFIG_NVGPU_CTXSW_FW_ERROR_CODE_TESTING)
        nvgpu_log(g, gpu_dbg_gr, "PM CTXSW image size = %u", sizes->pm_ctxsw_image_size);
 #endif
 
-#ifdef CONFIG_NVGPU_NON_FUSA
-       if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
-               ret = g->ops.gr.falcon.ctrl_ctxsw(g,
-                       NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
-                       0, &sizes->zcull_image_size);
-               if (ret != 0) {
-                       nvgpu_err(g,
-                               "query zcull ctx image size failed");
-                       return ret;
+#ifdef CONFIG_NVGPU_GRAPHICS
+       if (g->ops.gr.falcon.get_zcull_image_size != NULL) {
+               ret = g->ops.gr.falcon.get_zcull_image_size(g, sizes);
        }
-       }
-       nvgpu_log(g, gpu_dbg_gr, "ZCULL image size = %u", sizes->zcull_image_size);
 #endif
+
+       if (ret != 0) {
+               nvgpu_err(g, "query zcull image size failed");
+       }
 
        nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "done");
        return 0;
@@ -665,13 +680,19 @@ void gm20b_gr_falcon_set_current_ctx_invalid(struct gk20a *g)
  * We should replace most, if not all, fecs method calls to this instead.
  */
 int gm20b_gr_falcon_submit_fecs_method_op(struct gk20a *g,
-               struct nvgpu_fecs_method_op op, u32 flags)
+               struct nvgpu_fecs_method_op op, u32 flags, u32 fecs_method)
 {
        int ret;
        struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
        bool sleepduringwait =
                (flags & NVGPU_GR_FALCON_SUBMIT_METHOD_F_SLEEP) != 0U;
 
+#ifndef CONFIG_NVGPU_HAL_NON_FUSA
+       if (g->ops.gr.falcon.set_null_fecs_method_data != NULL) {
+               g->ops.gr.falcon.set_null_fecs_method_data(g, &op, fecs_method);
+       }
+#endif
+
        if ((flags & NVGPU_GR_FALCON_SUBMIT_METHOD_F_LOCKED) == 0U) {
                nvgpu_mutex_acquire(&gr_falcon->fecs_mutex);
        }
@@ -699,8 +720,8 @@ int gm20b_gr_falcon_submit_fecs_method_op(struct gk20a *g,
                        op.cond.fail, op.mailbox.fail,
                        sleepduringwait);
        if (ret != 0) {
-               nvgpu_err(g, "fecs method: data=0x%08x push adr=0x%08x",
-                       op.method.data, op.method.addr);
+               nvgpu_err(g, "fecs method: %d data=0x%08x push adr=0x%08x",
+                       fecs_method, op.method.data, op.method.addr);
        }
 
        if ((flags & NVGPU_GR_FALCON_SUBMIT_METHOD_F_LOCKED) == 0U) {
@@ -846,7 +867,7 @@ defined(CONFIG_NVGPU_CTXSW_FW_ERROR_CODE_TESTING)
                break;
        }
 
-       return gm20b_gr_falcon_submit_fecs_method_op(g, op, flags);
+       return gm20b_gr_falcon_submit_fecs_method_op(g, op, flags, fecs_method);
 }
 
 int gm20b_gr_falcon_ctrl_ctxsw_internal(struct gk20a *g, u32 fecs_method,
@@ -867,7 +888,7 @@ int gm20b_gr_falcon_ctrl_ctxsw_internal(struct gk20a *g, u32 fecs_method,
        nvgpu_log_info(g, "fecs method %d data 0x%x ret_value %p",
                        fecs_method, data, ret_val);
 
-       return gm20b_gr_falcon_submit_fecs_method_op(g, op, flags);
+       return gm20b_gr_falcon_submit_fecs_method_op(g, op, flags, fecs_method);
 }
 #endif

==== File 6 of 15: gp10b gr falcon source ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -85,7 +85,7 @@ int gp10b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
                op.method.addr =
                    gr_fecs_method_push_adr_discover_preemption_image_size_v();
                op.mailbox.ret = ret_val;
-               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, 0U);
+               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, 0U, fecs_method);
                break;
 #endif

==== File 7 of 15: gv11b gr falcon source ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -150,7 +150,7 @@ int gv11b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
                }
                flags |= NVGPU_GR_FALCON_SUBMIT_METHOD_F_LOCKED;
-               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, flags);
+               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, flags, fecs_method);
                break;
        default:

==== File 8 of 15: tu104 gr falcon source ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -62,7 +62,7 @@ int tu104_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
                op.cond.fail = GR_IS_UCODE_OP_EQUAL;
                flags |= NVGPU_GR_FALCON_SUBMIT_METHOD_F_SLEEP;
-               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, flags);
+               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, flags, fecs_method);
                break;
        case NVGPU_GR_FALCON_METHOD_STOP_SMPC_GLOBAL_MODE:
@@ -76,7 +76,7 @@ int tu104_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
                op.cond.fail = GR_IS_UCODE_OP_EQUAL;
                flags |= NVGPU_GR_FALCON_SUBMIT_METHOD_F_SLEEP;
-               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, flags);
+               ret = gm20b_gr_falcon_submit_fecs_method_op(g, op, flags, fecs_method);
                break;
 #endif

==== File 9 of 15: ga100 HAL gr falcon ops ====

@@ -793,6 +793,7 @@ static const struct gops_gr_falcon ga100_ops_gr_falcon = {
        .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
        .get_fecs_current_ctx_data = gm20b_gr_falcon_get_fecs_current_ctx_data,
        .init_ctx_state = gp10b_gr_falcon_init_ctx_state,
+       .get_zcull_image_size = gm20b_gr_falcon_get_zcull_image_size,
        .fecs_host_int_enable = gv11b_gr_falcon_fecs_host_int_enable,
        .read_fecs_ctxsw_status0 = gm20b_gr_falcon_read_status0_fecs_ctxsw,
        .read_fecs_ctxsw_status1 = gm20b_gr_falcon_read_status1_fecs_ctxsw,

==== File 10 of 15: ga10b HAL gr falcon ops ====

@@ -797,6 +797,7 @@ static const struct gops_gr_falcon ga10b_ops_gr_falcon = {
        .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
        .get_fecs_current_ctx_data = gm20b_gr_falcon_get_fecs_current_ctx_data,
        .init_ctx_state = gp10b_gr_falcon_init_ctx_state,
+       .get_zcull_image_size = gm20b_gr_falcon_get_zcull_image_size,
        .fecs_host_int_enable = gv11b_gr_falcon_fecs_host_int_enable,
        .read_fecs_ctxsw_status0 = gm20b_gr_falcon_read_status0_fecs_ctxsw,
        .read_fecs_ctxsw_status1 = gm20b_gr_falcon_read_status1_fecs_ctxsw,
@@ -818,6 +819,9 @@ static const struct gops_gr_falcon ga10b_ops_gr_falcon = {
        .configure_fmodel = gm20b_gr_falcon_configure_fmodel,
 #endif
        .get_fw_name = ga10b_gr_falcon_get_fw_name,
+#ifndef CONFIG_NVGPU_HAL_NON_FUSA
+       .set_null_fecs_method_data = ga10b_gr_falcon_set_null_fecs_method_data,
+#endif
 };
 
 static const struct gops_gr ga10b_ops_gr = {

==== File 11 of 15: gm20b HAL gr falcon ops ====

@@ -443,6 +443,7 @@ static const struct gops_gr_falcon gm20b_ops_gr_falcon = {
        .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
        .get_fecs_current_ctx_data = gm20b_gr_falcon_get_fecs_current_ctx_data,
        .init_ctx_state = gm20b_gr_falcon_init_ctx_state,
+       .get_zcull_image_size = gm20b_gr_falcon_get_zcull_image_size,
        .fecs_host_int_enable = gm20b_gr_falcon_fecs_host_int_enable,
        .read_fecs_ctxsw_status0 = gm20b_gr_falcon_read_status0_fecs_ctxsw,
        .read_fecs_ctxsw_status1 = gm20b_gr_falcon_read_status1_fecs_ctxsw,

==== File 12 of 15: gv11b HAL gr falcon ops ====

@@ -654,6 +654,9 @@ static const struct gops_gr_falcon gv11b_ops_gr_falcon = {
        .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
        .get_fecs_current_ctx_data = gm20b_gr_falcon_get_fecs_current_ctx_data,
        .init_ctx_state = gp10b_gr_falcon_init_ctx_state,
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
+       .get_zcull_image_size = gm20b_gr_falcon_get_zcull_image_size,
+#endif
        .fecs_host_int_enable = gv11b_gr_falcon_fecs_host_int_enable,
        .read_fecs_ctxsw_status0 = gm20b_gr_falcon_read_status0_fecs_ctxsw,
        .read_fecs_ctxsw_status1 = gm20b_gr_falcon_read_status1_fecs_ctxsw,

==== File 13 of 15: tu104 HAL gr falcon ops ====

@@ -692,6 +692,7 @@ static const struct gops_gr_falcon tu104_ops_gr_falcon = {
        .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
        .get_fecs_current_ctx_data = gm20b_gr_falcon_get_fecs_current_ctx_data,
        .init_ctx_state = gp10b_gr_falcon_init_ctx_state,
+       .get_zcull_image_size = gm20b_gr_falcon_get_zcull_image_size,
        .fecs_host_int_enable = gv11b_gr_falcon_fecs_host_int_enable,
        .read_fecs_ctxsw_status0 = gm20b_gr_falcon_read_status0_fecs_ctxsw,
        .read_fecs_ctxsw_status1 = gm20b_gr_falcon_read_status1_fecs_ctxsw,

==== File 14 of 15: gops_gr_falcon HAL interface header ====

@@ -383,6 +383,8 @@ struct gops_gr_falcon {
                        struct nvgpu_mem *inst_block);
        int (*init_ctx_state)(struct gk20a *g,
                        struct nvgpu_gr_falcon_query_sizes *sizes);
+       int (*get_zcull_image_size)(struct gk20a *g,
+                       struct nvgpu_gr_falcon_query_sizes *sizes);
        void (*fecs_host_int_enable)(struct gk20a *g);
        u32 (*read_fecs_ctxsw_status0)(struct gk20a *g);
        u32 (*read_fecs_ctxsw_status1)(struct gk20a *g);
@@ -418,6 +420,10 @@ struct gops_gr_falcon {
        void (*configure_fmodel)(struct gk20a *g);
 #endif
        void (*get_fw_name)(struct gk20a *g, const char **ucode_name, u32 falcon_id);
+#ifndef CONFIG_NVGPU_HAL_NON_FUSA
+       void (*set_null_fecs_method_data)(struct gk20a *g,
+                       struct nvgpu_fecs_method_op *op, u32 fecs_method);
+#endif
        /** @endcond */
 };
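Both ops added to struct gops_gr_falcon above are optional: common code null-checks them before calling, as the gm20b_gr_falcon_init_ctx_state and gm20b_gr_falcon_submit_fecs_method_op hunks earlier show. A self-contained sketch of that optional-op pattern follows; the trimmed-down ops table, the signature, and the 4096 placeholder are hypothetical, not the nvgpu API.

#include <stddef.h>
#include <stdio.h>

/* hypothetical, trimmed-down ops table */
struct ops {
        int (*get_zcull_image_size)(unsigned int *size);
};

static int example_get_zcull_image_size(unsigned int *size)
{
        *size = 4096U; /* placeholder value, not a real query */
        return 0;
}

static void query(const struct ops *o)
{
        unsigned int size = 0U;

        /* optional op: only call when the chip wired it up */
        if (o->get_zcull_image_size != NULL) {
                (void)o->get_zcull_image_size(&size);
        }
        printf("zcull image size = %u\n", size);
}

int main(void)
{
        struct ops graphics_build = {
                .get_zcull_image_size = example_get_zcull_image_size,
        };
        struct ops safety_build = { NULL }; /* e.g. a build that omits the op */

        query(&graphics_build);
        query(&safety_build);
        return 0;
}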

==== File 15 of 15: gm20b gr falcon unit test ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -45,6 +45,8 @@
 #include "../nvgpu-gr.h"
 #include "nvgpu-gr-falcon-gm20b.h"
 
+#define INVALID_METHOD 0xFF
+
 struct gr_falcon_gm20b_fecs_op {
        u32 id;
        u32 data;
@@ -164,7 +166,7 @@ static int gr_falcon_gm20b_submit_fecs_mthd_op(struct unit_module *m,
                op.cond.ok = fecs_op_stat[i].cond_ok;
                op.cond.fail = fecs_op_stat[i].cond_fail;
-               err = gm20b_gr_falcon_submit_fecs_method_op(g, op, false);
+               err = gm20b_gr_falcon_submit_fecs_method_op(g, op, false, INVALID_METHOD);
                if ((fecs_op_stat[i].result == 0) && err) {
                        unit_return_fail(m, "submit_fecs_method_op failed\n");
                } else if (fecs_op_stat[i].result && (err == 0)){