mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: move patch context update calls to gr/ctx unit
We use the following APIs to update the patch context:

  gr_gk20a_ctx_patch_write_begin()
  gr_gk20a_ctx_patch_write_end()
  gr_gk20a_ctx_patch_write()

Since the patch context is owned by the gr/ctx unit, move these APIs to
that unit and rename them to:

  nvgpu_gr_ctx_patch_write_begin()
  nvgpu_gr_ctx_patch_write_end()
  nvgpu_gr_ctx_patch_write()

Jira NVGPU-1527

Change-Id: Iee19c7a71d074763d3dcb9b1997cb2a3159d5299
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1989214
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 0ff5a49f45
parent 58bc18b794
committed by mobile promotions
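For callers this is a pure rename with unchanged semantics. A minimal before/after sketch of a call site (the gr_ctx variable and the addr/data argument values are illustrative, not taken from this diff; the signatures match the moved functions below):

	/* Before: helpers lived under the gr_gk20a_ prefix. */
	gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
	gr_gk20a_ctx_patch_write(g, gr_ctx, addr, data, true);
	gr_gk20a_ctx_patch_write_end(g, gr_ctx, false);

	/* After: same calls, now owned by the gr/ctx unit. */
	nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, false);
	nvgpu_gr_ctx_patch_write(g, gr_ctx, addr, data, true);
	nvgpu_gr_ctx_patch_write_end(g, gr_ctx, false);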
@@ -24,6 +24,7 @@
#include <nvgpu/gr/global_ctx.h>
#include <nvgpu/gr/ctx.h>
#include <nvgpu/vm.h>
#include <nvgpu/io.h>
#include <nvgpu/gmmu.h>

static void nvgpu_gr_ctx_unmap_global_ctx_buffers(struct gk20a *g,
@@ -528,3 +529,65 @@ int nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
	return 0;
}

/*
 * Context state can be written directly, or "patched" at times. So that code
 * can be used in either situation it is written using a series of
 * _ctx_patch_write(..., patch) statements. However any necessary map overhead
 * should be minimized; thus, bundle the sequence of these writes together, and
 * set them up and close with _ctx_patch_write_begin/_ctx_patch_write_end.
 */
int nvgpu_gr_ctx_patch_write_begin(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	bool update_patch_count)
{
	if (update_patch_count) {
		/* reset patch count if ucode has already processed it */
		gr_ctx->patch_ctx.data_count =
			g->ops.gr.ctxsw_prog.get_patch_count(g, &gr_ctx->mem);
		nvgpu_log(g, gpu_dbg_info, "patch count reset to %d",
			gr_ctx->patch_ctx.data_count);
	}
	return 0;
}

void nvgpu_gr_ctx_patch_write_end(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	bool update_patch_count)
{
	/* Write patch count to context image if it is mapped */
	if (update_patch_count) {
		g->ops.gr.ctxsw_prog.set_patch_count(g, &gr_ctx->mem,
			gr_ctx->patch_ctx.data_count);
		nvgpu_log(g, gpu_dbg_info, "write patch count %d",
			gr_ctx->patch_ctx.data_count);
	}
}

void nvgpu_gr_ctx_patch_write(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	u32 addr, u32 data, bool patch)
{
	if (patch) {
		u32 patch_slot = gr_ctx->patch_ctx.data_count *
			PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY;

		if (patch_slot > (PATCH_CTX_ENTRIES_FROM_SIZE(
				gr_ctx->patch_ctx.mem.size) -
				PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY)) {
			nvgpu_err(g, "failed to access patch_slot %d",
				patch_slot);
			return;
		}

		nvgpu_mem_wr32(g, &gr_ctx->patch_ctx.mem, patch_slot, addr);
		nvgpu_mem_wr32(g, &gr_ctx->patch_ctx.mem,
			patch_slot + 1U, data);
		gr_ctx->patch_ctx.data_count++;

		nvgpu_log(g, gpu_dbg_info,
			"patch addr = 0x%x data = 0x%x data_count %d",
			addr, data, gr_ctx->patch_ctx.data_count);
	} else {
		nvgpu_writel(g, addr, data);
	}
}
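The block comment above the moved functions describes the intended call pattern: bracket a run of patch writes with begin/end so the per-sequence bookkeeping happens once. Each patched entry consumes PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY 32-bit slots, one address word plus one data word, per the two nvgpu_mem_wr32() calls above. A minimal caller sketch; the function name and register offsets here are hypothetical placeholders, not from the diff:

	static void example_patch_sequence(struct gk20a *g,
		struct nvgpu_gr_ctx *gr_ctx)
	{
		/* Open the bundle; passing true would instead re-read the
		 * patch count from the context image first. */
		(void) nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, false);

		/* patch == true: append (addr, data) pairs to the patch
		 * buffer rather than writing the registers immediately.
		 * Offsets are placeholders for illustration only. */
		nvgpu_gr_ctx_patch_write(g, gr_ctx, 0x00418810U, 0x1U, true);
		nvgpu_gr_ctx_patch_write(g, gr_ctx, 0x00418e08U, 0x2U, true);

		/* Close the bundle; passing true would also write the
		 * updated data_count back into the context image. */
		nvgpu_gr_ctx_patch_write_end(g, gr_ctx, false);
	}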