gpu: nvgpu: s/NVGPU_GR_CTX_*_VA/NVGPU_GR_GLOBAL_CTX_*_VA

Indices into the global ctx buffer virtual address array were named
with the GR_CTX prefix and defined in ctx.h. Rename them with the
GR_GLOBAL_CTX prefix and move them to global_ctx.h.

Also remove the flag global_ctx_buffer_mapped as it is not used.

Bug 3677982

Change-Id: I9042e1c2bd8e8e10e97893484daeff0f97a96ea0
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2704855
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Sagar Kamble authored 2022-04-05 10:26:42 +05:30
committed by mobile promotions
parent 7fa6976a98
commit 65e7baf856
7 changed files with 45 additions and 55 deletions
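For reference, a minimal sketch of the rename is given below. The index names, their values, and the two per-context arrays are taken from the headers changed in this commit; the gr_ctx_va_sketch struct and the gr_ctx_pagepool_va() helper are hypothetical stand-ins for illustration, not driver code.

#include <linux/types.h>	/* u32/u64 for this sketch */

/* Indices as they now live in global_ctx.h, with the GR_GLOBAL_CTX prefix. */
#define NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA            0U
#define NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA            1U
#define NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA           2U
#define NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_VA     3U
#define NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER_VA 4U
#ifdef CONFIG_NVGPU_FECS_TRACE
#define NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER_VA   5U
#endif
#define NVGPU_GR_GLOBAL_CTX_VA_COUNT               6U

/* Stand-in for the relevant nvgpu_gr_ctx fields (see the ctx.h hunk below). */
struct gr_ctx_va_sketch {
	u64 global_ctx_buffer_va[NVGPU_GR_GLOBAL_CTX_VA_COUNT];
	u32 global_ctx_buffer_index[NVGPU_GR_GLOBAL_CTX_VA_COUNT];
};

/* Callers index the arrays with the renamed constants, e.g.: */
static inline u64 gr_ctx_pagepool_va(const struct gr_ctx_va_sketch *ctx)
{
	return ctx->global_ctx_buffer_va[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA];
}

In the driver itself these lookups go through nvgpu_gr_ctx_get_global_ctx_va(), as seen in the hunks below.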

View File

@@ -182,7 +182,7 @@ static void nvgpu_gr_ctx_unmap_global_ctx_buffers(struct gk20a *g,
 nvgpu_log_fn(g, " ");
-for (i = 0U; i < NVGPU_GR_CTX_VA_COUNT; i++) {
+for (i = 0U; i < NVGPU_GR_GLOBAL_CTX_VA_COUNT; i++) {
 if (g_bfr_va[i] != 0ULL) {
 nvgpu_gr_global_ctx_buffer_unmap(global_ctx_buffer,
 g_bfr_index[i], vm, g_bfr_va[i]);
@@ -191,8 +191,6 @@ static void nvgpu_gr_ctx_unmap_global_ctx_buffers(struct gk20a *g,
 (void) memset(g_bfr_va, 0, sizeof(gr_ctx->global_ctx_buffer_va));
 (void) memset(g_bfr_index, 0, sizeof(gr_ctx->global_ctx_buffer_index));
-gr_ctx->global_ctx_buffer_mapped = false;
 }
 static int nvgpu_gr_ctx_map_ctx_circular_buffer(struct gk20a *g,
@@ -216,14 +214,14 @@ static int nvgpu_gr_ctx_map_ctx_circular_buffer(struct gk20a *g,
 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR,
 vm, NVGPU_VM_MAP_CACHEABLE, true);
-g_bfr_index[NVGPU_GR_CTX_CIRCULAR_VA] =
+g_bfr_index[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA] =
 NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR;
 } else {
 #endif
 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 NVGPU_GR_GLOBAL_CTX_CIRCULAR,
 vm, NVGPU_VM_MAP_CACHEABLE, true);
-g_bfr_index[NVGPU_GR_CTX_CIRCULAR_VA] =
+g_bfr_index[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA] =
 NVGPU_GR_GLOBAL_CTX_CIRCULAR;
 #ifdef CONFIG_NVGPU_VPR
 }
@@ -231,7 +229,7 @@ static int nvgpu_gr_ctx_map_ctx_circular_buffer(struct gk20a *g,
 if (gpu_va == 0ULL) {
 goto clean_up;
 }
-g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
+g_bfr_va[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA] = gpu_va;
 return 0;
@@ -260,14 +258,14 @@ static int nvgpu_gr_ctx_map_ctx_attribute_buffer(struct gk20a *g,
 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR,
 vm, NVGPU_VM_MAP_CACHEABLE, false);
-g_bfr_index[NVGPU_GR_CTX_ATTRIBUTE_VA] =
+g_bfr_index[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA] =
 NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR;
 } else {
 #endif
 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 NVGPU_GR_GLOBAL_CTX_ATTRIBUTE,
 vm, NVGPU_VM_MAP_CACHEABLE, false);
-g_bfr_index[NVGPU_GR_CTX_ATTRIBUTE_VA] =
+g_bfr_index[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA] =
 NVGPU_GR_GLOBAL_CTX_ATTRIBUTE;
 #ifdef CONFIG_NVGPU_VPR
 }
@@ -275,7 +273,7 @@ static int nvgpu_gr_ctx_map_ctx_attribute_buffer(struct gk20a *g,
 if (gpu_va == 0ULL) {
 goto clean_up;
 }
-g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
+g_bfr_va[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA] = gpu_va;
 return 0;
@@ -305,14 +303,14 @@ static int nvgpu_gr_ctx_map_ctx_pagepool_buffer(struct gk20a *g,
 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR,
 vm, NVGPU_VM_MAP_CACHEABLE, true);
-g_bfr_index[NVGPU_GR_CTX_PAGEPOOL_VA] =
+g_bfr_index[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA] =
 NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR;
 } else {
 #endif
 gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 NVGPU_GR_GLOBAL_CTX_PAGEPOOL,
 vm, NVGPU_VM_MAP_CACHEABLE, true);
-g_bfr_index[NVGPU_GR_CTX_PAGEPOOL_VA] =
+g_bfr_index[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA] =
 NVGPU_GR_GLOBAL_CTX_PAGEPOOL;
 #ifdef CONFIG_NVGPU_VPR
 }
@@ -320,7 +318,7 @@ static int nvgpu_gr_ctx_map_ctx_pagepool_buffer(struct gk20a *g,
 if (gpu_va == 0ULL) {
 goto clean_up;
 }
-g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
+g_bfr_va[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA] = gpu_va;
 return 0;
@@ -402,7 +400,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER)) {
 err = nvgpu_gr_ctx_map_ctx_buffer(g,
 NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER,
-NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA,
+NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER_VA,
 gr_ctx, global_ctx_buffer, vm);
 if (err != 0) {
 nvgpu_err(g,
@@ -416,7 +414,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 /* Priv register Access Map */
 err = nvgpu_gr_ctx_map_ctx_buffer(g,
 NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP,
-NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA,
+NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_VA,
 gr_ctx, global_ctx_buffer, vm);
 if (err != 0) {
 nvgpu_err(g, "cannot map ctx priv access buffer");
@@ -428,7 +426,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA)) {
 err = nvgpu_gr_ctx_map_ctx_buffer(g,
 NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER,
-NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA,
+NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER_VA,
 gr_ctx, global_ctx_buffer, vm);
 if (err != 0) {
 nvgpu_err(g, "cannot map ctx fecs trace buffer");
@@ -437,8 +435,6 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 }
 #endif
-gr_ctx->global_ctx_buffer_mapped = true;
 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "done");
 return 0;
@@ -518,7 +514,7 @@ void nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
 g->allow_all);
 g->ops.gr.ctxsw_prog.set_priv_access_map_addr(g, mem,
 nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA));
+NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_VA));
 #endif
 #ifdef CONFIG_NVGPU_HAL_NON_FUSA

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -150,19 +150,13 @@ struct nvgpu_gr_ctx {
 * Array to store GPU virtual addresses of all global context
 * buffers.
 */
-u64 global_ctx_buffer_va[NVGPU_GR_CTX_VA_COUNT];
+u64 global_ctx_buffer_va[NVGPU_GR_GLOBAL_CTX_VA_COUNT];
 /**
 * Array to store indexes of global context buffers
 * corresponding to GPU virtual addresses above.
 */
-u32 global_ctx_buffer_index[NVGPU_GR_CTX_VA_COUNT];
+u32 global_ctx_buffer_index[NVGPU_GR_GLOBAL_CTX_VA_COUNT];
-/**
- * Flag to indicate if global context buffers are mapped and
- * #global_ctx_buffer_va array is populated.
- */
-bool global_ctx_buffer_mapped;
 /**
 * TSG identifier corresponding to the graphics context.

View File

@@ -637,7 +637,7 @@ int nvgpu_gr_fecs_trace_bind_channel(struct gk20a *g,
 if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA)) {
 addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA);
+NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER_VA);
 nvgpu_log(g, gpu_dbg_ctxsw, "gpu_va=%llx", addr);
 aperture_mask = 0;
 } else {

View File

@@ -352,7 +352,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 /* global pagepool buffer */
 addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-NVGPU_GR_CTX_PAGEPOOL_VA);
+NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA);
 size = nvgpu_safe_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size(
 global_ctx_buffer,
 NVGPU_GR_GLOBAL_CTX_PAGEPOOL));
@@ -362,7 +362,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 /* global bundle cb */
 addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-NVGPU_GR_CTX_CIRCULAR_VA);
+NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA);
 size = nvgpu_safe_cast_u64_to_u32(
 g->ops.gr.init.get_bundle_cb_default_size(g));
@@ -371,7 +371,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 /* global attrib cb */
 addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-NVGPU_GR_CTX_ATTRIBUTE_VA);
+NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA);
 g->ops.gr.init.commit_global_attrib_cb(g, gr_ctx,
 nvgpu_gr_config_get_tpc_count(config),
@@ -384,7 +384,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 if (g->ops.gr.init.commit_rtv_cb != NULL) {
 /* RTV circular buffer */
 addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA);
+NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER_VA);
 g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
 }

View File

@@ -95,7 +95,7 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
 /* set priv access map */
 g->ops.gr.ctxsw_prog.set_priv_access_map_addr(g, ctxheader,
 nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA));
+NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_VA));
 #endif
 g->ops.gr.ctxsw_prog.set_patch_addr(g, ctxheader,

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -108,26 +108,6 @@ struct zcull_ctx_desc;
 #define NVGPU_GR_CTX_COUNT 3U
 #endif
-/*
-* either ATTRIBUTE or ATTRIBUTE_VPR maps to NVGPU_GR_CTX_ATTRIBUTE_VA.
-*/
-/** S/W defined index for circular context buffer virtual address. */
-#define NVGPU_GR_CTX_CIRCULAR_VA 0U
-/** S/W defined index for pagepool context buffer virtual address. */
-#define NVGPU_GR_CTX_PAGEPOOL_VA 1U
-/** S/W defined index for attribute context buffer virtual address. */
-#define NVGPU_GR_CTX_ATTRIBUTE_VA 2U
-/** S/W defined index for access map buffer virtual address. */
-#define NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA 3U
-/** S/W defined index for RTV circular context buffer virtual address. */
-#define NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA 4U
-#ifdef CONFIG_NVGPU_FECS_TRACE
-/** S/W defined index for fecs trace buffer virtual address. */
-#define NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA 5U
-#endif
-/** Number of context buffer virtual addresses. */
-#define NVGPU_GR_CTX_VA_COUNT 6U
 #ifdef CONFIG_NVGPU_DEBUGGER
 /* PM Context Switch Modes */
 /** This mode says that the pms are not to be context switched. */

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -87,6 +87,26 @@ typedef void (*global_ctx_mem_destroy_fn)(struct gk20a *g,
 /** Number of global context buffers. */
 #define NVGPU_GR_GLOBAL_CTX_COUNT 9U
+/*
+* either ATTRIBUTE or ATTRIBUTE_VPR maps to NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA.
+*/
+/** S/W defined index for circular context buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA 0U
+/** S/W defined index for pagepool context buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA 1U
+/** S/W defined index for attribute context buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA 2U
+/** S/W defined index for access map buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_VA 3U
+/** S/W defined index for RTV circular context buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER_VA 4U
+#ifdef CONFIG_NVGPU_FECS_TRACE
+/** S/W defined index for fecs trace buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER_VA 5U
+#endif
+/** Number of context buffer virtual addresses. */
+#define NVGPU_GR_GLOBAL_CTX_VA_COUNT 6U
 /**
 * @brief Initialize global context descriptor structure.
 *