Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: s/NVGPU_GR_CTX_*_VA/NVGPU_GR_GLOBAL_CTX_*_VA
Indices for the global ctx buffer virtual address array were named with the
GR_CTX prefix and defined in ctx.h. Prefix them with GR_GLOBAL_CTX instead and
move them to global_ctx.h. Also remove the flag global_ctx_buffer_mapped as it
is not used.

Bug 3677982

Change-Id: I9042e1c2bd8e8e10e97893484daeff0f97a96ea0
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2704855
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 7fa6976a98
Commit: 65e7baf856
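As context for the rename, the sketch below illustrates how the renamed
NVGPU_GR_GLOBAL_CTX_*_VA constants index the per-context arrays holding each
global context buffer's GPU virtual address and global-buffer index. It is an
illustrative, self-contained sketch only, not part of this change: demo_gr_ctx
and demo_get_global_ctx_va are hypothetical stand-ins for struct nvgpu_gr_ctx
and nvgpu_gr_ctx_get_global_ctx_va, and the real constants live in
global_ctx.h.

	/* Simplified stand-ins; real definitions live in global_ctx.h and the gr ctx code. */
	#include <stdint.h>

	#define NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA		0U
	#define NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA		1U
	#define NVGPU_GR_GLOBAL_CTX_VA_COUNT		6U

	struct demo_gr_ctx {	/* hypothetical stand-in for struct nvgpu_gr_ctx */
		uint64_t global_ctx_buffer_va[NVGPU_GR_GLOBAL_CTX_VA_COUNT];
		uint32_t global_ctx_buffer_index[NVGPU_GR_GLOBAL_CTX_VA_COUNT];
	};

	/* Look up the GPU VA recorded for one of the global context buffers. */
	static inline uint64_t demo_get_global_ctx_va(const struct demo_gr_ctx *ctx,
						      uint32_t va_index)
	{
		return ctx->global_ctx_buffer_va[va_index];
	}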
@@ -182,7 +182,7 @@ static void nvgpu_gr_ctx_unmap_global_ctx_buffers(struct gk20a *g,
 
 	nvgpu_log_fn(g, " ");
 
-	for (i = 0U; i < NVGPU_GR_CTX_VA_COUNT; i++) {
+	for (i = 0U; i < NVGPU_GR_GLOBAL_CTX_VA_COUNT; i++) {
 		if (g_bfr_va[i] != 0ULL) {
 			nvgpu_gr_global_ctx_buffer_unmap(global_ctx_buffer,
 				g_bfr_index[i], vm, g_bfr_va[i]);
@@ -191,8 +191,6 @@ static void nvgpu_gr_ctx_unmap_global_ctx_buffers(struct gk20a *g,
 	(void) memset(g_bfr_va, 0, sizeof(gr_ctx->global_ctx_buffer_va));
 	(void) memset(g_bfr_index, 0, sizeof(gr_ctx->global_ctx_buffer_index));
 
-	gr_ctx->global_ctx_buffer_mapped = false;
-
 }
 
 static int nvgpu_gr_ctx_map_ctx_circular_buffer(struct gk20a *g,
@@ -216,14 +214,14 @@ static int nvgpu_gr_ctx_map_ctx_circular_buffer(struct gk20a *g,
 		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 				NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR,
 				vm, NVGPU_VM_MAP_CACHEABLE, true);
-		g_bfr_index[NVGPU_GR_CTX_CIRCULAR_VA] =
+		g_bfr_index[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA] =
 			NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR;
 	} else {
 #endif
 		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 				NVGPU_GR_GLOBAL_CTX_CIRCULAR,
 				vm, NVGPU_VM_MAP_CACHEABLE, true);
-		g_bfr_index[NVGPU_GR_CTX_CIRCULAR_VA] =
+		g_bfr_index[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA] =
 			NVGPU_GR_GLOBAL_CTX_CIRCULAR;
 #ifdef CONFIG_NVGPU_VPR
 	}
@@ -231,7 +229,7 @@ static int nvgpu_gr_ctx_map_ctx_circular_buffer(struct gk20a *g,
 	if (gpu_va == 0ULL) {
 		goto clean_up;
 	}
-	g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
+	g_bfr_va[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA] = gpu_va;
 
 	return 0;
 
@@ -260,14 +258,14 @@ static int nvgpu_gr_ctx_map_ctx_attribute_buffer(struct gk20a *g,
 		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 				NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR,
 				vm, NVGPU_VM_MAP_CACHEABLE, false);
-		g_bfr_index[NVGPU_GR_CTX_ATTRIBUTE_VA] =
+		g_bfr_index[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA] =
 			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR;
 	} else {
 #endif
 		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 				NVGPU_GR_GLOBAL_CTX_ATTRIBUTE,
 				vm, NVGPU_VM_MAP_CACHEABLE, false);
-		g_bfr_index[NVGPU_GR_CTX_ATTRIBUTE_VA] =
+		g_bfr_index[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA] =
 			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE;
 #ifdef CONFIG_NVGPU_VPR
 	}
@@ -275,7 +273,7 @@ static int nvgpu_gr_ctx_map_ctx_attribute_buffer(struct gk20a *g,
 	if (gpu_va == 0ULL) {
 		goto clean_up;
 	}
-	g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
+	g_bfr_va[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA] = gpu_va;
 
 	return 0;
 
@@ -305,14 +303,14 @@ static int nvgpu_gr_ctx_map_ctx_pagepool_buffer(struct gk20a *g,
 		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 				NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR,
 				vm, NVGPU_VM_MAP_CACHEABLE, true);
-		g_bfr_index[NVGPU_GR_CTX_PAGEPOOL_VA] =
+		g_bfr_index[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA] =
 			NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR;
 	} else {
 #endif
 		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
 				NVGPU_GR_GLOBAL_CTX_PAGEPOOL,
 				vm, NVGPU_VM_MAP_CACHEABLE, true);
-		g_bfr_index[NVGPU_GR_CTX_PAGEPOOL_VA] =
+		g_bfr_index[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA] =
 			NVGPU_GR_GLOBAL_CTX_PAGEPOOL;
 #ifdef CONFIG_NVGPU_VPR
 	}
@@ -320,7 +318,7 @@ static int nvgpu_gr_ctx_map_ctx_pagepool_buffer(struct gk20a *g,
 	if (gpu_va == 0ULL) {
 		goto clean_up;
 	}
-	g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
+	g_bfr_va[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA] = gpu_va;
 
 	return 0;
 
@@ -402,7 +400,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER)) {
 		err = nvgpu_gr_ctx_map_ctx_buffer(g,
 			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER,
-			NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA,
+			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER_VA,
 			gr_ctx, global_ctx_buffer, vm);
 		if (err != 0) {
 			nvgpu_err(g,
@@ -416,7 +414,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	/* Priv register Access Map */
 	err = nvgpu_gr_ctx_map_ctx_buffer(g,
 		NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP,
-		NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA,
+		NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_VA,
 		gr_ctx, global_ctx_buffer, vm);
 	if (err != 0) {
 		nvgpu_err(g, "cannot map ctx priv access buffer");
@@ -428,7 +426,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA)) {
 		err = nvgpu_gr_ctx_map_ctx_buffer(g,
 			NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER,
-			NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA,
+			NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER_VA,
 			gr_ctx, global_ctx_buffer, vm);
 		if (err != 0) {
 			nvgpu_err(g, "cannot map ctx fecs trace buffer");
@@ -437,8 +435,6 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	}
 #endif
 
-	gr_ctx->global_ctx_buffer_mapped = true;
-
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "done");
 	return 0;
 
@@ -518,7 +514,7 @@ void nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
 		g->allow_all);
 	g->ops.gr.ctxsw_prog.set_priv_access_map_addr(g, mem,
 		nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-			NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA));
+			NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_VA));
 #endif
 
 #ifdef CONFIG_NVGPU_HAL_NON_FUSA
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -150,19 +150,13 @@ struct nvgpu_gr_ctx {
 	 * Array to store GPU virtual addresses of all global context
 	 * buffers.
 	 */
-	u64 global_ctx_buffer_va[NVGPU_GR_CTX_VA_COUNT];
+	u64 global_ctx_buffer_va[NVGPU_GR_GLOBAL_CTX_VA_COUNT];
 
 	/**
 	 * Array to store indexes of global context buffers
 	 * corresponding to GPU virtual addresses above.
 	 */
-	u32 global_ctx_buffer_index[NVGPU_GR_CTX_VA_COUNT];
-
-	/**
-	 * Flag to indicate if global context buffers are mapped and
-	 * #global_ctx_buffer_va array is populated.
-	 */
-	bool global_ctx_buffer_mapped;
+	u32 global_ctx_buffer_index[NVGPU_GR_GLOBAL_CTX_VA_COUNT];
 
 	/**
 	 * TSG identifier corresponding to the graphics context.
@@ -637,7 +637,7 @@ int nvgpu_gr_fecs_trace_bind_channel(struct gk20a *g,
 
 	if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA)) {
 		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-				NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA);
+				NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER_VA);
 		nvgpu_log(g, gpu_dbg_ctxsw, "gpu_va=%llx", addr);
 		aperture_mask = 0;
 	} else {
@@ -352,7 +352,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 		/* global pagepool buffer */
 		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-				NVGPU_GR_CTX_PAGEPOOL_VA);
+				NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA);
 		size = nvgpu_safe_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size(
 				global_ctx_buffer,
 				NVGPU_GR_GLOBAL_CTX_PAGEPOOL));
@@ -362,7 +362,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 
 	/* global bundle cb */
 	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-			NVGPU_GR_CTX_CIRCULAR_VA);
+			NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA);
 	size = nvgpu_safe_cast_u64_to_u32(
 			g->ops.gr.init.get_bundle_cb_default_size(g));
 
@@ -371,7 +371,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 
 	/* global attrib cb */
 	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-			NVGPU_GR_CTX_ATTRIBUTE_VA);
+			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA);
 
 	g->ops.gr.init.commit_global_attrib_cb(g, gr_ctx,
 			nvgpu_gr_config_get_tpc_count(config),
@@ -384,7 +384,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 	if (g->ops.gr.init.commit_rtv_cb != NULL) {
 		/* RTV circular buffer */
 		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-				NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA);
+				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER_VA);
 
 		g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
 	}
@@ -95,7 +95,7 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
 	/* set priv access map */
 	g->ops.gr.ctxsw_prog.set_priv_access_map_addr(g, ctxheader,
 		nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-			NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA));
+			NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_VA));
 #endif
 
 	g->ops.gr.ctxsw_prog.set_patch_addr(g, ctxheader,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -108,26 +108,6 @@ struct zcull_ctx_desc;
 #define NVGPU_GR_CTX_COUNT			3U
 #endif
 
-/*
- * either ATTRIBUTE or ATTRIBUTE_VPR maps to NVGPU_GR_CTX_ATTRIBUTE_VA.
- */
-/** S/W defined index for circular context buffer virtual address. */
-#define NVGPU_GR_CTX_CIRCULAR_VA		0U
-/** S/W defined index for pagepool context buffer virtual address. */
-#define NVGPU_GR_CTX_PAGEPOOL_VA		1U
-/** S/W defined index for attribute context buffer virtual address. */
-#define NVGPU_GR_CTX_ATTRIBUTE_VA		2U
-/** S/W defined index for access map buffer virtual address. */
-#define NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA		3U
-/** S/W defined index for RTV circular context buffer virtual address. */
-#define NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA	4U
-#ifdef CONFIG_NVGPU_FECS_TRACE
-/** S/W defined index for fecs trace buffer virtual address. */
-#define NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA	5U
-#endif
-/** Number of context buffer virtual addresses. */
-#define NVGPU_GR_CTX_VA_COUNT			6U
-
 #ifdef CONFIG_NVGPU_DEBUGGER
 /* PM Context Switch Modes */
 /** This mode says that the pms are not to be context switched. */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -87,6 +87,26 @@ typedef void (*global_ctx_mem_destroy_fn)(struct gk20a *g,
 /** Number of global context buffers. */
 #define NVGPU_GR_GLOBAL_CTX_COUNT			9U
 
+/*
+ * either ATTRIBUTE or ATTRIBUTE_VPR maps to NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA.
+ */
+/** S/W defined index for circular context buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_CIRCULAR_VA			0U
+/** S/W defined index for pagepool context buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VA			1U
+/** S/W defined index for attribute context buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VA		2U
+/** S/W defined index for access map buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_VA		3U
+/** S/W defined index for RTV circular context buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER_VA	4U
+#ifdef CONFIG_NVGPU_FECS_TRACE
+/** S/W defined index for fecs trace buffer virtual address. */
+#define NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER_VA	5U
+#endif
+/** Number of context buffer virtual addresses. */
+#define NVGPU_GR_GLOBAL_CTX_VA_COUNT			6U
+
 /**
  * @brief Initialize global context descriptor structure.
  *