linux-nvgpu/drivers/gpu/nvgpu/include/nvgpu/gr/ctx.h
Deepak Nibade fe27a7f934 gpu: nvgpu: add gr/ctx and gr/subctx APIs to set hwpm ctxsw mode
gr_gk20a_update_hwpm_ctxsw_mode() currently validates the incoming
hwpm mode, checks whether that mode is already set, and if not,
programs the new hwpm mode by calling the g->ops.gr.ctxsw_prog HALs

Instead of programming the hwpm mode in gr_gk20a.c, move the
programming into the gr/ctx and gr/subctx units by adding the below
APIs (a usage sketch follows the list)
nvgpu_gr_ctx_prepare_hwpm_mode() - validate the incoming mode and
                                   check whether it is already set
nvgpu_gr_ctx_set_hwpm_mode() - set the pm mode in the graphics context
nvgpu_gr_subctx_set_hwpm_mode() - set the pm mode in the subcontext
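
A minimal sketch of the resulting call flow. Everything outside the
three APIs above is an assumption for illustration: the wrapper name,
the error handling, and the nvgpu_gr_subctx_set_hwpm_mode() signature
are not taken from the nvgpu sources:

static int update_hwpm_mode_sketch(struct gk20a *g,
		struct nvgpu_gr_ctx *gr_ctx,
		struct nvgpu_gr_subctx *subctx, u32 mode)
{
	bool skip_update = false;
	int err;

	/* Validate mode; skip_update is set if it is already in effect. */
	err = nvgpu_gr_ctx_prepare_hwpm_mode(g, gr_ctx, mode, &skip_update);
	if (err != 0 || skip_update) {
		return err;
	}

	/* Program the pm mode into the graphics context image. */
	err = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, true);
	if (err != 0) {
		return err;
	}

	/* Mirror the mode into the subcontext header, if one exists
	 * (signature assumed for illustration). */
	if (subctx != NULL) {
		nvgpu_gr_subctx_set_hwpm_mode(g, subctx, gr_ctx);
	}

	return 0;
}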

Add a gpu_va field to struct pm_ctx_desc to store the GPU virtual
address to be programmed into the context

Rename NVGPU_DBG_HWPM_CTXSW_MODE_* to NVGPU_GR_CTX_HWPM_CTXSW_MODE_*
and move them to gr/ctx.h

Remove the below HALs since they are no longer used
g->ops.gr.ctxsw_prog.set_pm_mode_no_ctxsw()
g->ops.gr.ctxsw_prog.set_pm_mode_ctxsw()
g->ops.gr.ctxsw_prog.set_pm_mode_stream_out_ctxsw()

Jira NVGPU-1527
Jira NVGPU-1613

Change-Id: Id2a4d498182ec0e3586dc7265f73a25870ca2ef7
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2011093
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2019-02-11 10:25:34 -08:00


/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef NVGPU_INCLUDE_GR_CTX_H
#define NVGPU_INCLUDE_GR_CTX_H

#include <nvgpu/types.h>
#include <nvgpu/nvgpu_mem.h>

/*
 * Allocate a minimum of one page (4 KB) of patch space; this amounts to
 * 512 entries of address and data pairs.
 */
#define PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY 2U
#define PATCH_CTX_SLOTS_PER_PAGE \
	(PAGE_SIZE/(PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY * (u32)sizeof(u32)))
#define PATCH_CTX_ENTRIES_FROM_SIZE(size) ((size)/sizeof(u32))
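/*
 * Worked example (assuming the common PAGE_SIZE of 4096 bytes): each
 * patch entry is an address/data pair occupying two u32 slots, i.e.
 * 2 * sizeof(u32) = 8 bytes, so PATCH_CTX_SLOTS_PER_PAGE evaluates to
 * 4096 / 8 = 512 entries per page, as the comment above states.
 */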

struct gk20a;
struct vm_gk20a;
struct nvgpu_gr_global_ctx_buffer_desc;
struct nvgpu_gr_global_ctx_local_golden_image;

enum nvgpu_gr_ctx_index {
	NVGPU_GR_CTX_CTX = 0,
	NVGPU_GR_CTX_PM_CTX,
	NVGPU_GR_CTX_PATCH_CTX,
	NVGPU_GR_CTX_PREEMPT_CTXSW,
	NVGPU_GR_CTX_SPILL_CTXSW,
	NVGPU_GR_CTX_BETACB_CTXSW,
	NVGPU_GR_CTX_PAGEPOOL_CTXSW,
	NVGPU_GR_CTX_GFXP_RTVCB_CTXSW,
	NVGPU_GR_CTX_COUNT
};

/*
 * Either ATTRIBUTE or ATTRIBUTE_VPR maps to NVGPU_GR_CTX_ATTRIBUTE_VA.
 */
enum nvgpu_gr_ctx_global_ctx_va {
	NVGPU_GR_CTX_CIRCULAR_VA = 0,
	NVGPU_GR_CTX_PAGEPOOL_VA = 1,
	NVGPU_GR_CTX_ATTRIBUTE_VA = 2,
	NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA = 3,
	NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA = 4,
	NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA = 5,
	NVGPU_GR_CTX_VA_COUNT = 6
};

struct patch_desc {
	struct nvgpu_mem mem;
	u32 data_count;
};

struct zcull_ctx_desc {
	u64 gpu_va;
	u32 ctx_sw_mode;
};

/* PM context switch modes */

/* In this mode the PMs are not context switched. */
#define NVGPU_GR_CTX_HWPM_CTXSW_MODE_NO_CTXSW		(0x00000000U)
/* In this mode the PMs in Mode-B are context switched. */
#define NVGPU_GR_CTX_HWPM_CTXSW_MODE_CTXSW		(0x00000001U)
/* In this mode the PMs in Mode-E (stream out) are context switched. */
#define NVGPU_GR_CTX_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW	(0x00000002U)

struct pm_ctx_desc {
	struct nvgpu_mem mem;
	/* GPU virtual address to be programmed into the context image */
	u64 gpu_va;
	u32 pm_mode;
};

struct nvgpu_gr_ctx_desc {
	u32 size[NVGPU_GR_CTX_COUNT];
};

struct nvgpu_gr_ctx {
	u32 ctx_id;
	bool ctx_id_valid;
	struct nvgpu_mem mem;

	struct nvgpu_mem preempt_ctxsw_buffer;
	struct nvgpu_mem spill_ctxsw_buffer;
	struct nvgpu_mem betacb_ctxsw_buffer;
	struct nvgpu_mem pagepool_ctxsw_buffer;
	struct nvgpu_mem gfxp_rtvcb_ctxsw_buffer;

	struct patch_desc patch_ctx;
	struct zcull_ctx_desc zcull_ctx;
	struct pm_ctx_desc pm_ctx;

	u32 graphics_preempt_mode;
	u32 compute_preempt_mode;

	bool golden_img_loaded;
	bool cilp_preempt_pending;
	bool boosted_ctx;

#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
	u64 virt_ctx;
#endif

	u64 global_ctx_buffer_va[NVGPU_GR_CTX_VA_COUNT];
	int global_ctx_buffer_index[NVGPU_GR_CTX_VA_COUNT];
	bool global_ctx_buffer_mapped;

	u32 tsgid;
};

struct nvgpu_gr_ctx_desc *nvgpu_gr_ctx_desc_alloc(struct gk20a *g);
void nvgpu_gr_ctx_desc_free(struct gk20a *g,
	struct nvgpu_gr_ctx_desc *desc);

void nvgpu_gr_ctx_set_size(struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	enum nvgpu_gr_ctx_index index, u32 size);

int nvgpu_gr_ctx_alloc(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	struct vm_gk20a *vm);
void nvgpu_gr_ctx_free(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
	struct vm_gk20a *vm);

int nvgpu_gr_ctx_alloc_pm_ctx(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	struct vm_gk20a *vm,
	u64 gpu_va);
void nvgpu_gr_ctx_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
	struct nvgpu_gr_ctx *gr_ctx);

int nvgpu_gr_ctx_alloc_patch_ctx(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	struct vm_gk20a *vm);
void nvgpu_gr_ctx_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
	struct nvgpu_gr_ctx *gr_ctx);

void nvgpu_gr_ctx_set_zcull_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
	u32 mode, u64 gpu_va);

int nvgpu_gr_ctx_alloc_ctxsw_buffers(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	struct vm_gk20a *vm);

int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
	struct vm_gk20a *vm, bool vpr);
u64 nvgpu_gr_ctx_get_global_ctx_va(struct nvgpu_gr_ctx *gr_ctx,
	enum nvgpu_gr_ctx_global_ctx_va index);

int nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image,
	bool cde);

int nvgpu_gr_ctx_patch_write_begin(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	bool update_patch_count);
void nvgpu_gr_ctx_patch_write_end(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	bool update_patch_count);
void nvgpu_gr_ctx_patch_write(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	u32 addr, u32 data, bool patch);
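/*
 * Illustrative patch-write sequence (a sketch, not taken from the nvgpu
 * sources): bracket the writes with begin/end, and 'patch = true' is
 * assumed here to direct the write into the patch buffer rather than
 * immediately to the hardware:
 *
 *	err = nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, true);
 *	if (err == 0) {
 *		nvgpu_gr_ctx_patch_write(g, gr_ctx, addr, data, true);
 *		nvgpu_gr_ctx_patch_write_end(g, gr_ctx, true);
 *	}
 */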

u32 nvgpu_gr_ctx_get_ctx_id(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);

int nvgpu_gr_ctx_init_zcull(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
int nvgpu_gr_ctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
	bool set_zcull_ptr);

int nvgpu_gr_ctx_set_smpc_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
	bool enable);

/* Validate the requested HWPM mode; *skip_update is set if the mode is
 * already in effect. */
int nvgpu_gr_ctx_prepare_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
	u32 mode, bool *skip_update);
/* Set the pm mode in the graphics context; set_pm_ptr requests that the
 * pm buffer GPU VA also be programmed. */
int nvgpu_gr_ctx_set_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
	bool set_pm_ptr);

#endif /* NVGPU_INCLUDE_GR_CTX_H */