linux-nvgpu/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
Deepak Nibade 45e1207223 gpu: nvgpu: add common.gr.obj_ctx apis to initialize/set preemption mode
These HALs are used to initialize and set preemption modes
g->ops.gr.init_ctxsw_preemption_mode()
g->ops.gr.set_ctxsw_preemption_mode()
g->ops.gr.update_ctxsw_preemption_mode()

They are all h/w independent except for the functional support for
GFXP/CILP preemption, which is only present on gp10b+ chips

Add a characteristics flag NVGPU_SUPPORT_PREEMPTION_GFXP for these
preemption modes and set this flag for gp10b+ chips

Use this flag to unify all of the above HALs into the common functions below
nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode()
nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode()
nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode()

vGPU-specific code also directly calls the vGPU-specific APIs below
vgpu_gr_init_ctxsw_preemption_mode()
vgpu_gr_set_ctxsw_preemption_mode()

g->ops.gr.update_ctxsw_preemption_mode() is not needed for vGPU since
it is handled by vserver

The above g->ops.gr.*_ctxsw_preemption_mode() HALs are no longer
required, hence delete them

Jira NVGPU-1887

Change-Id: I9b3164bcf01e5e3c27e52369c9364e0ee23a9662
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2088507
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2019-04-04 11:35:09 -07:00
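
The unification described in the commit hinges on one flag check: the common nvgpu_gr_obj_ctx_*_ctxsw_preemption_mode() functions can refuse GFXP/CILP requests on chips that do not set NVGPU_SUPPORT_PREEMPTION_GFXP. The following is a minimal, stand-alone sketch of that gating idea, not the actual nvgpu implementation; the stub flag query, the mode constants, and the error handling are assumptions made only for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <errno.h>

typedef uint32_t u32;

struct gk20a;        /* GPU device context (opaque in this sketch) */
struct nvgpu_gr_ctx; /* graphics context (opaque in this sketch) */

/* Stand-ins for the real nvgpu flag query and preemption mode bits;
 * the names and values here are illustrative assumptions. */
#define SUPPORT_PREEMPTION_GFXP    1
#define PREEMPT_MODE_GRAPHICS_GFXP (1U << 1)
#define PREEMPT_MODE_COMPUTE_CILP  (1U << 1)

static bool is_enabled(struct gk20a *g, int flag)
{
	(void)g; (void)flag;
	return false; /* the real driver would consult the per-chip enabled flags */
}

/*
 * Gating logic in the spirit of the unified
 * nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(): GFXP/CILP requests are
 * rejected unless the chip advertises NVGPU_SUPPORT_PREEMPTION_GFXP
 * (gp10b+), so no per-chip HAL is needed for the check itself.
 */
static int set_ctxsw_preemption_mode_sketch(struct gk20a *g,
		struct nvgpu_gr_ctx *gr_ctx,
		u32 graphics_preempt_mode, u32 compute_preempt_mode)
{
	bool gfxp_ok = is_enabled(g, SUPPORT_PREEMPTION_GFXP);

	if (((graphics_preempt_mode & PREEMPT_MODE_GRAPHICS_GFXP) != 0U) &&
	    !gfxp_ok)
		return -EINVAL; /* GFXP not supported on this chip */

	if (((compute_preempt_mode & PREEMPT_MODE_COMPUTE_CILP) != 0U) &&
	    !gfxp_ok)
		return -EINVAL; /* CILP gated by the same flag per the commit */

	/* ...chip-independent gr_ctx programming would follow here... */
	(void)gr_ctx;
	return 0;
}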


/*
* GM20B GPC MMU
*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_GM20B_GR_GM20B_H
#define NVGPU_GM20B_GR_GM20B_H

#include <nvgpu/types.h>

struct gk20a;
struct nvgpu_warpstate;
struct nvgpu_gr_ctx;
struct channel_gk20a;
struct gk20a_ctxsw_ucode_segments;
struct gk20a_debug_output;
struct nvgpu_preemption_modes_rec;

#define MAXWELL_B			0xB197U
#define MAXWELL_COMPUTE_B		0xB1C0U
#define KEPLER_INLINE_TO_MEMORY_B	0xA140U
#define MAXWELL_DMA_COPY_A		0xB0B5U
#define MAXWELL_CHANNEL_GPFIFO_A	0xB06FU

#define NVB197_SET_ALPHA_CIRCULAR_BUFFER_SIZE	0x02dc
#define NVB197_SET_CIRCULAR_BUFFER_SIZE		0x1280
#define NVB197_SET_SHADER_EXCEPTIONS		0x1528
#define NVB197_SET_RD_COALESCE			0x102c
#define NVB1C0_SET_SHADER_EXCEPTIONS		0x1528
#define NVB1C0_SET_RD_COALESCE			0x0228

#define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE	U32(0)

int gm20b_gr_tpc_disable_override(struct gk20a *g, u32 mask);
int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx, bool patch);
int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
	u32 class_num, u32 offset, u32 data);
void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);
void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data);
void gr_gm20b_set_hww_esr_report_mask(struct gk20a *g);
bool gr_gm20b_is_valid_class(struct gk20a *g, u32 class_num);
bool gr_gm20b_is_valid_gfx_class(struct gk20a *g, u32 class_num);
bool gr_gm20b_is_valid_compute_class(struct gk20a *g, u32 class_num);
void gr_gm20b_init_sm_dsm_reg_info(void);
void gr_gm20b_get_sm_dsm_perf_regs(struct gk20a *g,
	u32 *num_sm_dsm_perf_regs,
	u32 **sm_dsm_perf_regs,
	u32 *perf_register_stride);
void gr_gm20b_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
	u32 *num_sm_dsm_perf_ctrl_regs,
	u32 **sm_dsm_perf_ctrl_regs,
	u32 *ctrl_register_stride);
void gr_gm20b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
void gr_gm20b_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
	struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset);
bool gr_gm20b_is_tpc_addr(struct gk20a *g, u32 addr);
u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr);
void gr_gm20b_detect_sm_arch(struct gk20a *g);
int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
	struct gk20a_debug_output *o);
int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
	bool enable);
u32 *gr_gm20b_rop_l2_en_mask(struct gk20a *g);
void gr_gm20b_init_cyclestats(struct gk20a *g);
void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state);
int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc,
	u32 tpc, u32 sm, struct channel_gk20a *fault_ch);
int gm20b_gr_clear_sm_error_state(struct gk20a *g,
	struct channel_gk20a *ch, u32 sm_id);
int gr_gm20b_get_preemption_mode_flags(struct gk20a *g,
	struct nvgpu_preemption_modes_rec *preemption_modes_rec);
void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
	u32 global_esr);
u32 gr_gm20b_get_pmm_per_chiplet_offset(void);
void gm20b_gr_set_debug_mode(struct gk20a *g, bool enable);

#endif /* NVGPU_GM20B_GR_GM20B_H */
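
For context, the class IDs and NVB197_*/NVB1C0_* method offsets declared in this header are the (class, method) pairs that gr_gm20b_handle_sw_method() is expected to match and route to the corresponding setters. The sketch below only illustrates that dispatch pattern under stated assumptions; the stand-in types, stub setters, and the offset-in-words scaling are guesses for illustration, not the verbatim gr_gm20b.c code.

#include <stdint.h>
#include <errno.h>

typedef uint32_t u32;
struct gk20a; /* opaque GPU device context */

#define MAXWELL_B			0xB197U
#define NVB197_SET_SHADER_EXCEPTIONS	0x1528
#define NVB197_SET_CIRCULAR_BUFFER_SIZE	0x1280

/* Stub setters standing in for the gr_gm20b_* helpers declared above. */
static void set_shader_exceptions(struct gk20a *g, u32 data) { (void)g; (void)data; }
static void set_circular_buffer_size(struct gk20a *g, u32 data) { (void)g; (void)data; }

static int handle_sw_method_sketch(struct gk20a *g, u32 class_num,
		u32 offset, u32 data)
{
	if (class_num != MAXWELL_B)
		return -EINVAL;	/* only the 3D class in this sketch */

	/* Assumption: the offset arrives in 32-bit words, so it is scaled
	 * back to a byte address before comparing with the NVB197_* values. */
	switch (offset << 2) {
	case NVB197_SET_SHADER_EXCEPTIONS:
		set_shader_exceptions(g, data);
		return 0;
	case NVB197_SET_CIRCULAR_BUFFER_SIZE:
		set_circular_buffer_size(g, data);
		return 0;
	default:
		return -EINVAL;	/* unrecognised sw method */
	}
}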