- rename vgpu_gr_gm20b_init_cyclestats() to vgpu_gr_init_cyclestats() and
  move it to gr_vgpu.c, which is common to all vgpu chips.
- rename vgpu_gr_gp10b_init_ctxsw_preemption_mode() to
  vgpu_gr_init_ctxsw_preemption_mode() and move it to ctx_vgpu.c, which is
  common to all vgpu chips.
- rename vgpu_gr_gp10b_set_ctxsw_preemption_mode() to
  vgpu_gr_set_ctxsw_preemption_mode() and move it to ctx_vgpu.c.
- rename vgpu_gr_gp10b_set_preemption_mode() to
  vgpu_gr_set_preemption_mode() and move it to ctx_vgpu.c.
- rename vgpu_gr_gp10b_init_ctx_state() to vgpu_gr_init_ctx_state() and
  move it to ctx_vgpu.c.
- combine vgpu_gr_gv11b_commit_inst() into vgpu_gr_commit_inst(), executing
  the subctx header alloc/free code only if the chip supports subcontexts
  (sketched after the header below).
- remove inclusion of hw header files from vgpu gr code by introducing
  hal ops for the following:
  - alloc_global_ctx_buffers:
    - hal op for getting the global ctx cb buffer size
    - hal op for getting the global ctx pagepool buffer size
  - set_ctxsw_preemption_mode:
    - hal op for getting the ctx spill size
    - hal op for getting the ctx pagepool size
    - hal op for getting the ctx betacb size
    - hal op for getting the ctx attrib cb size
These chip-specific function definitions are currently implemented in
chip-specific gr files and will need to be moved to hal units. The same
hal ops are also used by the corresponding native functions, which makes
gr_gv11b_set_ctxsw_preemption_mode() redundant; use
gr_gp10b_set_ctxsw_preemption_mode() for gv11b as well. A sketch of the
resulting op-table pattern follows.
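
A minimal, compilable sketch of the op-table idea described above. The
struct name gops_gr_ctxsw_sizes, the getter names, and all size values
are hypothetical stand-ins for illustration, not the actual nvgpu
interface:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct gk20a;

/* Hypothetical op table: one getter per size the common code needs. */
struct gops_gr_ctxsw_sizes {
	u32 (*get_ctx_spill_size)(struct gk20a *g);
	u32 (*get_ctx_pagepool_size)(struct gk20a *g);
	u32 (*get_ctx_betacb_size)(struct gk20a *g);
	u32 (*get_ctx_attrib_cb_size)(struct gk20a *g, u32 betacb_size);
};

/* Stand-in for the real gk20a device struct. */
struct gk20a {
	struct gops_gr_ctxsw_sizes sizes;
};

/* Chip-side getters; the values are placeholders, not real hw sizes. */
static u32 gp10b_spill_size(struct gk20a *g)    { (void)g; return 0x1000U; }
static u32 gp10b_pagepool_size(struct gk20a *g) { (void)g; return 0x2000U; }
static u32 gp10b_betacb_size(struct gk20a *g)   { (void)g; return 0x0800U; }
static u32 gp10b_attrib_cb_size(struct gk20a *g, u32 betacb_size)
{
	(void)g;
	return betacb_size + 0x0400U; /* placeholder arithmetic */
}

/*
 * Common preemption-mode setup: every size comes through the op table,
 * so this file needs no chip hw headers.
 */
static int set_ctxsw_preemption_mode_sketch(struct gk20a *g)
{
	u32 spill = g->sizes.get_ctx_spill_size(g);
	u32 pagepool = g->sizes.get_ctx_pagepool_size(g);
	u32 betacb = g->sizes.get_ctx_betacb_size(g);
	u32 attrib = g->sizes.get_ctx_attrib_cb_size(g, betacb);

	printf("spill=0x%x pagepool=0x%x betacb=0x%x attrib_cb=0x%x\n",
	       spill, pagepool, betacb, attrib);
	return 0;
}

int main(void)
{
	struct gk20a g = {
		.sizes = {
			.get_ctx_spill_size = gp10b_spill_size,
			.get_ctx_pagepool_size = gp10b_pagepool_size,
			.get_ctx_betacb_size = gp10b_betacb_size,
			.get_ctx_attrib_cb_size = gp10b_attrib_cb_size,
		},
	};

	return set_ctxsw_preemption_mode_sketch(&g);
}

The point of the indirection is that the common vgpu code never includes
a chip's hw headers; each chip only has to populate the op table.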
Jira GVSCI-334
Change-Id: I60be86f932e555176a972c125e3ea31270e6cba7
Signed-off-by: Aparna Das <aparnad@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2025428
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
/*
 * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_GR_VGPU_H
#define NVGPU_GR_VGPU_H

#include <nvgpu/types.h>

struct gk20a;
struct channel_gk20a;
struct gr_gk20a;
struct nvgpu_gr_config;
struct gr_zcull_info;
struct nvgpu_gr_zbc;
struct nvgpu_gr_zbc_entry;
struct nvgpu_gr_zbc_query_params;
struct dbg_session_gk20a;
struct tsg_gk20a;
struct vm_gk20a;
struct nvgpu_gr_ctx;

void vgpu_gr_detect_sm_arch(struct gk20a *g);
int vgpu_gr_init_ctx_state(struct gk20a *g);
int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g);
void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
void vgpu_gr_free_tsg_ctx(struct tsg_gk20a *tsg);
int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
			struct channel_gk20a *c, u64 zcull_va,
			u32 mode);
int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
			struct gr_zcull_info *zcull_params);
u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, struct nvgpu_gr_config *config,
			u32 gpc_index);
u32 vgpu_gr_get_max_fbps_count(struct gk20a *g);
u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g);
u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g);
u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g);
u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g);
int vgpu_gr_add_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
			struct nvgpu_gr_zbc_entry *zbc_val);
int vgpu_gr_query_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
			struct nvgpu_gr_zbc_query_params *query_params);
int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
			struct channel_gk20a *ch, bool enable);
int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
			struct channel_gk20a *ch, u64 sms, bool enable);
int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
			struct channel_gk20a *ch, u64 gpu_va, u32 mode);
int vgpu_gr_clear_sm_error_state(struct gk20a *g,
			struct channel_gk20a *ch, u32 sm_id);
int vgpu_gr_suspend_contexts(struct gk20a *g,
			struct dbg_session_gk20a *dbg_s,
			int *ctx_resident_ch_fd);
int vgpu_gr_resume_contexts(struct gk20a *g,
			struct dbg_session_gk20a *dbg_s,
			int *ctx_resident_ch_fd);
int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va);
int vgpu_gr_init_sm_id_table(struct gk20a *g);
int vgpu_gr_init_fs_state(struct gk20a *g);
int vgpu_gr_update_pc_sampling(struct channel_gk20a *ch, bool enable);
void vgpu_gr_init_cyclestats(struct gk20a *g);
int vgpu_gr_init_ctxsw_preemption_mode(struct gk20a *g,
			struct nvgpu_gr_ctx *gr_ctx,
			struct vm_gk20a *vm,
			u32 class,
			u32 flags);
int vgpu_gr_set_ctxsw_preemption_mode(struct gk20a *g,
			struct nvgpu_gr_ctx *gr_ctx,
			struct vm_gk20a *vm, u32 class,
			u32 graphics_preempt_mode,
			u32 compute_preempt_mode);
int vgpu_gr_set_preemption_mode(struct channel_gk20a *ch,
			u32 graphics_preempt_mode,
			u32 compute_preempt_mode);

#endif /* NVGPU_GR_VGPU_H */
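
As background for the vgpu_gr_commit_inst() declaration above, the
combined function described in the commit message might gate the subctx
header handling roughly as below. This is a sketch under stated
assumptions only: the supports_subctx flag, the helper names, and the
stand-in types are hypothetical, and the real implementation issues an
RPC to the vgpu server rather than the stubs shown here:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

/* Hypothetical stand-ins for the nvgpu types used by the sketch. */
struct gk20a {
	bool supports_subctx; /* set per chip, e.g. true on gv11b */
};
struct channel_gk20a {
	struct gk20a *g;
};

static int alloc_subctx_header(struct channel_gk20a *c)
{
	(void)c;
	return 0; /* allocate the subcontext header; stubbed here */
}

static int commit_inst_rpc(struct channel_gk20a *c, u64 gpu_va)
{
	(void)c;
	(void)gpu_va;
	return 0; /* the real code sends an RPC to the vgpu server */
}

/*
 * Single commit_inst entry point for all vgpu chips: the subctx header
 * is only allocated when the chip supports subcontexts, which removes
 * the need for a gv11b-only variant.
 */
int vgpu_gr_commit_inst_sketch(struct channel_gk20a *c, u64 gpu_va)
{
	int err = 0;

	if (c->g->supports_subctx) {
		err = alloc_subctx_header(c);
	}
	if (err == 0) {
		err = commit_inst_rpc(c, gpu_va);
	}
	return err;
}

Folding the chip check into the common path is what lets the commit
delete the gv11b-specific wrapper while keeping one declaration in this
header.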