mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-23 01:50:07 +03:00
gpu: nvgpu: Conditional enable for replayable fault
Enable replayable fault only for contexts where they are requested.
This required moving the code to initialize subcontexts to happen
later.

Fix signedness issues in definition of flags.

JIRA NVGPU-714

Change-Id: I472004e13b1ea46c1bd202f9b12d2ce221b756f9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1773262
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions

parent bbebc611bc
commit 0ddd219697
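In caller terms, the change makes replayable-fault setup opt-in: a context must request it through the gpfifo allocation flags instead of getting it unconditionally. A minimal sketch of that gating, assuming a flags word reaches channel setup as in the diffs below (the flag value matches the diff; struct alloc_gpfifo_args and wants_replayable() are hypothetical stand-ins, not nvgpu API):

#include <stdbool.h>

#define NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE	(1U << 2U)

struct alloc_gpfifo_args {
	unsigned int flags;	/* NVGPU_GPFIFO_FLAGS_* bits */
};

static bool wants_replayable(const struct alloc_gpfifo_args *args)
{
	/* Replayable fault handling becomes opt-in per context. */
	return (args->flags & NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE) != 0U;
}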
@@ -42,18 +42,18 @@ struct fifo_profile_gk20a;
 #include "fence_gk20a.h"
 
 /* Flags to be passed to gk20a_channel_alloc_gpfifo() */
-#define NVGPU_GPFIFO_FLAGS_SUPPORT_VPR			(1 << 0)
-#define NVGPU_GPFIFO_FLAGS_SUPPORT_DETERMINISTIC	(1 << 1)
-#define NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE	(1 << 2)
-#define NVGPU_GPFIFO_FLAGS_USERMODE_SUPPORT		(1 << 3)
+#define NVGPU_GPFIFO_FLAGS_SUPPORT_VPR			(1U << 0U)
+#define NVGPU_GPFIFO_FLAGS_SUPPORT_DETERMINISTIC	(1U << 1U)
+#define NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE	(1U << 2U)
+#define NVGPU_GPFIFO_FLAGS_USERMODE_SUPPORT		(1U << 3U)
 
 /* Flags to be passed to nvgpu_submit_channel_gpfifo() */
-#define NVGPU_SUBMIT_FLAGS_FENCE_WAIT			(1 << 0)
-#define NVGPU_SUBMIT_FLAGS_FENCE_GET			(1 << 1)
-#define NVGPU_SUBMIT_FLAGS_HW_FORMAT			(1 << 2)
-#define NVGPU_SUBMIT_FLAGS_SYNC_FENCE			(1 << 3)
-#define NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI			(1 << 4)
-#define NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING	(1 << 5)
+#define NVGPU_SUBMIT_FLAGS_FENCE_WAIT			(1U << 0U)
+#define NVGPU_SUBMIT_FLAGS_FENCE_GET			(1U << 1U)
+#define NVGPU_SUBMIT_FLAGS_HW_FORMAT			(1U << 2U)
+#define NVGPU_SUBMIT_FLAGS_SYNC_FENCE			(1U << 3U)
+#define NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI			(1U << 4U)
+#define NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING	(1U << 5U)
 
 /*
  * The binary format of 'struct nvgpu_channel_fence' introduced here
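The `1U << nU` rewrite is not cosmetic: left-shifting a signed `int` literal into the sign bit (`1 << 31`) is undefined behavior in C, and mixing signed shift results into unsigned flag words forces implicit conversions that MISRA-style checkers reject. A standalone illustration of the well-defined unsigned form (not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int flags = 0U;

	/* (1 << 31) would shift into the sign bit of a signed int,
	 * which is undefined behavior; 1U << 31U is well defined. */
	flags |= (1U << 2U);	/* e.g. REPLAYABLE_FAULTS_ENABLE */
	flags |= (1U << 31U);	/* safe for any bit of a 32-bit word */

	printf("flags = 0x%08x\n", flags);
	return 0;
}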
@@ -138,11 +138,17 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
 	struct gk20a *g = c->g;
 	struct nvgpu_mem *mem = &c->inst_block;
 	u32 data;
+	bool replayable = false;
 
 	nvgpu_log_fn(g, " ");
 
 	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
+	if ((flags & NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE) != 0) {
+		replayable = true;
+	}
+	gv11b_init_subcontext_pdb(c->vm, mem, replayable);
+
 	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
 		pbdma_gp_base_offset_f(
 		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
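This hunk is where the "initialize subcontexts later" part of the commit message lands: the subcontext PDB commit moves out of unconditional instance-block setup and into ramfc setup, where the channel's allocation flags are known. A condensed, compilable sketch of the two resulting call sites (types stubbed; only the call shapes and the changed signature come from the diff):

#include <stdbool.h>

struct vm_gk20a;
struct nvgpu_mem;

/* Signature as changed by this commit. */
void gv11b_init_subcontext_pdb(struct vm_gk20a *vm,
		struct nvgpu_mem *inst_block, bool replayable);

#define NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE	(1U << 2U)

/* gv11b_init_inst_block() no longer knows the channel's wishes,
 * so it commits the subcontext PDB with replayable faults off. */
static void init_inst_block_sketch(struct vm_gk20a *vm,
		struct nvgpu_mem *inst_block)
{
	gv11b_init_subcontext_pdb(vm, inst_block, false);
}

/* channel_gv11b_setup_ramfc() later re-commits the PDB with the
 * setting actually requested in the allocation flags. */
static void setup_ramfc_sketch(struct vm_gk20a *vm,
		struct nvgpu_mem *inst_block, unsigned int flags)
{
	bool replayable =
		(flags & NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE) != 0U;

	gv11b_init_subcontext_pdb(vm, inst_block, replayable);
}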
@@ -61,7 +61,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
 	if (big_page_size && g->ops.mm.set_big_page_size)
 		g->ops.mm.set_big_page_size(g, inst_block, big_page_size);
 
-	gv11b_init_subcontext_pdb(vm, inst_block);
+	gv11b_init_subcontext_pdb(vm, inst_block, false);
 }
 
 bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
@@ -37,7 +37,8 @@
 static void gv11b_subctx_commit_valid_mask(struct vm_gk20a *vm,
 		struct nvgpu_mem *inst_block);
 static void gv11b_subctx_commit_pdb(struct vm_gk20a *vm,
-		struct nvgpu_mem *inst_block);
+		struct nvgpu_mem *inst_block,
+		bool replayable);
 
 void gv11b_free_subctx_header(struct channel_gk20a *c)
 {
@@ -84,9 +85,10 @@ int gv11b_alloc_subctx_header(struct channel_gk20a *c)
 }
 
 void gv11b_init_subcontext_pdb(struct vm_gk20a *vm,
-		struct nvgpu_mem *inst_block)
+		struct nvgpu_mem *inst_block,
+		bool replayable)
 {
-	gv11b_subctx_commit_pdb(vm, inst_block);
+	gv11b_subctx_commit_pdb(vm, inst_block, replayable);
 	gv11b_subctx_commit_valid_mask(vm, inst_block);
 
 }
@@ -157,8 +159,9 @@ void gv11b_subctx_commit_valid_mask(struct vm_gk20a *vm,
 	nvgpu_mem_wr32(g, inst_block, 167, 0xffffffff);
 }
 
-void gv11b_subctx_commit_pdb(struct vm_gk20a *vm,
-		struct nvgpu_mem *inst_block)
+static void gv11b_subctx_commit_pdb(struct vm_gk20a *vm,
+		struct nvgpu_mem *inst_block,
+		bool replayable)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	u32 lo, hi;
@@ -179,11 +182,16 @@ void gv11b_subctx_commit_pdb(struct vm_gk20a *vm,
 			aperture, 0) |
 		ram_in_sc_page_dir_base_vol_f(
 			ram_in_sc_page_dir_base_vol_true_v(), 0) |
-		ram_in_sc_page_dir_base_fault_replay_tex_f(1, 0) |
-		ram_in_sc_page_dir_base_fault_replay_gcc_f(1, 0) |
 		ram_in_sc_use_ver2_pt_format_f(1, 0) |
 		ram_in_sc_big_page_size_f(1, 0) |
 		ram_in_sc_page_dir_base_lo_0_f(pdb_addr_lo);
+
+	if (replayable) {
+		format_word |=
+			ram_in_sc_page_dir_base_fault_replay_tex_f(1, 0) |
+			ram_in_sc_page_dir_base_fault_replay_gcc_f(1, 0);
+	}
+
 	nvgpu_log(g, gpu_dbg_info, " pdb info lo %x hi %x",
 		format_word, pdb_addr_hi);
 	for (subctx_id = 0; subctx_id < max_subctx_count; subctx_id++) {
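The core behavioral change sits in `format_word`: the fault-replay TEX and GCC bits were previously OR-ed in unconditionally and are now applied only when the caller asked for them. A reduced model of that pattern, with hypothetical bit positions standing in for the generated `ram_in_sc_*_f()` helpers:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical bit positions; the real values come from the
 * generated ram_in_sc_page_dir_base_fault_replay_*_f() helpers. */
#define FAULT_REPLAY_TEX	(1U << 4U)
#define FAULT_REPLAY_GCC	(1U << 5U)

static uint32_t commit_pdb_format_word(uint32_t base_fields,
		bool replayable)
{
	uint32_t format_word = base_fields;

	/* Previously unconditional; now gated on the caller's request. */
	if (replayable)
		format_word |= FAULT_REPLAY_TEX | FAULT_REPLAY_GCC;

	return format_word;
}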
@@ -32,6 +32,7 @@ void gv11b_free_subctx_header(struct channel_gk20a *c);
 int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va);
 
 void gv11b_init_subcontext_pdb(struct vm_gk20a *vm,
-		struct nvgpu_mem *inst_block);
+		struct nvgpu_mem *inst_block,
+		bool replayable);
 
 #endif /* __SUBCONTEXT_GV11B_H__ */