gpu: nvgpu: remove nvgpu_next files

Remove all nvgpu_next files and move the code into corresponding
nvgpu files.

Merge nvgpu-next-*.yaml files into the corresponding nvgpu-*.yaml files.

Jira NVGPU-4771

Change-Id: I595311be3c7bbb4f6314811e68712ff01763801e
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2547557
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Antony Clince Alex
2021-06-19 10:17:51 +00:00
committed by mobile promotions
parent c7d43f5292
commit f9cac0c64d
126 changed files with 2351 additions and 4554 deletions

View File

@@ -23,10 +23,72 @@
#ifndef NVGPU_CIC_H
#define NVGPU_CIC_H
#include <nvgpu/types.h>
#include <nvgpu/static_analysis.h>
#include <nvgpu/log.h>
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_cic.h"
#define U32_BITS 32U
#define DIV_BY_U32_BITS(x) ((x) / U32_BITS)
#define MOD_BY_U32_BITS(x) ((x) % U32_BITS)
#define RESET_ID_TO_REG_IDX(x) DIV_BY_U32_BITS((x))
#define RESET_ID_TO_REG_BIT(x) MOD_BY_U32_BITS((x))
#define RESET_ID_TO_REG_MASK(x) BIT32(RESET_ID_TO_REG_BIT((x)))
#define GPU_VECTOR_TO_LEAF_REG(i) DIV_BY_U32_BITS((i))
#define GPU_VECTOR_TO_LEAF_BIT(i) MOD_BY_U32_BITS((i))
#define GPU_VECTOR_TO_LEAF_MASK(i) (BIT32(GPU_VECTOR_TO_LEAF_BIT(i)))
#define GPU_VECTOR_TO_SUBTREE(i) ((GPU_VECTOR_TO_LEAF_REG(i)) / 2U)
#define GPU_VECTOR_TO_LEAF_SHIFT(i) \
(nvgpu_safe_mult_u32(((GPU_VECTOR_TO_LEAF_REG(i)) % 2U), 32U))
#define HOST2SOC_0_SUBTREE 0U
#define HOST2SOC_1_SUBTREE 1U
#define HOST2SOC_2_SUBTREE 2U
#define HOST2SOC_3_SUBTREE 3U
#define HOST2SOC_NUM_SUBTREE 4U
#define HOST2SOC_SUBTREE_TO_TOP_IDX(i) ((i) / 32U)
#define HOST2SOC_SUBTREE_TO_TOP_BIT(i) ((i) % 32U)
#define HOST2SOC_SUBTREE_TO_LEAF0(i) \
(nvgpu_safe_mult_u32((i), 2U))
#define HOST2SOC_SUBTREE_TO_LEAF1(i) \
(nvgpu_safe_add_u32((nvgpu_safe_mult_u32((i), 2U)), 1U))
#define STALL_SUBTREE_TOP_IDX 0U
#define STALL_SUBTREE_TOP_BITS \
((BIT32(HOST2SOC_SUBTREE_TO_TOP_BIT(HOST2SOC_1_SUBTREE))) | \
(BIT32(HOST2SOC_SUBTREE_TO_TOP_BIT(HOST2SOC_2_SUBTREE))) | \
(BIT32(HOST2SOC_SUBTREE_TO_TOP_BIT(HOST2SOC_3_SUBTREE))))
/**
* These should not contradict NVGPU_CIC_INTR_UNIT_* defines.
*/
#define NVGPU_CIC_INTR_UNIT_MMU_FAULT_ECC_ERROR 10U
#define NVGPU_CIC_INTR_UNIT_MMU_NON_REPLAYABLE_FAULT_ERROR 11U
#define NVGPU_CIC_INTR_UNIT_MMU_REPLAYABLE_FAULT_ERROR 12U
#define NVGPU_CIC_INTR_UNIT_MMU_NON_REPLAYABLE_FAULT 13U
#define NVGPU_CIC_INTR_UNIT_MMU_REPLAYABLE_FAULT 14U
#define NVGPU_CIC_INTR_UNIT_MMU_INFO_FAULT 15U
#define NVGPU_CIC_INTR_UNIT_RUNLIST_TREE_0 16U
#define NVGPU_CIC_INTR_UNIT_RUNLIST_TREE_1 17U
#define NVGPU_CIC_INTR_UNIT_GR_STALL 18U
#define NVGPU_CIC_INTR_UNIT_CE_STALL 19U
#define NVGPU_CIC_INTR_UNIT_MAX 20U
#define NVGPU_CIC_INTR_VECTORID_SIZE_MAX 32U
#define NVGPU_CIC_INTR_VECTORID_SIZE_ONE 1U
#define RUNLIST_INTR_TREE_0 0U
#define RUNLIST_INTR_TREE_1 1U
void nvgpu_cic_intr_unit_vectorid_init(struct gk20a *g, u32 unit, u32 *vectorid,
u32 num_entries);
bool nvgpu_cic_intr_is_unit_info_valid(struct gk20a *g, u32 unit);
bool nvgpu_cic_intr_get_unit_info(struct gk20a *g, u32 unit, u32 *subtree,
u64 *subtree_mask);
#endif
struct nvgpu_err_desc;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -103,6 +103,16 @@
#define TURING_CHANNEL_GPFIFO_A 0xC46FU
#define TURING_COMPUTE_A 0xC5C0U
#define TURING_DMA_COPY_A 0xC5B5U
#define AMPERE_SMC_PARTITION_REF 0xC637U
#define AMPERE_B 0xC797U
#define AMPERE_A 0xC697U
#define AMPERE_DMA_COPY_A 0xC6B5U
#define AMPERE_DMA_COPY_B 0xC7B5U
#define AMPERE_COMPUTE_A 0xC6C0U
#define AMPERE_COMPUTE_B 0xC7C0U
#define AMPERE_CHANNEL_GPFIFO_A 0xC56FU
#define AMPERE_CHANNEL_GPFIFO_B 0xC76FU
#endif
#endif /* NVGPU_CLASS_H */

View File

@@ -31,10 +31,7 @@
#include <nvgpu/types.h>
#include <nvgpu/list.h>
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_device.h"
#endif
#include <nvgpu/pbdma.h>
struct gk20a;
@@ -93,6 +90,38 @@ struct gk20a;
#define NVGPU_DEVICE_TOKEN_INIT 0U
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_HAL_NON_FUSA)
/**
 * Additional per-device information for newer chips; only compiled in
 * CONFIG_NVGPU_NON_FUSA && CONFIG_NVGPU_HAL_NON_FUSA builds.
 */
struct nvgpu_device_next {
	/**
	 * True if the device is a method engine behind host.
	 */
	bool engine;
	/**
	 * Runlist Engine ID; only valid if #engine is true.
	 */
	u32 rleng_id;
	/**
	 * Runlist PRI base - byte aligned based address. CHRAM offset can
	 * be computed from this.
	 */
	u32 rl_pri_base;
	/**
	 * PBDMA info for this device. It may contain multiple PBDMAs as
	 * there can now be multiple PBDMAs per runlist.
	 *
	 * This is in some ways awkward; devices seem to be more directly
	 * linked to runlists; runlists in turn have PBDMAs. Granted that
	 * means there's a computable relation between devices and PBDMAs
	 * it may make sense to not have this link.
	 */
	struct nvgpu_next_pbdma_info pbdma_info;
};
#endif
/**
* Structure definition for storing information for the devices and the engines
* available on the chip.

View File

@@ -173,7 +173,10 @@ struct nvgpu_ecc {
/** SM icache uncorrected error count. */
struct nvgpu_ecc_stat **sm_icache_ecc_uncorrected_err_count;
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/gr/nvgpu_next_gr_ecc.h"
/** SM RAMS corrected error count. */
struct nvgpu_ecc_stat **sm_rams_ecc_corrected_err_count;
/** SM RAMS uncorrected error count. */
struct nvgpu_ecc_stat **sm_rams_ecc_uncorrected_err_count;
#endif
/** GCC l1.5-cache corrected error count. */
@@ -226,7 +229,18 @@ struct nvgpu_ecc {
/** hubmmu fillunit uncorrected error count. */
struct nvgpu_ecc_stat *mmu_fillunit_ecc_uncorrected_err_count;
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_ecc.h"
/* Leave extra tab to fit into nvgpu_ecc.fb structure */
struct nvgpu_ecc_stat *mmu_l2tlb_ecc_corrected_unique_err_count;
/** hubmmu l2tlb uncorrected unique error count. */
struct nvgpu_ecc_stat *mmu_l2tlb_ecc_uncorrected_unique_err_count;
/** hubmmu hubtlb corrected unique error count. */
struct nvgpu_ecc_stat *mmu_hubtlb_ecc_corrected_unique_err_count;
/** hubmmu hubtlb uncorrected unique error count. */
struct nvgpu_ecc_stat *mmu_hubtlb_ecc_uncorrected_unique_err_count;
/** hubmmu fillunit corrected unique error count. */
struct nvgpu_ecc_stat *mmu_fillunit_ecc_corrected_unique_err_count;
/** hubmmu fillunit uncorrected unique error count. */
struct nvgpu_ecc_stat *mmu_fillunit_ecc_uncorrected_unique_err_count;
#endif
} fb;

View File

@@ -23,10 +23,6 @@
#ifndef NVGPU_ENGINE_STATUS_H
#define NVGPU_ENGINE_STATUS_H
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_engine_status.h"
#endif
/**
* @file
*
@@ -95,13 +91,20 @@ enum nvgpu_engine_status_ctx_status {
NVGPU_CTX_STATUS_CTXSW_SWITCH,
};
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_HAL_NON_FUSA)
/**
 * Ampere+ engine status additions; embedded in struct
 * nvgpu_engine_status_info when CONFIG_NVGPU_NON_FUSA is set.
 */
struct nvgpu_next_engine_status_info {
	/** Engine status_1 h/w register's read value. */
	u32 reg1_data;
};
#endif
struct nvgpu_engine_status_info {
/** Engine status h/w register's read value. */
u32 reg_data;
#if defined(CONFIG_NVGPU_NON_FUSA)
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
/* nvgpu next engine status additions */
struct nvgpu_next_engine_status_info nvgpu_next;
/* Ampere+ engine status additions */
struct nvgpu_next_engine_status_info nvgpu_next;
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
#endif
/** Channel or tsg id that is currently assigned to the engine. */

View File

@@ -30,9 +30,15 @@
#include <nvgpu/types.h>
struct gk20a;
struct nvgpu_device;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_engines.h"
#define ENGINE_PBDMA_INSTANCE0 0U
int nvgpu_next_engine_init_one_dev(struct gk20a *g,
const struct nvgpu_device *dev);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -35,7 +35,14 @@ struct gk20a;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_errata.h"
#define ERRATA_FLAGS_NEXT \
/* GA100 */ \
DEFINE_ERRATA(NVGPU_ERRATA_200601972, "GA100", "LTC TSTG"), \
/* GA10B */ \
DEFINE_ERRATA(NVGPU_ERRATA_2969956, "GA10B", "FMODEL FB LTCS"), \
DEFINE_ERRATA(NVGPU_ERRATA_200677649, "GA10B", "UCODE"), \
DEFINE_ERRATA(NVGPU_ERRATA_3154076, "GA10B", "PROD VAL"), \
DEFINE_ERRATA(NVGPU_ERRATA_3288192, "GA10B", "L4 SCF NOT SUPPORTED"),
#else
#define ERRATA_FLAGS_NEXT
#endif

View File

@@ -24,7 +24,44 @@
#define NVGPU_FB_H
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_fb.h"
/* VAB track all accesses (read and write) */
#define NVGPU_VAB_MODE_ACCESS BIT32(0U)
/* VAB track only writes (writes and read-modify-writes) */
#define NVGPU_VAB_MODE_DIRTY BIT32(1U)
/* No change to VAB logging with VPR setting requested */
#define NVGPU_VAB_LOGGING_VPR_NONE 0U
/* VAB logging disabled if vpr IN_USE=1, regardless of PROTECTED_MODE */
#define NVGPU_VAB_LOGGING_VPR_IN_USE_DISABLED BIT32(0U)
/* VAB logging disabled if vpr PROTECTED_MODE=1, regardless of IN_USE */
#define NVGPU_VAB_LOGGING_VPR_PROTECTED_DISABLED BIT32(1U)
/* VAB logging enabled regardless of IN_USE and PROTECTED_MODE */
#define NVGPU_VAB_LOGGING_VPR_ENABLED BIT32(2U)
/* VAB logging disabled regardless of IN_USE and PROTECTED_MODE */
#define NVGPU_VAB_LOGGING_VPR_DISABLED BIT32(3U)
/**
 * User-supplied description of one VAB (Virtual Access Buffer) range
 * checker, passed to gops_fb_vab.reserve().
 */
struct nvgpu_vab_range_checker {
	/*
	 * in: starting physical address. Must be aligned by
	 * 1 << (granularity_shift + bitmask_size_shift) where
	 * bitmask_size_shift is a HW specific constant.
	 */
	u64 start_phys_addr;
	/* in: log2 of coverage granularity per bit */
	u8 granularity_shift;
	/* reserved; pads the structure to 16 bytes */
	u8 reserved[7];
};
/** Per-GPU VAB (Virtual Access Buffer) tracking state. */
struct nvgpu_vab {
	/* number of range checkers requested by the user via reserve() */
	u32 user_num_range_checkers;
	/* VAB buffer memory */
	struct nvgpu_mem buffer;
};
int nvgpu_fb_vab_init_hal(struct gk20a *g);
int nvgpu_fb_vab_teardown_hal(struct gk20a *g);
#endif
/**

View File

@@ -33,7 +33,43 @@ struct gk20a;
#include <nvgpu/errno.h>
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_fuse.h"
/**
 * Decoded ECC feature-override fuse state; filled in by
 * gops_fuse.read_feature_override_ecc().
 */
struct nvgpu_fuse_feature_override_ecc {
	/* Fields decoded from the override_ecc register: */
	/** sm_lrf enable */
	bool sm_lrf_enable;
	/** sm_lrf override */
	bool sm_lrf_override;
	/** sm_l1_data enable */
	bool sm_l1_data_enable;
	/** sm_l1_data override */
	bool sm_l1_data_override;
	/** sm_l1_tag enable */
	bool sm_l1_tag_enable;
	/** sm_l1_tag override */
	bool sm_l1_tag_override;
	/** ltc enable */
	bool ltc_enable;
	/** ltc override */
	bool ltc_override;
	/** dram enable */
	bool dram_enable;
	/** dram override */
	bool dram_override;
	/** sm_cbu enable */
	bool sm_cbu_enable;
	/** sm_cbu override */
	bool sm_cbu_override;
	/* Fields decoded from the override_ecc_1 register: */
	/** sm_l0_icache enable */
	bool sm_l0_icache_enable;
	/** sm_l0_icache override */
	bool sm_l0_icache_override;
	/** sm_l1_icache enable */
	bool sm_l1_icache_enable;
	/** sm_l1_icache override */
	bool sm_l1_icache_override;
};
#endif
#define GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK BIT32(0)

View File

@@ -267,6 +267,11 @@ struct railgate_stats {
#define GPU_LIT_MAX_RUNLISTS_SUPPORTED 49
#define GPU_LIT_NUM_LTC_LTS_SETS 50
#define GPU_LIT_NUM_LTC_LTS_WAYS 51
#define GPU_LIT_ROP_IN_GPC_BASE 52
#define GPU_LIT_ROP_IN_GPC_SHARED_BASE 53
#define GPU_LIT_ROP_IN_GPC_PRI_SHARED_IDX 54
#define GPU_LIT_ROP_IN_GPC_STRIDE 55
/** @endcond */
/** Macro to get litter values corresponding to the litter defines. */

View File

@@ -147,7 +147,7 @@ struct gops_ce {
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_ce.h"
void (*intr_retrigger)(struct gk20a *g, u32 inst_id);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -62,7 +62,20 @@ struct gops_cg {
void (*blcg_xbar_load_gating_prod)(struct gk20a *g, bool prod);
void (*blcg_hshub_load_gating_prod)(struct gk20a *g, bool prod);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_cg.h"
void (*slcg_runlist_load_gating_prod)(struct gk20a *g, bool prod);
void (*blcg_runlist_load_gating_prod)(struct gk20a *g, bool prod);
/* Ring station slcg prod gops */
void (*slcg_rs_ctrl_fbp_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_ctrl_gpc_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_ctrl_sys_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_fbp_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_gpc_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_sys_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_timer_load_gating_prod)(struct gk20a *g, bool prod);
void (*elcg_ce_load_gating_prod)(struct gk20a *g, bool prod);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
};

View File

@@ -103,7 +103,9 @@ struct gops_perf {
int (*wait_for_idle_pmm_routers)(struct gk20a *g);
int (*wait_for_idle_pma)(struct gk20a *g);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_perf.h"
void (*enable_hs_streaming)(struct gk20a *g, bool enable);
void (*reset_hs_streaming_credits)(struct gk20a *g);
void (*enable_pmasys_legacy_mode)(struct gk20a *g, bool enable);
#endif
};
struct gops_perfbuf {

View File

@@ -162,7 +162,41 @@ struct gops_fb_ecc {
};
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_fb_vab.h"
struct nvgpu_vab_range_checker;
/**
 * HAL operations for the Virtual Access Buffer (VAB). See the
 * NVGPU_VAB_MODE_* and NVGPU_VAB_LOGGING_VPR_* flags for the mode
 * values accepted by reserve().
 */
struct gops_fb_vab {
	/**
	 * @brief Initialize VAB
	 *
	 */
	int (*init)(struct gk20a *g);
	/**
	 * @brief Initialize VAB range checkers and enable VAB tracking
	 *
	 */
	int (*reserve)(struct gk20a *g, u32 vab_mode, u32 num_range_checkers,
		struct nvgpu_vab_range_checker *vab_range_checker);
	/**
	 * @brief Trigger VAB dump, copy buffer to user and clear
	 *
	 */
	int (*dump_and_clear)(struct gk20a *g, u64 *user_buf,
		u64 user_buf_size);
	/**
	 * @brief Disable VAB
	 *
	 */
	int (*release)(struct gk20a *g);
	/**
	 * @brief Free VAB resources
	 *
	 */
	int (*teardown)(struct gk20a *g);
};
#endif
/**
@@ -441,7 +475,14 @@ struct gops_fb {
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_fb.h"
u32 (*get_num_active_ltcs)(struct gk20a *g);
#ifdef CONFIG_NVGPU_MIG
int (*config_veid_smc_map)(struct gk20a *g, bool enable);
int (*set_smc_eng_config)(struct gk20a *g, bool enable);
int (*set_remote_swizid)(struct gk20a *g, bool enable);
#endif
struct gops_fb_vab vab;
#endif
#ifdef CONFIG_NVGPU_DGPU

View File

@@ -215,7 +215,7 @@ struct gops_fifo {
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_fifo.h"
void (*runlist_intr_retrigger)(struct gk20a *g, u32 intr_tree);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -224,7 +224,12 @@ struct gops_fuse {
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_fuse.h"
void (*write_feature_override_ecc)(struct gk20a *g, u32 val);
void (*write_feature_override_ecc_1)(struct gk20a *g, u32 val);
void (*read_feature_override_ecc)(struct gk20a *g,
struct nvgpu_fuse_feature_override_ecc *ecc_feature);
u32 (*fuse_opt_sm_ttu_en)(struct gk20a *g);
u32 (*opt_sec_source_isolation_en)(struct gk20a *g);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
};

View File

@@ -454,7 +454,8 @@ struct gops_gr_intr {
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_gr_intr.h"
void (*retrigger)(struct gk20a *g);
u32 (*enable_mask)(struct gk20a *g);
#endif
int (*handle_fecs_error)(struct gk20a *g,
struct nvgpu_channel *ch,
@@ -835,7 +836,11 @@ struct gops_gr_init {
bool (*is_allowed_sw_bundle)(struct gk20a *g,
u32 bundle_addr, u32 bundle_value, int *context);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_gr_init.h"
void (*auto_go_idle)(struct gk20a *g, bool enable);
void (*eng_config)(struct gk20a *g);
int (*reset_gpcs)(struct gk20a *g);
int (*sm_id_config_early)(struct gk20a *g,
struct nvgpu_gr_config *config);
#endif
/** @endcond */
};
@@ -967,7 +972,21 @@ struct gops_gr_ctxsw_prog {
u32 aperture_mask);
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_gr_ctxsw_prog.h"
#ifdef CONFIG_NVGPU_DEBUGGER
u32 (*hw_get_main_header_size)(void);
u32 (*hw_get_gpccs_header_stride)(void);
u32 (*get_compute_sysreglist_offset)(u32 *fecs_hdr);
u32 (*get_gfx_sysreglist_offset)(u32 *fecs_hdr);
u32 (*get_ltsreglist_offset)(u32 *fecs_hdr);
u32 (*get_compute_gpcreglist_offset)(u32 *gpccs_hdr);
u32 (*get_gfx_gpcreglist_offset)(u32 *gpccs_hdr);
u32 (*get_compute_tpcreglist_offset)(u32 *gpccs_hdr, u32 tpc_num);
u32 (*get_gfx_tpcreglist_offset)(u32 *gpccs_hdr, u32 tpc_num);
u32 (*get_compute_ppcreglist_offset)(u32 *gpccs_hdr);
u32 (*get_gfx_ppcreglist_offset)(u32 *gpccs_hdr);
u32 (*get_compute_etpcreglist_offset)(u32 *gpccs_hdr);
u32 (*get_gfx_etpcreglist_offset)(u32 *gpccs_hdr);
#endif
#endif
};
/** @endcond */
@@ -1283,7 +1302,8 @@ struct gops_gr {
struct gops_gr_zcull zcull;
#endif /* CONFIG_NVGPU_GRAPHICS */
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_gr.h"
void (*vab_init)(struct gk20a *g, u32 vab_reg);
void (*vab_release)(struct gk20a *g, u32 vab_reg);
#endif
/** @endcond */
};

View File

@@ -77,8 +77,16 @@ struct gops_grmgr {
void (*get_gpcgrp_count)(struct gk20a *g);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_MIG)
#include "include/nvgpu/nvgpu_next_gops_grmgr.h"
u32 (*get_max_sys_pipes)(struct gk20a *g);
const struct nvgpu_mig_gpu_instance_config* (*get_mig_config_ptr)(
struct gk20a *g);
u32 (*get_allowed_swizzid_size)(struct gk20a *g);
int (*get_gpc_instance_gpcgrp_id)(struct gk20a *g,
u32 gpu_instance_id, u32 gr_syspipe_id, u32 *gpcgrp_id);
int (*get_mig_gpu_instance_config)(struct gk20a *g,
const char **config_name, u32 *num_config_supported);
void (*load_timestamp_prod)(struct gk20a *g);
#endif
};
#endif /* NVGPU_NEXT_GOPS_GRMGR_H */
#endif /* NVGPU_GOPS_GRMGR_H */

View File

@@ -53,7 +53,8 @@ struct gops_ltc_intr {
void (*configure)(struct gk20a *g);
void (*en_illegal_compstat)(struct gk20a *g, bool enable);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_ltc_intr.h"
void (*isr_extra)(struct gk20a *g, u32 ltc, u32 slice, u32 *reg_value);
void (*ltc_intr3_configure_extra)(struct gk20a *g, u32 *reg);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
};
@@ -161,7 +162,8 @@ struct gops_ltc {
int (*set_l2_sector_promotion)(struct gk20a *g, struct nvgpu_tsg *tsg,
u32 policy);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_ltc.h"
u32 (*pri_shared_addr)(struct gk20a *g, u32 addr);
void (*ltc_lts_set_mgmt_setup)(struct gk20a *g);
#endif
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -259,7 +259,36 @@ struct gops_mc {
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_mc.h"
/**
* @brief Reset HW engines.
*
* @param g [in] The GPU driver struct.
* @param devtype [in] Type of device.
*
* This function is invoked to reset the engines while initializing
* GR, CE and other engines during #nvgpu_finalize_poweron.
*
* Steps:
* - Compute reset mask for all engines of given devtype.
* - Disable given HW engines.
* - Acquire g->mc.enable_lock spinlock.
* - Read mc_device_enable_r register and clear the bits in read value
* corresponding to HW engines to be disabled.
* - Write mc_device_enable_r with the updated value.
* - Poll mc_device_enable_r to confirm register write success.
* - Release g->mc.enable_lock spinlock.
* - If GR engines are being reset, reset GPCs.
* - Enable the HW engines.
* - Acquire g->mc.enable_lock spinlock.
* - Read mc_device_enable_r register and set the bits in read value
* corresponding to HW engines to be enabled.
* - Write mc_device_enable_r with the updated value.
* - Poll mc_device_enable_r to confirm register write success.
* - Release g->mc.enable_lock spinlock.
*/
int (*reset_engines_all)(struct gk20a *g, u32 devtype);
void (*elpg_enable)(struct gk20a *g);
bool (*intr_get_unit_info)(struct gk20a *g, u32 unit);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -90,7 +90,11 @@ struct gops_pbdma {
void (*dump_status)(struct gk20a *g,
struct nvgpu_debug_context *o);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_pbdma.h"
u32 (*set_channel_info_chid)(u32 chid);
u32 (*set_intr_notify)(u32 eng_intr_vector);
u32 (*get_mmu_fault_id)(struct gk20a *g, u32 pbdma_id);
void (*pbdma_force_ce_split)(struct gk20a *g);
u32 (*get_num_of_pbdmas)(void);
#endif
};

View File

@@ -218,7 +218,7 @@ struct gops_pmu {
* @param void
*
* @return Chip specific PMU Engine Falcon2 base address.
* For NEXT_GPUID, NEXT_GPUID PMU Engine Falcon2 base address
* For Ampere+, PMU Engine Falcon2 base address
* will be returned.
*/
u32 (*falcon2_base_addr)(void);

View File

@@ -146,8 +146,10 @@ struct gops_priv_ring {
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_priv_ring.h"
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_MIG)
int (*config_gr_remap_window)(struct gk20a *g, u32 gr_syspipe_indx,
bool enable);
int (*config_gpc_rs_map)(struct gk20a *g, bool enable);
#endif
};

View File

@@ -90,7 +90,15 @@ struct gops_runlist {
void (*init_enginfo)(struct gk20a *g, struct nvgpu_fifo *f);
u32 (*get_tsg_max_timeslice)(void);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_runlist.h"
u32 (*get_runlist_id)(struct gk20a *g, u32 runlist_pri_base);
u32 (*get_engine_id_from_rleng_id)(struct gk20a *g,
u32 rleng_id, u32 runlist_pri_base);
u32 (*get_chram_bar0_offset)(struct gk20a *g, u32 runlist_pri_base);
void (*get_pbdma_info)(struct gk20a *g, u32 runlist_pri_base,
struct nvgpu_next_pbdma_info *pbdma_info);
u32 (*get_engine_intr_id)(struct gk20a *g, u32 runlist_pri_base,
u32 rleng_id);
u32 (*get_esched_fb_thread_id)(struct gk20a *g, u32 runlist_pri_base);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -53,7 +53,7 @@ struct nvgpu_gr_config;
int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config);
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/gr/nvgpu_next_fs_state.h"
int nvgpu_gr_init_sm_id_early_config(struct gk20a *g, struct nvgpu_gr_config *config);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -118,6 +118,7 @@
struct gk20a;
struct nvgpu_gr;
struct nvgpu_gr_config;
struct netlist_av_list;
/**
* @brief Allocate memory for GR struct and initialize the minimum SW
@@ -353,7 +354,8 @@ int nvgpu_gr_reset(struct gk20a *g);
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/gr/nvgpu_next_gr.h"
void nvgpu_gr_init_reset_enable_hw_non_ctx_local(struct gk20a *g);
void nvgpu_gr_init_reset_enable_hw_non_ctx_global(struct gk20a *g);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -1,31 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_FS_STATE_H
#define NVGPU_NEXT_FS_STATE_H
struct gk20a;
struct nvgpu_gr_config;
int nvgpu_gr_init_sm_id_early_config(struct gk20a *g, struct nvgpu_gr_config *config);
#endif

View File

@@ -1,37 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GR_H
#define NVGPU_NEXT_GR_H
/**
* @file
*
*/
#include <nvgpu/types.h>
struct gk20a;
struct netlist_av_list;
void nvgpu_next_gr_init_reset_enable_hw_non_ctx_local(struct gk20a *g);
void nvgpu_next_gr_init_reset_enable_hw_non_ctx_global(struct gk20a *g);
#endif /* NVGPU_NEXT_GR_H */

View File

@@ -1,30 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GR_ECC_H
#define NVGPU_NEXT_GR_ECC_H
/** SM RAMS corrected error count. */
struct nvgpu_ecc_stat **sm_rams_ecc_corrected_err_count;
/** SM RAMS uncorrected error count. */
struct nvgpu_ecc_stat **sm_rams_ecc_uncorrected_err_count;
#endif /* NVGPU_NEXT_GR_ECC_H */

View File

@@ -117,14 +117,11 @@
#include <nvgpu/atomic.h>
#include <nvgpu/lock.h>
#include <nvgpu/bitops.h>
#include <nvgpu/cic.h>
struct gk20a;
struct nvgpu_device;
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_mc.h"
#endif
#define MC_ENABLE_DELAY_US 20U
#define MC_RESET_DELAY_US 20U
#define MC_RESET_CE_DELAY_US 500U
@@ -163,6 +160,44 @@ struct nvgpu_device;
/** Bit offset of the Architecture field in the HW version register */
#define NVGPU_GPU_ARCHITECTURE_SHIFT 4U
#if defined(CONFIG_NVGPU_NON_FUSA)
/** Per-unit interrupt routing information. */
struct nvgpu_intr_unit_info {
	/*
	 * top bit 0 -> subtree 0 -> leaf0, leaf1 -> leaf 0, 1
	 * top bit 1 -> subtree 1 -> leaf0, leaf1 -> leaf 2, 3
	 * top bit 2 -> subtree 2 -> leaf0, leaf1 -> leaf 4, 5
	 * top bit 3 -> subtree 3 -> leaf0, leaf1 -> leaf 6, 7
	 */
	/**
	 * h/w defined vectorids for the s/w defined intr unit.
	 * Up to 32 vectorids (32 bits of a leaf register) are supported for
	 * the intr units that support multiple vector ids.
	 */
	u32 vectorid[NVGPU_CIC_INTR_VECTORID_SIZE_MAX];
	/** number of vectorids supported by the intr unit */
	u32 vectorid_size;
	u32 subtree; /**< subtree number corresponding to vectorid */
	u64 subtree_mask; /**< leaf1_leaf0 value for the intr unit */
	/**
	 * This flag will be set to true after all the fields
	 * of nvgpu_intr_unit_info are configured.
	 */
	bool valid;
};
/** Interrupt-tree state for newer chips (CONFIG_NVGPU_NON_FUSA builds). */
struct nvgpu_next_mc {
	/**
	 * intr info array indexed by s/w defined intr unit name
	 * (NVGPU_CIC_INTR_UNIT_*).
	 */
	struct nvgpu_intr_unit_info intr_unit_info[NVGPU_CIC_INTR_UNIT_MAX];
	/**
	 * Leaf mask per subtree. Subtree is a pair of leaf registers.
	 * Each subtree corresponds to a bit in intr_top register.
	 */
	u64 subtree_mask_restore[HOST2SOC_NUM_SUBTREE];
};
#endif
/**
* This struct holds the variables needed to manage the configuration and
* interrupt handling of the units/engines.

View File

@@ -457,7 +457,8 @@ struct mm_gk20a {
struct nvgpu_mem mmu_rd_mem;
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_mm.h"
/** VAB struct */
struct nvgpu_vab vab;
#endif
};

View File

@@ -29,6 +29,7 @@
#include <nvgpu/types.h>
struct gk20a;
struct nvgpu_netlist_vars;
/**
* Description of netlist Address-Value(av) structure.
@@ -347,11 +348,6 @@ u32 *nvgpu_netlist_get_gpccs_inst_list(struct gk20a *g);
*/
u32 *nvgpu_netlist_get_gpccs_data_list(struct gk20a *g);
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
#include <nvgpu/nvgpu_next_netlist.h>
#endif
#ifdef CONFIG_NVGPU_DEBUGGER
struct netlist_aiv_list *nvgpu_netlist_get_sys_ctxsw_regs(struct gk20a *g);
struct netlist_aiv_list *nvgpu_netlist_get_gpc_ctxsw_regs(struct gk20a *g);
@@ -410,6 +406,60 @@ struct netlist_u32_list *nvgpu_netlist_get_gpccs_data(struct gk20a *g);
void nvgpu_netlist_vars_set_dynamic(struct gk20a *g, bool set);
void nvgpu_netlist_vars_set_buffer_size(struct gk20a *g, u32 size);
void nvgpu_netlist_vars_set_regs_base_index(struct gk20a *g, u32 index);
bool nvgpu_next_netlist_handle_sw_bundles_region_id(struct gk20a *g,
u32 region_id, u8 *src, u32 size,
struct nvgpu_netlist_vars *netlist_vars, int *err_code);
void nvgpu_next_netlist_deinit_ctx_vars(struct gk20a *g);
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_local_compute_load_av_list(
struct gk20a *g);
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_global_compute_load_av_list(
struct gk20a *g);
#ifdef CONFIG_NVGPU_GRAPHICS
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_local_gfx_load_av_list(
struct gk20a *g);
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_global_gfx_load_av_list(
struct gk20a *g);
#endif /* CONFIG_NVGPU_GRAPHICS */
#ifdef CONFIG_NVGPU_DEBUGGER
bool nvgpu_next_netlist_handle_debugger_region_id(struct gk20a *g,
u32 region_id, u8 *src, u32 size,
struct nvgpu_netlist_vars *netlist_vars, int *err_code);
void nvgpu_next_netlist_deinit_ctxsw_regs(struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_sys_compute_ctxsw_regs(
struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_gpc_compute_ctxsw_regs(
struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_tpc_compute_ctxsw_regs(
struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_ppc_compute_ctxsw_regs(
struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_etpc_compute_ctxsw_regs(
struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_lts_ctxsw_regs(
struct gk20a *g);
#ifdef CONFIG_NVGPU_GRAPHICS
struct netlist_aiv_list *nvgpu_next_netlist_get_sys_gfx_ctxsw_regs(
struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_gpc_gfx_ctxsw_regs(
struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_tpc_gfx_ctxsw_regs(
struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_ppc_gfx_ctxsw_regs(
struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_etpc_gfx_ctxsw_regs(
struct gk20a *g);
#endif /* CONFIG_NVGPU_GRAPHICS */
u32 nvgpu_next_netlist_get_sys_ctxsw_regs_count(struct gk20a *g);
u32 nvgpu_next_netlist_get_ppc_ctxsw_regs_count(struct gk20a *g);
u32 nvgpu_next_netlist_get_gpc_ctxsw_regs_count(struct gk20a *g);
u32 nvgpu_next_netlist_get_tpc_ctxsw_regs_count(struct gk20a *g);
u32 nvgpu_next_netlist_get_etpc_ctxsw_regs_count(struct gk20a *g);
void nvgpu_next_netlist_print_ctxsw_reg_info(struct gk20a *g);
#endif /* CONFIG_NVGPU_DEBUGGER */
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -101,6 +101,11 @@ struct mmu_fault_info;
#define GPU_SM_L1_TAG_S2R_PIXPRF_ECC_UNCORRECTED (17U)
#define GPU_SM_MACHINE_CHECK_ERROR (18U)
#define GPU_SM_ICACHE_L1_PREDECODE_ECC_UNCORRECTED (20U)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#define GPU_SM_RAMS_ECC_CORRECTED (21U)
#define GPU_SM_RAMS_ECC_UNCORRECTED (22U)
#endif
/**
* @}
*/
@@ -814,8 +819,4 @@ void nvgpu_report_mmu_err(struct gk20a *g, u32 hw_unit,
void gr_intr_report_ctxsw_error(struct gk20a *g, u32 err_type, u32 chid,
u32 mailbox_value);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_err.h"
#endif
#endif /* NVGPU_NVGPU_ERR_H */

View File

@@ -1,95 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_CIC_H
#define NVGPU_NEXT_CIC_H
/**
* @file
*
* Declare intr specific struct and defines.
*/
#include <nvgpu/types.h>
#include <nvgpu/static_analysis.h>
#define U32_BITS 32U
#define DIV_BY_U32_BITS(x) ((x) / U32_BITS)
#define MOD_BY_U32_BITS(x) ((x) % U32_BITS)
#define RESET_ID_TO_REG_IDX(x) DIV_BY_U32_BITS((x))
#define RESET_ID_TO_REG_BIT(x) MOD_BY_U32_BITS((x))
#define RESET_ID_TO_REG_MASK(x) BIT32(RESET_ID_TO_REG_BIT((x)))
#define GPU_VECTOR_TO_LEAF_REG(i) DIV_BY_U32_BITS((i))
#define GPU_VECTOR_TO_LEAF_BIT(i) MOD_BY_U32_BITS((i))
#define GPU_VECTOR_TO_LEAF_MASK(i) (BIT32(GPU_VECTOR_TO_LEAF_BIT(i)))
#define GPU_VECTOR_TO_SUBTREE(i) ((GPU_VECTOR_TO_LEAF_REG(i)) / 2U)
#define GPU_VECTOR_TO_LEAF_SHIFT(i) \
(nvgpu_safe_mult_u32(((GPU_VECTOR_TO_LEAF_REG(i)) % 2U), 32U))
#define HOST2SOC_0_SUBTREE 0U
#define HOST2SOC_1_SUBTREE 1U
#define HOST2SOC_2_SUBTREE 2U
#define HOST2SOC_3_SUBTREE 3U
#define HOST2SOC_NUM_SUBTREE 4U
#define HOST2SOC_SUBTREE_TO_TOP_IDX(i) ((i) / 32U)
#define HOST2SOC_SUBTREE_TO_TOP_BIT(i) ((i) % 32U)
#define HOST2SOC_SUBTREE_TO_LEAF0(i) \
(nvgpu_safe_mult_u32((i), 2U))
#define HOST2SOC_SUBTREE_TO_LEAF1(i) \
(nvgpu_safe_add_u32((nvgpu_safe_mult_u32((i), 2U)), 1U))
#define STALL_SUBTREE_TOP_IDX 0U
#define STALL_SUBTREE_TOP_BITS \
((BIT32(HOST2SOC_SUBTREE_TO_TOP_BIT(HOST2SOC_1_SUBTREE))) | \
(BIT32(HOST2SOC_SUBTREE_TO_TOP_BIT(HOST2SOC_2_SUBTREE))) | \
(BIT32(HOST2SOC_SUBTREE_TO_TOP_BIT(HOST2SOC_3_SUBTREE))))
/**
* These should not contradict NVGPU_CIC_INTR_UNIT_* defines.
*/
#define NVGPU_CIC_INTR_UNIT_MMU_FAULT_ECC_ERROR 10U
#define NVGPU_CIC_INTR_UNIT_MMU_NON_REPLAYABLE_FAULT_ERROR 11U
#define NVGPU_CIC_INTR_UNIT_MMU_REPLAYABLE_FAULT_ERROR 12U
#define NVGPU_CIC_INTR_UNIT_MMU_NON_REPLAYABLE_FAULT 13U
#define NVGPU_CIC_INTR_UNIT_MMU_REPLAYABLE_FAULT 14U
#define NVGPU_CIC_INTR_UNIT_MMU_INFO_FAULT 15U
#define NVGPU_CIC_INTR_UNIT_RUNLIST_TREE_0 16U
#define NVGPU_CIC_INTR_UNIT_RUNLIST_TREE_1 17U
#define NVGPU_CIC_INTR_UNIT_GR_STALL 18U
#define NVGPU_CIC_INTR_UNIT_CE_STALL 19U
#define NVGPU_CIC_INTR_UNIT_MAX 20U
#define NVGPU_CIC_INTR_VECTORID_SIZE_MAX 32U
#define NVGPU_CIC_INTR_VECTORID_SIZE_ONE 1U
#define RUNLIST_INTR_TREE_0 0U
#define RUNLIST_INTR_TREE_1 1U
void nvgpu_cic_intr_unit_vectorid_init(struct gk20a *g, u32 unit, u32 *vectorid,
u32 num_entries);
bool nvgpu_cic_intr_is_unit_info_valid(struct gk20a *g, u32 unit);
bool nvgpu_cic_intr_get_unit_info(struct gk20a *g, u32 unit, u32 *subtree,
u64 *subtree_mask);
#endif /* NVGPU_NEXT_CIC_H */

View File

@@ -1,36 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_CLASS_H
#define NVGPU_NEXT_CLASS_H
#define AMPERE_SMC_PARTITION_REF 0xC637U
#define AMPERE_B 0xC797U
#define AMPERE_A 0xC697U
#define AMPERE_DMA_COPY_A 0xC6B5U
#define AMPERE_DMA_COPY_B 0xC7B5U
#define AMPERE_COMPUTE_A 0xC6C0U
#define AMPERE_COMPUTE_B 0xC7C0U
#define AMPERE_CHANNEL_GPFIFO_A 0xC56FU
#define AMPERE_CHANNEL_GPFIFO_B 0xC76FU
#endif /* NVGPU_NEXT_CLASS_H */

View File

@@ -1,58 +0,0 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_TOP_H
#define NVGPU_NEXT_TOP_H
#include <nvgpu/pbdma.h>
struct nvgpu_device_next {
/**
* True if the device is an method engine behind host.
*/
bool engine;
/**
* Runlist Engine ID; only valid if #engine is true.
*/
u32 rleng_id;
/**
* Runlist PRI base - byte aligned based address. CHRAM offset can
* be computed from this.
*/
u32 rl_pri_base;
/**
* PBDMA info for this device. It may contain multiple PBDMAs as
* there can now be multiple PBDMAs per runlist.
*
* This is in some ways awkward; devices seem to be more directly
* linked to runlists; runlists in turn have PBDMAs. Granted that
* means there's a computable relation between devices and PBDMAs
* it may make sense to not have this link.
*/
struct nvgpu_next_pbdma_info pbdma_info;
};
#endif

View File

@@ -1,38 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_ECC_H
#define NVGPU_NEXT_ECC_H
/* Leave extra tab to fit into nvgpu_ecc.fb structure */
struct nvgpu_ecc_stat *mmu_l2tlb_ecc_corrected_unique_err_count;
/** hubmmu l2tlb uncorrected unique error count. */
struct nvgpu_ecc_stat *mmu_l2tlb_ecc_uncorrected_unique_err_count;
/** hubmmu hubtlb corrected unique error count. */
struct nvgpu_ecc_stat *mmu_hubtlb_ecc_corrected_unique_err_count;
/** hubmmu hubtlb uncorrected unique error count. */
struct nvgpu_ecc_stat *mmu_hubtlb_ecc_uncorrected_unique_err_count;
/** hubmmu fillunit corrected unique error count. */
struct nvgpu_ecc_stat *mmu_fillunit_ecc_corrected_unique_err_count;
/** hubmmu fillunit uncorrected unique error count. */
struct nvgpu_ecc_stat *mmu_fillunit_ecc_uncorrected_unique_err_count;
#endif /* NVGPU_NEXT_ECC_H */

View File

@@ -1,37 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_ENGINE_STATUS_H
#define NVGPU_NEXT_ENGINE_STATUS_H
/**
* @file
*
* Declare device info specific struct and defines.
*/
#include <nvgpu/types.h>
struct nvgpu_next_engine_status_info {
/** Engine status_1 h/w register's read value. */
u32 reg1_data;
};
#endif /* NVGPU_NEXT_ENGINE_STATUS_H */

View File

@@ -1,41 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_ENGINES_H
#define NVGPU_NEXT_ENGINES_H
/**
* @file
*
* Declare engine info specific struct and defines.
*/
#include <nvgpu/types.h>
struct gk20a;
struct nvgpu_device;
#define ENGINE_PBDMA_INSTANCE0 0U
int nvgpu_next_engine_init_one_dev(struct gk20a *g,
const struct nvgpu_device *dev);
#endif /* NVGPU_NEXT_ENGINES_H */

View File

@@ -1,32 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_ERR_H
#define NVGPU_NEXT_ERR_H
/*
* Error IDs for SM unit.
*/
#define GPU_SM_RAMS_ECC_CORRECTED (21U)
#define GPU_SM_RAMS_ECC_UNCORRECTED (22U)
#endif /* NVGPU_NEXT_ERR_H */

View File

@@ -1,35 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_ERRATA_H
#define NVGPU_NEXT_ERRATA_H
#define ERRATA_FLAGS_NEXT \
/* GA100 */ \
DEFINE_ERRATA(NVGPU_ERRATA_200601972, "GA100", "LTC TSTG"), \
/* GA10B */ \
DEFINE_ERRATA(NVGPU_ERRATA_2969956, "GA10B", "FMODEL FB LTCS"), \
DEFINE_ERRATA(NVGPU_ERRATA_200677649, "GA10B", "UCODE"), \
DEFINE_ERRATA(NVGPU_ERRATA_3154076, "GA10B", "PROD VAL"), \
DEFINE_ERRATA(NVGPU_ERRATA_3288192, "GA10B", "L4 SCF NOT SUPPORTED"),
#endif /* NVGPU_NEXT_ERRATA_H */

View File

@@ -1,65 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_FB_H
#define NVGPU_NEXT_FB_H
/* VAB track all accesses (read and write) */
#define NVGPU_VAB_MODE_ACCESS BIT32(0U)
/* VAB track only writes (writes and read-modify-writes) */
#define NVGPU_VAB_MODE_DIRTY BIT32(1U)
/* No change to VAB logging with VPR setting requested */
#define NVGPU_VAB_LOGGING_VPR_NONE 0U
/* VAB logging disabled if vpr IN_USE=1, regardless of PROTECTED_MODE */
#define NVGPU_VAB_LOGGING_VPR_IN_USE_DISABLED BIT32(0U)
/* VAB logging disabled if vpr PROTECTED_MODE=1, regardless of IN_USE */
#define NVGPU_VAB_LOGGING_VPR_PROTECTED_DISABLED BIT32(1U)
/* VAB logging enabled regardless of IN_USE and PROTECTED_MODE */
#define NVGPU_VAB_LOGGING_VPR_ENABLED BIT32(2U)
/* VAB logging disabled regardless of IN_USE and PROTECTED_MODE */
#define NVGPU_VAB_LOGGING_VPR_DISABLED BIT32(3U)
struct nvgpu_vab_range_checker {
/*
* in: starting physical address. Must be aligned by
* 1 << (granularity_shift + bitmask_size_shift) where
* bitmask_size_shift is a HW specific constant.
*/
u64 start_phys_addr;
/* in: log2 of coverage granularity per bit */
u8 granularity_shift;
u8 reserved[7];
};
struct nvgpu_vab {
u32 user_num_range_checkers;
struct nvgpu_mem buffer;
};
int nvgpu_fb_vab_init_hal(struct gk20a *g);
int nvgpu_fb_vab_teardown_hal(struct gk20a *g);
#endif /* NVGPU_NEXT_FB_H */

View File

@@ -1,71 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_FUSE_H
#define NVGPU_NEXT_FUSE_H
/**
* @file
*
* Declare device info specific struct and defines.
*/
#include <nvgpu/types.h>
struct nvgpu_fuse_feature_override_ecc {
/** overide_ecc register feature */
/** sm_lrf enable */
bool sm_lrf_enable;
/** sm_lrf override */
bool sm_lrf_override;
/** sm_l1_data enable */
bool sm_l1_data_enable;
/** sm_l1_data overide */
bool sm_l1_data_override;
/** sm_l1_tag enable */
bool sm_l1_tag_enable;
/** sm_l1_tag overide */
bool sm_l1_tag_override;
/** ltc enable */
bool ltc_enable;
/** ltc overide */
bool ltc_override;
/** dram enable */
bool dram_enable;
/** dram overide */
bool dram_override;
/** sm_cbu enable */
bool sm_cbu_enable;
/** sm_cbu overide */
bool sm_cbu_override;
/** override_ecc_1 register feature */
/** sm_l0_icache enable */
bool sm_l0_icache_enable;
/** sm_l0_icache overide */
bool sm_l0_icache_override;
/** sm_l1_icache enable */
bool sm_l1_icache_enable;
/** sm_l1_icache overide */
bool sm_l1_icache_override;
};
#endif /* NVGPU_NEXT_FUSE_H */

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_CE_H
#define NVGPU_NEXT_GOPS_CE_H
/* Leave extra tab to fit into gops_ce structure */
void (*intr_retrigger)(struct gk20a *g, u32 inst_id);
#endif /* NVGPU_NEXT_GOPS_CE_H */

View File

@@ -1,42 +0,0 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_CG_H
#define NVGPU_NEXT_GOPS_CG_H
/* Leave extra tab to fit into gops_cg structure */
void (*slcg_runlist_load_gating_prod)(struct gk20a *g, bool prod);
void (*blcg_runlist_load_gating_prod)(struct gk20a *g, bool prod);
/* Ring station slcg prod gops */
void (*slcg_rs_ctrl_fbp_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_ctrl_gpc_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_ctrl_sys_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_fbp_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_gpc_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_rs_sys_load_gating_prod)(struct gk20a *g, bool prod);
void (*slcg_timer_load_gating_prod)(struct gk20a *g, bool prod);
void (*elcg_ce_load_gating_prod)(struct gk20a *g, bool prod);
#endif /* NVGPU_NEXT_GOPS_CG_H */

View File

@@ -1,36 +0,0 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_FB_H
#define NVGPU_NEXT_GOPS_FB_H
/* Leave extra tab to fit into gops_fb structure */
u32 (*get_num_active_ltcs)(struct gk20a *g);
#ifdef CONFIG_NVGPU_MIG
int (*config_veid_smc_map)(struct gk20a *g, bool enable);
int (*set_smc_eng_config)(struct gk20a *g, bool enable);
int (*set_remote_swizid)(struct gk20a *g, bool enable);
#endif
struct gops_fb_vab vab;
#endif /* NVGPU_NEXT_GOPS_FB_H */

View File

@@ -1,61 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_FB_VAB_H
#define NVGPU_NEXT_GOPS_FB_VAB_H
struct nvgpu_vab_range_checker;
struct gops_fb_vab {
/**
* @brief Initialize VAB
*
*/
int (*init)(struct gk20a *g);
/**
* @brief Initialize VAB range checkers and enable VAB tracking
*
*/
int (*reserve)(struct gk20a *g, u32 vab_mode, u32 num_range_checkers,
struct nvgpu_vab_range_checker *vab_range_checker);
/**
* @brief Trigger VAB dump, copy buffer to user and clear
*
*/
int (*dump_and_clear)(struct gk20a *g, u64 *user_buf,
u64 user_buf_size);
/**
* @brief Disable VAB
*
*/
int (*release)(struct gk20a *g);
/**
* @brief Free VAB resources
*
*/
int (*teardown)(struct gk20a *g);
};
#endif /* NVGPU_NEXT_GOPS_FB_VAB_H */

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_FIFO_H
#define NVGPU_NEXT_GOPS_FIFO_H
/* Leave extra tab to fit into gops_fifo structure */
void (*runlist_intr_retrigger)(struct gk20a *g, u32 intr_tree);
#endif /* NVGPU_NEXT_GOPS_FIFO_H */

View File

@@ -1,34 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_FUSE_H
#define NVGPU_NEXT_GOPS_FUSE_H
/* Leave extra tab to fit into gops_fuse structure */
void (*write_feature_override_ecc)(struct gk20a *g, u32 val);
void (*write_feature_override_ecc_1)(struct gk20a *g, u32 val);
void (*read_feature_override_ecc)(struct gk20a *g,
struct nvgpu_fuse_feature_override_ecc *ecc_feature);
u32 (*fuse_opt_sm_ttu_en)(struct gk20a *g);
u32 (*opt_sec_source_isolation_en)(struct gk20a *g);
#endif /* NVGPU_NEXT_GOPS_FUSE_H */

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_GR_H
#define NVGPU_NEXT_GOPS_GR_H
/* Leave extra tab to fit into gops_gr_intr structure */
/*
 * Initialize / release VAB tracking using register value vab_reg.
 * NOTE(review): VAB semantics are defined by the HAL implementation —
 * confirm there; only the signatures are visible here.
 */
void (*vab_init)(struct gk20a *g, u32 vab_reg);
void (*vab_release)(struct gk20a *g, u32 vab_reg);
#endif /* NVGPU_NEXT_GOPS_GR_H */

View File

@@ -1,41 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_GR_CTXSW_PROG_H
#define NVGPU_NEXT_GOPS_GR_CTXSW_PROG_H
#ifdef CONFIG_NVGPU_DEBUGGER
/* Sizes/strides of the ctxsw image headers produced by FECS/GPCCS. */
u32 (*hw_get_main_header_size)(void);
u32 (*hw_get_gpccs_header_stride)(void);
/*
 * Offsets of register lists inside a ctxsw header blob.
 * fecs_hdr / gpccs_hdr point at the raw header words; the HAL decodes
 * them and returns the offset of the requested compute/graphics list.
 */
u32 (*get_compute_sysreglist_offset)(u32 *fecs_hdr);
u32 (*get_gfx_sysreglist_offset)(u32 *fecs_hdr);
u32 (*get_ltsreglist_offset)(u32 *fecs_hdr);
u32 (*get_compute_gpcreglist_offset)(u32 *gpccs_hdr);
u32 (*get_gfx_gpcreglist_offset)(u32 *gpccs_hdr);
/* TPC lists are additionally selected by the TPC index tpc_num. */
u32 (*get_compute_tpcreglist_offset)(u32 *gpccs_hdr, u32 tpc_num);
u32 (*get_gfx_tpcreglist_offset)(u32 *gpccs_hdr, u32 tpc_num);
u32 (*get_compute_ppcreglist_offset)(u32 *gpccs_hdr);
u32 (*get_gfx_ppcreglist_offset)(u32 *gpccs_hdr);
u32 (*get_compute_etpcreglist_offset)(u32 *gpccs_hdr);
u32 (*get_gfx_etpcreglist_offset)(u32 *gpccs_hdr);
#endif
#endif /* NVGPU_NEXT_GOPS_GR_CTXSW_PROG_H */

View File

@@ -1,33 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_GR_INIT_H
#define NVGPU_NEXT_GOPS_GR_INIT_H
/* Leave extra tab to fit into gops_gr structure */
/* Enable/disable the GR auto-go-idle feature. */
void (*auto_go_idle)(struct gk20a *g, bool enable);
/* Program GR engine configuration registers (HAL-specific). */
void (*eng_config)(struct gk20a *g);
/* Reset all GPCs; returns 0 on success, negative errno otherwise. */
int (*reset_gpcs)(struct gk20a *g);
/* Early SM id programming from the GR config (before full init). */
int (*sm_id_config_early)(struct gk20a *g,
		struct nvgpu_gr_config *config);
#endif /* NVGPU_NEXT_GOPS_GR_INIT_H */

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_GR_INTR_H
#define NVGPU_NEXT_GOPS_GR_INTR_H
/* Leave extra tab to fit into gops_gr_intr structure */
/* Re-trigger a pending GR interrupt (HAL-specific mechanism). */
void (*retrigger)(struct gk20a *g);
/* Return the mask of GR interrupts to enable. */
u32 (*enable_mask)(struct gk20a *g);
#endif /* NVGPU_NEXT_GOPS_GR_INTR_H */

View File

@@ -1,37 +0,0 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_GRMGR_H
#define NVGPU_NEXT_GOPS_GRMGR_H
/* Leave extra tab to fit into gops_grmgr structure */
/* Maximum number of syspipes supported by this chip. */
u32 (*get_max_sys_pipes)(struct gk20a *g);
/* Pointer to the static MIG GPU-instance configuration table. */
const struct nvgpu_mig_gpu_instance_config* (*get_mig_config_ptr)(
		struct gk20a *g);
/* Number of swizzle ids allowed for MIG partitioning. */
u32 (*get_allowed_swizzid_size)(struct gk20a *g);
/* Resolve the GPC group id for a (gpu_instance_id, gr_syspipe_id) pair;
 * result returned through *gpcgrp_id, 0 on success. */
int (*get_gpc_instance_gpcgrp_id)(struct gk20a *g,
		u32 gpu_instance_id, u32 gr_syspipe_id, u32 *gpcgrp_id);
/* Enumerate supported MIG configs: name list and count out-params. */
int (*get_mig_gpu_instance_config)(struct gk20a *g,
		const char **config_name, u32 *num_config_supported);
/* Load production timestamp settings (HAL-specific registers). */
void (*load_timestamp_prod)(struct gk20a *g);
#endif /* NVGPU_NEXT_GOPS_GRMGR_H */

View File

@@ -1,28 +0,0 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_LTC_H
#define NVGPU_NEXT_GOPS_LTC_H
/* Leave extra tab to fit into gops_ltc structure */
/* Translate an LTC pri address to its broadcast/shared form. */
u32 (*pri_shared_addr)(struct gk20a *g, u32 addr);
/* Program LTC/LTS management setup registers (HAL-specific). */
void (*ltc_lts_set_mgmt_setup)(struct gk20a *g);
#endif /* NVGPU_NEXT_GOPS_LTC_H */

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_LTC_INTR_H
#define NVGPU_NEXT_GOPS_LTC_INTR_H
/* Leave extra tab to fit into gops_ltc structure */
/* Chip-specific extra ISR handling for one (ltc, slice); may update
 * *reg_value before it is written back by the common ISR. */
void (*isr_extra)(struct gk20a *g, u32 ltc, u32 slice, u32 *reg_value);
/* Chip-specific extra bits for the ltc_intr3 configure value in *reg. */
void (*ltc_intr3_configure_extra)(struct gk20a *g, u32 *reg);
#endif /* NVGPU_NEXT_GOPS_LTC_INTR_H */

View File

@@ -1,60 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_MC_H
#define NVGPU_NEXT_GOPS_MC_H
/* Leave extra tab to fit into gops_mc structure */
/**
 * @brief Reset HW engines.
 *
 * @param g [in] The GPU driver struct.
 * @param devtype [in] Type of device.
 *
 * This function is invoked to reset the engines while initializing
 * GR, CE and other engines during #nvgpu_finalize_poweron.
 *
 * Steps:
 * - Compute reset mask for all engines of given devtype.
 * - Disable given HW engines.
 * - Acquire g->mc.enable_lock spinlock.
 * - Read mc_device_enable_r register and clear the bits in read value
 * corresponding to HW engines to be disabled.
 * - Write mc_device_enable_r with the updated value.
 * - Poll mc_device_enable_r to confirm register write success.
 * - Release g->mc.enable_lock spinlock.
 * - If GR engines are being reset, reset GPCs.
 * - Enable the HW engines.
 * - Acquire g->mc.enable_lock spinlock.
 * - Read mc_device_enable_r register and set the bits in read value
 * corresponding to HW engines to be enabled.
 * - Write mc_device_enable_r with the updated value.
 * - Poll mc_device_enable_r to confirm register write success.
 * - Release g->mc.enable_lock spinlock.
 *
 * @return 0 on success, negative errno otherwise.
 */
int (*reset_engines_all)(struct gk20a *g, u32 devtype);
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
/* Enable ELPG from the MC path; non-FuSa HALs only.
 * NOTE(review): behavior defined in the HAL implementation. */
void (*elpg_enable)(struct gk20a *g);
#endif
/* Populate interrupt routing info for unit; returns false if the
 * unit's info could not be determined (presumably — confirm against
 * the HAL implementation; only the signature is visible here). */
bool (*intr_get_unit_info)(struct gk20a *g, u32 unit);
#endif /* NVGPU_NEXT_GOPS_MC_H */

View File

@@ -1,33 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_PBDMA_H
#define NVGPU_NEXT_GOPS_PBDMA_H
/* Leave extra tab to fit into gops_pbdma structure */
/* Encode a channel id into the PBDMA channel-info register format. */
u32 (*set_channel_info_chid)(u32 chid);
/* Encode an engine interrupt vector into the intr-notify format. */
u32 (*set_intr_notify)(u32 eng_intr_vector);
/* MMU fault id associated with the given PBDMA. */
u32 (*get_mmu_fault_id)(struct gk20a *g, u32 pbdma_id);
/* Force CE split mode in PBDMA config (HAL-specific). */
void (*pbdma_force_ce_split)(struct gk20a *g);
/* Total number of PBDMAs on this chip. */
u32 (*get_num_of_pbdmas)(void);
#endif /* NVGPU_NEXT_GOPS_PBDMA_H */

View File

@@ -1,30 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_PERF_H
#define NVGPU_NEXT_GOPS_PERF_H
/* Leave extra tab to fit into gops_perf structure
 * (original comment said gops_fifo — apparent copy-paste; these are
 * perf/PMA streaming ops). */
/* Enable/disable high-speed streaming. */
void (*enable_hs_streaming)(struct gk20a *g, bool enable);
/* Reset high-speed streaming credit counters. */
void (*reset_hs_streaming_credits)(struct gk20a *g);
/* Enable/disable PMA system legacy mode. */
void (*enable_pmasys_legacy_mode)(struct gk20a *g, bool enable);
#endif /* NVGPU_NEXT_GOPS_PERF_H */

View File

@@ -1,32 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_PRIV_RING_H
#define NVGPU_NEXT_GOPS_PRIV_RING_H
/* Leave extra tab to fit into gops_ce structure */
/* NOTE(review): "gops_ce" above looks like a copy-paste remnant;
 * these are priv_ring ops — confirm against the gops definition. */
#ifdef CONFIG_NVGPU_MIG
/* Enable/disable the GR pri remap window for syspipe gr_syspipe_indx;
 * returns 0 on success. */
int (*config_gr_remap_window)(struct gk20a *g, u32 gr_syspipe_indx,
		bool enable);
/* Enable/disable GPC resource-string mapping; returns 0 on success. */
int (*config_gpc_rs_map)(struct gk20a *g, bool enable);
#endif
#endif /* NVGPU_NEXT_GOPS_PRIV_RING_H */

View File

@@ -1,37 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_GOPS_RUNLIST_H
#define NVGPU_NEXT_GOPS_RUNLIST_H
/* Leave extra tab to fit into gops_runlist structure */
/* Decode fields of a runlist's device info given its pri base. */
u32 (*get_runlist_id)(struct gk20a *g, u32 runlist_pri_base);
/* Map a runlist-local engine id (rleng_id) to the global engine id. */
u32 (*get_engine_id_from_rleng_id)(struct gk20a *g,
		u32 rleng_id, u32 runlist_pri_base);
/* BAR0 offset of the runlist's channel RAM. */
u32 (*get_chram_bar0_offset)(struct gk20a *g, u32 runlist_pri_base);
/* Fill *pbdma_info with the PBDMAs serving this runlist. */
void (*get_pbdma_info)(struct gk20a *g, u32 runlist_pri_base,
		struct nvgpu_next_pbdma_info *pbdma_info);
/* Interrupt id of the runlist engine rleng_id. */
u32 (*get_engine_intr_id)(struct gk20a *g, u32 runlist_pri_base,
		u32 rleng_id);
/* Esched-to-FB thread id for this runlist. */
u32 (*get_esched_fb_thread_id)(struct gk20a *g, u32 runlist_pri_base);
#endif /* NVGPU_NEXT_GOPS_RUNLIST_H */

View File

@@ -1,36 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_LITTER_H
#define NVGPU_NEXT_LITTER_H
/*
 * Litter constants.
 * These should be in sync with GPU_LIT_* constants defined in nvgpu/gk20a.h.
 */
/* ROP-in-GPC pri address layout constants (values continue the
 * GPU_LIT_* numbering from nvgpu/gk20a.h). */
#define GPU_LIT_ROP_IN_GPC_BASE 52
#define GPU_LIT_ROP_IN_GPC_SHARED_BASE 53
#define GPU_LIT_ROP_IN_GPC_PRI_SHARED_IDX 54
#define GPU_LIT_ROP_IN_GPC_STRIDE 55
#endif /* NVGPU_NEXT_LITTER_H */

View File

@@ -1,69 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_MC_H
#define NVGPU_NEXT_MC_H
/**
 * @file
 *
 * Declare intr specific struct.
 */
#include <nvgpu/types.h>
#include <nvgpu/cic.h>
/** Per-interrupt-unit routing information for the HOST2SOC intr tree. */
struct nvgpu_intr_unit_info {
	/**
	 * top bit 0 -> subtree 0 -> leaf0, leaf1 -> leaf 0, 1
	 * top bit 1 -> subtree 1 -> leaf0, leaf1 -> leaf 2, 3
	 * top bit 2 -> subtree 2 -> leaf0, leaf1 -> leaf 4, 5
	 * top bit 3 -> subtree 3 -> leaf0, leaf1 -> leaf 6, 7
	 */
	/**
	 * h/w defined vectorids for the s/w defined intr unit.
	 * Up to 32 vectorids (32 bits of a leaf register) are supported for
	 * the intr units that support multiple vector ids.
	 */
	u32 vectorid[NVGPU_CIC_INTR_VECTORID_SIZE_MAX];
	/** number of vectorid supported by the intr unit */
	u32 vectorid_size;
	u32 subtree; /**< subtree number corresponding to vectorid */
	u64 subtree_mask; /**< leaf1_leaf0 value for the intr unit */
	/**
	 * This flag will be set to true after all the fields
	 * of nvgpu_intr_unit_info are configured.
	 */
	bool valid;
};
/** Next-chip MC state: per-unit intr routing plus saved leaf masks. */
struct nvgpu_next_mc {
	/**
	 * intr info array indexed by s/w defined intr unit name
	 */
	struct nvgpu_intr_unit_info intr_unit_info[NVGPU_CIC_INTR_UNIT_MAX];
	/**
	 * Leaf mask per subtree. Subtree is a pair of leaf registers.
	 * Each subtree corresponds to a bit in intr_top register.
	 */
	u64 subtree_mask_restore[HOST2SOC_NUM_SUBTREE];
};
#endif /* NVGPU_NEXT_MC_H */

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_MM_H
#define NVGPU_NEXT_MM_H
/** VAB struct (member declaration spliced into the mm struct). */
struct nvgpu_vab vab;
#endif /* NVGPU_NEXT_MM_H */

View File

@@ -1,90 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_NETLIST_H
#define NVGPU_NEXT_NETLIST_H
/**
 * @file
 *
 * Next-chip netlist parsing: region handlers, accessors for the
 * non-ctx load lists and (debugger builds) ctxsw register lists.
 */
#include <nvgpu/types.h>
struct gk20a;
struct nvgpu_netlist_vars;
struct netlist_av_list;
/* Parse one s/w-bundle region (region_id) from the netlist image at
 * src/size into netlist_vars; returns true if the region was handled,
 * with any parse error reported through *err_code. */
bool nvgpu_next_netlist_handle_sw_bundles_region_id(struct gk20a *g,
		u32 region_id, u8 *src, u32 size,
		struct nvgpu_netlist_vars *netlist_vars, int *err_code);
/* Free netlist ctx vars allocated by the handlers above. */
void nvgpu_next_netlist_deinit_ctx_vars(struct gk20a *g);
/* Accessors for the non-ctx compute load av lists. */
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_local_compute_load_av_list(
		struct gk20a *g);
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_global_compute_load_av_list(
		struct gk20a *g);
#ifdef CONFIG_NVGPU_GRAPHICS
/* Accessors for the non-ctx graphics load av lists. */
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_local_gfx_load_av_list(
		struct gk20a *g);
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_global_gfx_load_av_list(
		struct gk20a *g);
#endif /* CONFIG_NVGPU_GRAPHICS */
#ifdef CONFIG_NVGPU_DEBUGGER
/* Parse one debugger (ctxsw regs) region; same contract as the
 * sw-bundles handler above. */
bool nvgpu_next_netlist_handle_debugger_region_id(struct gk20a *g,
		u32 region_id, u8 *src, u32 size,
		struct nvgpu_netlist_vars *netlist_vars, int *err_code);
void nvgpu_next_netlist_deinit_ctxsw_regs(struct gk20a *g);
/* Compute ctxsw register lists, per hardware unit. */
struct netlist_aiv_list *nvgpu_next_netlist_get_sys_compute_ctxsw_regs(
		struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_gpc_compute_ctxsw_regs(
		struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_tpc_compute_ctxsw_regs(
		struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_ppc_compute_ctxsw_regs(
		struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_etpc_compute_ctxsw_regs(
		struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_lts_ctxsw_regs(
		struct gk20a *g);
#ifdef CONFIG_NVGPU_GRAPHICS
/* Graphics ctxsw register lists, per hardware unit. */
struct netlist_aiv_list *nvgpu_next_netlist_get_sys_gfx_ctxsw_regs(
		struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_gpc_gfx_ctxsw_regs(
		struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_tpc_gfx_ctxsw_regs(
		struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_ppc_gfx_ctxsw_regs(
		struct gk20a *g);
struct netlist_aiv_list *nvgpu_next_netlist_get_etpc_gfx_ctxsw_regs(
		struct gk20a *g);
#endif /* CONFIG_NVGPU_GRAPHICS */
/* Total register counts per list kind (compute + gfx combined —
 * presumably; confirm in the implementation). */
u32 nvgpu_next_netlist_get_sys_ctxsw_regs_count(struct gk20a *g);
u32 nvgpu_next_netlist_get_ppc_ctxsw_regs_count(struct gk20a *g);
u32 nvgpu_next_netlist_get_gpc_ctxsw_regs_count(struct gk20a *g);
u32 nvgpu_next_netlist_get_tpc_ctxsw_regs_count(struct gk20a *g);
u32 nvgpu_next_netlist_get_etpc_ctxsw_regs_count(struct gk20a *g);
/* Debug dump of parsed ctxsw register info. */
void nvgpu_next_netlist_print_ctxsw_reg_info(struct gk20a *g);
#endif /* CONFIG_NVGPU_DEBUGGER */
#endif /* NVGPU_NEXT_NETLIST_H */

View File

@@ -1,44 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_PBDMA_H
#define NVGPU_NEXT_PBDMA_H
/**
 * @file
 *
 * Declare pbdma specific struct and defines.
 */
#include <nvgpu/types.h>
/** Number of PBDMAs serving one runlist. */
#define PBDMA_PER_RUNLIST_SIZE 2U
/** Sentinel values for unset PBDMA pri base / id entries. */
#define NVGPU_INVALID_PBDMA_PRI_BASE U32_MAX
#define NVGPU_INVALID_PBDMA_ID U32_MAX
/** Per-runlist PBDMA identification. */
struct nvgpu_next_pbdma_info {
	/** The pri offset of the i'th PBDMA for runlist_pri_base */
	u32 pbdma_pri_base[PBDMA_PER_RUNLIST_SIZE];
	/** The ID of the i'th PBDMA that runs channels on this runlist */
	u32 pbdma_id[PBDMA_PER_RUNLIST_SIZE];
};
#endif /* NVGPU_NEXT_PBDMA_H */

View File

@@ -1,52 +0,0 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NEXT_RUNLIST_H
#define NVGPU_NEXT_RUNLIST_H
/**
 * @file
 *
 * Declare runlist info specific struct and defines.
 */
#include <nvgpu/types.h>
struct nvgpu_next_pbdma_info;
struct nvgpu_device;
struct nvgpu_fifo;
/** Max runlist-local engines tracked per runlist. */
#define RLENG_PER_RUNLIST_SIZE 3
/** Next-chip per-runlist hardware addressing info. */
struct nvgpu_next_runlist {
	/** Runlist pri base - offset into device's runlist space */
	u32 runlist_pri_base;
	/** Channel ram address in bar0 pri space */
	u32 chram_bar0_offset;
	/** Pointer to pbdma info stored in engine_info */
	const struct nvgpu_next_pbdma_info *pbdma_info;
	/** Pointer to engine info for per runlist engine id */
	const struct nvgpu_device *rl_dev_list[RLENG_PER_RUNLIST_SIZE];
};
/* Populate runlist engine info in the fifo struct f. */
void nvgpu_next_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f);
#endif /* NVGPU_NEXT_RUNLIST_H */

View File

@@ -1,42 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_SIM_NEXT_H
#define NVGPU_SIM_NEXT_H
#include <nvgpu/types.h>
#ifdef CONFIG_NVGPU_SIM
struct gk20a;
void nvgpu_next_init_sim_support(struct gk20a *g);
#endif
#ifdef CONFIG_NVGPU_DEBUGGER
int nvgpu_next_init_sim_netlist_ctxsw_regs(struct gk20a *g);
void nvgpu_next_init_sim_netlist_ctxsw_regs_free(struct gk20a *g);
#endif /* CONFIG_NVGPU_DEBUGGER */
int nvgpu_next_init_sim_netlist_ctx_vars(struct gk20a *g);
void nvgpu_next_init_sim_netlist_ctx_vars_free(struct gk20a *g);
#endif /* NVGPU_SIM_NEXT_H */

View File

@@ -23,6 +23,8 @@
#ifndef NVGPU_PBDMA_COMMON_H
#define NVGPU_PBDMA_COMMON_H
#include <nvgpu/types.h>
/**
* @file
*
@@ -36,7 +38,16 @@ struct gk20a;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_pbdma.h"
#define PBDMA_PER_RUNLIST_SIZE 2U
#define NVGPU_INVALID_PBDMA_PRI_BASE U32_MAX
#define NVGPU_INVALID_PBDMA_ID U32_MAX
struct nvgpu_next_pbdma_info {
/**
 * PRI base offset of the i'th PBDMA serving this runlist, relative to
 * runlist_pri_base. NVGPU_INVALID_PBDMA_PRI_BASE marks an unused slot.
 */
u32 pbdma_pri_base[PBDMA_PER_RUNLIST_SIZE];
/**
 * Hardware ID of the i'th PBDMA that runs channels on this runlist.
 * NVGPU_INVALID_PBDMA_ID marks an unused slot.
 */
u32 pbdma_id[PBDMA_PER_RUNLIST_SIZE];
};
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -184,5 +184,9 @@ void nvgpu_profiler_free_pma_stream(struct nvgpu_profiler_object *prof);
bool nvgpu_profiler_validate_regops_allowlist(struct nvgpu_profiler_object *prof,
u32 offset, enum nvgpu_pm_resource_hwpm_register_type *type);
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
void nvgpu_next_profiler_hs_stream_quiesce(struct gk20a *g);
#endif /* CONFIG_NVGPU_HAL_NON_FUSA */
#endif /* CONFIG_NVGPU_PROFILER */
#endif /* NVGPU_PROFILER_H */

View File

@@ -33,17 +33,19 @@
* Runlist interface.
*/
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
#include <nvgpu/nvgpu_next_runlist.h>
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
struct gk20a;
struct nvgpu_tsg;
struct nvgpu_fifo;
struct nvgpu_channel;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
struct nvgpu_next_pbdma_info;
struct nvgpu_device;
#define RLENG_PER_RUNLIST_SIZE 3
#endif
/**
* Low interleave level for runlist entry. TSGs with this interleave level
* typically appear only once in the runlist.
@@ -79,6 +81,22 @@ struct nvgpu_channel;
/** Runlist identifier is invalid. */
#define NVGPU_INVALID_RUNLIST_ID U32_MAX
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
struct nvgpu_next_runlist {
/* Ampere+ runlist info additions. */
/**
 * Runlist PRI base: offset of this runlist's register space within the
 * device's runlist PRI aperture.
 */
u32 runlist_pri_base;
/** Offset of this runlist's channel RAM within the BAR0 PRI space. */
u32 chram_bar0_offset;
/**
 * PBDMA info for this runlist. Points into engine_info; not owned by
 * this struct, so it must not be freed through this pointer.
 */
const struct nvgpu_next_pbdma_info *pbdma_info;
/**
 * Engine info for each runlist-local engine id, indexed 0 ..
 * RLENG_PER_RUNLIST_SIZE - 1. Entries reference devices owned elsewhere.
 */
const struct nvgpu_device *rl_dev_list[RLENG_PER_RUNLIST_SIZE];
};
#endif
struct nvgpu_runlist {
/** Runlist identifier. */
u32 id;
@@ -103,7 +121,7 @@ struct nvgpu_runlist {
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
/* nvgpu next runlist info additions */
/* Ampere+ runlist info additions */
struct nvgpu_next_runlist nvgpu_next;
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
@@ -363,6 +381,11 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
* Walks through all active engines info, and initialize runlist info.
*/
void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f);
#if defined(CONFIG_NVGPU_NON_FUSA)
void nvgpu_next_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
#define rl_dbg(g, fmt, arg...) \

View File

@@ -24,13 +24,9 @@
#ifdef CONFIG_NVGPU_SIM
#include <nvgpu/types.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/gk20a.h>
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA)
#include <nvgpu/nvgpu_next_sim.h>
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
/*
* Size of SIM ring buffers.
@@ -108,5 +104,20 @@ static inline u32 *sim_msg_param(struct gk20a *g, u32 byte_offset)
return sim_msg_bfr(g, byte_offset + sim_msg_header_size());
}
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_HAL_NON_FUSA)
void nvgpu_next_init_sim_support(struct gk20a *g);
#ifdef CONFIG_NVGPU_DEBUGGER
int nvgpu_next_init_sim_netlist_ctxsw_regs(struct gk20a *g);
void nvgpu_next_init_sim_netlist_ctxsw_regs_free(struct gk20a *g);
#endif /* CONFIG_NVGPU_DEBUGGER */
int nvgpu_next_init_sim_netlist_ctx_vars(struct gk20a *g);
void nvgpu_next_init_sim_netlist_ctx_vars_free(struct gk20a *g);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
#endif
#endif /* NVGPU_SIM_H */