gpu: nvgpu: remove unused code from common.nvgpu on safety build

- remove unused code from the common.nvgpu unit in the safety build, along
  with the code in other places that uses it.
- document the use of compiler intrinsics, as mandated by the code
  inspection checklist.

Jira NVGPU-6876

Change-Id: Ifd16dd197d297f56a517ca155da4ed145015204c
Signed-off-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2561584
(cherry picked from commit 900391071e9a7d0448cbc1bb6ed57677459712a4)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2561583
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Shashank Singh, 2021-07-20 07:02:39 +00:00, committed by mobile promotions
parent 94255220f7, commit 19a3b86f06
28 changed files with 294 additions and 129 deletions
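Nearly every hunk below applies the same pattern: a declaration and all of
its uses are wrapped in the matching CONFIG_* guard, so a safety build that
leaves the flag undefined compiles the code out entirely. A minimal,
self-contained sketch of the pattern (CONFIG_FEATURE_X and all names here
are hypothetical, not taken from nvgpu):

#include <stdio.h>

/* CONFIG_FEATURE_X stands in for flags such as
 * CONFIG_NVGPU_KERNEL_MODE_SUBMIT or CONFIG_NVGPU_NON_FUSA. */
struct device_state {
	int always_present;
#ifdef CONFIG_FEATURE_X
	int feature_counter;	/* compiled out entirely when the flag is off */
#endif
};

static void touch(struct device_state *d)
{
	d->always_present++;
#ifdef CONFIG_FEATURE_X
	d->feature_counter++;	/* every use carries the same guard */
#endif
}

int main(void)
{
	struct device_state d = { 0 };

	touch(&d);
	printf("%d\n", d.always_present);	/* 1 in either build variant */
	return 0;
}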


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -419,6 +419,7 @@ struct nvgpu_falcon *nvgpu_falcon_get_instance(struct gk20a *g, u32 flcn_id)
case FALCON_ID_GSPLITE:
flcn = &g->gsp_flcn;
break;
#ifdef CONFIG_NVGPU_DGPU
case FALCON_ID_NVDEC:
flcn = &g->nvdec_flcn;
break;
@@ -428,6 +429,7 @@ struct nvgpu_falcon *nvgpu_falcon_get_instance(struct gk20a *g, u32 flcn_id)
case FALCON_ID_MINION:
flcn = &g->minion_flcn;
break;
#endif
default:
nvgpu_err(g, "Invalid/Unsupported falcon ID %x", flcn_id);
break;


@@ -83,7 +83,9 @@ static int channel_setup_ramfc(struct nvgpu_channel *c,
static struct nvgpu_channel *allocate_channel(struct nvgpu_fifo *f)
{
struct nvgpu_channel *ch = NULL;
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
struct gk20a *g = f->g;
#endif
nvgpu_mutex_acquire(&f->free_chs_mutex);
if (!nvgpu_list_empty(&f->free_chs)) {
@@ -102,11 +104,13 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
}
nvgpu_mutex_release(&f->free_chs_mutex);
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
if ((g->aggressive_sync_destroy_thresh != 0U) &&
(f->used_channels >
g->aggressive_sync_destroy_thresh)) {
g->aggressive_sync_destroy = true;
}
#endif
return ch;
}
@@ -114,7 +118,9 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
static void free_channel(struct nvgpu_fifo *f,
struct nvgpu_channel *ch)
{
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
struct gk20a *g = f->g;
#endif
#ifdef CONFIG_NVGPU_TRACE
trace_gk20a_release_used_channel(ch->chid);
@@ -130,6 +136,7 @@ static void free_channel(struct nvgpu_fifo *f,
* On teardown it is not possible to dereference platform, but ignoring
* this is fine then because no new channels would be created.
*/
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
if (!nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
if ((g->aggressive_sync_destroy_thresh != 0U) &&
(f->used_channels <
@@ -137,6 +144,7 @@ static void free_channel(struct nvgpu_fifo *f,
g->aggressive_sync_destroy = false;
}
}
#endif
}
void nvgpu_channel_commit_va(struct nvgpu_channel *c)
@@ -1909,10 +1917,12 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
if (err != 0) {
nvgpu_err(g, "failed to preempt channel/TSG");
}
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
/* wait for channel update notifiers */
if (g->os_channel.work_completion_cancel_sync != NULL) {
g->os_channel.work_completion_cancel_sync(ch);
}
#endif
g->ops.channel.unbind(ch);


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -27,11 +27,13 @@
void nvgpu_channel_worker_enqueue(struct nvgpu_channel *ch);
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
static inline struct nvgpu_channel_worker *
nvgpu_channel_worker_from_worker(struct nvgpu_worker *worker)
{
return (struct nvgpu_channel_worker *)
((uintptr_t)worker - offsetof(struct nvgpu_channel_worker, worker));
};
#endif
#endif /* NVGPU_COMMON_FIFO_CHANNEL_WORKER_H */
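For context, nvgpu_channel_worker_from_worker above is a container-of
helper: it recovers the enclosing struct from a pointer to its embedded
worker member. A standalone sketch of the same pattern, with hypothetical
types in place of the nvgpu ones:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct worker { int dummy; };

struct channel_worker {
	int state;
	struct worker worker;	/* embedded member */
};

static struct channel_worker *from_worker(struct worker *w)
{
	/* Step back from the member's address by its offset inside the
	 * containing struct to recover the container itself. */
	return (struct channel_worker *)
		((uintptr_t)w - offsetof(struct channel_worker, worker));
}

int main(void)
{
	struct channel_worker cw = { .state = 42 };

	printf("%d\n", from_worker(&cw.worker)->state);	/* prints 42 */
	return 0;
}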


@@ -376,10 +376,12 @@ int nvgpu_prepare_poweroff(struct gk20a *g)
}
#endif
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
/* Disable GPCPLL */
if (g->ops.clk.suspend_clk_support != NULL) {
g->ops.clk.suspend_clk_support(g);
}
#endif
#ifdef CONFIG_NVGPU_CLK_ARB
if (g->ops.clk_arb.stop_clk_arb_threads != NULL) {
g->ops.clk_arb.stop_clk_arb_threads(g);
@@ -742,7 +744,9 @@ static int nvgpu_early_init(struct gk20a *g)
* SOB after graphics power saving features (blcg/slcg) are
* enabled. For now, do it here.
*/
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
NVGPU_INIT_TABLE_ENTRY(g->ops.clk.init_clk_support, NO_FLAG),
#endif
#ifdef CONFIG_NVGPU_DGPU
NVGPU_INIT_TABLE_ENTRY(&nvgpu_init_fbpa_ecc, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(g->ops.fb.init_fbpa, NO_FLAG),
@@ -864,8 +868,10 @@ int nvgpu_finalize_poweron(struct gk20a *g)
NVGPU_INIT_TABLE_ENTRY(g->ops.acr.acr_init,
NVGPU_SEC_PRIVSECURITY),
NVGPU_INIT_TABLE_ENTRY(&nvgpu_sw_quiesce_init_support, NO_FLAG),
#ifdef CONFIG_NVGPU_NVLINK
NVGPU_INIT_TABLE_ENTRY(g->ops.nvlink.init,
NVGPU_SUPPORT_NVLINK),
#endif
#ifdef CONFIG_NVGPU_DEBUGGER
NVGPU_INIT_TABLE_ENTRY(g->ops.ptimer.config_gr_tick_freq,
@@ -1064,8 +1070,12 @@ int nvgpu_init_gpu_characteristics(struct gk20a *g)
* (even if kernel-mode submits aren't enabled where full deterministic
* features matter).
*/
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
if (nvgpu_has_syncpoints(g) &&
g->aggressive_sync_destroy_thresh == 0U) {
#else
if (nvgpu_has_syncpoints(g)) {
#endif
nvgpu_set_enabled(g,
NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_FULL,
true);
@@ -1147,9 +1157,11 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount)
g->ops.ecc.ecc_remove_support(g);
}
#ifdef CONFIG_NVGPU_NON_FUSA
if (g->remove_support != NULL) {
g->remove_support(g);
}
#endif
if (g->ops.ltc.ltc_remove_support != NULL) {
g->ops.ltc.ltc_remove_support(g);


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -164,9 +164,11 @@ static void nvgpu_remove_mm_support(struct mm_gk20a *mm)
nvgpu_vm_put(mm->gsp.vm);
}
#ifdef CONFIG_NVGPU_NON_FUSA
if (g->has_cde) {
nvgpu_vm_put(mm->cde.vm);
}
#endif
nvgpu_free_sysmem_flush(g);
@@ -250,6 +252,7 @@ static int nvgpu_init_hwpm(struct mm_gk20a *mm)
return 0;
}
#ifdef CONFIG_NVGPU_NON_FUSA
static int nvgpu_init_cde_vm(struct mm_gk20a *mm)
{
struct gk20a *g = gk20a_from_mm(mm);
@@ -270,6 +273,7 @@ static int nvgpu_init_cde_vm(struct mm_gk20a *mm)
}
return 0;
}
#endif
static int nvgpu_init_ce_vm(struct mm_gk20a *mm)
{
@@ -454,12 +458,14 @@ static int nvgpu_init_mm_setup_vm(struct gk20a *g)
}
}
#ifdef CONFIG_NVGPU_NON_FUSA
if (g->has_cde) {
err = nvgpu_init_cde_vm(mm);
if (err != 0) {
return err;
}
}
#endif
err = nvgpu_init_ce_vm(mm);
if (err != 0) {


@@ -209,10 +209,12 @@ int nvgpu_pmu_early_init(struct gk20a *g)
g->support_ls_pmu = false;
/* Disable LS PMU global checkers */
#ifdef CONFIG_NVGPU_NON_FUSA
g->can_elpg = false;
g->elpg_enabled = false;
g->aelpg_enabled = false;
g->elpg_ms_enabled = false;
#endif
nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, false);
nvgpu_set_enabled(g, NVGPU_ELPG_MS_ENABLED, false);
#ifdef CONFIG_NVGPU_DGPU


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -165,6 +165,7 @@ u32 ga100_get_litter_value(struct gk20a *g, int value)
case GPU_LIT_GPC_PRIV_STRIDE:
ret = proj_gpc_priv_stride_v();
break;
#ifdef CONFIG_NVGPU_DEBUGGER
case GPU_LIT_PERFMON_PMMGPCTPCA_DOMAIN_START:
ret = 2;
break;
@@ -186,6 +187,7 @@ u32 ga100_get_litter_value(struct gk20a *g, int value)
case GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_COUNT:
ret = 2;
break;
#endif
case GPU_LIT_MAX_RUNLISTS_SUPPORTED:
ret = 24U;
break;


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -167,6 +167,7 @@ u32 ga10b_get_litter_value(struct gk20a *g, int value)
* The perfmon start, count for various chiplets are taken
* from the PM programming guide.
*/
#ifdef CONFIG_NVGPU_DEBUGGER
case GPU_LIT_PERFMON_PMMGPCTPCA_DOMAIN_START:
ret = 2;
break;
@@ -188,6 +189,7 @@ u32 ga10b_get_litter_value(struct gk20a *g, int value)
case GPU_LIT_PERFMON_PMMGPC_ROP_DOMAIN_COUNT:
ret = 2;
break;
#endif
case GPU_LIT_ROP_IN_GPC_BASE:
ret = proj_rop_in_gpc_base_v();
break;


@@ -980,8 +980,8 @@ static const struct gops_runlist gv11b_ops_runlist = {
.get_max_channels_per_tsg = gv11b_runlist_get_max_channels_per_tsg,
};
-static const struct gops_userd gv11b_ops_userd = {
#ifdef CONFIG_NVGPU_USERD
+static const struct gops_userd gv11b_ops_userd = {
.setup_sw = nvgpu_userd_setup_sw,
.cleanup_sw = nvgpu_userd_cleanup_sw,
.init_mem = gk20a_userd_init_mem,
@@ -990,9 +990,9 @@ static const struct gops_userd gv11b_ops_userd = {
.gp_put = gv11b_userd_gp_put,
.pb_get = gv11b_userd_pb_get,
#endif
+#endif /* CONFIG_NVGPU_USERD */
	.entry_size = gk20a_userd_entry_size,
};
-#endif /* CONFIG_NVGPU_USERD */
static const struct gops_channel gv11b_ops_channel = {
.alloc_inst = nvgpu_channel_alloc_inst,
@@ -1263,9 +1263,11 @@ static const struct gops_mc gv11b_ops_mc = {
.is_mmu_fault_pending = gv11b_mc_is_mmu_fault_pending,
};
#ifdef CONFIG_NVGPU_DEBUGGER
static const struct gops_debug gv11b_ops_debug = {
.show_dump = gk20a_debug_show_dump,
};
#endif
#ifdef CONFIG_NVGPU_DEBUGGER
static const struct gops_debugger gv11b_ops_debugger = {
@@ -1524,7 +1526,9 @@ int gv11b_init_hal(struct gk20a *g)
gops->ramfc = gv11b_ops_ramfc;
gops->ramin = gv11b_ops_ramin;
gops->runlist = gv11b_ops_runlist;
#ifdef CONFIG_NVGPU_USERD
gops->userd = gv11b_ops_userd;
#endif
gops->channel = gv11b_ops_channel;
gops->tsg = gv11b_ops_tsg;
gops->usermode = gv11b_ops_usermode;
@@ -1542,8 +1546,8 @@ int gv11b_init_hal(struct gk20a *g)
gops->regops = gv11b_ops_regops;
#endif
gops->mc = gv11b_ops_mc;
-	gops->debug = gv11b_ops_debug;
#ifdef CONFIG_NVGPU_DEBUGGER
+	gops->debug = gv11b_ops_debug;
gops->debugger = gv11b_ops_debugger;
gops->perf = gv11b_ops_perf;
gops->perfbuf = gv11b_ops_perfbuf;


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -167,6 +167,7 @@ u32 gv11b_get_litter_value(struct gk20a *g, int value)
case GPU_LIT_GPC_PRIV_STRIDE:
ret = proj_gpc_priv_stride_v();
break;
#ifdef CONFIG_NVGPU_DEBUGGER
case GPU_LIT_PERFMON_PMMGPCTPCA_DOMAIN_START:
ret = 2;
break;
@@ -188,6 +189,7 @@ u32 gv11b_get_litter_value(struct gk20a *g, int value)
case GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_COUNT:
ret = 2;
break;
#endif
default:
nvgpu_err(g, "Missing definition %d", value);
BUG();


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -170,6 +170,7 @@ u32 tu104_get_litter_value(struct gk20a *g, int value)
case GPU_LIT_GPC_PRIV_STRIDE:
ret = proj_gpc_priv_stride_v();
break;
#ifdef CONFIG_NVGPU_DEBUGGER
case GPU_LIT_PERFMON_PMMGPCTPCA_DOMAIN_START:
ret = 2;
break;
@@ -191,6 +192,7 @@ u32 tu104_get_litter_value(struct gk20a *g, int value)
case GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_COUNT:
ret = 2;
break;
#endif
default:
nvgpu_err(g, "Missing definition %d", value);
BUG();


@@ -1,7 +1,7 @@
/*
* GM20B Master Control
*
- * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -144,9 +144,11 @@ static u32 gm20b_mc_unit_reset_mask(struct gk20a *g, u32 unit)
mask = mc_enable_pwr_enabled_f();
break;
#endif
#ifdef CONFIG_NVGPU_NVLINK
case NVGPU_UNIT_NVLINK:
mask = BIT32(g->nvlink.ioctrl_table[0].reset_enum);
break;
#endif
case NVGPU_UNIT_CE2:
mask = mc_enable_ce2_enabled_f();
break;


@@ -74,39 +74,51 @@ struct nvgpu_fifo;
struct nvgpu_channel;
struct nvgpu_gr;
struct nvgpu_fbp;
#ifdef CONFIG_NVGPU_SIM
struct sim_nvgpu;
#endif
#ifdef CONFIG_NVGPU_DGPU
struct nvgpu_ce_app;
#endif
#ifdef CONFIG_NVGPU_FECS_TRACE
struct gk20a_ctxsw_trace;
#endif
#ifdef CONFIG_NVGPU_TRACK_MEM_USAGE
struct nvgpu_mem_alloc_tracker;
#endif
struct nvgpu_profiler_object;
#ifdef CONFIG_NVGPU_DEBUGGER
struct dbg_profiler_object_data;
struct nvgpu_debug_context;
#endif
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
struct nvgpu_clk_pll_debug_data;
#endif
struct nvgpu_nvhost_dev;
struct nvgpu_netlist_vars;
struct netlist_av64_list;
#ifdef CONFIG_NVGPU_FECS_TRACE
struct nvgpu_gr_fecs_trace;
#endif
struct nvgpu_cpu_time_correlation_sample;
#ifdef CONFIG_NVGPU_CLK_ARB
struct nvgpu_clk_arb;
#endif
struct nvgpu_setup_bind_args;
struct boardobjgrp;
struct boardobjgrp_pmu_cmd;
struct boardobjgrpmask;
struct nvgpu_sgt;
struct nvgpu_channel_hw_state;
struct nvgpu_mem;
#ifdef CONFIG_NVGPU_CYCLESTATS
struct gk20a_cs_snapshot_client;
struct gk20a_cs_snapshot;
#endif
#ifdef CONFIG_NVGPU_DEBUGGER
struct dbg_session_gk20a;
struct nvgpu_dbg_reg_op;
struct gk20a_cs_snapshot;
#endif
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
struct _resmgr_context;
struct nvgpu_gpfifo_entry;
struct vm_gk20a_mapping_batch;
struct pmu_pg_stats_data;
#endif
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
struct clk_domains_mon_status_params;
#endif
struct nvgpu_cic_mon;
struct nvgpu_cic_rm;
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
@@ -116,9 +128,9 @@ struct nvgpu_gsp_sched;
struct nvgpu_gsp_test;
#endif
enum nvgpu_flush_op;
enum gk20a_mem_rw_flag;
#ifdef CONFIG_NVGPU_DGPU
enum nvgpu_nvlink_minion_dlcmd;
#endif
enum nvgpu_profiler_pm_resource_type;
enum nvgpu_profiler_pm_reservation_scope;
@@ -134,8 +146,10 @@ enum nvgpu_profiler_pm_reservation_scope;
#include <nvgpu/atomic.h>
#include <nvgpu/barrier.h>
#include <nvgpu/rwsem.h>
#ifdef CONFIG_NVGPU_DGPU
#include <nvgpu/nvlink.h>
#include <nvgpu/nvlink_link_mode_transitions.h>
#endif
#include <nvgpu/ecc.h>
#include <nvgpu/channel.h>
#include <nvgpu/tsg.h>
@@ -260,7 +274,7 @@ struct railgate_stats {
#define GPU_LIT_DMA_COPY_CLASS 36
/** Gpc priv stride. */
#define GPU_LIT_GPC_PRIV_STRIDE 37
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_DEBUGGER
#define GPU_LIT_PERFMON_PMMGPCTPCA_DOMAIN_START 38
#define GPU_LIT_PERFMON_PMMGPCTPCB_DOMAIN_START 39
#define GPU_LIT_PERFMON_PMMGPCTPC_DOMAIN_COUNT 40
@@ -268,6 +282,7 @@ struct railgate_stats {
#define GPU_LIT_PERFMON_PMMFBP_LTC_DOMAIN_COUNT 42
#define GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_START 43
#define GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_COUNT 44
#endif
#define GPU_LIT_SM_UNIQUE_BASE 45
#define GPU_LIT_SM_SHARED_BASE 46
#define GPU_LIT_GPC_ADDR_WIDTH 47
@@ -282,8 +297,6 @@ struct railgate_stats {
#define GPU_LIT_PERFMON_PMMGPC_ROP_DOMAIN_START 56
#define GPU_LIT_PERFMON_PMMGPC_ROP_DOMAIN_COUNT 57
/** @endcond */
/** Macro to get litter values corresponding to the litter defines. */
#define nvgpu_get_litter_value(g, v) ((g)->ops.get_litter_value((g), v))
@@ -309,11 +322,14 @@ struct railgate_stats {
#endif
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
struct nvgpu_gpfifo_userdata {
struct nvgpu_gpfifo_entry nvgpu_user *entries;
struct _resmgr_context *context;
};
#endif
#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
enum nvgpu_event_id_type {
NVGPU_EVENT_ID_BPT_INT = 0,
NVGPU_EVENT_ID_BPT_PAUSE = 1,
@@ -323,7 +339,7 @@ enum nvgpu_event_id_type {
NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN = 5,
NVGPU_EVENT_ID_MAX = 6,
};
/** @endcond */
#endif
/**
* @brief HW version info read from the HW.
@@ -397,10 +413,10 @@ struct gk20a {
*/
unsigned long *enabled_flags;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_NON_FUSA
/** Used by Linux module to keep track of driver usage */
nvgpu_atomic_t usage_count;
/** @endcond */
#endif
/** Used by common.init unit to track users of the driver */
struct nvgpu_ref refcount;
@@ -420,9 +436,9 @@ struct gk20a {
#ifdef CONFIG_PM
bool suspended;
#endif
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_NON_FUSA
bool sw_ready;
/** @endcond */
#endif
/** Flag to indicate that quiesce framework is initialized. */
bool sw_quiesce_init_done;
@@ -443,11 +459,13 @@ struct gk20a {
/** Controls which messages are logged */
u64 log_mask;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_NON_FUSA
u32 log_trace;
#endif
#ifdef CONFIG_NVGPU_STATIC_POWERGATE
struct nvgpu_mutex static_pg_lock;
/** @endcond */
#endif
/** Stored HW version info */
struct nvgpu_gpu_params params;
@@ -471,17 +489,17 @@ struct gk20a {
struct nvgpu_falcon fecs_flcn;
/** Struct holding the gpccs falcon software state. */
struct nvgpu_falcon gpccs_flcn;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_DGPU
struct nvgpu_falcon nvdec_flcn;
struct nvgpu_falcon minion_flcn;
-	struct nvgpu_falcon gsp_flcn;
struct clk_gk20a clk;
/** @endcond */
#endif
+	struct nvgpu_falcon gsp_flcn;
/** Top level struct maintaining fifo unit's software state. */
struct nvgpu_fifo fifo;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_DGPU
struct nvgpu_nvlink_dev nvlink;
/** @endcond */
#endif
/** Pointer to struct maintaining multiple GR instance's software state. */
struct nvgpu_gr *gr;
u32 num_gr_instances;
@@ -506,10 +524,10 @@ struct gk20a {
#endif
/** Top level struct maintaining ECC unit's software state. */
struct nvgpu_ecc ecc;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_DGPU
struct pmgr_pmupstate *pmgr_pmu;
struct nvgpu_sec2 sec2;
/** @endcond */
#endif
#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
struct nvgpu_sched_ctrl sched_ctrl;
#endif
@@ -522,33 +540,37 @@ struct gk20a {
/** User disabled timeouts */
bool timeouts_disabled_by_user;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_CHANNEL_WDT
unsigned int ch_wdt_init_limit_ms;
/** @endcond */
+	u32 ctxsw_wdt_period_us;
#endif
/**
* Timeout after which ctxsw timeout interrupt (if enabled by s/w) will
* be triggered by h/w if context fails to context switch.
*/
u32 ctxsw_timeout_period_ms;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
-	u32 ctxsw_wdt_period_us;
#ifdef CONFIG_NVGPU_NON_FUSA
struct nvgpu_mutex power_lock;
/** @endcond */
#endif
/** Lock to protect accessing \a power_on_state. */
struct nvgpu_spinlock power_spinlock;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
/** Channel priorities */
u32 tsg_timeslice_low_priority_us;
u32 tsg_timeslice_medium_priority_us;
u32 tsg_timeslice_high_priority_us;
u32 tsg_timeslice_min_us;
u32 tsg_timeslice_max_us;
#endif
u32 tsg_dbg_timeslice_max_us;
/**
* Flag to indicate if runlist interleaving is supported or not. Set to
* true for safety.
*/
bool runlist_interleave;
/** @endcond */
/** Lock serializing CG and PG programming for various units */
struct nvgpu_mutex cg_pg_lock;
@@ -558,25 +580,31 @@ struct gk20a {
bool blcg_enabled;
/** ELCG setting read from the platform data */
bool elcg_enabled;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_LS_PMU
bool elpg_enabled;
bool elpg_ms_enabled;
bool aelpg_enabled;
bool can_elpg;
#endif
#ifdef CONFIG_NVGPU_NON_FUSA
bool mscg_enabled;
bool forced_idle;
bool forced_reset;
#endif
/** Allow priv register access to all. */
bool allow_all;
/** @endcond */
/** Ptimer source frequency. */
u32 ptimer_src_freq;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_NON_FUSA
int railgate_delay;
u8 ldiv_slowdown_factor;
#endif
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
unsigned int aggressive_sync_destroy_thresh;
bool aggressive_sync_destroy;
/** @endcond */
#endif
/** Is LS PMU supported? */
bool support_ls_pmu;
@@ -584,11 +612,12 @@ struct gk20a {
/** Is this a virtual GPU? */
bool is_virtual;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_NON_FUSA
/* Whether cde engine is supported or not. */
bool has_cde;
u32 emc3d_ratio;
/** @endcond */
#endif
/**
* A group of semaphore pools. One for each channel.
@@ -637,53 +666,49 @@ struct gk20a {
struct gk20a_cs_snapshot *cs_data;
#endif
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_NON_FUSA
/* Called after all references to driver are gone. Unused in safety */
void (*remove_support)(struct gk20a *g);
#endif
#ifdef CONFIG_NVGPU_POWER_PG
u64 pg_ingating_time_us;
u64 pg_ungating_time_us;
u32 pg_gating_cnt;
u32 pg_ms_gating_cnt;
/** @endcond */
#endif
/** GPU address-space identifier. */
struct gk20a_as as;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
struct nvgpu_mutex client_lock;
int client_refcount; /* open channels and ctrl nodes */
/** @endcond */
/** The HAL function pointers */
struct gpu_ops ops;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_LS_PMU
/*used for change of enum zbc update cmd id from ver 0 to ver1*/
u8 pmu_ver_cmd_id_zbc_table_update;
/** @endcond */
#endif
/** Top level struct managing interrupt handling. */
struct nvgpu_mc mc;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
+#ifdef CONFIG_NVGPU_COMPRESSION
/*
* The deductible memory size for max_comptag_mem (in MBytes)
* Usually close to memory size that running system is taking
*/
u32 comptag_mem_deduct;
-#ifdef CONFIG_NVGPU_COMPRESSION
u32 max_comptag_mem; /* max memory size (MB) for comptag */
+	struct nvgpu_cbc *cbc;
#endif
#ifdef CONFIG_NVGPU_NON_FUSA
u32 ltc_streamid;
-	struct nvgpu_cbc *cbc;
#endif
/** ltc unit's meta data handle. */
struct nvgpu_ltc *ltc;
/** @endcond */
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
struct nvgpu_channel_worker {
struct nvgpu_worker worker;
@@ -692,19 +717,21 @@ struct gk20a {
struct nvgpu_timeout timeout;
#endif
} channel_worker;
#endif
#ifdef CONFIG_NVGPU_CLK_ARB
struct nvgpu_clk_arb_worker {
struct nvgpu_worker worker;
} clk_arb_worker;
/** @endcond */
#endif
struct {
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
void (*open)(struct nvgpu_channel *ch);
/** @endcond */
#endif
/** Os specific callback called at channel closure. */
void (*close)(struct nvgpu_channel *ch, bool force);
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
void (*work_completion_signal)(struct nvgpu_channel *ch);
void (*work_completion_cancel_sync)(struct nvgpu_channel *ch);
bool (*os_fence_framework_inst_exists)(struct nvgpu_channel *ch);
@@ -716,7 +743,7 @@ struct gk20a {
int (*copy_user_gpfifo)(struct nvgpu_gpfifo_entry *dest,
struct nvgpu_gpfifo_userdata userdata,
u32 start, u32 length);
/** @endcond */
#endif
/** Os specific callback to allocate usermode buffers. */
int (*alloc_usermode_buffers)(struct nvgpu_channel *c,
struct nvgpu_setup_bind_args *args);
@@ -724,13 +751,11 @@ struct gk20a {
void (*free_usermode_buffers)(struct nvgpu_channel *c);
} os_channel;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
+#ifdef CONFIG_NVGPU_NON_FUSA
/* Used by Linux OS Layer */
struct gk20a_scale_profile *scale_profile;
unsigned long last_freq;
/** @endcond */
-#ifdef CONFIG_NVGPU_NON_FUSA
u32 tpc_fs_mask_user;
u32 fecs_feature_override_ecc_val;
@@ -758,39 +783,39 @@ struct gk20a {
u32 valid_tpc_pg_mask[MAX_PG_TPC_CONFIGS];
u32 valid_gpc_fbp_pg_mask[MAX_PG_GPC_FBP_CONFIGS];
#endif
#ifdef CONFIG_NVGPU_DGPU
struct nvgpu_bios *bios;
bool bios_is_init;
#endif
#ifdef CONFIG_NVGPU_CLK_ARB
struct nvgpu_clk_arb *clk_arb;
struct nvgpu_mutex clk_arb_enable_lock;
nvgpu_atomic_t clk_arb_global_nr;
#endif
#ifdef CONFIG_NVGPU_DGPU
struct nvgpu_ce_app *ce_app;
#endif
#ifdef CONFIG_NVGPU_NON_FUSA
/** Flag to control enabling/disabling of illegal compstat intr. */
bool ltc_intr_en_illegal_compstat;
#endif
/** @endcond */
/** Are we currently running on a FUSA device configuration? */
bool is_fusa_sku;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
+	u16 pci_class;
#ifdef CONFIG_NVGPU_DGPU
/* PCI device identifier */
u16 pci_vendor_id, pci_device_id;
u16 pci_subsystem_vendor_id, pci_subsystem_device_id;
-	u16 pci_class;
u8 pci_revision;
-	/**
-	 * The per-device identifier. The iGPUs without a PDI will use
-	 * the SoC PDI if one exists. Zero if neither exists.
-	 */
-	u64 per_device_identifier;
/*
* PCI power management: i2c device index, port and address for
* INA3221.
@@ -803,9 +828,13 @@ struct gk20a {
/* PCIe power states. */
bool xve_l0s;
bool xve_l1;
#endif
/* Current warning temp in sfxp24.8 */
s32 curr_warn_temp;
+	/**
+	 * The per-device identifier. The iGPUs without a PDI will use
+	 * the SoC PDI if one exists. Zero if neither exists.
+	 */
+	u64 per_device_identifier;
#if defined(CONFIG_PCI_MSI)
/* Check if msi is enabled */
@@ -816,11 +845,9 @@ struct gk20a {
struct nvgpu_mem_alloc_tracker *kmallocs;
#endif
/* memory training sequence and mclk switch scripts */
u32 mem_config_idx;
#ifdef CONFIG_NVGPU_NON_FUSA
u64 dma_memory_used;
/** @endcond */
#endif
#if defined(CONFIG_TEGRA_GK20A_NVHOST)
/** Full syncpoint aperture base memory address. */
@@ -833,14 +860,14 @@ struct gk20a {
/** Full syncpoint aperture. */
struct nvgpu_mem syncpt_mem;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_LS_PMU
struct nvgpu_list_node boardobj_head;
struct nvgpu_list_node boardobjgrp_head;
-	struct nvgpu_mem pdb_cache_errata_mem;
/** @endcond */
#endif
#ifdef CONFIG_NVGPU_DGPU
+	struct nvgpu_mem pdb_cache_errata_mem;
u16 dgpu_max_clk;
#endif


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,9 @@
struct gk20a;
struct namemap_cfg;
struct clk_gk20a;
#ifdef CONFIG_NVGPU_CLK_ARB
struct nvgpu_clk_pll_debug_data;
#endif
/**
* @brief clk gops.
@@ -36,7 +39,7 @@ struct clk_gk20a;
* func pointers.
*/
struct gops_clk {
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_CLK_ARB
int (*init_debugfs)(struct gk20a *g);
int (*init_clk_support)(struct gk20a *g);
void (*suspend_clk_support)(struct gk20a *g);
@@ -58,7 +61,7 @@ struct gops_clk {
u32 (*get_ref_clock_rate)(struct gk20a *g);
int (*predict_mv_at_hz_cur_tfloor)(struct clk_gk20a *clk,
unsigned long rate);
/** @endcond */
#endif
/**
* @brief Get max rate of gpu clock.
*
@@ -72,7 +75,7 @@ struct gops_clk {
* @return 0 in case of failure and > 0 in case of success
*/
unsigned long (*get_maxrate)(struct gk20a *g, u32 api_domain);
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_CLK_ARB
int (*prepare_enable)(struct clk_gk20a *clk);
void (*disable_unprepare)(struct clk_gk20a *clk);
int (*get_voltage)(struct clk_gk20a *clk, u64 *val);
@@ -92,7 +95,7 @@ struct gops_clk {
int (*perf_pmu_vfe_load)(struct gk20a *g);
bool support_vf_point;
u8 lut_num_entries;
/** @endcond */
#endif
};
struct gops_clk_mon {


@@ -74,6 +74,7 @@ struct ctxsw_buf_offset_map_entry;
enum ctxsw_addr_type;
enum nvgpu_event_id_type;
#endif
struct netlist_av64_list;
/**
* This structure stores the GR engine ecc subunit hal pointers.


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,6 +22,7 @@
#ifndef NVGPU_GOPS_NVLINK_H
#define NVGPU_GOPS_NVLINK_H
#ifdef CONFIG_NVGPU_DGPU
/* API */
struct gops_nvlink_link_mode_transitions {
int (*setup_pll)(struct gk20a *g,
@@ -90,5 +91,6 @@ struct gops_nvlink {
struct gops_nvlink_minion minion;
struct gops_nvlink_intr intr;
};
#endif
#endif /* NVGPU_GOPS_NVLINK_H */


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -214,7 +214,9 @@ struct gpu_ops {
void (**fn)(struct gk20a *g, struct nvgpu_mem *mem));
struct gops_pmu_perf pmu_perf;
struct gops_debug debug;
#ifdef CONFIG_NVGPU_DGPU
struct gops_nvlink nvlink;
#endif
struct gops_sec2 sec2;
struct gops_gsp gsp;
/** @endcond */


@@ -769,6 +769,11 @@ static inline s32 nvgpu_safe_cast_s64_to_s32(s64 sl_a)
*
* @param v [in] Value to determine precision for.
*
+ * Returns the number of 1-bits (set bits). Compiler intrinsics are used
+ * for this purpose: __builtin_popcount for unsigned int,
+ * __builtin_popcountl for unsigned long, and __builtin_popcountll for
+ * unsigned long long.
+ *
* @return s32 representation of the precision in bits of the value passed in.
*/
#define NVGPU_PRECISION(v) _Generic(v, \
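The macro body is cut off by the hunk boundary above, so as a rough
illustration only: a _Generic selector over the documented popcount
builtins might look like the sketch below (assuming GCC/Clang builtins;
POPCOUNT is a hypothetical name, not the exact NVGPU_PRECISION definition).

#include <stdio.h>

/* Dispatch to the popcount builtin that matches the argument's type. */
#define POPCOUNT(v) _Generic((v), \
	unsigned int:       __builtin_popcount, \
	unsigned long:      __builtin_popcountl, \
	unsigned long long: __builtin_popcountll)(v)

int main(void)
{
	unsigned int u = 0xF0U;
	unsigned long long ull = ~0ULL;

	printf("%d\n", POPCOUNT(u));	/* 4: four bits set in 0xF0 */
	printf("%d\n", POPCOUNT(ull));	/* 64 on a 64-bit unsigned long long */
	return 0;
}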


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -88,7 +88,6 @@ static void nvgpu_init_vars(struct gk20a *g)
nvgpu_mutex_init(&platform->railgate_lock);
nvgpu_mutex_init(&g->dbg_sessions_lock);
nvgpu_mutex_init(&g->client_lock);
-	nvgpu_mutex_init(&g->power_lock);
nvgpu_mutex_init(&g->static_pg_lock);
nvgpu_mutex_init(&g->clk_arb_enable_lock);


@@ -1,7 +1,7 @@
/*
* Virtualized GPU for Linux
*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -145,7 +145,6 @@ static int vgpu_init_support(struct platform_device *pdev)
}
nvgpu_mutex_init(&g->dbg_sessions_lock);
nvgpu_mutex_init(&g->client_lock);
#if defined(CONFIG_NVGPU_CYCLESTATS)
nvgpu_mutex_init(&g->cs_lock);
#endif


@@ -132,14 +132,22 @@ int gk20a_busy(struct gk20a *g)
return -ENODEV;
}
#endif
#ifdef CONFIG_NVGPU_NON_FUSA
nvgpu_atomic_inc(&g->usage_count);
#else
(void)g;
#endif
return 0;
}
void gk20a_idle(struct gk20a *g)
{
#ifdef CONFIG_NVGPU_NON_FUSA
nvgpu_atomic_dec(&g->usage_count);
#else
(void)g;
#endif
}
static void nvgpu_posix_load_regs(struct gk20a *g)
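The (void)g; cast above is the usual way to keep a single prototype across
build variants: with the feature compiled out the parameter goes unused,
and the cast silences unused-parameter warnings without changing behavior.
A compilable sketch (CONFIG_FULL_BUILD and the types are hypothetical):

#include <stdio.h>

struct gpu { int usage_count; };

static void gpu_idle(struct gpu *g)
{
#ifdef CONFIG_FULL_BUILD
	g->usage_count--;
#else
	(void)g;	/* parameter intentionally unused in this build variant */
#endif
}

int main(void)
{
	struct gpu g = { .usage_count = 1 };

	gpu_idle(&g);
	printf("%d\n", g.usage_count);	/* 1 here; 0 with CONFIG_FULL_BUILD */
	return 0;
}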


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -340,7 +340,9 @@ static int prepare_gr_hw_sw(struct unit_module *m, struct gk20a *g)
err = nvgpu_gr_enable_hw(g);
if (err != 0) {
#ifdef CONFIG_NVGPU_TPC_POWERGATE
nvgpu_mutex_release(&g->static_pg_lock);
#endif
unit_return_fail(m, "failed to enable gr");
}
@@ -399,7 +401,9 @@ int test_acr_bootstrap_hs_acr(struct unit_module *m,
return -ENOMEM;
}
#ifdef CONFIG_NVGPU_TPC_POWERGATE
nvgpu_mutex_acquire(&g->static_pg_lock);
#endif
/*
* Prepare HW and SW setup needed
@@ -598,7 +602,9 @@ int test_acr_bootstrap_hs_acr(struct unit_module *m,
as expected\n");
}
#ifdef CONFIG_NVGPU_TPC_POWERGATE
nvgpu_mutex_release(&g->static_pg_lock);
#endif
return UNIT_SUCCESS;
}
@@ -619,7 +625,9 @@ int test_acr_construct_execute(struct unit_module *m,
unit_return_fail(m, "Test env init failed\n");
}
#ifdef CONFIG_NVGPU_TPC_POWERGATE
nvgpu_mutex_acquire(&g->static_pg_lock);
#endif
/*
* Prepare HW and SW setup needed for the test
@@ -693,7 +701,9 @@ int test_acr_construct_execute(struct unit_module *m,
unit_return_fail(m, "Bootstrap HS ACR didn't failed as \
expected\n");
}
#ifdef CONFIG_NVGPU_TPC_POWERGATE
nvgpu_mutex_release(&g->static_pg_lock);
#endif
return UNIT_SUCCESS;
}
@@ -712,7 +722,9 @@ int test_acr_is_lsf_lazy_bootstrap(struct unit_module *m,
}
#ifdef CONFIG_NVGPU_TPC_POWERGATE
nvgpu_mutex_acquire(&g->static_pg_lock);
#endif
/*
* Prepare HW and SW setup needed for the test
@@ -762,7 +774,9 @@ int test_acr_is_lsf_lazy_bootstrap(struct unit_module *m,
expected\n");
}
#ifdef CONFIG_NVGPU_TPC_POWERGATE
nvgpu_mutex_release(&g->static_pg_lock);
#endif
return UNIT_SUCCESS;
}
@@ -782,7 +796,9 @@ int test_acr_prepare_ucode_blob(struct unit_module *m,
unit_return_fail(m, "Test env init failed\n");
}
#ifdef CONFIG_NVGPU_TPC_POWERGATE
nvgpu_mutex_acquire(&g->static_pg_lock);
#endif
/*
* Prepare HW and SW setup needed for the test
@@ -882,7 +898,9 @@ int test_acr_prepare_ucode_blob(struct unit_module *m,
unit_return_fail(m, "prepare_ucode_blob test failed\n");
}
#ifdef CONFIG_NVGPU_TPC_POWERGATE
nvgpu_mutex_release(&g->static_pg_lock);
#endif
return UNIT_SUCCESS;
}


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -381,6 +381,7 @@ int test_falcon_sw_init_free(struct unit_module *m, struct gk20a *g,
unit_return_fail(m, "FECS falcon sw not initialized\n");
}
#ifdef CONFIG_NVGPU_DGPU
err = verify_valid_falcon_sw_init(m, g, FALCON_ID_GSPLITE);
if (err != 0) {
unit_return_fail(m, "GSPLITE falcon sw not initialized\n");
@@ -400,6 +401,7 @@ int test_falcon_sw_init_free(struct unit_module *m, struct gk20a *g,
if (err != 0) {
unit_return_fail(m, "MINION falcon sw not initialized\n");
}
#endif
return UNIT_SUCCESS;


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -158,12 +158,20 @@ done:
#define F_CHANNEL_OPEN_ALLOC_CH_FAIL BIT(2)
#define F_CHANNEL_OPEN_ALLOC_CH_WARN0 BIT(3)
#define F_CHANNEL_OPEN_ALLOC_CH_WARN1 BIT(4)
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
#define F_CHANNEL_OPEN_ALLOC_CH_AGGRESSIVE BIT(5)
#define F_CHANNEL_OPEN_BUG_ON BIT(6)
#define F_CHANNEL_OPEN_ALLOC_INST_FAIL BIT(7)
#define F_CHANNEL_OPEN_NOTIFIER_WQ_INIT_FAIL BIT(8)
#define F_CHANNEL_OPEN_SEMAPHORE_WQ_INIT_FAIL BIT(9)
#define F_CHANNEL_OPEN_LAST BIT(10)
#else
#define F_CHANNEL_OPEN_BUG_ON BIT(5)
#define F_CHANNEL_OPEN_ALLOC_INST_FAIL BIT(6)
#define F_CHANNEL_OPEN_NOTIFIER_WQ_INIT_FAIL BIT(7)
#define F_CHANNEL_OPEN_SEMAPHORE_WQ_INIT_FAIL BIT(8)
#define F_CHANNEL_OPEN_LAST BIT(9)
#endif
static const char *f_channel_open[] = {
@@ -278,8 +286,10 @@ int test_channel_open(struct unit_module *m, struct gk20a *g, void *vargs)
u32 runlist_id;
bool privileged;
int err;
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
void (*os_channel_open)(struct nvgpu_channel *ch) =
g->os_channel.open;
#endif
l_cond_fi = nvgpu_cond_get_fault_injection();
@@ -320,10 +330,12 @@ int test_channel_open(struct unit_module *m, struct gk20a *g, void *vargs)
next_ch->referenceable = false;
}
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
if (branches & F_CHANNEL_OPEN_ALLOC_CH_AGGRESSIVE) {
g->aggressive_sync_destroy_thresh += 1U;
f->used_channels += 2U;
}
#endif
if (branches & F_CHANNEL_OPEN_NOTIFIER_WQ_INIT_FAIL) {
nvgpu_posix_enable_fault_injection(l_cond_fi, true, 0);
@@ -361,12 +373,14 @@ int test_channel_open(struct unit_module *m, struct gk20a *g, void *vargs)
next_ch->referenceable = true;
}
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
if (branches & F_CHANNEL_OPEN_ALLOC_CH_AGGRESSIVE) {
g->aggressive_sync_destroy_thresh -= 1U;
f->used_channels -= 2U;
unit_assert(g->aggressive_sync_destroy, goto done);
g->aggressive_sync_destroy = false;
}
#endif
if (branches & fail) {
nvgpu_posix_enable_fault_injection(l_cond_fi, false, 0);
@@ -402,7 +416,9 @@ done:
nvgpu_channel_close(ch);
}
g->ops = gops;
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
g->os_channel.open = os_channel_open;
#endif
return ret;
}
@@ -537,11 +553,13 @@ int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs)
g->os_channel.close = branches & F_CHANNEL_CLOSE_OS_CLOSE ?
stub_os_channel_close : NULL;
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
g->aggressive_sync_destroy_thresh =
branches & F_CHANNEL_CLOSE_NONZERO_DESTROY_THRESH_64 ?
64U :
(branches & F_CHANNEL_CLOSE_NONZERO_DESTROY_THRESH_1) ?
1U : 0U;
#endif
if (branches & F_CHANNEL_CLOSE_TSG_BOUND) {
err = nvgpu_tsg_bind_channel(tsg, ch);
@@ -1508,8 +1526,12 @@ done:
#define F_CHANNEL_SUSPEND_RESUME_UNSERVICEABLE_CH BIT(0)
#define F_CHANNEL_SUSPEND_RESUME_INVALID_TSGID BIT(1)
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
#define F_CHANNEL_SUSPEND_RESUME_CH_WRK_CMPL_CNCL_SYNC BIT(2)
#define F_CHANNEL_SUSPEND_RESUME_CHS_LAST BIT(3)
#else
#define F_CHANNEL_SUSPEND_RESUME_CHS_LAST BIT(2)
#endif
static const char *f_channel_suspend_resume[] = {
"suspend_resume_unserviceable_channels",
@@ -1536,10 +1558,12 @@ static int stub_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
return 0;
}
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
static void stub_channel_work_completion_cancel_sync(struct nvgpu_channel *ch)
{
}
#endif
int test_channel_suspend_resume_serviceable_chs(struct unit_module *m,
struct gk20a *g, void *vargs)
@@ -1551,9 +1575,14 @@ int test_channel_suspend_resume_serviceable_chs(struct unit_module *m,
bool err;
u32 orig_ch_tsgid;
u32 branches = 0U;
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
u32 prune = F_CHANNEL_SUSPEND_RESUME_UNSERVICEABLE_CH |
F_CHANNEL_SUSPEND_RESUME_INVALID_TSGID |
F_CHANNEL_SUSPEND_RESUME_CH_WRK_CMPL_CNCL_SYNC;
#else
u32 prune = F_CHANNEL_SUSPEND_RESUME_UNSERVICEABLE_CH |
F_CHANNEL_SUSPEND_RESUME_INVALID_TSGID;
#endif
int ret = UNIT_FAIL;
tsg = nvgpu_tsg_open(g, getpid());
@@ -1588,10 +1617,11 @@ int test_channel_suspend_resume_serviceable_chs(struct unit_module *m,
} else {
ch->unserviceable = false;
}
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
g->os_channel.work_completion_cancel_sync = branches &
F_CHANNEL_SUSPEND_RESUME_CH_WRK_CMPL_CNCL_SYNC ?
stub_channel_work_completion_cancel_sync : NULL;
#endif
ch->tsgid = branches & F_CHANNEL_SUSPEND_RESUME_INVALID_TSGID ?
NVGPU_INVALID_TSG_ID : orig_ch_tsgid;


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -185,6 +185,7 @@ int test_get_litter_value(struct unit_module *m,
VOLTA_DMA_COPY_A);
assert(gv11b_get_litter_value(g, GPU_LIT_GPC_PRIV_STRIDE) ==
proj_gpc_priv_stride_v());
#ifdef CONFIG_NVGPU_DEBUGGER
assert(gv11b_get_litter_value(g, GPU_LIT_PERFMON_PMMGPCTPCA_DOMAIN_START) ==
2);
assert(gv11b_get_litter_value(g, GPU_LIT_PERFMON_PMMGPCTPCB_DOMAIN_START) ==
@@ -199,6 +200,7 @@ int test_get_litter_value(struct unit_module *m,
3);
assert(gv11b_get_litter_value(g, GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_COUNT) ==
2);
#endif
if (!EXPECT_BUG(gv11b_get_litter_value(g, U32_MAX))) {
unit_err(m, "%s: failed to detect INVALID value\n",
@@ -291,7 +293,9 @@ int test_get_put(struct unit_module *m,
nvgpu_ref_init(&g->refcount);
/* to cover the cases where these are set */
#ifdef CONFIG_NVGPU_NON_FUSA
g->remove_support = no_return;
#endif
g->gfree = no_return;
g->ops.ecc.ecc_remove_support = no_return;
g->ops.ltc.ltc_remove_support = no_return;
@@ -396,8 +400,12 @@ static void set_poweron_funcs_success(struct gk20a *g)
/* these are the simple case of just taking a g param */
setup_simple_init_func_success(&g->ops.ecc.ecc_init_support, i++);
setup_simple_init_func_success(&g->ops.mm.pd_cache_init, i++);
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
setup_simple_init_func_success(&g->ops.clk.init_clk_support, i++);
#endif
#ifdef CONFIG_NVGPU_NVLINK
setup_simple_init_func_success(&g->ops.nvlink.init, i++);
#endif
setup_simple_init_func_success(&g->ops.fifo.reset_enable_hw, i++);
setup_simple_init_func_success(&g->ops.ltc.init_ltc_support, i++);
setup_simple_init_func_success(&g->ops.mm.init_mm_support, i++);
@@ -497,7 +505,9 @@ int test_poweron_branches(struct unit_module *m, struct gk20a *g, void *args)
set_poweron_funcs_success(g);
/* hit all the NULL pointer checks */
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
g->ops.clk.init_clk_support = NULL;
#endif
g->ops.therm.elcg_init_idle_filters = NULL;
g->ops.ecc.ecc_init_support = NULL;
g->ops.channel.resume_all_serviceable_ch = NULL;
@@ -560,7 +570,9 @@ int test_poweroff(struct unit_module *m, struct gk20a *g, void *args)
setup_simple_init_func_success(&g->ops.fifo.fifo_suspend, i++);
simple_init_func_ptrs_count = i;
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
g->ops.clk.suspend_clk_support = no_return;
#endif
g->ops.mc.intr_mask = no_return;
g->ops.falcon.falcon_sw_free = no_return_u32_param;
@@ -583,7 +595,9 @@ int test_poweroff(struct unit_module *m, struct gk20a *g, void *args)
/* Cover branches for NULL ptr checks */
g->ops.mc.intr_mask = NULL;
g->ops.channel.suspend_all_serviceable_ch = NULL;
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
g->ops.clk.suspend_clk_support = NULL;
#endif
err = nvgpu_prepare_poweroff(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_prepare_poweroff returned fail\n");


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -39,11 +39,7 @@ struct unit_module;
*
* Test Type: Feature
*
-<<<<<<< HEAD
* Targets: gops_mm.gops_mm_gmmu.get_default_big_page_size,
-=======
* Targets: gops_mm_gmmu.get_default_big_page_size,
->>>>>>> 2769ccf4e... gpu: nvgpu: userspace: update "Targets" field for mm
* nvgpu_gmmu_default_big_page_size
*
* Input: None


@@ -668,7 +668,9 @@ int test_handle_mmu_fault_common(struct unit_module *m,
g->ops.channel.free_inst = nvgpu_channel_free_inst;
g->ops.tsg.disable = nvgpu_tsg_disable;
g->ops.fifo.preempt_tsg = nvgpu_fifo_preempt_tsg;
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
g->aggressive_sync_destroy_thresh = 0U;
#endif
g->fifo.g = g;


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -258,32 +258,36 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 11,
-ENOMEM, 11);
#ifdef CONFIG_NVGPU_NON_FUSA
/* Disable for now. */
/* Making nvgpu_init_cde_vm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 25,
-ENOMEM, 12);
//errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 80,
// -ENOMEM, 12);
#endif
/* Making nvgpu_init_ce_vm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 33,
-ENOMEM, 12);
/* Making nvgpu_init_mmu_debug fail on wr_mem DMA alloc */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 13,
-ENOMEM, 13);
/* Making nvgpu_init_mmu_debug fail on wr_mem DMA alloc */
/* Making nvgpu_init_mmu_debug fail on rd_mem DMA alloc */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 14,
-ENOMEM, 14);
/* Making nvgpu_init_mmu_debug fail on rd_mem DMA alloc */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 15,
-ENOMEM, 15);
/* Making g->ops.mm.mmu_fault.setup_sw fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 16);
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 0,
ARBITRARY_ERROR, 15);
/* Making g->ops.fb.fb_ecc_init fail */
g->ops.fb.ecc.init = int_empty_hal;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 2,
ARBITRARY_ERROR, 17);
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 16);
g->ops.fb.ecc.init = NULL;
#ifdef CONFIG_NVGPU_NON_FUSA
/*
* Extra cases for branch coverage: change support flags to test
* other branches
@@ -300,6 +304,7 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, true);
nvgpu_set_errata(g, NVGPU_ERRATA_MM_FORCE_128K_PMU_VM, true);
g->has_cde = true;
#endif
/*
* Extra cases for branch coverage: remove some HALs to test branches
@@ -452,7 +457,9 @@ int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args)
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
p->mm_is_iommuable = true;
#ifdef CONFIG_NVGPU_NON_FUSA
g->has_cde = true;
#endif
g->ops.mc.intr_stall_unit_config = mc_gp10b_intr_stall_unit_config;
g->ops.mc.intr_nonstall_unit_config =
@@ -594,6 +601,7 @@ int test_mm_remove_mm_support(struct unit_module *m, struct gk20a *g,
/* Reset this to NULL to avoid trying to destroy the mutex again */
g->ops.mm.mmu_fault.info_mem_destroy = NULL;
#ifdef CONFIG_NVGPU_NON_FUSA
/* Extra cases for branch coverage */
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, false);
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, false);
@@ -604,6 +612,7 @@ int test_mm_remove_mm_support(struct unit_module *m, struct gk20a *g,
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, true);
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, true);
g->has_cde = true;
#endif
return UNIT_SUCCESS;
}