gpu: nvgpu: rename struct fifo_gk20a

Rename
struct fifo_gk20a -> nvgpu_fifo

JIRA NVGPU-2012

Change-Id: Ifb5854592c88894ecd830da092ada27c7f05380d
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2109625
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Seema Khowala authored 2019-05-01 13:50:12 -07:00
committed by mobile promotions
parent fa59958e8a
commit cfb4ff0bfb
46 changed files with 186 additions and 186 deletions


@@ -56,7 +56,7 @@
#include <nvgpu/fence.h>
#include <nvgpu/preempt.h>
-static void free_channel(struct fifo_gk20a *f, struct channel_gk20a *ch);
+static void free_channel(struct nvgpu_fifo *f, struct channel_gk20a *ch);
static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch);
static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *ch);
@@ -71,7 +71,7 @@ static struct channel_gk20a_job *channel_gk20a_joblist_peek(
struct channel_gk20a *c);
/* allocate GPU channel */
-static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
+static struct channel_gk20a *allocate_channel(struct nvgpu_fifo *f)
{
struct channel_gk20a *ch = NULL;
struct gk20a *g = f->g;
@@ -96,7 +96,7 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
return ch;
}
-static void free_channel(struct fifo_gk20a *f,
+static void free_channel(struct nvgpu_fifo *f,
struct channel_gk20a *ch)
{
struct gk20a *g = f->g;
@@ -275,7 +275,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
{
struct gk20a *g = ch->g;
struct tsg_gk20a *tsg;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct vm_gk20a *ch_vm = ch->vm;
unsigned long timeout = nvgpu_get_poll_timeout(g);
struct dbg_session_gk20a *dbg_s;
@@ -651,7 +651,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
bool is_privileged_channel,
pid_t pid, pid_t tid)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct channel_gk20a *ch;
/* compatibility with existing code */
@@ -1580,7 +1580,7 @@ static void nvgpu_channel_wdt_rewind(struct channel_gk20a *ch)
*/
void nvgpu_channel_wdt_restart_all_channels(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 chid;
for (chid = 0; chid < f->num_channels; chid++) {
@@ -2090,7 +2090,7 @@ void gk20a_channel_update(struct channel_gk20a *c)
*/
void gk20a_channel_deterministic_idle(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 chid;
/* Grab exclusive access to the hw to block new submits */
@@ -2131,7 +2131,7 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
*/
void gk20a_channel_deterministic_unidle(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 chid;
for (chid = 0; chid < f->num_channels; chid++) {
@@ -2175,7 +2175,7 @@ static void nvgpu_channel_destroy(struct gk20a *g, struct channel_gk20a *c)
void nvgpu_channel_cleanup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 chid;
/*
@@ -2280,7 +2280,7 @@ fail_1:
int nvgpu_channel_setup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 chid, i;
int err;
@@ -2330,7 +2330,7 @@ clean_up_mutex:
* maps to *all* gk20a channels */
int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 chid;
bool channels_in_use = false;
u32 active_runlist_ids = 0;
@@ -2390,7 +2390,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
void nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 chid;
bool channels_in_use = false;
u32 active_runlist_ids = 0;
@@ -2424,7 +2424,7 @@ void nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 chid;
nvgpu_log_fn(g, " ");
@@ -2473,7 +2473,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
struct channel_gk20a *nvgpu_channel_refch_from_inst_ptr(struct gk20a *g,
u64 inst_ptr)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
unsigned int ci;
if (unlikely(f->channel == NULL)) {
@@ -2525,7 +2525,7 @@ void nvgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
void nvgpu_channel_debug_dump_all(struct gk20a *g,
struct gk20a_debug_output *o)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 chid;
struct nvgpu_channel_dump_info **infos;
@@ -2605,7 +2605,7 @@ int nvgpu_channel_deferred_reset_engines(struct gk20a *g,
unsigned long engine_id, engines = 0U;
struct tsg_gk20a *tsg;
bool deferred_reset_pending;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
int err = 0;
nvgpu_mutex_acquire(&g->dbg_sessions_lock);


@@ -68,7 +68,7 @@ enum nvgpu_fifo_engine nvgpu_engine_enum_from_type(struct gk20a *g,
struct nvgpu_engine_info *nvgpu_engine_get_active_eng_info(
struct gk20a *g, u32 engine_id)
{
-struct fifo_gk20a *f = NULL;
+struct nvgpu_fifo *f = NULL;
u32 engine_id_idx;
struct nvgpu_engine_info *info = NULL;
@@ -101,7 +101,7 @@ u32 nvgpu_engine_get_ids(struct gk20a *g,
u32 *engine_ids, u32 engine_id_sz,
enum nvgpu_fifo_engine engine_enum)
{
-struct fifo_gk20a *f = NULL;
+struct nvgpu_fifo *f = NULL;
u32 instance_cnt = 0;
u32 engine_id_idx;
u32 active_engine_id = 0;
@@ -133,7 +133,7 @@ u32 nvgpu_engine_get_ids(struct gk20a *g,
bool nvgpu_engine_check_valid_id(struct gk20a *g, u32 engine_id)
{
-struct fifo_gk20a *f = NULL;
+struct nvgpu_fifo *f = NULL;
u32 engine_id_idx;
bool valid = false;
@@ -218,7 +218,7 @@ u32 nvgpu_engine_get_all_ce_reset_mask(struct gk20a *g)
{
u32 reset_mask = 0;
enum nvgpu_fifo_engine engine_enum;
-struct fifo_gk20a *f = NULL;
+struct nvgpu_fifo *f = NULL;
u32 engine_id_idx;
struct nvgpu_engine_info *engine_info;
u32 active_engine_id = 0;
@@ -453,7 +453,7 @@ int nvgpu_engine_wait_for_idle(struct gk20a *g)
int nvgpu_engine_setup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
int err = 0;
size_t size;
@@ -495,7 +495,7 @@ clean_up_engine_info:
void nvgpu_engine_cleanup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_kfree(g, f->engine_info);
f->engine_info = NULL;
@@ -575,7 +575,7 @@ u32 nvgpu_engine_get_fast_ce_runlist_id(struct gk20a *g)
{
u32 ce_runlist_id = nvgpu_engine_get_gr_runlist_id(g);
enum nvgpu_fifo_engine engine_enum;
-struct fifo_gk20a *f = NULL;
+struct nvgpu_fifo *f = NULL;
u32 engine_id_idx;
struct nvgpu_engine_info *engine_info;
u32 active_engine_id = 0U;
@@ -634,7 +634,7 @@ end:
bool nvgpu_engine_is_valid_runlist_id(struct gk20a *g, u32 runlist_id)
{
-struct fifo_gk20a *f = NULL;
+struct nvgpu_fifo *f = NULL;
u32 engine_id_idx;
u32 active_engine_id;
struct nvgpu_engine_info *engine_info;
@@ -683,7 +683,7 @@ u32 nvgpu_engine_mmu_fault_id_to_engine_id(struct gk20a *g, u32 fault_id)
u32 engine_id;
u32 active_engine_id;
struct nvgpu_engine_info *engine_info;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
active_engine_id = f->active_engines_list[engine_id];
@@ -736,7 +736,7 @@ u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg)
return engines;
}
-int nvgpu_engine_init_info(struct fifo_gk20a *f)
+int nvgpu_engine_init_info(struct nvgpu_fifo *f)
{
struct gk20a *g = f->g;
int ret = 0;
@@ -880,7 +880,7 @@ u32 nvgpu_engine_find_busy_doing_ctxsw(struct gk20a *g,
u32 nvgpu_engine_get_runlist_busy_engines(struct gk20a *g, u32 runlist_id)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 i, eng_bitmask = 0U;
struct nvgpu_engine_status_info engine_status;
@@ -945,7 +945,7 @@ bool nvgpu_engine_should_defer_reset(struct gk20a *g, u32 engine_id,
u32 nvgpu_engine_mmu_fault_id_to_veid(struct gk20a *g, u32 mmu_fault_id,
u32 gr_eng_fault_id)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 num_subctx;
u32 veid = INVAL_ID;
@@ -965,7 +965,7 @@ u32 nvgpu_engine_mmu_fault_id_to_eng_id_and_veid(struct gk20a *g,
u32 engine_id;
u32 act_eng_id;
struct nvgpu_engine_info *engine_info;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
for (engine_id = 0U; engine_id < f->num_engines; engine_id++) {


@@ -37,7 +37,7 @@
void nvgpu_fifo_cleanup_sw_common(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_log_fn(g, " ");
@@ -60,14 +60,14 @@ void nvgpu_fifo_cleanup_sw(struct gk20a *g)
nvgpu_fifo_cleanup_sw_common(g);
}
-static void nvgpu_fifo_remove_support(struct fifo_gk20a *f)
+static void nvgpu_fifo_remove_support(struct nvgpu_fifo *f)
{
struct gk20a *g = f->g;
g->ops.fifo.cleanup_sw(g);
}
-static int nvgpu_fifo_init_locks(struct gk20a *g, struct fifo_gk20a *f)
+static int nvgpu_fifo_init_locks(struct gk20a *g, struct nvgpu_fifo *f)
{
int err;
@@ -101,7 +101,7 @@ destroy_0:
int nvgpu_fifo_setup_sw_common(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
int err = 0;
nvgpu_log_fn(g, " ");
@@ -180,7 +180,7 @@ clean_up:
int nvgpu_fifo_setup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
int err = 0;
nvgpu_log_fn(g, " ");


@@ -26,7 +26,7 @@
bool nvgpu_pbdma_find_for_runlist(struct gk20a *g,
u32 runlist_id, u32 *pbdma_id)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
bool found_pbdma_for_runlist = false;
u32 runlist_bit;
u32 id;
@@ -46,7 +46,7 @@ bool nvgpu_pbdma_find_for_runlist(struct gk20a *g,
static void nvgpu_pbdma_init_intr_descs(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
if (g->ops.pbdma.device_fatal_0_intr_descs != NULL) {
f->intr.pbdma.device_fatal_0 =
@@ -65,7 +65,7 @@ static void nvgpu_pbdma_init_intr_descs(struct gk20a *g)
int nvgpu_pbdma_setup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
f->num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
@@ -83,7 +83,7 @@ int nvgpu_pbdma_setup_sw(struct gk20a *g)
void nvgpu_pbdma_cleanup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_kfree(g, f->pbdma_map);
f->pbdma_map = NULL;


@@ -51,7 +51,7 @@ int nvgpu_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
void nvgpu_preempt_poll_tsg_on_pbdma(struct gk20a *g,
struct tsg_gk20a *tsg)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 runlist_id;
unsigned long runlist_served_pbdmas;
unsigned long pbdma_id_bit;


@@ -32,7 +32,7 @@
void nvgpu_runlist_lock_active_runlists(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
u32 i;
@@ -45,7 +45,7 @@ void nvgpu_runlist_lock_active_runlists(struct gk20a *g)
void nvgpu_runlist_unlock_active_runlists(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
u32 i;
@@ -62,7 +62,7 @@ static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
u32 *entries_left,
struct tsg_gk20a *tsg)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 runlist_entry_words = f->runlist_entry_size / (u32)sizeof(u32);
struct channel_gk20a *ch;
u32 count = 0;
@@ -123,7 +123,7 @@ static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
}
-static u32 nvgpu_runlist_append_prio(struct fifo_gk20a *f,
+static u32 nvgpu_runlist_append_prio(struct nvgpu_fifo *f,
struct nvgpu_runlist_info *runlist,
u32 **runlist_entry,
u32 *entries_left,
@@ -151,7 +151,7 @@ static u32 nvgpu_runlist_append_prio(struct fifo_gk20a *f,
return count;
}
-static u32 nvgpu_runlist_append_hi(struct fifo_gk20a *f,
+static u32 nvgpu_runlist_append_hi(struct nvgpu_fifo *f,
struct nvgpu_runlist_info *runlist,
u32 **runlist_entry,
u32 *entries_left)
@@ -167,7 +167,7 @@ static u32 nvgpu_runlist_append_hi(struct fifo_gk20a *f,
NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH);
}
-static u32 nvgpu_runlist_append_med(struct fifo_gk20a *f,
+static u32 nvgpu_runlist_append_med(struct nvgpu_fifo *f,
struct nvgpu_runlist_info *runlist,
u32 **runlist_entry,
u32 *entries_left)
@@ -206,7 +206,7 @@ static u32 nvgpu_runlist_append_med(struct fifo_gk20a *f,
return count;
}
-static u32 nvgpu_runlist_append_low(struct fifo_gk20a *f,
+static u32 nvgpu_runlist_append_low(struct nvgpu_fifo *f,
struct nvgpu_runlist_info *runlist,
u32 **runlist_entry,
u32 *entries_left)
@@ -266,7 +266,7 @@ static u32 nvgpu_runlist_append_low(struct fifo_gk20a *f,
return count;
}
-static u32 nvgpu_runlist_append_flat(struct fifo_gk20a *f,
+static u32 nvgpu_runlist_append_flat(struct nvgpu_fifo *f,
struct nvgpu_runlist_info *runlist,
u32 **runlist_entry,
u32 *entries_left)
@@ -291,7 +291,7 @@ static u32 nvgpu_runlist_append_flat(struct fifo_gk20a *f,
return count;
}
-u32 nvgpu_runlist_construct_locked(struct fifo_gk20a *f,
+u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
struct nvgpu_runlist_info *runlist,
u32 buf_id,
u32 max_entries)
@@ -317,7 +317,7 @@ u32 nvgpu_runlist_construct_locked(struct fifo_gk20a *f,
static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, bool add)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist = NULL;
struct tsg_gk20a *tsg = NULL;
@@ -363,7 +363,7 @@ static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
static int gk20a_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
u32 buf_id, bool add_entries)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist = NULL;
runlist = f->runlist_info[runlist_id];
@@ -393,7 +393,7 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
bool wait_for_finish)
{
int ret = 0;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist = NULL;
u32 buf_id;
bool add_entries;
@@ -498,7 +498,7 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish)
{
struct nvgpu_runlist_info *runlist = NULL;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = 0;
int ret = 0;
@@ -622,7 +622,7 @@ void nvgpu_fifo_runlist_set_state(struct gk20a *g, u32 runlists_mask,
void nvgpu_runlist_cleanup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 i, j;
struct nvgpu_runlist_info *runlist;
@@ -656,7 +656,7 @@ void nvgpu_runlist_cleanup_sw(struct gk20a *g)
f->max_runlists = 0;
}
-static void nvgpu_init_runlist_enginfo(struct gk20a *g, struct fifo_gk20a *f)
+static void nvgpu_init_runlist_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
{
struct nvgpu_runlist_info *runlist;
struct nvgpu_engine_info *engine_info;
@@ -697,7 +697,7 @@ static void nvgpu_init_runlist_enginfo(struct gk20a *g, struct fifo_gk20a *f)
int nvgpu_runlist_setup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
unsigned int runlist_id;
u32 i, j;
@@ -806,7 +806,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
unsigned int id_type, u32 act_eng_bitmask, u32 pbdma_bitmask)
{
u32 i, runlists_mask = 0;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
/* engine and/or pbdma ids are known */
@@ -852,7 +852,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
void nvgpu_runlist_unlock_runlists(struct gk20a *g, u32 runlists_mask)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
u32 i;


@@ -58,7 +58,7 @@ struct tsg_gk20a *nvgpu_tsg_check_and_get_from_id(struct gk20a *g, u32 tsgid)
struct tsg_gk20a *nvgpu_tsg_get_from_id(struct gk20a *g, u32 tsgid)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
return &f->tsg[tsgid];
}
@@ -66,7 +66,7 @@ struct tsg_gk20a *nvgpu_tsg_get_from_id(struct gk20a *g, u32 tsgid)
static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
unsigned int i;
@@ -306,7 +306,7 @@ int nvgpu_tsg_force_reset_ch(struct channel_gk20a *ch,
void nvgpu_tsg_cleanup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 tsgid;
for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
@@ -344,7 +344,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
int nvgpu_tsg_setup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 tsgid, i;
int err;
@@ -593,14 +593,14 @@ void nvgpu_tsg_disable_sched(struct gk20a *g, struct tsg_gk20a *tsg)
RUNLIST_DISABLED);
}
-static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
+static void release_used_tsg(struct nvgpu_fifo *f, struct tsg_gk20a *tsg)
{
nvgpu_mutex_acquire(&f->tsg_inuse_mutex);
f->tsg[tsg->tsgid].in_use = false;
nvgpu_mutex_release(&f->tsg_inuse_mutex);
}
-static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct fifo_gk20a *f)
+static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct nvgpu_fifo *f)
{
struct tsg_gk20a *tsg = NULL;
unsigned int tsgid;
@@ -762,7 +762,7 @@ struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
if (tsgid != NVGPU_INVALID_TSG_ID) {
struct gk20a *g = ch->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
tsg = &f->tsg[tsgid];
} else {


@@ -34,7 +34,7 @@
#ifdef NVGPU_USERD
int nvgpu_userd_init_slabs(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
int err;
err = nvgpu_mutex_init(&f->userd_mutex);
@@ -65,7 +65,7 @@ clean_up:
void nvgpu_userd_free_slabs(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 slab;
for (slab = 0; slab < f->num_userd_slabs; slab++) {
@@ -81,7 +81,7 @@ void nvgpu_userd_free_slabs(struct gk20a *g)
int nvgpu_userd_init_channel(struct gk20a *g, struct channel_gk20a *c)
{
#ifdef NVGPU_USERD
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_mem *mem;
u32 slab = c->chid / f->num_channels_per_slab;
int err = 0;
@@ -129,7 +129,7 @@ done:
int nvgpu_userd_setup_sw(struct gk20a *g)
{
#ifdef NVGPU_USERD
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
int err;
u32 size, num_pages;
@@ -164,7 +164,7 @@ clean_up:
void nvgpu_userd_cleanup_sw(struct gk20a *g)
{
#ifdef NVGPU_USERD
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
if (f->userd_gpu_va != 0ULL) {
(void) nvgpu_vm_area_free(g->mm.bar1.vm, f->userd_gpu_va);


@@ -235,7 +235,7 @@ static void gr_intr_report_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
struct channel_gk20a *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
u32 curr_ctx, u32 *curr_tsgid)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_gr_intr *intr = g->gr->intr;
u32 chid;
u32 tsgid = NVGPU_INVALID_TSG_ID;


@@ -34,7 +34,7 @@ static void nvgpu_cg_set_mode(struct gk20a *g, int cgmode, int mode_config)
u32 engine_idx;
u32 active_engine_id = 0;
struct nvgpu_engine_info *engine_info = NULL;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_log_fn(g, " ");


@@ -71,7 +71,7 @@ void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
RC_TYPE_CTXSW_TIMEOUT);
}
-void nvgpu_rc_pbdma_fault(struct gk20a *g, struct fifo_gk20a *f,
+void nvgpu_rc_pbdma_fault(struct gk20a *g, struct nvgpu_fifo *f,
u32 pbdma_id, u32 error_notifier)
{
u32 id;


@@ -160,7 +160,7 @@ void vgpu_channel_disable(struct channel_gk20a *ch)
WARN_ON(err || msg.ret);
}
-int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
+int vgpu_fifo_init_engine_info(struct nvgpu_fifo *f)
{
struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g);
struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
@@ -210,7 +210,7 @@ void vgpu_fifo_cleanup_sw(struct gk20a *g)
int vgpu_fifo_setup_sw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
int err = 0;
@@ -248,7 +248,7 @@ clean_up:
int vgpu_init_fifo_setup_hw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 v, v1 = 0x33, v2 = 0x55;
struct nvgpu_mem *mem = &f->userd_slabs[0];
u32 bar1_vaddr;


@@ -27,7 +27,7 @@
struct gk20a;
struct channel_gk20a;
-struct fifo_gk20a;
+struct nvgpu_fifo;
struct tsg_gk20a;
struct tegra_vgpu_fifo_intr_info;
struct tegra_vgpu_channel_event_info;
@@ -43,7 +43,7 @@ void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch);
void vgpu_channel_enable(struct channel_gk20a *ch);
void vgpu_channel_disable(struct channel_gk20a *ch);
u32 vgpu_channel_count(struct gk20a *g);
-int vgpu_fifo_init_engine_info(struct fifo_gk20a *f);
+int vgpu_fifo_init_engine_info(struct nvgpu_fifo *f);
int vgpu_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch);
int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice);


@@ -76,7 +76,7 @@ done:
static bool vgpu_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, bool add)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
runlist = f->runlist_info[runlist_id];
@@ -101,7 +101,7 @@ static bool vgpu_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
static void vgpu_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
bool add_entries)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
runlist = f->runlist_info[runlist_id];
@@ -131,7 +131,7 @@ static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, bool add,
bool wait_for_finish)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
bool add_entries;
@@ -168,7 +168,7 @@ static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish)
{
struct nvgpu_runlist_info *runlist = NULL;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 ret = 0;
nvgpu_log_fn(g, " ");


@@ -32,7 +32,7 @@
int vgpu_userd_setup_sw(struct gk20a *g)
{
#ifdef NVGPU_USERD
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
f->userd_entry_size = g->ops.userd.entry_size(g);


@@ -150,7 +150,7 @@ int vgpu_gv11b_fifo_get_sync_ro_map(struct vm_gk20a *vm,
int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
f->max_subctx_count = priv->constants.max_subctx_count;


@@ -34,7 +34,7 @@
struct channel_gk20a;
struct tsg_gk20a;
-struct fifo_gk20a {
+struct nvgpu_fifo {
struct gk20a *g;
unsigned int num_channels;
unsigned int runlist_entry_size;
@@ -86,7 +86,7 @@ struct fifo_gk20a {
struct tsg_gk20a *tsg;
struct nvgpu_mutex tsg_inuse_mutex;
-void (*remove_support)(struct fifo_gk20a *f);
+void (*remove_support)(struct nvgpu_fifo *f);
bool sw_ready;
struct {
/* share info between isrs and non-isr code */


@@ -59,7 +59,7 @@ bool gk20a_fifo_handle_ctxsw_timeout(struct gk20a *g)
bool recover = false;
struct channel_gk20a *ch = NULL;
struct tsg_gk20a *tsg = NULL;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 ms = 0;
bool debug_dump = false;


@@ -35,7 +35,7 @@ bool gm20b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
return (engine_subid == fifo_intr_mmu_fault_info_engine_subid_gpc_v());
}
-int gm20b_engine_init_ce_info(struct fifo_gk20a *f)
+int gm20b_engine_init_ce_info(struct nvgpu_fifo *f)
{
struct gk20a *g = f->g;
int ret = 0;


@@ -26,9 +26,9 @@
#include <nvgpu/types.h>
struct gk20a;
-struct fifo_gk20a;
+struct nvgpu_fifo;
bool gm20b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid);
-int gm20b_engine_init_ce_info(struct fifo_gk20a *f);
+int gm20b_engine_init_ce_info(struct nvgpu_fifo *f);
#endif /* NVGPU_ENGINE_GM20B_H */


@@ -30,7 +30,7 @@
#include "engines_gp10b.h"
-int gp10b_engine_init_ce_info(struct fifo_gk20a *f)
+int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
{
struct gk20a *g = f->g;
int ret = 0;


@@ -25,8 +25,8 @@
#include <nvgpu/types.h>
-struct fifo_gk20a;
+struct nvgpu_fifo;
-int gp10b_engine_init_ce_info(struct fifo_gk20a *f);
+int gp10b_engine_init_ce_info(struct nvgpu_fifo *f);
#endif /* NVGPU_ENGINE_GP10B_H */


@@ -64,7 +64,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
int gk20a_init_fifo_setup_hw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u64 shifted_addr;
nvgpu_log_fn(g, " ");


@@ -73,7 +73,7 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
int gv11b_init_fifo_setup_hw(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
f->max_subctx_count = g->ops.gr.init.get_max_subctx_count();


@@ -206,7 +206,7 @@ void gk20a_fifo_intr_handle_runlist_event(struct gk20a *g)
u32 gk20a_fifo_pbdma_isr(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 pbdma_id;
u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
u32 pbdma_pending_bitmask = nvgpu_readl(g, fifo_intr_pbdma_id_r());


@@ -241,7 +241,7 @@ bool gk20a_fifo_handle_mmu_fault_locked(
bool debug_dump = true;
struct nvgpu_engine_status_info engine_status;
bool deferred_reset_pending = false;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_log_fn(g, " ");


@@ -139,7 +139,7 @@ void gm20b_pbdma_intr_enable(struct gk20a *g, bool enable)
bool gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
u32 pbdma_intr_0, u32 *error_notifier)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
bool recover = false;
u32 i;
unsigned long pbdma_intr_err;


@@ -83,7 +83,7 @@ static int gv11b_fifo_preempt_locked(struct gk20a *g, u32 id,
*/
void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = 0;
@@ -357,7 +357,7 @@ static int gv11b_fifo_preempt_poll_eng(struct gk20a *g, u32 id,
int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
unsigned int id_type)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
unsigned long runlist_served_pbdmas;
unsigned long runlist_served_engines;
unsigned long bit;
@@ -416,7 +416,7 @@ int gv11b_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
int ret = 0;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = 0;


@@ -43,7 +43,7 @@ u64 gv11b_usermode_bus_base(struct gk20a *g)
u32 gv11b_usermode_doorbell_token(struct channel_gk20a *ch)
{
struct gk20a *g = ch->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 hw_chid = f->channel_base + ch->chid;
return usermode_notify_channel_pending_id_f(hw_chid);


@@ -56,7 +56,7 @@ void tu104_usermode_setup_hw(struct gk20a *g)
u32 tu104_usermode_doorbell_token(struct channel_gk20a *ch)
{
struct gk20a *g = ch->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u32 hw_chid = f->channel_base + ch->chid;
return ctrl_doorbell_vector_f(hw_chid) |


@@ -46,7 +46,7 @@ void gk20a_mm_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
u64 gk20a_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
u64 gpu_va = f->userd_gpu_va + offset;
return nvgpu_gmmu_map_fixed(g->mm.bar1.vm, mem, gpu_va,


@@ -47,7 +47,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
unsigned int rc_type,
u32 runlists_mask)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct tsg_gk20a *tsg = NULL;
unsigned long tsgid;
struct nvgpu_runlist_info *runlist = NULL;
@@ -139,7 +139,7 @@ void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,
struct nvgpu_runlist_info *runlist = NULL;
u32 engine_id;
u32 client_type = ~U32(0U);
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
bool deferred_reset_pending = false;
nvgpu_log_info(g, "acquire engines_reset_mutex");


@@ -83,7 +83,7 @@ int gm20b_elcg_init_idle_filters(struct gk20a *g)
u32 gate_ctrl, idle_filter;
u32 engine_id;
u32 active_engine_id = 0;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_log_fn(g, " ");


@@ -68,7 +68,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g)
u32 gate_ctrl, idle_filter;
u32 engine_id;
u32 active_engine_id = 0;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_log_fn(g, " ");


@@ -1,7 +1,7 @@
/*
* GP10B Therm
*
-* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -37,17 +37,17 @@ int gp10b_init_therm_setup_hw(struct gk20a *g)
nvgpu_log_fn(g, " ");
/* program NV_THERM registers */
-gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
+nvgpu_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
therm_use_a_ext_therm_1_enable_f() |
therm_use_a_ext_therm_2_enable_f());
-gk20a_writel(g, therm_evt_ext_therm_0_r(),
+nvgpu_writel(g, therm_evt_ext_therm_0_r(),
therm_evt_ext_therm_0_slow_factor_f(0x2));
-gk20a_writel(g, therm_evt_ext_therm_1_r(),
+nvgpu_writel(g, therm_evt_ext_therm_1_r(),
therm_evt_ext_therm_1_slow_factor_f(0x6));
-gk20a_writel(g, therm_evt_ext_therm_2_r(),
+nvgpu_writel(g, therm_evt_ext_therm_2_r(),
therm_evt_ext_therm_2_slow_factor_f(0xe));
-gk20a_writel(g, therm_grad_stepping_table_r(0),
+nvgpu_writel(g, therm_grad_stepping_table_r(0),
therm_grad_stepping_table_slowdown_factor0_f(
therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f()) |
therm_grad_stepping_table_slowdown_factor1_f(
@@ -59,7 +59,7 @@ int gp10b_init_therm_setup_hw(struct gk20a *g)
therm_grad_stepping_table_slowdown_factor4_f(
therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()));
-gk20a_writel(g, therm_grad_stepping_table_r(1),
+nvgpu_writel(g, therm_grad_stepping_table_r(1),
therm_grad_stepping_table_slowdown_factor0_f(
therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
therm_grad_stepping_table_slowdown_factor1_f(
@@ -71,21 +71,21 @@ int gp10b_init_therm_setup_hw(struct gk20a *g)
therm_grad_stepping_table_slowdown_factor4_f(
therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()));
-v = gk20a_readl(g, therm_clk_timing_r(0));
+v = nvgpu_readl(g, therm_clk_timing_r(0));
v |= therm_clk_timing_grad_slowdown_enabled_f();
-gk20a_writel(g, therm_clk_timing_r(0), v);
+nvgpu_writel(g, therm_clk_timing_r(0), v);
-v = gk20a_readl(g, therm_config2_r());
+v = nvgpu_readl(g, therm_config2_r());
v |= therm_config2_grad_enable_f(1);
v |= therm_config2_slowdown_factor_extended_f(1);
-gk20a_writel(g, therm_config2_r(), v);
+nvgpu_writel(g, therm_config2_r(), v);
-gk20a_writel(g, therm_grad_stepping1_r(),
+nvgpu_writel(g, therm_grad_stepping1_r(),
therm_grad_stepping1_pdiv_duration_f(32));
-v = gk20a_readl(g, therm_grad_stepping0_r());
+v = nvgpu_readl(g, therm_grad_stepping0_r());
v |= therm_grad_stepping0_feature_enable_f();
-gk20a_writel(g, therm_grad_stepping0_r(), v);
+nvgpu_writel(g, therm_grad_stepping0_r(), v);
return 0;
}
@@ -95,13 +95,13 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g)
u32 gate_ctrl, idle_filter;
u32 engine_id;
u32 active_engine_id = 0;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_log_fn(g, " ");
for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
active_engine_id = f->active_engines_list[engine_id];
-gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(active_engine_id));
+gate_ctrl = nvgpu_readl(g, therm_gate_ctrl_r(active_engine_id));
if (nvgpu_platform_is_simulation(g)) {
gate_ctrl = set_field(gate_ctrl,
@@ -119,17 +119,17 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g)
gate_ctrl = set_field(gate_ctrl,
therm_gate_ctrl_eng_delay_before_m(),
therm_gate_ctrl_eng_delay_before_f(4));
-gk20a_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl);
+nvgpu_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl);
}
/* default fecs_idle_filter to 0 */
-idle_filter = gk20a_readl(g, therm_fecs_idle_filter_r());
+idle_filter = nvgpu_readl(g, therm_fecs_idle_filter_r());
idle_filter &= ~therm_fecs_idle_filter_value_m();
-gk20a_writel(g, therm_fecs_idle_filter_r(), idle_filter);
+nvgpu_writel(g, therm_fecs_idle_filter_r(), idle_filter);
/* default hubmmu_idle_filter to 0 */
-idle_filter = gk20a_readl(g, therm_hubmmu_idle_filter_r());
+idle_filter = nvgpu_readl(g, therm_hubmmu_idle_filter_r());
idle_filter &= ~therm_hubmmu_idle_filter_value_m();
-gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
+nvgpu_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
nvgpu_log_fn(g, "done");
return 0;


@@ -1,7 +1,7 @@
/*
* GV11B Therm
*
-* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -37,17 +37,17 @@ int gv11b_init_therm_setup_hw(struct gk20a *g)
nvgpu_log_fn(g, " ");
/* program NV_THERM registers */
-gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
+nvgpu_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
therm_use_a_ext_therm_1_enable_f() |
therm_use_a_ext_therm_2_enable_f());
-gk20a_writel(g, therm_evt_ext_therm_0_r(),
+nvgpu_writel(g, therm_evt_ext_therm_0_r(),
therm_evt_ext_therm_0_slow_factor_f(0x2));
-gk20a_writel(g, therm_evt_ext_therm_1_r(),
+nvgpu_writel(g, therm_evt_ext_therm_1_r(),
therm_evt_ext_therm_1_slow_factor_f(0x6));
-gk20a_writel(g, therm_evt_ext_therm_2_r(),
+nvgpu_writel(g, therm_evt_ext_therm_2_r(),
therm_evt_ext_therm_2_slow_factor_f(0xe));
-gk20a_writel(g, therm_grad_stepping_table_r(0),
+nvgpu_writel(g, therm_grad_stepping_table_r(0),
therm_grad_stepping_table_slowdown_factor0_f(
therm_grad_stepping_table_slowdown_factor0_fpdiv_by1_f()) |
therm_grad_stepping_table_slowdown_factor1_f(
@@ -59,7 +59,7 @@ int gv11b_init_therm_setup_hw(struct gk20a *g)
therm_grad_stepping_table_slowdown_factor4_f(
therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()));
-gk20a_writel(g, therm_grad_stepping_table_r(1),
+nvgpu_writel(g, therm_grad_stepping_table_r(1),
therm_grad_stepping_table_slowdown_factor0_f(
therm_grad_stepping_table_slowdown_factor0_fpdiv_by16_f()) |
therm_grad_stepping_table_slowdown_factor1_f(
@@ -71,29 +71,29 @@ int gv11b_init_therm_setup_hw(struct gk20a *g)
therm_grad_stepping_table_slowdown_factor4_f(
therm_grad_stepping_table_slowdown_factor0_fpdiv_by32_f()));
-v = gk20a_readl(g, therm_clk_timing_r(0));
+v = nvgpu_readl(g, therm_clk_timing_r(0));
v |= therm_clk_timing_grad_slowdown_enabled_f();
-gk20a_writel(g, therm_clk_timing_r(0), v);
+nvgpu_writel(g, therm_clk_timing_r(0), v);
-v = gk20a_readl(g, therm_config2_r());
+v = nvgpu_readl(g, therm_config2_r());
v |= therm_config2_grad_enable_f(1);
v |= therm_config2_slowdown_factor_extended_f(1);
v = set_field(v, therm_config2_grad_step_duration_m(),
therm_config2_grad_step_duration_f(0));
-gk20a_writel(g, therm_config2_r(), v);
+nvgpu_writel(g, therm_config2_r(), v);
-gk20a_writel(g, therm_grad_stepping1_r(),
+nvgpu_writel(g, therm_grad_stepping1_r(),
therm_grad_stepping1_pdiv_duration_f(0xbf4));
-v = gk20a_readl(g, therm_grad_stepping0_r());
+v = nvgpu_readl(g, therm_grad_stepping0_r());
v |= therm_grad_stepping0_feature_enable_f();
-gk20a_writel(g, therm_grad_stepping0_r(), v);
+nvgpu_writel(g, therm_grad_stepping0_r(), v);
/* disable idle clock slowdown */
v = therm_clk_slowdown_2_idle_condition_a_select_f(0) |
therm_clk_slowdown_2_idle_condition_a_type_never_f() |
therm_clk_slowdown_2_idle_condition_b_type_never_f();
-gk20a_writel(g, therm_clk_slowdown_2_r(0), v);
+nvgpu_writel(g, therm_clk_slowdown_2_r(0), v);
return 0;
}
@@ -106,7 +106,7 @@ void gv11b_therm_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
return;
}
-gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
+gate_ctrl = nvgpu_readl(g, therm_gate_ctrl_r(engine));
switch (mode) {
case ELCG_RUN:
@@ -132,7 +132,7 @@ void gv11b_therm_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
break;
}
-gk20a_writel(g, therm_gate_ctrl_r(engine), gate_ctrl);
+nvgpu_writel(g, therm_gate_ctrl_r(engine), gate_ctrl);
}
int gv11b_elcg_init_idle_filters(struct gk20a *g)
@@ -140,7 +140,7 @@ int gv11b_elcg_init_idle_filters(struct gk20a *g)
u32 gate_ctrl, idle_filter;
u32 engine_id;
u32 active_engine_id = 0;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
if (nvgpu_platform_is_simulation(g)) {
return 0;
@@ -151,7 +151,7 @@ int gv11b_elcg_init_idle_filters(struct gk20a *g)
for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
active_engine_id = f->active_engines_list[engine_id];
-gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(active_engine_id));
+gate_ctrl = nvgpu_readl(g, therm_gate_ctrl_r(active_engine_id));
gate_ctrl = set_field(gate_ctrl,
therm_gate_ctrl_eng_idle_filt_exp_m(),
therm_gate_ctrl_eng_idle_filt_exp__prod_f());
@@ -164,20 +164,20 @@ int gv11b_elcg_init_idle_filters(struct gk20a *g)
gate_ctrl = set_field(gate_ctrl,
therm_gate_ctrl_eng_delay_after_m(),
therm_gate_ctrl_eng_delay_after__prod_f());
-gk20a_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl);
+nvgpu_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl);
}
-idle_filter = gk20a_readl(g, therm_fecs_idle_filter_r());
+idle_filter = nvgpu_readl(g, therm_fecs_idle_filter_r());
idle_filter = set_field(idle_filter,
therm_fecs_idle_filter_value_m(),
therm_fecs_idle_filter_value__prod_f());
-gk20a_writel(g, therm_fecs_idle_filter_r(), idle_filter);
+nvgpu_writel(g, therm_fecs_idle_filter_r(), idle_filter);
-idle_filter = gk20a_readl(g, therm_hubmmu_idle_filter_r());
+idle_filter = nvgpu_readl(g, therm_hubmmu_idle_filter_r());
idle_filter = set_field(idle_filter,
therm_hubmmu_idle_filter_value_m(),
therm_hubmmu_idle_filter_value__prod_f());
-gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
+nvgpu_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
return 0;
}


@@ -28,7 +28,7 @@
#define NVGPU_INVALID_ENG_ID (~U32(0U))
struct gk20a;
-struct fifo_gk20a;
+struct nvgpu_fifo;
enum nvgpu_fifo_engine {
NVGPU_ENGINE_GR = 0U,
@@ -104,7 +104,7 @@ u32 nvgpu_engine_id_to_mmu_fault_id(struct gk20a *g, u32 engine_id);
u32 nvgpu_engine_mmu_fault_id_to_engine_id(struct gk20a *g, u32 fault_id);
u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg);
-int nvgpu_engine_init_info(struct fifo_gk20a *f);
+int nvgpu_engine_init_info(struct nvgpu_fifo *f);
void nvgpu_engine_get_id_and_type(struct gk20a *g, u32 engine_id,
u32 *id, u32 *type);


@@ -48,7 +48,7 @@ struct nvgpu_runlist_info;
struct channel_gk20a;
struct tsg_gk20a;
-struct fifo_gk20a {
+struct nvgpu_fifo {
struct gk20a *g;
unsigned int num_channels;
unsigned int runlist_entry_size;
@@ -100,7 +100,7 @@ struct fifo_gk20a {
struct tsg_gk20a *tsg;
struct nvgpu_mutex tsg_inuse_mutex;
-void (*remove_support)(struct fifo_gk20a *f);
+void (*remove_support)(struct nvgpu_fifo *f);
bool sw_ready;
struct {
/* share info between isrs and non-isr code */


@@ -25,7 +25,7 @@
#define GK20A_H
struct gk20a;
-struct fifo_gk20a;
+struct nvgpu_fifo;
struct channel_gk20a;
struct nvgpu_gr;
struct nvgpu_fbp;
@@ -1101,8 +1101,8 @@ struct gpu_ops {
u32 engine_subid);
u32 (*get_mask_on_id)(struct gk20a *g,
u32 id, bool is_tsg);
-int (*init_info)(struct fifo_gk20a *f);
-int (*init_ce_info)(struct fifo_gk20a *f);
+int (*init_info)(struct nvgpu_fifo *f);
+int (*init_ce_info)(struct nvgpu_fifo *f);
} engine;
struct {
@@ -1965,7 +1965,7 @@ struct gk20a {
struct nvgpu_falcon minion_flcn;
struct nvgpu_falcon gsp_flcn;
struct clk_gk20a clk;
-struct fifo_gk20a fifo;
+struct nvgpu_fifo fifo;
struct nvgpu_nvlink_dev nvlink;
struct nvgpu_gr *gr;
struct nvgpu_fbp *fbp;


@@ -27,7 +27,7 @@
#include <nvgpu/types.h>
struct gk20a;
-struct fifo_gk20a;
+struct nvgpu_fifo;
void nvgpu_cg_init_gr_load_gating_prod(struct gk20a *g);
void nvgpu_cg_elcg_enable(struct gk20a *g);


@@ -38,14 +38,14 @@
#define INVAL_ID (~U32(0U))
struct gk20a;
-struct fifo_gk20a;
+struct nvgpu_fifo;
struct tsg_gk20a;
struct channel_gk20a;
void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
struct tsg_gk20a *tsg, bool debug_dump);
-void nvgpu_rc_pbdma_fault(struct gk20a *g, struct fifo_gk20a *f,
+void nvgpu_rc_pbdma_fault(struct gk20a *g, struct nvgpu_fifo *f,
u32 pbdma_id, u32 error_notifier);
void nvgpu_rc_runlist_update(struct gk20a *g, u32 runlist_id);


@@ -29,7 +29,7 @@
struct gk20a;
struct tsg_gk20a;
-struct fifo_gk20a;
+struct nvgpu_fifo;
struct channel_gk20a;
#define NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW 0U
@@ -66,7 +66,7 @@ struct nvgpu_runlist_info {
};
-u32 nvgpu_runlist_construct_locked(struct fifo_gk20a *f,
+u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
struct nvgpu_runlist_info *runlist,
u32 buf_id,
u32 max_entries);


@@ -32,7 +32,7 @@ static void *gk20a_fifo_sched_debugfs_seq_start(
struct seq_file *s, loff_t *pos)
{
struct gk20a *g = s->private;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
if (*pos >= f->num_channels)
return NULL;
@@ -44,7 +44,7 @@ static void *gk20a_fifo_sched_debugfs_seq_next(
struct seq_file *s, void *v, loff_t *pos)
{
struct gk20a *g = s->private;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
++(*pos);
if (*pos >= f->num_channels)
@@ -62,7 +62,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
struct seq_file *s, void *v)
{
struct gk20a *g = s->private;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct channel_gk20a *ch = v;
struct tsg_gk20a *tsg = NULL;
@@ -145,7 +145,7 @@ static const struct file_operations gk20a_fifo_sched_debugfs_fops = {
static int gk20a_fifo_profile_enable(void *data, u64 val)
{
struct gk20a *g = (struct gk20a *) data;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_mutex_acquire(&f->profile.lock);
@@ -339,7 +339,7 @@ void nvgpu_profile_snapshot(struct nvgpu_profile *profile, int idx)
void __gk20a_fifo_profile_free(struct nvgpu_ref *ref)
{
-struct fifo_gk20a *f = container_of(ref, struct fifo_gk20a,
+struct nvgpu_fifo *f = container_of(ref, struct nvgpu_fifo,
profile.ref);
nvgpu_vfree(f->g, f->profile.data);
nvgpu_vfree(f->g, f->profile.sorted);
@@ -350,7 +350,7 @@ void __gk20a_fifo_profile_free(struct nvgpu_ref *ref)
*/
struct nvgpu_profile *nvgpu_profile_acquire(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_profile *profile;
unsigned int index;
@@ -372,7 +372,7 @@ void nvgpu_profile_release(struct gk20a *g,
void gk20a_fifo_debugfs_deinit(struct gk20a *g)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
nvgpu_mutex_acquire(&f->profile.lock);
if (f->profile.enabled) {


@@ -529,7 +529,7 @@ free_gpfifo:
int nvgpu_channel_init_support_linux(struct nvgpu_os_linux *l)
{
struct gk20a *g = &l->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
int chid;
int err;
@@ -580,7 +580,7 @@ err_clean:
void nvgpu_channel_remove_support_linux(struct nvgpu_os_linux *l)
{
struct gk20a *g = &l->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
unsigned int chid;
for (chid = 0; chid < f->num_channels; chid++) {


@@ -144,7 +144,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
struct nvgpu_sched_get_tsgs_by_pid_args *arg)
{
struct gk20a *g = sched->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct tsg_gk20a *tsg;
u64 *bitmap;
unsigned int tsgid;
@@ -188,7 +188,7 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
struct nvgpu_sched_tsg_get_params_args *arg)
{
struct gk20a *g = sched->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct tsg_gk20a *tsg;
u32 tsgid = arg->tsgid;
@@ -222,7 +222,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
struct nvgpu_sched_tsg_timeslice_args *arg)
{
struct gk20a *g = sched->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct tsg_gk20a *tsg;
u32 tsgid = arg->tsgid;
int err;
@@ -257,7 +257,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
struct nvgpu_sched_tsg_runlist_interleave_args *arg)
{
struct gk20a *g = sched->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct tsg_gk20a *tsg;
u32 tsgid = arg->tsgid;
int err;
@@ -326,7 +326,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
struct nvgpu_sched_tsg_refcount_args *arg)
{
struct gk20a *g = sched->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct tsg_gk20a *tsg;
u32 tsgid = arg->tsgid;
@@ -363,7 +363,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
struct nvgpu_sched_tsg_refcount_args *arg)
{
struct gk20a *g = sched->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct tsg_gk20a *tsg;
u32 tsgid = arg->tsgid;
@@ -519,7 +519,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
{
struct gk20a_sched_ctrl *sched = filp->private_data;
struct gk20a *g = sched->g;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct tsg_gk20a *tsg;
unsigned int tsgid;
@@ -596,7 +596,7 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
{
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
int err;
if (sched->sw_ready)


@@ -39,7 +39,7 @@ static void setup_fifo(struct gk20a *g, unsigned long *tsg_map,
struct nvgpu_runlist_info **runlists, u32 *rl_data,
bool interleave)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist = runlists[0];
/* we only use the runlist 0 here */
@@ -116,7 +116,7 @@ static void setup_tsg_multich(struct tsg_gk20a *tsgs, struct channel_gk20a *chs,
}
}
-static int run_format_test(struct unit_module *m, struct fifo_gk20a *f,
+static int run_format_test(struct unit_module *m, struct nvgpu_fifo *f,
struct tsg_gk20a *tsg, struct channel_gk20a *chs,
u32 prio, u32 n_ch, u32 *rl_data,
u32 *expect_header, u32 *expect_channel)
@@ -172,7 +172,7 @@ static struct tsg_fmt_test_args {
static int test_tsg_format_gen(struct unit_module *m, struct gk20a *g,
void *args)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info runlist;
struct nvgpu_runlist_info *runlists = &runlist;
unsigned long active_tsgs_map = 0;
@@ -236,7 +236,7 @@ static int test_common_gen(struct unit_module *m, struct gk20a *g,
u32 *levels, u32 levels_count,
u32 *expected, u32 expect_count)
{
-struct fifo_gk20a *f = &g->fifo;
+struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info runlist;
struct nvgpu_runlist_info *runlists = &runlist;
unsigned long active_tsgs_map = 0;