gpu: nvgpu: update .read_state to use runlist_id and chid

Passing plain IDs rather than a struct nvgpu_channel pointer makes the op reusable on the server side, where only numeric channel identifiers are available (a sketch of such a caller follows the change summary below).

Jira GVSCI-15770

Change-Id: Ia5e30ebb0e8092b9cdc4c3f3cd524f585fd4b410
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2863437
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Dinesh T <dt@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author: Richard Zhao
Date: 2023-02-14 14:59:31 -08:00
Committed by: mobile promotions
Parent: 2ff110f722
Commit: d9c8d317f0

14 changed files with 39 additions and 45 deletions
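The motivation can be illustrated with a hypothetical, self-contained sketch (stubbed types, not the actual nvgpu headers): a server-side caller that only receives (runlist_id, chid) over IPC can invoke an ID-based read_state op directly, without ever holding a struct nvgpu_channel pointer. read_state_by_id below is an assumed stand-in for the HAL op, not nvgpu code.

/*
 * Hypothetical, self-contained sketch; nvgpu types are stubbed for
 * illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gk20a { int unused; };
struct nvgpu_channel_hw_state { bool next; bool enabled; };

/* New-style op: plain IDs, no struct nvgpu_channel pointer required. */
static void read_state_by_id(struct gk20a *g, uint32_t runlist_id,
		uint32_t chid, struct nvgpu_channel_hw_state *state)
{
	(void)g;
	(void)runlist_id;
	/* A real HAL would read channel RAM / CCSR registers for chid here. */
	state->next = false;
	state->enabled = true;
}

int main(void)
{
	struct gk20a g = { 0 };
	struct nvgpu_channel_hw_state hw_state = { 0 };

	/* Server side: IDs arrive in a request message; no channel struct. */
	uint32_t runlist_id = 0U;
	uint32_t chid = 3U;

	read_state_by_id(&g, runlist_id, chid, &hw_state);
	printf("chid %u: next=%d enabled=%d\n",
			(unsigned)chid, hw_state.next, hw_state.enabled);
	return 0;
}

The real HAL implementations in the diff below follow the same shape: the gk20a/gv11b variants ignore runlist_id, while ga10b uses it to look up the runlist's channel RAM.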


@@ -2244,7 +2244,8 @@ void nvgpu_channel_debug_dump_all(struct gk20a *g,
 		}
 #endif
 
-		g->ops.channel.read_state(g, ch, &info->hw_state);
+		g->ops.channel.read_state(g, ch->runlist->id, ch->chid,
+				&info->hw_state);
 		g->ops.ramfc.capture_ram_dump(g, ch, info);
 
 		nvgpu_channel_put(ch);


@@ -795,7 +795,7 @@ int nvgpu_tsg_unbind_channel_hw_state_check(struct nvgpu_tsg *tsg,
 	int err = 0;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	g->ops.channel.read_state(g, ch, &hw_state);
+	g->ops.channel.read_state(g, ch->runlist->id, ch->chid, &hw_state);
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	if (g->ops.tsg.unbind_channel_check_hw_next != NULL) {


@@ -35,7 +35,7 @@ void ga10b_channel_disable(struct nvgpu_channel *ch);
 void ga10b_channel_bind(struct nvgpu_channel *ch);
 void ga10b_channel_unbind(struct nvgpu_channel *ch);
 void ga10b_channel_clear(struct gk20a *g, u32 runlist_id, u32 chid);
-void ga10b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
+void ga10b_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 		struct nvgpu_channel_hw_state *state);
 void ga10b_channel_reset_faulted(struct gk20a *g, struct nvgpu_channel *ch,
 		bool eng, bool pbdma);


@@ -182,7 +182,7 @@ static const char * const chram_status_str[] = {
 	[runlist_chram_channel_acquire_fail_m()] = "acquire_fail",
 };
 
-void ga10b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
+void ga10b_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 		struct nvgpu_channel_hw_state *state)
 {
 	u32 reg = 0U;
@@ -190,13 +190,11 @@ void ga10b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
 	unsigned long status_str_bits = 0UL;
 	u32 status_str_count = 0U;
 	bool idle = true;
-	struct nvgpu_runlist *runlist = NULL;
-	const char **chram_status_list = NULL;
-
-	runlist = ch->runlist;
+	struct nvgpu_runlist *runlist = g->fifo.runlists[runlist_id];
+	const char *chram_status_list[NUM_STATUS_STR] = {};
 
 	reg = nvgpu_chram_bar0_readl(g, runlist,
-			runlist_chram_channel_r(ch->chid));
+			runlist_chram_channel_r(chid));
 
 	state->next = runlist_chram_channel_next_v(reg) ==
 			runlist_chram_channel_next_true_v();
@@ -218,14 +216,6 @@ void ga10b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
 	/* Construct status string for below status fields */
 	status_str_bits = (u64)(reg & ga10b_channel_status_mask());
 
-	/* Allocate memory for status string list */
-	chram_status_list = nvgpu_kzalloc(g, (sizeof(char *) * NUM_STATUS_STR));
-	if (chram_status_list == NULL) {
-		nvgpu_err(g, "Status string list pointer allocation failed");
-		state->status_string[0] = '\0';
-		return;
-	}
-
 	/*
 	 * Status is true if the corresponding bit is set.
 	 * Go through each set bit and copy status string to status string list.
@@ -249,15 +239,13 @@ void ga10b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
 	nvgpu_log_info(g, "Channel id:%d state next:%s enabled:%s ctx_reload:%s"
 			" busy:%s pending_acquire:%s eng_faulted:%s status_string:%s",
-			ch->chid,
+			chid,
 			state->next ? "true" : "false",
 			state->enabled ? "true" : "false",
 			state->ctx_reload ? "true" : "false",
 			state->busy ? "true" : "false",
 			state->pending_acquire ? "true" : "false",
 			state->eng_faulted ? "true" : "false", state->status_string);
-
-	nvgpu_kfree(g, chram_status_list);
 }
 
 void ga10b_channel_reset_faulted(struct gk20a *g, struct nvgpu_channel *ch,


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,7 +31,7 @@ struct nvgpu_channel_dump_info;
 
 void gk20a_channel_enable(struct nvgpu_channel *ch);
 void gk20a_channel_disable(struct nvgpu_channel *ch);
-void gk20a_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
+void gk20a_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 		struct nvgpu_channel_hw_state *state);
 
 #ifdef CONFIG_NVGPU_HAL_NON_FUSA


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -70,12 +70,14 @@ static const char * const ccsr_chan_status_str[] = {
 	"N/A",
 };
 
-void gk20a_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
+void gk20a_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 		struct nvgpu_channel_hw_state *state)
 {
-	u32 reg = nvgpu_readl(g, ccsr_channel_r(ch->chid));
+	u32 reg = nvgpu_readl(g, ccsr_channel_r(chid));
 	u32 status_v = ccsr_channel_status_v(reg);
 
+	(void)runlist_id;
+
 	state->next = ccsr_channel_next_v(reg) == ccsr_channel_next_true_v();
 	state->enabled = ccsr_channel_enable_v(reg) ==
 			ccsr_channel_enable_in_use_v();


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ struct nvgpu_debug_context;
 void gv11b_channel_bind(struct nvgpu_channel *ch);
 void gv11b_channel_unbind(struct nvgpu_channel *ch);
 u32 gv11b_channel_count(struct gk20a *g);
-void gv11b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
+void gv11b_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 		struct nvgpu_channel_hw_state *state);
 void gv11b_channel_reset_faulted(struct gk20a *g, struct nvgpu_channel *ch,
 		bool eng, bool pbdma);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -92,12 +92,12 @@ u32 gv11b_channel_count(struct gk20a *g)
 	return ccsr_channel__size_1_v();
 }
 
-void gv11b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
+void gv11b_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 		struct nvgpu_channel_hw_state *state)
 {
-	u32 reg = nvgpu_readl(g, ccsr_channel_r(ch->chid));
+	u32 reg = nvgpu_readl(g, ccsr_channel_r(chid));
 
-	gk20a_channel_read_state(g, ch, state);
+	gk20a_channel_read_state(g, runlist_id, chid, state);
 
 	state->eng_faulted = ccsr_channel_eng_faulted_v(reg) ==
 			ccsr_channel_eng_faulted_true_v();


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -53,7 +53,8 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		struct nvgpu_channel_hw_state hw_state;
 
-		g->ops.channel.read_state(g, ch, &hw_state);
+		g->ops.channel.read_state(g, ch->runlist->id, ch->chid,
+				&hw_state);
 
 		if (hw_state.next || hw_state.ctx_reload) {
 			g->ops.channel.enable(ch);
@@ -63,7 +64,8 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		struct nvgpu_channel_hw_state hw_state;
 
-		g->ops.channel.read_state(g, ch, &hw_state);
+		g->ops.channel.read_state(g, ch->runlist->id, ch->chid,
+				&hw_state);
 
 		if (hw_state.next || hw_state.ctx_reload) {
 			continue;


@@ -135,7 +135,7 @@ struct gops_channel {
 	void (*free_inst)(struct gk20a *g, struct nvgpu_channel *ch);
 	void (*bind)(struct nvgpu_channel *ch);
 	void (*unbind)(struct nvgpu_channel *ch);
-	void (*read_state)(struct gk20a *g, struct nvgpu_channel *ch,
+	void (*read_state)(struct gk20a *g, u32 runlist_id, u32 chid,
 			struct nvgpu_channel_hw_state *state);
 	void (*force_ctx_reload)(struct nvgpu_channel *ch);
 	void (*abort_clean_up)(struct nvgpu_channel *ch);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -195,7 +195,8 @@ int test_gk20a_channel_read_state(struct unit_module *m,
 		nvgpu_writel(g, ccsr_channel_r(ch->chid), v);
 
-		gk20a_channel_read_state(g, ch, &state);
+		gk20a_channel_read_state(g, ch->runlist->id, ch->chid,
+				&state);
 
 		unit_assert(state.next == next, goto done);
 		unit_assert(state.enabled == enabled, goto done);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -163,7 +163,7 @@ int test_gv11b_channel_read_state(struct unit_module *m,
 		nvgpu_writel(g, ccsr_channel_r(ch->chid), v);
 
-		gv11b_channel_read_state(g, ch, &state);
+		gv11b_channel_read_state(g, ch->runlist->id, ch->chid, &state);
 
 		unit_assert(state.eng_faulted == eng_faulted, goto done);
 	}


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -1671,10 +1671,10 @@ static const char *f_channel_debug_dump[] = {
 	"info_alloc_fail",
 };
 
-static void stub_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
+static void stub_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 		struct nvgpu_channel_hw_state *state)
 {
-	stub[0].chid = ch->chid;
+	stub[0].chid = chid;
 }
 
 static void stub_ramfc_capture_ram_dump(struct gk20a *g,


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -775,13 +775,13 @@ static const char *f_tsg_unbind_channel_check_hw[] = {
 };
 
 static void stub_channel_read_state_NEXT(struct gk20a *g,
-		struct nvgpu_channel *ch, struct nvgpu_channel_hw_state *state)
+		u32 runlist_id, u32 chid, struct nvgpu_channel_hw_state *state)
 {
 	state->next = true;
 }
 
 static void stub_channel_read_state_NEXT_CLR(struct gk20a *g,
-		struct nvgpu_channel *ch, struct nvgpu_channel_hw_state *state)
+		u32 runlist_id, u32 chid, struct nvgpu_channel_hw_state *state)
 {
 	state->next = false;
 }