gpu: nvgpu: move .force_ctx_reload to use runlist_id and chid

Taking IDs rather than a struct nvgpu_channel pointer makes the op reusable on the server side (see the interface sketch below).

Jira GVSCI-15770

Change-Id: Id4e815e9cf78a43156449d0e77e8e331fc906725
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2863439
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:    Richard Zhao <rizhao@nvidia.com>
Date:      2023-02-14 16:27:21 -08:00
Committer: mobile promotions
Commit:    84ddb23633 (parent c8d6a91de6)

8 changed files with 21 additions and 21 deletions
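
At a glance, the interface change: the HAL op drops the channel-struct parameter in favor of plain identifiers, so a caller holding only the IDs (e.g. the vGPU server side) can invoke it. A sketch assembled from the hunks below; nothing here goes beyond the diff itself:

	/* Old op in struct gops_channel: */
	void (*force_ctx_reload)(struct nvgpu_channel *ch);

	/* New op: plain IDs, no struct nvgpu_channel needed. */
	void (*force_ctx_reload)(struct gk20a *g, u32 runlist_id, u32 chid);

	/* Call sites pass the IDs explicitly, as in the TSG unbind hunk: */
	g->ops.channel.force_ctx_reload(g, ch->runlist->id, ch->chid);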


@@ -826,7 +826,8 @@ void nvgpu_tsg_unbind_channel_ctx_reload_check(struct nvgpu_tsg *tsg,
 	nvgpu_list_for_each_entry(temp_ch, &tsg->ch_list,
 			nvgpu_channel, ch_entry) {
 		if (temp_ch->chid != ch->chid) {
-			g->ops.channel.force_ctx_reload(temp_ch);
+			g->ops.channel.force_ctx_reload(g,
+					temp_ch->runlist->id, temp_ch->chid);
 			break;
 		}
 	}


@@ -39,6 +39,6 @@ void ga10b_channel_read_state(struct gk20a *g, u32 runlist_id, u32 chid,
 		struct nvgpu_channel_hw_state *state);
 void ga10b_channel_reset_faulted(struct gk20a *g, struct nvgpu_channel *ch,
 		bool eng, bool pbdma);
-void ga10b_channel_force_ctx_reload(struct nvgpu_channel *ch);
+void ga10b_channel_force_ctx_reload(struct gk20a *g, u32 runlist_id, u32 chid);
 
 #endif /* FIFO_CHANNEL_GA10B_H */


@@ -273,14 +273,11 @@ void ga10b_channel_reset_faulted(struct gk20a *g, struct nvgpu_channel *ch,
 }
 
-void ga10b_channel_force_ctx_reload(struct nvgpu_channel *ch)
+void ga10b_channel_force_ctx_reload(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	struct gk20a *g = ch->g;
-	struct nvgpu_runlist *runlist = NULL;
-
-	runlist = ch->runlist;
+	struct nvgpu_runlist *runlist = g->fifo.runlists[runlist_id];
 
-	nvgpu_chram_bar0_writel(g, runlist, runlist_chram_channel_r(ch->chid),
+	nvgpu_chram_bar0_writel(g, runlist, runlist_chram_channel_r(chid),
 			runlist_chram_channel_update_f(
 			runlist_chram_channel_update_force_ctx_reload_v()));
 }
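
Because the ga10b implementation now trusts a caller-supplied runlist_id to index g->fifo.runlists[], an external caller (the server side named in the commit message) would want to bounds-check the ID first. A hedged sketch; the num_runlists bound is an assumption, not part of this diff:

	/* Hypothetical guard before invoking the op with untrusted IDs. */
	if (runlist_id >= g->fifo.num_runlists) {	/* assumed bound field */
		return -EINVAL;
	}
	g->ops.channel.force_ctx_reload(g, runlist_id, chid);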


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,7 +29,7 @@ struct nvgpu_channel;
 struct gk20a;
 
 void gm20b_channel_bind(struct nvgpu_channel *c);
-void gm20b_channel_force_ctx_reload(struct nvgpu_channel *ch);
+void gm20b_channel_force_ctx_reload(struct gk20a *g, u32 runlist_id, u32 chid);
 #ifdef CONFIG_NVGPU_HAL_NON_FUSA
 u32 gm20b_channel_count(struct gk20a *g);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -58,11 +58,11 @@ void gm20b_channel_bind(struct nvgpu_channel *c)
 	nvgpu_atomic_set(&c->bound, 1);
 }
 
-void gm20b_channel_force_ctx_reload(struct nvgpu_channel *ch)
+void gm20b_channel_force_ctx_reload(struct gk20a *g, u32 runlist_id, u32 chid)
 {
-	struct gk20a *g = ch->g;
-	u32 reg = nvgpu_readl(g, ccsr_channel_r(ch->chid));
+	u32 reg = nvgpu_readl(g, ccsr_channel_r(chid));
 
-	nvgpu_writel(g, ccsr_channel_r(ch->chid),
+	(void)runlist_id;
+	nvgpu_writel(g, ccsr_channel_r(chid),
 		reg | ccsr_channel_force_ctx_reload_true_f());
 }


@@ -141,7 +141,7 @@ struct gops_channel {
 	void (*unbind)(struct nvgpu_channel *ch);
 	void (*read_state)(struct gk20a *g, u32 runlist_id, u32 chid,
 			struct nvgpu_channel_hw_state *state);
-	void (*force_ctx_reload)(struct nvgpu_channel *ch);
+	void (*force_ctx_reload)(struct gk20a *g, u32 runlist_id, u32 chid);
 	void (*abort_clean_up)(struct nvgpu_channel *ch);
 	void (*reset_faulted)(struct gk20a *g, struct nvgpu_channel *ch,
 			bool eng, bool pbdma);
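
This HAL-table change is what enables the commit's stated motivation: a server-side handler that receives only numeric IDs (e.g. in an RPC message) can drive the op without materializing a struct nvgpu_channel. A hypothetical sketch; the message struct and handler are illustrative, not nvgpu code:

	/* Hypothetical RPC payload: the client names the channel by IDs only. */
	struct force_ctx_reload_msg {
		u32 runlist_id;
		u32 chid;
	};

	static void handle_force_ctx_reload(struct gk20a *g,
			const struct force_ctx_reload_msg *msg)
	{
		/* IDs pass straight through; no channel lookup required. */
		g->ops.channel.force_ctx_reload(g, msg->runlist_id, msg->chid);
	}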


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -112,13 +112,14 @@ int test_gm20b_channel_force_ctx_reload(struct unit_module *m,
 	unit_assert(ch, goto done);
 
 	nvgpu_writel(g, ccsr_channel_r(ch->chid), 0);
-	gm20b_channel_force_ctx_reload(ch);
+	gm20b_channel_force_ctx_reload(g, ch->runlist->id, ch->chid);
 	unit_assert((nvgpu_readl(g, ccsr_channel_r(ch->chid)) &
 		ccsr_channel_force_ctx_reload_true_f()) != 0, goto done);
 
 	chid = ch->chid;
 	ch->chid = U32_MAX;
-	err = EXPECT_BUG(gm20b_channel_force_ctx_reload(ch));
+	err = EXPECT_BUG(gm20b_channel_force_ctx_reload(g,
+			ch->runlist->id, ch->chid));
 	ch->chid = chid;
 	unit_assert(err != 0, goto done);


@@ -1002,10 +1002,11 @@ static const char *f_unbind_channel_check_ctx_reload[] = {
 	"chid_match",
 };
 
-static void stub_channel_force_ctx_reload(struct nvgpu_channel *ch)
+static void stub_channel_force_ctx_reload(struct gk20a *g,
+		u32 runlist_id, u32 chid)
 {
 	stub[0].name = __func__;
-	stub[0].chid = ch->chid;
+	stub[0].chid = chid;
 }
 
 int test_tsg_unbind_channel_check_ctx_reload(struct unit_module *m,