gpu: nvgpu: update .clear to use runlist_id and chid

- Move .clear to take IDs rather than a channel struct pointer so the HAL is reusable on the server side.
- Move channel bind/unbind to use the .enable/.clear HALs.
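
For reference, the resulting prototype change in gops_channel (shown in full in the last diff below) is:

	/* before: the HAL required the full channel object */
	void (*clear)(struct nvgpu_channel *ch);

	/* after: only the GPU handle plus the two IDs are needed, so code that
	 * never holds a struct nvgpu_channel (e.g. a server that only receives
	 * runlist_id/chid) can still call it
	 */
	void (*clear)(struct gk20a *g, u32 runlist_id, u32 chid);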

Jira GVSCI-15770

Change-Id: I86d4aae2953024e537e32a35fe9cabb1b91cd201
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2863436
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Ramesh Mylavarapu <rmylavarapu@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:    Richard Zhao
Date:      2023-01-31 17:42:06 -08:00
Committer: mobile promotions
parent 3edae21ca6
commit 2ff110f722
6 changed files with 21 additions and 19 deletions

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -598,7 +598,7 @@ static int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,
 	}
 
 	if (g->ops.channel.clear != NULL) {
-		g->ops.channel.clear(ch);
+		g->ops.channel.clear(g, ch->runlist->id, ch->chid);
 	}
 
 	/* Channel should be seen as TSG channel while updating runlist */
@@ -743,7 +743,7 @@ fail_common:
 	nvgpu_tsg_abort(g, tsg, true);
 
 	if (g->ops.channel.clear != NULL) {
-		g->ops.channel.clear(ch);
+		g->ops.channel.clear(g, ch->runlist->id, ch->chid);
 	}
 
 	/* If channel unbind fails, channel is still part of runlist */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,6 +34,7 @@ void ga10b_channel_enable(struct nvgpu_channel *ch);
 void ga10b_channel_disable(struct nvgpu_channel *ch);
 void ga10b_channel_bind(struct nvgpu_channel *ch);
 void ga10b_channel_unbind(struct nvgpu_channel *ch);
+void ga10b_channel_clear(struct gk20a *g, u32 runlist_id, u32 chid);
 void ga10b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
 		struct nvgpu_channel_hw_state *state);
 void ga10b_channel_reset_faulted(struct gk20a *g, struct nvgpu_channel *ch,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -83,11 +83,8 @@ void ga10b_channel_disable(struct nvgpu_channel *ch)
 void ga10b_channel_bind(struct nvgpu_channel *ch)
 {
 	struct gk20a *g = ch->g;
-	struct nvgpu_runlist *runlist = NULL;
 	int err;
 
-	runlist = ch->runlist;
-
 	/* Enable subcontext */
 	if (g->ops.tsg.add_subctx_channel_hw != NULL) {
 		err = g->ops.tsg.add_subctx_channel_hw(ch, ch->replayable);
@@ -98,9 +95,7 @@ void ga10b_channel_bind(struct nvgpu_channel *ch)
 	}
 
 	/* Enable channel */
-	nvgpu_chram_bar0_writel(g, runlist, runlist_chram_channel_r(ch->chid),
-		runlist_chram_channel_update_f(
-			runlist_chram_channel_update_enable_channel_v()));
+	g->ops.channel.enable(ch);
 
 	nvgpu_atomic_set(&ch->bound, CHANNEL_BOUND);
 }
@@ -149,11 +144,17 @@ void ga10b_channel_unbind(struct nvgpu_channel *ch)
 	if (nvgpu_atomic_cmpxchg(&ch->bound, CHANNEL_BOUND, CHANNEL_UNBOUND) !=
 			0) {
-		nvgpu_chram_bar0_writel(g, runlist,
-			runlist_chram_channel_r(ch->chid),
+		g->ops.channel.clear(g, runlist->id, ch->chid);
+	}
+}
+
+void ga10b_channel_clear(struct gk20a *g, u32 runlist_id, u32 chid)
+{
+	nvgpu_chram_bar0_writel(g,
+		g->fifo.runlists[runlist_id],
+		runlist_chram_channel_r(chid),
 		runlist_chram_channel_update_f(
 			runlist_chram_channel_update_clear_channel_v()));
-	}
 }
 
 #define NUM_STATUS_STR 8U

View File

@@ -1197,7 +1197,7 @@ static const struct gops_channel ga100_ops_channel = {
 	.free_inst = nvgpu_channel_free_inst,
 	.bind = ga10b_channel_bind,
 	.unbind = ga10b_channel_unbind,
-	.clear = ga10b_channel_unbind,
+	.clear = ga10b_channel_clear,
 	.enable = ga10b_channel_enable,
 	.disable = ga10b_channel_disable,
 	.count = ga100_channel_count,

View File

@@ -1213,7 +1213,7 @@ static const struct gops_channel ga10b_ops_channel = {
 	.free_inst = nvgpu_channel_free_inst,
 	.bind = ga10b_channel_bind,
 	.unbind = ga10b_channel_unbind,
-	.clear = ga10b_channel_unbind,
+	.clear = ga10b_channel_clear,
 	.enable = ga10b_channel_enable,
 	.disable = ga10b_channel_disable,
 	.count = ga10b_channel_count,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -141,7 +141,7 @@ struct gops_channel {
 	void (*abort_clean_up)(struct nvgpu_channel *ch);
 	void (*reset_faulted)(struct gk20a *g, struct nvgpu_channel *ch,
 			bool eng, bool pbdma);
-	void (*clear)(struct nvgpu_channel *ch);
+	void (*clear)(struct gk20a *g, u32 runlist_id, u32 chid);
 	int (*get_vmid)(struct nvgpu_channel *ch, u32 *vmid);
 #ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
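
To illustrate the server-side reuse the commit message mentions, a minimal hypothetical sketch (the handler name and RPC context are invented for illustration and are not part of this change): a server-side path that only receives the two IDs can now invoke the HAL directly, without ever resolving a struct nvgpu_channel.

	static void handle_channel_clear_request(struct gk20a *g,
			u32 runlist_id, u32 chid)
	{
		/* same NULL check as the common unbind path above */
		if (g->ops.channel.clear != NULL) {
			g->ops.channel.clear(g, runlist_id, chid);
		}
	}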