Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: pass gr_ctx to alloc_channel_patch_ctx
Simplify object ownership by passing the gr_ctx around directly instead
of reading it from the tsg via a channel; the caller holds the gr_ctx
already. Also pass the channel vm instead of the whole channel.

Jira NVGPU-1149
Change-Id: Id9d65841f09459e7acfc8c4ce4c6de7db054dbd8
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1925427
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 50438811c8
commit 95f1d19b94
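
To make the intent concrete, here is a minimal before/after sketch of the call site, condensed from the diff below (surrounding code elided; g, c, and gr_ctx are as they appear in gk20a_alloc_obj_ctx):

	/* Before: the callee took the whole channel and re-derived the TSG
	 * and gr_ctx itself, with an extra error path for a NULL TSG. */
	err = gr_gk20a_alloc_channel_patch_ctx(g, c);

	/* After: the caller, which already holds the gr_ctx, passes it and
	 * the channel's vm down directly, so the tsg_gk20a_from_ch() lookup
	 * and its -EINVAL error path disappear from the callee. */
	err = gr_gk20a_alloc_channel_patch_ctx(g, c->vm, gr_ctx);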
@@ -78,9 +78,6 @@ static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g,
 					   struct vm_gk20a *vm,
 					   struct nvgpu_gr_ctx *gr_ctx);
 
-/* channel patch ctx buffer */
-static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
-					    struct channel_gk20a *c);
 static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g,
 					    struct vm_gk20a *vm,
 					    struct nvgpu_gr_ctx *gr_ctx);
@@ -2810,22 +2807,16 @@ u32 gr_gk20a_get_patch_slots(struct gk20a *g)
 }
 
 static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
-					    struct channel_gk20a *c)
+					    struct vm_gk20a *ch_vm,
+					    struct nvgpu_gr_ctx *gr_ctx)
 {
-	struct tsg_gk20a *tsg;
 	struct patch_desc *patch_ctx;
-	struct vm_gk20a *ch_vm = c->vm;
 	u32 alloc_size;
 	int err = 0;
 
 	nvgpu_log_fn(g, " ");
 
-	tsg = tsg_gk20a_from_ch(c);
-	if (tsg == NULL) {
-		return -EINVAL;
-	}
-
-	patch_ctx = &tsg->gr_ctx->patch_ctx;
+	patch_ctx = &gr_ctx->patch_ctx;
 	alloc_size = g->ops.gr.get_patch_slots(g) *
 		     PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY;
 
@@ -2925,7 +2916,8 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	/* allocate patch buffer */
 	if (!nvgpu_mem_is_valid(&gr_ctx->patch_ctx.mem)) {
 		gr_ctx->patch_ctx.data_count = 0;
-		err = gr_gk20a_alloc_channel_patch_ctx(g, c);
+		err = gr_gk20a_alloc_channel_patch_ctx(g, c->vm,
+			gr_ctx);
 		if (err != 0) {
 			nvgpu_err(g,
 				"fail to allocate patch buffer");