Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: move parameter of .vm_bind_channel from as_share to vm
as_share is more OS-specific and not yet used on other OSes, so pass the
vm object directly instead.

Jira VQRM-2344
Change-Id: Ie2ed007125400484352fbab602c37a198e8a64ae
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1699842
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
parent 98dce7eaac
commit 3d0ddb8c4a
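Before the diff, a minimal sketch of the signature change this commit makes.
The struct bodies below are simplified stand-ins for illustration, not the
real nvgpu definitions; only the as_share->vm member and the two signatures
are taken from the diff itself:

/* Sketch of the signature change; types are simplified stand-ins. */
struct vm_gk20a;       /* core GMMU address-space object */
struct channel_gk20a;  /* core channel object */
struct gk20a_as_share {
        struct vm_gk20a *vm;  /* OS-facing wrapper around the vm */
};

/* Old shape of the HAL slot: took the OS-specific share object. */
int (*vm_bind_channel_old)(struct gk20a_as_share *as_share,
                           struct channel_gk20a *ch);

/* New shape: takes the core vm directly; the Linux ioctl handler,
 * the one remaining as_share user, passes as_share->vm. */
int (*vm_bind_channel_new)(struct vm_gk20a *vm,
                           struct channel_gk20a *ch);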
@@ -1306,7 +1306,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	}
 
 	/* bind the channel to the vm */
-	err = __gk20a_vm_bind_channel(g->mm.cde.vm, ch);
+	err = g->ops.mm.vm_bind_channel(g->mm.cde.vm, ch);
 	if (err) {
 		nvgpu_warn(g, "cde: could not bind vm");
 		goto err_commit_va;
@@ -63,7 +63,7 @@ static int gk20a_as_ioctl_bind_channel(
 	}
 
 	/* this will set channel_gk20a->vm */
-	err = ch->g->ops.mm.vm_bind_channel(as_share, ch);
+	err = ch->g->ops.mm.vm_bind_channel(as_share->vm, ch);
 
 out:
 	gk20a_channel_put(ch);
@@ -449,7 +449,7 @@ u32 gk20a_ce_create_context(struct gk20a *g,
 	ce_ctx->ch->timeout.enabled = false;
 
 	/* bind the channel to the vm */
-	err = __gk20a_vm_bind_channel(g->mm.ce.vm, ce_ctx->ch);
+	err = g->ops.mm.vm_bind_channel(g->mm.ce.vm, ce_ctx->ch);
 	if (err) {
 		nvgpu_err(g, "ce: could not bind vm");
 		goto end;
@@ -855,7 +855,7 @@ struct gpu_ops {
 			int rw_flag,
 			bool sparse,
 			struct vm_gk20a_mapping_batch *batch);
-		int (*vm_bind_channel)(struct gk20a_as_share *as_share,
+		int (*vm_bind_channel)(struct vm_gk20a *vm,
 				struct channel_gk20a *ch);
 		int (*fb_flush)(struct gk20a *g);
 		void (*l2_invalidate)(struct gk20a *g);
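For context, a hedged sketch of how a slot like this gets filled during
per-chip HAL init. The init function name below is illustrative, not taken
from this diff; only the assignment shape follows the gpu_ops hunk above:

/* Illustrative only: the init function name is an assumption. */
static void example_init_mm_ops(struct gk20a *g)
{
        /* native chips point the slot at the gk20a implementation ... */
        g->ops.mm.vm_bind_channel = gk20a_vm_bind_channel;
        /* ... while a virtualized build would use vgpu_vm_bind_channel */
}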
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -332,7 +332,7 @@ const struct gk20a_mmu_level gk20a_mm_levels_128k[] = {
 	{.update_entry = NULL}
 };
 
-int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
+int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
 {
 	int err = 0;
 
@@ -350,12 +350,6 @@ int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
 	return err;
 }
 
-int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
-			  struct channel_gk20a *ch)
-{
-	return __gk20a_vm_bind_channel(as_share->vm, ch);
-}
-
 void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
 		struct vm_gk20a *vm)
 {
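The removed wrapper shows why the rename is safe: __gk20a_vm_bind_channel was
already the real implementation, and the as_share variant only unwrapped the
vm. Below is a hedged reconstruction of the renamed function, inferred from
the context lines here and the caller comment ("this will set
channel_gk20a->vm"); the actual body is not part of this diff:

/* Hedged reconstruction -- the real body is not shown in this diff.
 * nvgpu_vm_get() is an assumed refcount helper; the name may differ. */
int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
{
        int err = 0;

        nvgpu_vm_get(vm);  /* assumed: take a reference on the vm */
        ch->vm = vm;       /* "this will set channel_gk20a->vm" */
        /* ... commit the VA space to the channel's instance block,
         *     setting err on failure (elided) ... */
        return err;
}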
@@ -1,7 +1,7 @@
 /*
  * GK20A memory management
  *
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -165,9 +165,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 struct nvgpu_as_alloc_space_args;
 struct nvgpu_as_free_space_args;
 int gk20a_vm_release_share(struct gk20a_as_share *as_share);
-int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
-			  struct channel_gk20a *ch);
-int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch);
+int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch);
 
 void pde_range_from_vaddr_range(struct vm_gk20a *vm,
 			u64 addr_lo, u64 addr_hi,
@@ -177,10 +177,9 @@ u64 vgpu_bar1_map(struct gk20a *g, struct nvgpu_mem *mem)
 	return addr;
 }
 
-int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
+int vgpu_vm_bind_channel(struct vm_gk20a *vm,
 			 struct channel_gk20a *ch)
 {
-	struct vm_gk20a *vm = as_share->vm;
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
 	int err;
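The virtualized backend gets the same reshaping: the local vm that used to be
derived from as_share becomes the parameter itself. A sketch of the resulting
function head, with the RPC body elided because it lies outside this hunk:

/* Sketch only; everything past the declarations is elided here,
 * and the elided code is assumed to set err before the return. */
int vgpu_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
        int err;

        /* ... fill p from vm and ch, issue the command, set err ... */
        return err;
}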
@@ -37,7 +37,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 			int rw_flag,
 			bool sparse,
 			struct vm_gk20a_mapping_batch *batch);
-int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
+int vgpu_vm_bind_channel(struct vm_gk20a *vm,
 		struct channel_gk20a *ch);
 int vgpu_mm_fb_flush(struct gk20a *g);
 void vgpu_mm_l2_invalidate(struct gk20a *g);