Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 02:22:34 +03:00)
gpu: nvgpu: remove g->ops.gr.alloc_gr_ctx() hal
Common code now directly calls gr_gk20a_alloc_gr_ctx(), and vgpu code
directly calls vgpu_gr_alloc_gr_ctx(). Remove the g->ops.gr.alloc_gr_ctx()
hal since it is no longer required.

Jira NVGPU-1887

Change-Id: I65d19f4a8ae62967ff67d6f69b5af1b46abf9c1a
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2075233
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 6a315a5a6d
commit dbcce79b55
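The pattern behind the diff below, in a nutshell: a HAL function pointer only earns its keep when callers must choose an implementation at runtime. Once the native and vgpu paths each call their own allocator directly, the pointer, its slot in struct gpu_ops, and the public prototype can all be deleted, and the native implementation can become static. Here is a minimal, compilable sketch of the before/after shape — hypothetical names and stubbed types, not the actual nvgpu code:

/* Hypothetical stand-ins for gk20a/gpu_ops; allocation is stubbed out. */
struct gpu;

struct gr_ops {
	/* Before: HAL entry, assigned per chip (or to the vgpu variant). */
	int (*alloc_gr_ctx)(struct gpu *g);
};

struct gpu {
	struct {
		struct gr_ops gr;
	} ops;
};

static int alloc_obj_ctx_before(struct gpu *g)
{
	/* Indirect call through the per-chip ops table. */
	return g->ops.gr.alloc_gr_ctx(g);
}

/* After: the one implementation this code path uses can be file-local. */
static int gr_alloc_gr_ctx(struct gpu *g)
{
	(void)g;
	return 0;	/* real allocation elided */
}

static int alloc_obj_ctx_after(struct gpu *g)
{
	/* Direct call; no ops-table slot or extern prototype needed. */
	return gr_alloc_gr_ctx(g);
}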
@@ -138,7 +138,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.detect_sm_arch = vgpu_gr_detect_sm_arch,
 		.pagepool_default_size = gr_gp10b_pagepool_default_size,
 		.init_ctx_state = vgpu_gr_init_ctx_state,
-		.alloc_gr_ctx = vgpu_gr_alloc_gr_ctx,
 		.free_gr_ctx = vgpu_gr_free_gr_ctx,
 		.init_ctxsw_preemption_mode =
 			vgpu_gr_init_ctxsw_preemption_mode,
@@ -224,8 +224,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 		tsg->vm = c->vm;
 		nvgpu_vm_get(tsg->vm);
 		gr_ctx->tsgid = tsg->tsgid;
-		err = g->ops.gr.alloc_gr_ctx(g, gr_ctx,
-				c->vm);
+		err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, c->vm);
 		if (err) {
 			nvgpu_err(g,
 				"fail to allocate TSG gr ctx buffer, err=%d",
@@ -156,7 +156,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.detect_sm_arch = vgpu_gr_detect_sm_arch,
 		.pagepool_default_size = gr_gv11b_pagepool_default_size,
 		.init_ctx_state = vgpu_gr_init_ctx_state,
-		.alloc_gr_ctx = vgpu_gr_alloc_gr_ctx,
 		.free_gr_ctx = vgpu_gr_free_gr_ctx,
 		.init_ctxsw_preemption_mode =
 			vgpu_gr_init_ctxsw_preemption_mode,
@@ -1877,7 +1877,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 	return 0;
 }
 
-int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
+static int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 		struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm)
 {
 	struct gr_gk20a *gr = &g->gr;
@@ -1962,7 +1962,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {
 		tsg->vm = c->vm;
 		nvgpu_vm_get(tsg->vm);
-		err = g->ops.gr.alloc_gr_ctx(g, gr_ctx, tsg->vm);
+		err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, tsg->vm);
 		if (err != 0) {
 			nvgpu_err(g,
 				"fail to allocate TSG gr ctx buffer");
@@ -444,8 +444,6 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
 		bool sleepduringwait);
 int gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g,
 		struct fecs_method_op_gk20a op);
-int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
-		struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm);
 void gr_gk20a_free_gr_ctx(struct gk20a *g,
 		struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx);
 int gr_gk20a_halt_pipe(struct gk20a *g);
@@ -261,7 +261,6 @@ static const struct gpu_ops gm20b_ops = {
 		.detect_sm_arch = gr_gm20b_detect_sm_arch,
 		.pagepool_default_size = gr_gm20b_pagepool_default_size,
 		.init_ctx_state = gr_gk20a_init_ctx_state,
-		.alloc_gr_ctx = gr_gk20a_alloc_gr_ctx,
 		.free_gr_ctx = gr_gk20a_free_gr_ctx,
 		.init_ctxsw_preemption_mode =
 			gr_gm20b_init_ctxsw_preemption_mode,
@@ -283,7 +283,6 @@ static const struct gpu_ops gp10b_ops = {
 		.detect_sm_arch = gr_gm20b_detect_sm_arch,
 		.pagepool_default_size = gr_gp10b_pagepool_default_size,
 		.init_ctx_state = gr_gp10b_init_ctx_state,
-		.alloc_gr_ctx = gr_gk20a_alloc_gr_ctx,
 		.free_gr_ctx = gr_gk20a_free_gr_ctx,
 		.init_ctxsw_preemption_mode =
 			gr_gp10b_init_ctxsw_preemption_mode,
@@ -386,7 +386,6 @@ static const struct gpu_ops gv100_ops = {
 		.detect_sm_arch = gr_gv11b_detect_sm_arch,
 		.pagepool_default_size = gr_gv11b_pagepool_default_size,
 		.init_ctx_state = gr_gp10b_init_ctx_state,
-		.alloc_gr_ctx = gr_gk20a_alloc_gr_ctx,
 		.free_gr_ctx = gr_gk20a_free_gr_ctx,
 		.init_ctxsw_preemption_mode =
 			gr_gp10b_init_ctxsw_preemption_mode,
@@ -338,7 +338,6 @@ static const struct gpu_ops gv11b_ops = {
 		.detect_sm_arch = gr_gv11b_detect_sm_arch,
 		.pagepool_default_size = gr_gv11b_pagepool_default_size,
 		.init_ctx_state = gr_gp10b_init_ctx_state,
-		.alloc_gr_ctx = gr_gk20a_alloc_gr_ctx,
 		.free_gr_ctx = gr_gk20a_free_gr_ctx,
 		.powergate_tpc = gr_gv11b_powergate_tpc,
 		.init_ctxsw_preemption_mode =
@@ -324,8 +324,6 @@ struct gpu_ops {
 		void (*detect_sm_arch)(struct gk20a *g);
 		u32 (*pagepool_default_size)(struct gk20a *g);
 		int (*init_ctx_state)(struct gk20a *g);
-		int (*alloc_gr_ctx)(struct gk20a *g,
-			struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm);
 		void (*free_gr_ctx)(struct gk20a *g,
 			struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx);
 		void (*powergate_tpc)(struct gk20a *g);
@@ -406,7 +406,6 @@ static const struct gpu_ops tu104_ops = {
 		.detect_sm_arch = gr_gv11b_detect_sm_arch,
 		.pagepool_default_size = gr_gv11b_pagepool_default_size,
 		.init_ctx_state = gr_gp10b_init_ctx_state,
-		.alloc_gr_ctx = gr_gk20a_alloc_gr_ctx,
 		.free_gr_ctx = gr_tu104_free_gr_ctx,
 		.init_ctxsw_preemption_mode =
 			gr_gp10b_init_ctxsw_preemption_mode,