mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Make alloc_obj_ctx args Linux specific
Use the nvgpu_alloc_obj_ctx_args structure in Linux-specific code only. Pass
the fields of the structure as separate arguments to all common functions.
gr_ops_gp10b.h referred to the struct, but it's not used anywhere, so
delete the file.

JIRA NVGPU-259

Change-Id: Idba78d48de1c30f205a42da2fe47a9f8c03735f1
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1586563
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 4d2d890c01
commit 721315298b
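The shape of the change, in miniature: the ioctl argument struct stays at the Linux boundary, where the handler unpacks it into plain scalars before calling into common code. The following standalone C sketch shows the pattern; the struct and its fields mirror the diff below, but common_alloc_obj_ctx() and linux_ioctl_alloc_obj_ctx() are hypothetical stand-ins for the driver, not actual nvgpu functions.

	#include <stdio.h>

	typedef unsigned int u32;

	/*
	 * Linux ioctl argument layout; after this change it is referenced
	 * only by OS-specific code. Field names match the nvgpu struct; the
	 * rest of this example is an illustrative stand-in.
	 */
	struct nvgpu_alloc_obj_ctx_args {
		u32 class_num;
		u32 flags;
	};

	/* Common (OS-agnostic) code now takes plain scalars, not the struct. */
	static int common_alloc_obj_ctx(u32 class_num, u32 flags)
	{
		printf("alloc obj ctx: class 0x%x, flags 0x%x\n",
		       class_num, flags);
		return 0;
	}

	/* The ioctl handler unpacks the struct at the OS boundary. */
	static int linux_ioctl_alloc_obj_ctx(void *buf)
	{
		struct nvgpu_alloc_obj_ctx_args *args = buf;

		return common_alloc_obj_ctx(args->class_num, args->flags);
	}

	int main(void)
	{
		/* 0xa0c0 is KEPLER_COMPUTE_A in the nvgpu class headers. */
		struct nvgpu_alloc_obj_ctx_args args = {
			.class_num = 0xa0c0, .flags = 0,
		};

		return linux_ioctl_alloc_obj_ctx(&args);
	}

Keeping OS-specific argument layouts out of common signatures is what lets the common code be reused by non-Linux frontends without dragging the Linux uapi definitions along.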
@@ -527,16 +527,12 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
 {
 	struct nvgpu_os_linux *l = cde_ctx->l;
 	struct gk20a *g = &l->g;
-	struct nvgpu_alloc_obj_ctx_args alloc_obj_ctx;
 	int err;
 
-	alloc_obj_ctx.class_num = required_class;
-	alloc_obj_ctx.flags = 0;
-
 	/* CDE enabled */
 	cde_ctx->ch->cde = true;
 
-	err = gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx);
+	err = gk20a_alloc_obj_ctx(cde_ctx->ch, required_class, 0);
 	if (err) {
 		nvgpu_warn(g, "cde: failed to allocate ctx. err=%d",
 			   err);
@@ -923,6 +923,10 @@ long gk20a_channel_ioctl(struct file *filp,
 	case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD:
 		break;
 	case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
+	{
+		struct nvgpu_alloc_obj_ctx_args *args =
+			(struct nvgpu_alloc_obj_ctx_args *)buf;
+
 		err = gk20a_busy(ch->g);
 		if (err) {
 			dev_err(dev,
@@ -930,10 +934,10 @@ long gk20a_channel_ioctl(struct file *filp,
 				__func__, cmd);
 			break;
 		}
-		err = ch->g->ops.gr.alloc_obj_ctx(ch,
-				(struct nvgpu_alloc_obj_ctx_args *)buf);
+		err = ch->g->ops.gr.alloc_obj_ctx(ch, args->class_num, args->flags);
 		gk20a_idle(ch->g);
 		break;
+	}
 	case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX:
 	{
 		struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args =
@@ -225,7 +225,7 @@ struct gpu_ops {
 		void (*set_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index);
 		void (*free_channel_ctx)(struct channel_gk20a *c, bool is_tsg);
 		int (*alloc_obj_ctx)(struct channel_gk20a *c,
-				struct nvgpu_alloc_obj_ctx_args *args);
+				u32 class_num, u32 flags);
 		int (*bind_ctxsw_zcull)(struct gk20a *g, struct gr_gk20a *gr,
 					struct channel_gk20a *c, u64 zcull_va,
 					u32 mode);
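Because alloc_obj_ctx is reached through the gpu_ops HAL table shown above, every implementation behind the pointer (gk20a, vgpu, and the gp10b declarations in the deleted gr_ops_gp10b.h) has to move to the scalar signature in the same commit. A reduced, hypothetical model of that function-pointer indirection; struct gr_ops here is cut down to the one member this commit touches, and the my_chip_* names are invented:

	typedef unsigned int u32;
	struct channel_gk20a;

	/* One-member model of the HAL table from the hunk above. */
	struct gr_ops {
		int (*alloc_obj_ctx)(struct channel_gk20a *c,
				     u32 class_num, u32 flags);
	};

	/* A per-chip implementation with the post-refactor signature. */
	static int my_chip_alloc_obj_ctx(struct channel_gk20a *c,
					 u32 class_num, u32 flags)
	{
		(void)c;
		(void)class_num;
		(void)flags;
		return 0;
	}

	/* Chip init fills the pointer; common code calls through the table. */
	static const struct gr_ops my_chip_gr_ops = {
		.alloc_obj_ctx = my_chip_alloc_obj_ctx,
	};

	int call_through_hal(struct channel_gk20a *c, u32 class_num, u32 flags)
	{
		return my_chip_gr_ops.alloc_obj_ctx(c, class_num, flags);
	}

A side benefit visible in the header hunks below: once the callback takes scalars, common headers no longer need even a forward declaration of the Linux args struct.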
@@ -2890,8 +2890,7 @@ void gk20a_free_channel_ctx(struct channel_gk20a *c, bool is_tsg)
 	c->first_init = false;
 }
 
-int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
-			struct nvgpu_alloc_obj_ctx_args *args)
+int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 {
 	struct gk20a *g = c->g;
 	struct fifo_gk20a *f = &g->fifo;
@@ -2909,13 +2908,13 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 		return -EINVAL;
 	}
 
-	if (!g->ops.gr.is_valid_class(g, args->class_num)) {
+	if (!g->ops.gr.is_valid_class(g, class_num)) {
 		nvgpu_err(g,
-			   "invalid obj class 0x%x", args->class_num);
+			   "invalid obj class 0x%x", class_num);
 		err = -EINVAL;
 		goto out;
 	}
-	c->obj_class = args->class_num;
+	c->obj_class = class_num;
 
 	if (gk20a_is_channel_marked_as_tsg(c))
 		tsg = &f->tsg[c->tsgid];
@@ -2924,8 +2923,8 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 	if (!tsg) {
 		if (!ch_ctx->gr_ctx) {
 			err = gr_gk20a_alloc_channel_gr_ctx(g, c,
-							    args->class_num,
-							    args->flags);
+							    class_num,
+							    flags);
 			if (err) {
 				nvgpu_err(g,
 					"fail to allocate gr ctx buffer");
@@ -2945,8 +2944,8 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 			tsg->vm = c->vm;
 			nvgpu_vm_get(tsg->vm);
 			err = gr_gk20a_alloc_tsg_gr_ctx(g, tsg,
-							args->class_num,
-							args->flags);
+							class_num,
+							flags);
 			if (err) {
 				nvgpu_err(g,
 					"fail to allocate TSG gr ctx buffer");
@@ -2993,7 +2992,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 	}
 
 	/* tweak any perf parameters per-context here */
-	if (args->class_num == KEPLER_COMPUTE_A) {
+	if (class_num == KEPLER_COMPUTE_A) {
 		u32 tex_lock_disable_mask;
 		u32 texlock;
 		u32 lockboost_mask;
@@ -3047,7 +3046,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 				"failed to set texlock for compute class");
 		}
 
-		args->flags |= NVGPU_ALLOC_OBJ_FLAGS_LOCKBOOST_ZERO;
+		flags |= NVGPU_ALLOC_OBJ_FLAGS_LOCKBOOST_ZERO;
 
 		if (g->support_pmu && g->can_elpg)
 			nvgpu_pmu_enable_elpg(g);
@@ -522,13 +522,7 @@ int gk20a_init_gr_channel(struct channel_gk20a *ch_gk20a);
 
 int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr);
 
-struct nvgpu_alloc_obj_ctx_args;
-struct nvgpu_free_obj_ctx_args;
-
-int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
-			struct nvgpu_alloc_obj_ctx_args *args);
-int gk20a_free_obj_ctx(struct channel_gk20a *c,
-		       struct nvgpu_free_obj_ctx_args *args);
+int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
 void gk20a_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
 
 int gk20a_gr_isr(struct gk20a *g);
@@ -1,37 +0,0 @@
-/*
- * GP10B GPU graphics ops
- *
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _GR_OPS_GP10B_H_
-#define _GR_OPS_GP10B_H_
-
-#include "gr_ops.h"
-
-#define __gr_gp10b_op(X) gr_gp10b_ ## X
-#define __set_gr_gp10b_op(X) . X = gr_gp10b_ ## X
-
-bool __gr_gp10b_op(is_valid_class)(struct gk20a *, u32);
-int __gr_gp10b_op(alloc_obj_ctx)(struct channel_gk20a *, struct nvgpu_alloc_obj_ctx_args *);
-
-
-#endif
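For reference, the __gr_gp10b_op macro in the deleted header (gr_ops_gp10b.h, named in the commit message) builds function names by token pasting, so its two prototypes declare gr_gp10b_is_valid_class() and gr_gp10b_alloc_obj_ctx(). A self-contained demo of the same preprocessor trick, using an invented demo_ prefix:

	#include <stdio.h>

	/* Same token-pasting pattern as the deleted header: prefix ## name. */
	#define __demo_op(X) demo_ ## X

	/* __demo_op(hello) expands to demo_hello at preprocessing time. */
	static int __demo_op(hello)(void)
	{
		return 42;
	}

	int main(void)
	{
		/* Both spellings name the same symbol, demo_hello(). */
		printf("%d %d\n", demo_hello(), __demo_op(hello)());
		return 0;
	}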
@@ -485,8 +485,7 @@ static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
 	return err;
 }
 
-int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
-			  struct nvgpu_alloc_obj_ctx_args *args)
+int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 {
 	struct gk20a *g = c->g;
 	struct fifo_gk20a *f = &g->fifo;
@@ -503,12 +502,12 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 		return -EINVAL;
 	}
 
-	if (!g->ops.gr.is_valid_class(g, args->class_num)) {
-		nvgpu_err(g, "invalid obj class 0x%x", args->class_num);
+	if (!g->ops.gr.is_valid_class(g, class_num)) {
+		nvgpu_err(g, "invalid obj class 0x%x", class_num);
 		err = -EINVAL;
 		goto out;
 	}
-	c->obj_class = args->class_num;
+	c->obj_class = class_num;
 
 	if (gk20a_is_channel_marked_as_tsg(c))
 		tsg = &f->tsg[c->tsgid];
@@ -518,8 +517,8 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	if (!ch_ctx->gr_ctx) {
 		err = g->ops.gr.alloc_gr_ctx(g, &c->ch_ctx.gr_ctx,
 					c->vm,
-					args->class_num,
-					args->flags);
+					class_num,
+					flags);
 		if (!err)
 			err = vgpu_gr_ch_bind_gr_ctx(c);
 		if (err) {
@@ -541,8 +540,8 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 		nvgpu_vm_get(tsg->vm);
 		err = g->ops.gr.alloc_gr_ctx(g, &tsg->tsg_gr_ctx,
 					c->vm,
-					args->class_num,
-					args->flags);
+					class_num,
+					flags);
 		if (!err)
 			err = vgpu_gr_tsg_bind_gr_ctx(tsg);
 		if (err) {
@@ -27,7 +27,6 @@
 
 struct gk20a;
 struct channel_gk20a;
-struct nvgpu_alloc_obj_ctx_args;
 struct gr_gk20a;
 struct gr_zcull_info;
 struct zbc_entry;
@@ -36,8 +35,7 @@ struct dbg_session_gk20a;
 
 void vgpu_gr_detect_sm_arch(struct gk20a *g);
 void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
-int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
-			  struct nvgpu_alloc_obj_ctx_args *args);
+int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
 int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 			     struct channel_gk20a *c, u64 zcull_va,
 			     u32 mode);