gpu: nvgpu: Remove gk20a_dbg* functions

Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are
intentionally left in place because they are still used from other
repositories.

Because the new functions do not work without a pointer to struct
gk20a, and piping it just for logging is excessive, some log messages
are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
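
To illustrate the pattern applied throughout the diffs below, here is a minimal
sketch of a converted function. The function itself is hypothetical; struct gk20a,
struct boardobj, the boardobj's g back-pointer and nvgpu_log_info() come from the
nvgpu sources, as shown in the hunks that follow.

/*
 * Hypothetical example of the logging conversion.
 *
 * Old style, no device context required:
 *	gk20a_dbg_info("");
 *
 * New style: nvgpu_log_info() takes the struct gk20a pointer, so a function
 * that is not already passed g derives it from the object it operates on.
 */
static u32 example_boardobj_destruct(struct boardobj *pboardobj)
{
	struct gk20a *g;

	if (pboardobj == NULL)
		return -EINVAL;

	g = pboardobj->g;
	nvgpu_log_info(g, " ");

	return 0;
}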
Author: Terje Bergstrom
Date: 2018-04-18 19:39:46 -07:00
Committed by: mobile promotions
Parent: 7e66f2a63d
Commit: dd739fcb03

131 changed files with 1726 additions and 1637 deletions


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ u32 boardobj_construct_super(struct gk20a *g, struct boardobj **ppboardobj,
 	struct boardobj *pboardobj = NULL;
 	struct boardobj *devtmp = (struct boardobj *)args;

-	gk20a_dbg_info(" ");
+	nvgpu_log_info(g, " ");

 	if (devtmp == NULL)
 		return -EINVAL;
@@ -61,7 +61,9 @@ u32 boardobj_construct_super(struct gk20a *g, struct boardobj **ppboardobj,
 u32 boardobj_destruct_super(struct boardobj *pboardobj)
 {
-	gk20a_dbg_info("");
+	struct gk20a *g = pboardobj->g;
+
+	nvgpu_log_info(g, " ");

 	if (pboardobj == NULL)
 		return -EINVAL;
@@ -75,7 +77,7 @@ u32 boardobj_destruct_super(struct boardobj *pboardobj)
 bool boardobj_implements_super(struct gk20a *g, struct boardobj *pboardobj,
 		u8 type)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	return (0 != (pboardobj->type_mask & BIT(type)));
 }
@@ -83,12 +85,12 @@ bool boardobj_implements_super(struct gk20a *g, struct boardobj *pboardobj,
 u32 boardobj_pmudatainit_super(struct gk20a *g, struct boardobj *pboardobj,
 		struct nv_pmu_boardobj *pmudata)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pboardobj == NULL)
 		return -EINVAL;
 	if (pmudata == NULL)
 		return -EINVAL;

 	pmudata->type = pboardobj->type;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");

 	return 0;
 }


@@ -50,7 +50,7 @@ struct boardobjgrp_pmucmdhandler_params {
 u32 boardobjgrp_construct_super(struct gk20a *g, struct boardobjgrp *pboardobjgrp)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pboardobjgrp == NULL)
 		return -EINVAL;
@@ -101,7 +101,9 @@ u32 boardobjgrp_construct_super(struct gk20a *g, struct boardobjgrp *pboardobjgr
 u32 boardobjgrp_destruct_impl(struct boardobjgrp *pboardobjgrp)
 {
-	gk20a_dbg_info("");
+	struct gk20a *g = pboardobjgrp->g;
+
+	nvgpu_log_info(g, " ");

 	if (pboardobjgrp == NULL)
 		return -EINVAL;
@@ -120,7 +122,7 @@ u32 boardobjgrp_destruct_super(struct boardobjgrp *pboardobjgrp)
 	u32 stat;
 	u8 index;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pboardobjgrp->mask == NULL)
 		return -EINVAL;
@@ -165,7 +167,7 @@ u32 boardobjgrp_pmucmd_construct_impl(struct gk20a *g, struct boardobjgrp
 	*pboardobjgrp, struct boardobjgrp_pmu_cmd *cmd, u8 id, u8 msgid,
 	u8 hdrsize, u8 entrysize, u16 fbsize, u32 ss_offset, u8 rpc_func_id)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	/* Copy the parameters into the CMD*/
 	cmd->id = id;
@@ -234,7 +236,7 @@ u32 boardobjgrp_pmucmd_pmuinithandle_impl(struct gk20a *g,
 	u32 status = 0;
 	struct nvgpu_mem *sysmem_desc = &pcmd->surf.sysmem_desc;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (g->ops.pmu_ver.boardobj.is_boardobjgrp_pmucmd_id_valid(g,
 			pboardobjgrp, pcmd))
@@ -259,7 +261,7 @@ u32 boardobjgrp_pmuinithandle_impl(struct gk20a *g,
 {
 	u32 status = 0;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobjgrp_pmucmd_pmuinithandle_impl(g, pboardobjgrp,
 		&pboardobjgrp->pmu.set);
@@ -295,7 +297,7 @@ u32 boardobjgrp_pmuhdrdatainit_super(struct gk20a *g, struct boardobjgrp
 	*pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu,
 	struct boardobjgrpmask *mask)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pboardobjgrp == NULL)
 		return -EINVAL;
@@ -306,7 +308,7 @@ u32 boardobjgrp_pmuhdrdatainit_super(struct gk20a *g, struct boardobjgrp
 	pboardobjgrppmu->obj_slots = BOARDOBJGRP_PMU_SLOTS_GET(pboardobjgrp);
 	pboardobjgrppmu->flags = 0;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return 0;
 }
@@ -314,7 +316,7 @@ static u32 boardobjgrp_pmudatainstget_stub(struct gk20a *g,
 	struct nv_pmu_boardobjgrp *boardobjgrppmu,
 	struct nv_pmu_boardobj **ppboardobjpmudata, u8 idx)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 	return -EINVAL;
 }
@@ -323,7 +325,7 @@ static u32 boardobjgrp_pmustatusinstget_stub(struct gk20a *g,
 	void *pboardobjgrppmu,
 	struct nv_pmu_boardobj_query **ppBoardobjpmustatus, u8 idx)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 	return -EINVAL;
 }
@@ -336,7 +338,7 @@ u32 boardobjgrp_pmudatainit_legacy(struct gk20a *g,
 	struct nv_pmu_boardobj *ppmudata = NULL;
 	u8 index;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pboardobjgrp == NULL)
 		return -EINVAL;
@@ -374,7 +376,7 @@ u32 boardobjgrp_pmudatainit_legacy(struct gk20a *g,
 	BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END

 boardobjgrppmudatainit_legacy_done:
-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return status;
 }
@@ -386,7 +388,7 @@ u32 boardobjgrp_pmudatainit_super(struct gk20a *g, struct boardobjgrp
 	struct nv_pmu_boardobj *ppmudata = NULL;
 	u8 index;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pboardobjgrp == NULL)
 		return -EINVAL;
@@ -420,7 +422,7 @@ u32 boardobjgrp_pmudatainit_super(struct gk20a *g, struct boardobjgrp
 	}

 boardobjgrppmudatainit_super_done:
-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return status;
 }
@@ -452,7 +454,7 @@ u32 boardobjgrp_pmuset_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp)
 	struct boardobjgrp_pmu_cmd *pcmd =
 		(struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set);

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (check_boardobjgrp_param(g, pboardobjgrp))
 		return -EINVAL;
@@ -511,7 +513,7 @@ u32 boardobjgrp_pmuset_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjgrp
 	struct boardobjgrp_pmu_cmd *pcmd =
 		(struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set);

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (check_boardobjgrp_param(g, pboardobjgrp))
 		return -EINVAL;
@@ -568,7 +570,7 @@ boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp,
 	struct boardobjgrp_pmu_cmd *pset =
 		(struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set);

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (check_boardobjgrp_param(g, pboardobjgrp))
 		return -EINVAL;
@@ -635,7 +637,7 @@ boardobjgrp_pmugetstatus_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjg
 	struct boardobjgrp_pmu_cmd *pcmd =
 		(struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.getstatus);

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (check_boardobjgrp_param(g, pboardobjgrp))
 		return -EINVAL;
@@ -690,8 +692,9 @@ static u32
 boardobjgrp_objinsert_final(struct boardobjgrp *pboardobjgrp,
 	struct boardobj *pboardobj, u8 index)
 {
+	struct gk20a *g = pboardobjgrp->g;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pboardobjgrp == NULL)
 		return -EINVAL;
@@ -719,7 +722,7 @@ boardobjgrp_objinsert_final(struct boardobjgrp *pboardobjgrp,
 	pboardobjgrp->objmask |= BIT(index);

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return boardobjgrpmask_bitset(pboardobjgrp->mask, index);
 }
@@ -789,8 +792,9 @@ static u32 boardobjgrp_objremoveanddestroy_final(
 {
 	u32 status = 0;
 	u32 stat;
+	struct gk20a *g = pboardobjgrp->g;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (!boardobjgrp_idxisvalid(pboardobjgrp, index))
 		return -EINVAL;
@@ -824,8 +828,6 @@ void boardobjgrpe32hdrset(struct nv_pmu_boardobjgrp *hdr, u32 objmask)
 {
 	u32 slots = objmask;

-	gk20a_dbg_info("");
-
 	HIGHESTBITIDX_32(slots);
 	slots++;
@@ -844,7 +846,7 @@ static void boardobjgrp_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
 	struct boardobjgrp *pboardobjgrp = phandlerparams->pboardobjgrp;
 	struct boardobjgrp_pmu_cmd *pgrpcmd = phandlerparams->pcmd;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pgrpmsg = &msg->msg.boardobj.grp;
@@ -895,7 +897,7 @@ static u32 boardobjgrp_pmucmdsend(struct gk20a *g,
 	u32 seqdesc;
 	u32 status = 0;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	memset(&payload, 0, sizeof(payload));
 	memset(&handlerparams, 0, sizeof(handlerparams));


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ u32 boardobjgrpconstruct_e255(struct gk20a *g,
 	u32 status = 0;
 	u8 objslots;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	objslots = 255;
 	status = boardobjgrpmask_e255_init(&pboardobjgrp_e255->mask, NULL);
@@ -65,7 +65,7 @@ u32 boardobjgrp_pmuhdrdatainit_e255(struct gk20a *g,
 		(struct nv_pmu_boardobjgrp_e255 *)pboardobjgrppmu;
 	u32 status;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pboardobjgrp == NULL)
 		return -EINVAL;


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,7 +34,7 @@ u32 boardobjgrpconstruct_e32(struct gk20a *g,
 	u32 status;
 	u8 objslots;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	objslots = 32;
 	status = boardobjgrpmask_e32_init(&pboardobjgrp_e32->mask, NULL);
@@ -65,7 +65,7 @@ u32 boardobjgrp_pmuhdrdatainit_e32(struct gk20a *g,
 		(struct nv_pmu_boardobjgrp_e32 *)pboardobjgrppmu;
 	u32 status;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pboardobjgrp == NULL)
 		return -EINVAL;


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
 	struct clkrpc_pmucmdhandler_params *phandlerparams =
 		(struct clkrpc_pmucmdhandler_params *)param;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) {
 		nvgpu_err(g, "unsupported msg for VFE LOAD RPC %x",


@@ -153,7 +153,7 @@ static u32 _clk_domains_pmudata_instget(struct gk20a *g,
 		(struct nv_pmu_clk_clk_domain_boardobj_grp_set *)
 		pmuboardobjgrp;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	/*check whether pmuboardobjgrp has a valid boardobj in index*/
 	if (((u32)BIT(idx) &
@@ -162,7 +162,7 @@ static u32 _clk_domains_pmudata_instget(struct gk20a *g,
 	*ppboardobjpmudata = (struct nv_pmu_boardobj *)
 		&pgrp_set->objects[idx].data.board_obj;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return 0;
 }
@@ -176,7 +176,7 @@ u32 clk_domain_sw_setup(struct gk20a *g)
 	struct clk_domain_3x_slave *pdomain_slave;
 	u8 i;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobjgrpconstruct_e32(g, &g->clk_pmu.clk_domainobjs.super);
 	if (status) {
@@ -255,7 +255,7 @@ u32 clk_domain_sw_setup(struct gk20a *g)
 	}

 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }
@@ -264,7 +264,7 @@ u32 clk_domain_pmu_setup(struct gk20a *g)
 	u32 status;
 	struct boardobjgrp *pboardobjgrp = NULL;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pboardobjgrp = &g->clk_pmu.clk_domainobjs.super.super;
@@ -273,7 +273,7 @@ u32 clk_domain_pmu_setup(struct gk20a *g)
 	status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);

-	gk20a_dbg_info("Done");
+	nvgpu_log_info(g, "Done");
 	return status;
 }
@@ -298,7 +298,7 @@ static u32 devinit_get_clocks_table(struct gk20a *g,
 		struct clk_domain_3x_slave v3x_slave;
 	} clk_domain_data;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	clocks_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
 			g->bios.clock_token, CLOCKS_TABLE);
@@ -459,7 +459,7 @@ static u32 devinit_get_clocks_table(struct gk20a *g,
 	}

 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }
@@ -467,7 +467,7 @@ static u32 clkdomainclkproglink_not_supported(struct gk20a *g,
 		struct clk_pmupstate *pclk,
 		struct clk_domain *pdomain)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 	return -EINVAL;
 }
@@ -480,7 +480,7 @@ static int clkdomainvfsearch_stub(
 	u8 rail)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 	return -EINVAL;
 }
@@ -492,7 +492,7 @@ static u32 clkdomaingetfpoints_stub(
 	u16 *pfreqpointsinmhz,
 	u8 rail)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 	return -EINVAL;
 }
@@ -541,7 +541,7 @@ static u32 _clk_domain_pmudatainit_3x(struct gk20a *g,
 	struct clk_domain_3x *pclk_domain_3x;
 	struct nv_pmu_clk_clk_domain_3x_boardobj_set *pset;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = clk_domain_pmudatainit_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -592,7 +592,7 @@ static u32 clkdomainclkproglink_3x_prog(struct gk20a *g,
 	struct clk_prog *pprog = NULL;
 	u8 i;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	for (i = p3xprog->clk_prog_idx_first;
 	     i <= p3xprog->clk_prog_idx_last;
@@ -616,7 +616,7 @@ static int clkdomaingetslaveclk(struct gk20a *g,
 	u8 slaveidx;
 	struct clk_domain_3x_master *p3xmaster;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pclkmhz == NULL)
 		return -EINVAL;
@@ -657,7 +657,7 @@ static int clkdomainvfsearch(struct gk20a *g,
 	u16 bestclkmhz;
 	u32 bestvoltuv;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if ((pclkmhz == NULL) || (pvoltuv == NULL))
 		return -EINVAL;
@@ -719,7 +719,7 @@ static int clkdomainvfsearch(struct gk20a *g,
 		goto done;
 	}
 done:
-	gk20a_dbg_info("done status %x", status);
+	nvgpu_log_info(g, "done status %x", status);
 	return status;
 }
@@ -744,7 +744,7 @@ static u32 clkdomaingetfpoints
 	u16 *freqpointsdata;
 	u8 i;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	if (pfpointscount == NULL)
 		return -EINVAL;
@@ -783,7 +783,7 @@ static u32 clkdomaingetfpoints
 	*pfpointscount = totalcount;

 done:
-	gk20a_dbg_info("done status %x", status);
+	nvgpu_log_info(g, "done status %x", status);
 	return status;
 }
@@ -796,7 +796,7 @@ static u32 _clk_domain_pmudatainit_3x_prog(struct gk20a *g,
 	struct nv_pmu_clk_clk_domain_3x_prog_boardobj_set *pset;
 	struct clk_domains *pdomains = &(g->clk_pmu.clk_domainobjs);

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -876,7 +876,7 @@ static u32 _clk_domain_pmudatainit_3x_slave(struct gk20a *g,
 	struct clk_domain_3x_slave *pclk_domain_3x_slave;
 	struct nv_pmu_clk_clk_domain_3x_slave_boardobj_set *pset;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -935,7 +935,7 @@ static u32 clkdomainclkproglink_3x_master(struct gk20a *g,
 	u16 freq_max_last_mhz = 0;
 	u8 i;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = clkdomainclkproglink_3x_prog(g, pclk, pdomain);
 	if (status)
@@ -961,7 +961,7 @@ static u32 clkdomainclkproglink_3x_master(struct gk20a *g,
 		goto done;
 	}
 done:
-	gk20a_dbg_info("done status %x", status);
+	nvgpu_log_info(g, "done status %x", status);
 	return status;
 }
@@ -973,7 +973,7 @@ static u32 _clk_domain_pmudatainit_3x_master(struct gk20a *g,
 	struct clk_domain_3x_master *pclk_domain_3x_master;
 	struct nv_pmu_clk_clk_domain_3x_master_boardobj_set *pset;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -1021,7 +1021,7 @@ static u32 clkdomainclkproglink_fixed(struct gk20a *g,
 		struct clk_pmupstate *pclk,
 		struct clk_domain *pdomain)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 	return 0;
 }
@@ -1033,7 +1033,7 @@ static u32 _clk_domain_pmudatainit_3x_fixed(struct gk20a *g,
 	struct clk_domain_3x_fixed *pclk_domain_3x_fixed;
 	struct nv_pmu_clk_clk_domain_3x_fixed_boardobj_set *pset;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -1085,7 +1085,7 @@ static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs)
 	struct boardobj *board_obj_ptr = NULL;
 	u32 status;

-	gk20a_dbg_info(" %d", BOARDOBJ_GET_TYPE(pargs));
+	nvgpu_log_info(g, " %d", BOARDOBJ_GET_TYPE(pargs));

 	switch (BOARDOBJ_GET_TYPE(pargs)) {
 	case CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED:
 		status = clk_domain_construct_3x_fixed(g, &board_obj_ptr,
@@ -1109,7 +1109,7 @@ static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs)
 	if (status)
 		return NULL;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");

 	return (struct clk_domain *)board_obj_ptr;
 }
@@ -1122,7 +1122,7 @@ static u32 clk_domain_pmudatainit_super(struct gk20a *g,
 	struct clk_domain *pclk_domain;
 	struct nv_pmu_clk_clk_domain_boardobj_set *pset;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)


@@ -50,7 +50,7 @@ static u32 _clk_fll_devgrp_pmudatainit_super(struct gk20a *g,
 		pboardobjgrp;
 	u32 status = 0;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu);
 	if (status) {
@@ -67,7 +67,7 @@ static u32 _clk_fll_devgrp_pmudatainit_super(struct gk20a *g,
 		pfll_objs->lut_prog_master_mask.super.bitcount,
 		&pset->lut_prog_master_mask.super);

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return status;
 }
@@ -80,7 +80,7 @@ static u32 _clk_fll_devgrp_pmudata_instget(struct gk20a *g,
 		(struct nv_pmu_clk_clk_fll_device_boardobj_grp_set *)
 		pmuboardobjgrp;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	/*check whether pmuboardobjgrp has a valid boardobj in index*/
 	if (((u32)BIT(idx) &
@@ -89,7 +89,7 @@ static u32 _clk_fll_devgrp_pmudata_instget(struct gk20a *g,
 	*ppboardobjpmudata = (struct nv_pmu_boardobj *)
 		&pgrp_set->objects[idx].data.board_obj;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return 0;
 }
@@ -123,7 +123,7 @@ u32 clk_fll_sw_setup(struct gk20a *g)
 	u8 i;
 	u8 j;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_fllobjs.super);
 	if (status) {
@@ -202,7 +202,7 @@ u32 clk_fll_sw_setup(struct gk20a *g)
 		}
 	}
 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }
@@ -211,7 +211,7 @@ u32 clk_fll_pmu_setup(struct gk20a *g)
 	u32 status;
 	struct boardobjgrp *pboardobjgrp = NULL;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pboardobjgrp = &g->clk_pmu.avfs_fllobjs.super.super;
@@ -220,7 +220,7 @@ u32 clk_fll_pmu_setup(struct gk20a *g)
 	status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);

-	gk20a_dbg_info("Done");
+	nvgpu_log_info(g, "Done");
 	return status;
 }
@@ -241,7 +241,7 @@ static u32 devinit_get_fll_device_table(struct gk20a *g,
 	u32 vbios_domain = NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP;
 	struct avfsvinobjs *pvinobjs = &g->clk_pmu.avfs_vinobjs;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	fll_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
 			g->bios.clock_token, FLL_TABLE);
@@ -350,7 +350,7 @@ static u32 devinit_get_fll_device_table(struct gk20a *g,
 	}

 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }
@@ -399,7 +399,7 @@ static struct fll_device *construct_fll_device(struct gk20a *g,
 	struct fll_device *board_obj_fll_ptr = NULL;
 	u32 status;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobj_construct_super(g, &board_obj_ptr,
 		sizeof(struct fll_device), pargs);
 	if (status)
@@ -429,7 +429,7 @@ static struct fll_device *construct_fll_device(struct gk20a *g,
 	boardobjgrpmask_e32_init(
 		&board_obj_fll_ptr->lut_prog_broadcast_slave_mask, NULL);

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return (struct fll_device *)board_obj_ptr;
 }
@@ -442,7 +442,7 @@ static u32 fll_device_init_pmudata_super(struct gk20a *g,
 	struct fll_device *pfll_dev;
 	struct nv_pmu_clk_clk_fll_device_boardobj_set *perf_pmu_data;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -473,7 +473,7 @@ static u32 fll_device_init_pmudata_super(struct gk20a *g,
 		pfll_dev->lut_prog_broadcast_slave_mask.super.bitcount,
 		&perf_pmu_data->lut_prog_broadcast_slave_mask.super);

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return status;
 }


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -321,7 +321,7 @@ u32 clk_freq_controller_pmu_setup(struct gk20a *g)
 	u32 status;
 	struct boardobjgrp *pboardobjgrp = NULL;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pboardobjgrp = &g->clk_pmu.clk_freq_controllers.super.super;
@@ -330,7 +330,7 @@ u32 clk_freq_controller_pmu_setup(struct gk20a *g)
 	status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);

-	gk20a_dbg_info("Done");
+	nvgpu_log_info(g, "Done");
 	return status;
 }
@@ -343,7 +343,7 @@ static u32 _clk_freq_controller_devgrp_pmudata_instget(struct gk20a *g,
 		(struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set *)
 		pmuboardobjgrp;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	/*check whether pmuboardobjgrp has a valid boardobj in index*/
 	if (((u32)BIT(idx) &
@@ -352,7 +352,7 @@ static u32 _clk_freq_controller_devgrp_pmudata_instget(struct gk20a *g,
 	*ppboardobjpmudata = (struct nv_pmu_boardobj *)
 		&pgrp_set->objects[idx].data.board_obj;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return 0;
 }
@@ -392,7 +392,7 @@ u32 clk_freq_controller_sw_setup(struct gk20a *g)
 	u8 i;
 	u8 j;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pclk_freq_controllers = &g->clk_pmu.clk_freq_controllers;
 	status = boardobjgrpconstruct_e32(g, &pclk_freq_controllers->super);
@@ -447,6 +447,6 @@ u32 clk_freq_controller_sw_setup(struct gk20a *g)
 			freq_ctrl_load_mask.super, i);
 	}
 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -72,7 +72,7 @@ static u32 _clk_progs_pmudata_instget(struct gk20a *g,
 	struct nv_pmu_clk_clk_prog_boardobj_grp_set *pgrp_set =
 		(struct nv_pmu_clk_clk_prog_boardobj_grp_set *)pmuboardobjgrp;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	/*check whether pmuboardobjgrp has a valid boardobj in index*/
 	if (((u32)BIT(idx) &
@@ -81,7 +81,7 @@ static u32 _clk_progs_pmudata_instget(struct gk20a *g,
 	*ppboardobjpmudata = (struct nv_pmu_boardobj *)
 		&pgrp_set->objects[idx].data.board_obj;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return 0;
 }
@@ -91,7 +91,7 @@ u32 clk_prog_sw_setup(struct gk20a *g)
 	struct boardobjgrp *pboardobjgrp = NULL;
 	struct clk_progs *pclkprogobjs;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_progobjs.super);
 	if (status) {
@@ -130,7 +130,7 @@ u32 clk_prog_sw_setup(struct gk20a *g)
 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }
@@ -139,7 +139,7 @@ u32 clk_prog_pmu_setup(struct gk20a *g)
 	u32 status;
 	struct boardobjgrp *pboardobjgrp = NULL;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pboardobjgrp = &g->clk_pmu.clk_progobjs.super.super;
@@ -148,7 +148,7 @@ u32 clk_prog_pmu_setup(struct gk20a *g)
 	status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);

-	gk20a_dbg_info("Done");
+	nvgpu_log_info(g, "Done");
 	return status;
 }
@@ -186,7 +186,7 @@ static u32 devinit_get_clk_prog_table(struct gk20a *g,
 		struct clk_prog_1x_master_table v1x_master_table;
 	} prog_data;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	clkprogs_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
 			g->bios.clock_token, CLOCK_PROGRAMMING_TABLE);
@@ -372,7 +372,7 @@ static u32 devinit_get_clk_prog_table(struct gk20a *g,
 		}
 	}
 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }
@@ -382,7 +382,7 @@ static u32 _clk_prog_pmudatainit_super(struct gk20a *g,
 {
 	u32 status = 0;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
 	return status;
@@ -396,7 +396,7 @@ static u32 _clk_prog_pmudatainit_1x(struct gk20a *g,
 	struct clk_prog_1x *pclk_prog_1x;
 	struct nv_pmu_clk_clk_prog_1x_boardobj_set *pset;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_prog_pmudatainit_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -424,7 +424,7 @@ static u32 _clk_prog_pmudatainit_1x_master(struct gk20a *g,
 	u32 vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) *
 		g->clk_pmu.clk_progobjs.vf_entry_count;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_prog_pmudatainit_1x(g, board_obj_ptr, ppmudata);
@@ -455,7 +455,7 @@ static u32 _clk_prog_pmudatainit_1x_master_ratio(struct gk20a *g,
 	u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) *
 		g->clk_pmu.clk_progobjs.slave_entry_count;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -483,7 +483,7 @@ static u32 _clk_prog_pmudatainit_1x_master_table(struct gk20a *g,
 	u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) *
 		g->clk_pmu.clk_progobjs.slave_entry_count;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -510,7 +510,7 @@ static u32 _clk_prog_1x_master_rail_construct_vf_point(struct gk20a *g,
 	struct clk_vf_point *p_vf_point;
 	u32 status;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	p_vf_point = construct_clk_vf_point(g, (void *)p_vf_point_tmp);
 	if (p_vf_point == NULL) {
@@ -527,7 +527,7 @@ static u32 _clk_prog_1x_master_rail_construct_vf_point(struct gk20a *g,
 	p_vf_rail->vf_point_idx_last = (*p_vf_point_idx)++;

 done:
-	gk20a_dbg_info("done status %x", status);
+	nvgpu_log_info(g, "done status %x", status);
 	return status;
 }
@@ -561,7 +561,7 @@ static u32 clk_prog_construct_1x(struct gk20a *g,
 		(struct clk_prog_1x *)pargs;
 	u32 status = 0;

-	gk20a_dbg_info(" ");
+	nvgpu_log_info(g, " ");

 	ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X);
 	status = clk_prog_construct_super(g, ppboardobj, size, pargs);
 	if (status)
@@ -592,7 +592,7 @@ static u32 clk_prog_construct_1x_master(struct gk20a *g,
 		g->clk_pmu.clk_progobjs.vf_entry_count;
 	u8 railidx;

-	gk20a_dbg_info(" type - %x", BOARDOBJ_GET_TYPE(pargs));
+	nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs));

 	ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X_MASTER);
 	status = clk_prog_construct_1x(g, ppboardobj, size, pargs);
@@ -686,7 +686,7 @@ static u32 clk_prog_construct_1x_master_table(struct gk20a *g,
 	u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) *
 		g->clk_pmu.clk_progobjs.slave_entry_count;

-	gk20a_dbg_info("type - %x", BOARDOBJ_GET_TYPE(pargs));
+	nvgpu_log_info(g, "type - %x", BOARDOBJ_GET_TYPE(pargs));

 	if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_TABLE)
 		return -EINVAL;
@@ -727,7 +727,7 @@ static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs)
 	struct boardobj *board_obj_ptr = NULL;
 	u32 status;

-	gk20a_dbg_info(" type - %x", BOARDOBJ_GET_TYPE(pargs));
+	nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs));

 	switch (BOARDOBJ_GET_TYPE(pargs)) {
 	case CTRL_CLK_CLK_PROG_TYPE_1X:
 		status = clk_prog_construct_1x(g, &board_obj_ptr,
@@ -754,7 +754,7 @@ static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs)
 		return NULL;
 	}

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");

 	return (struct clk_prog *)board_obj_ptr;
 }
@@ -777,7 +777,7 @@ static u32 vfflatten_prog_1x_master(struct gk20a *g,
 	u8 vf_point_idx;
 	u8 vf_rail_idx;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	memset(&vf_point_data, 0x0, sizeof(vf_point_data));

 	vf_point_idx = BOARDOBJGRP_NEXT_EMPTY_IDX(
@@ -851,7 +851,7 @@ static u32 vfflatten_prog_1x_master(struct gk20a *g,
 	*pfreqmaxlastmhz = p1xmaster->super.freq_max_mhz;

 done:
-	gk20a_dbg_info("done status %x", status);
+	nvgpu_log_info(g, "done status %x", status);
 	return status;
 }


@@ -59,7 +59,7 @@ static u32 _clk_vf_points_pmudata_instget(struct gk20a *g,
 		(struct nv_pmu_clk_clk_vf_point_boardobj_grp_set *)
 		pmuboardobjgrp;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	/*check whether pmuboardobjgrp has a valid boardobj in index*/
 	if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS)
@@ -67,7 +67,7 @@ static u32 _clk_vf_points_pmudata_instget(struct gk20a *g,
 	*ppboardobjpmudata = (struct nv_pmu_boardobj *)
 		&pgrp_set->objects[idx].data.board_obj;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return 0;
 }
@@ -94,7 +94,7 @@ u32 clk_vf_point_sw_setup(struct gk20a *g)
 	u32 status;
 	struct boardobjgrp *pboardobjgrp = NULL;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_vf_pointobjs.super);
 	if (status) {
@@ -132,7 +132,7 @@ u32 clk_vf_point_sw_setup(struct gk20a *g)
 	pboardobjgrp->pmustatusinstget = _clk_vf_points_pmustatus_instget;

 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }
@@ -141,7 +141,7 @@ u32 clk_vf_point_pmu_setup(struct gk20a *g)
 	u32 status;
 	struct boardobjgrp *pboardobjgrp = NULL;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pboardobjgrp = &g->clk_pmu.clk_vf_pointobjs.super.super;
@@ -150,7 +150,7 @@ u32 clk_vf_point_pmu_setup(struct gk20a *g)
 	status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);

-	gk20a_dbg_info("Done");
+	nvgpu_log_info(g, "Done");
 	return status;
 }
@@ -187,7 +187,7 @@ static u32 _clk_vf_point_pmudatainit_volt(struct gk20a *g,
 	struct clk_vf_point_volt *pclk_vf_point_volt;
 	struct nv_pmu_clk_clk_vf_point_volt_boardobj_set *pset;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -214,7 +214,7 @@ static u32 _clk_vf_point_pmudatainit_freq(struct gk20a *g,
 	struct clk_vf_point_freq *pclk_vf_point_freq;
 	struct nv_pmu_clk_clk_vf_point_freq_boardobj_set *pset;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -297,7 +297,7 @@ struct clk_vf_point *construct_clk_vf_point(struct gk20a *g, void *pargs)
 	struct boardobj *board_obj_ptr = NULL;
 	u32 status;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	switch (BOARDOBJ_GET_TYPE(pargs)) {
 	case CTRL_CLK_CLK_VF_POINT_TYPE_FREQ:
 		status = clk_vf_point_construct_freq(g, &board_obj_ptr,
@@ -316,7 +316,7 @@ struct clk_vf_point *construct_clk_vf_point(struct gk20a *g, void *pargs)
 	if (status)
 		return NULL;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");

 	return (struct clk_vf_point *)board_obj_ptr;
 }
@@ -329,7 +329,7 @@ static u32 _clk_vf_point_pmudatainit_super(struct gk20a *g,
 	struct clk_vf_point *pclk_vf_point;
 	struct nv_pmu_clk_clk_vf_point_boardobj_set *pset;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -355,7 +355,7 @@ static u32 clk_vf_point_update(struct gk20a *g,
 	struct clk_vf_point *pclk_vf_point;
 	struct nv_pmu_clk_clk_vf_point_boardobj_get_status *pstatus;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pclk_vf_point =
@@ -388,7 +388,7 @@ u32 clk_vf_point_cache(struct gk20a *g)
 	u32 status;
 	u8 index;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pclk_vf_points = &g->clk_pmu.clk_vf_pointobjs;
 	pboardobjgrp = &pclk_vf_points->super.super;
 	pboardobjgrpmask = &pclk_vf_points->super.mask.super;


@@ -323,13 +323,13 @@ static u32 _clk_vin_devgrp_pmudatainit_super(struct gk20a *g,
 	struct avfsvinobjs *pvin_obbj = (struct avfsvinobjs *)pboardobjgrp;
 	u32 status = 0;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu);

 	pset->b_vin_is_disable_allowed = pvin_obbj->vin_is_disable_allowed;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return status;
 }
@@ -342,7 +342,7 @@ static u32 _clk_vin_devgrp_pmudata_instget(struct gk20a *g,
 		(struct nv_pmu_clk_clk_vin_device_boardobj_grp_set *)
 		pmuboardobjgrp;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	/*check whether pmuboardobjgrp has a valid boardobj in index*/
 	if (((u32)BIT(idx) &
@@ -351,7 +351,7 @@ static u32 _clk_vin_devgrp_pmudata_instget(struct gk20a *g,
 	*ppboardobjpmudata = (struct nv_pmu_boardobj *)
 		&pgrp_set->objects[idx].data.board_obj;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return 0;
 }
@@ -381,7 +381,7 @@ u32 clk_vin_sw_setup(struct gk20a *g)
 	struct vin_device_v20 *pvindev = NULL;
 	struct avfsvinobjs *pvinobjs;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_vinobjs.super);
 	if (status) {
@@ -427,7 +427,7 @@ u32 clk_vin_sw_setup(struct gk20a *g)
 	}

 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }
@@ -436,7 +436,7 @@ u32 clk_vin_pmu_setup(struct gk20a *g)
 	u32 status;
 	struct boardobjgrp *pboardobjgrp = NULL;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	pboardobjgrp = &g->clk_pmu.avfs_vinobjs.super.super;
@@ -445,7 +445,7 @@ u32 clk_vin_pmu_setup(struct gk20a *g)
 	status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);

-	gk20a_dbg_info("Done");
+	nvgpu_log_info(g, "Done");
 	return status;
 }
@@ -470,7 +470,7 @@ static u32 devinit_get_vin_device_table(struct gk20a *g,
 		struct vin_device_v20 vin_device_v20;
 	} vin_device_data;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	vin_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
 			g->bios.clock_token, VIN_TABLE);
@@ -557,7 +557,7 @@ static u32 devinit_get_vin_device_table(struct gk20a *g,
 	}

 done:
-	gk20a_dbg_info(" done status %x", status);
+	nvgpu_log_info(g, " done status %x", status);
 	return status;
 }
@@ -645,7 +645,7 @@ static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs)
 	struct boardobj *board_obj_ptr = NULL;
 	u32 status;

-	gk20a_dbg_info(" %d", BOARDOBJ_GET_TYPE(pargs));
+	nvgpu_log_info(g, " %d", BOARDOBJ_GET_TYPE(pargs));

 	switch (BOARDOBJ_GET_TYPE(pargs)) {
 	case CTRL_CLK_VIN_TYPE_V10:
 		status = vin_device_construct_v10(g, &board_obj_ptr,
@@ -664,7 +664,7 @@ static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs)
 	if (status)
 		return NULL;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");

 	return (struct vin_device *)board_obj_ptr;
 }
@@ -679,7 +679,7 @@ static u32 vin_device_init_pmudata_v10(struct gk20a *g,
 	struct vin_device_v20 *pvin_dev_v20;
 	struct nv_pmu_clk_clk_vin_device_v10_boardobj_set *perf_pmu_data;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -692,7 +692,7 @@ static u32 vin_device_init_pmudata_v10(struct gk20a *g,
 	perf_pmu_data->data.vin_cal.intercept = pvin_dev_v20->data.vin_cal.cal_v10.intercept;
 	perf_pmu_data->data.vin_cal.slope = pvin_dev_v20->data.vin_cal.cal_v10.slope;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return status;
 }
@@ -705,7 +705,7 @@ static u32 vin_device_init_pmudata_v20(struct gk20a *g,
 	struct vin_device_v20 *pvin_dev_v20;
 	struct nv_pmu_clk_clk_vin_device_v20_boardobj_set *perf_pmu_data;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -718,7 +718,7 @@ static u32 vin_device_init_pmudata_v20(struct gk20a *g,
 	perf_pmu_data->data.vin_cal.cal_v20.offset = pvin_dev_v20->data.vin_cal.cal_v20.offset;
 	perf_pmu_data->data.vin_cal.cal_v20.gain = pvin_dev_v20->data.vin_cal.cal_v20.gain;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return status;
 }
@@ -731,7 +731,7 @@ static u32 vin_device_init_pmudata_super(struct gk20a *g,
 	struct vin_device *pvin_dev;
 	struct nv_pmu_clk_clk_vin_device_boardobj_set *perf_pmu_data;

-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");

 	status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
 	if (status != 0)
@@ -745,7 +745,7 @@ static u32 vin_device_init_pmudata_super(struct gk20a *g,
 	perf_pmu_data->volt_domain = pvin_dev->volt_domain;
 	perf_pmu_data->flls_shared_mask = pvin_dev->flls_shared_mask;

-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return status;
 }


@@ -1,7 +1,7 @@
 /*
  * GK20A Address Spaces
  *
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,13 +34,17 @@
 /* dumb allocator... */
 static int generate_as_share_id(struct gk20a_as *as)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = gk20a_from_as(as);
+
+	nvgpu_log_fn(g, " ");
 	return ++as->last_share_id;
 }

 /* still dumb */
 static void release_as_share_id(struct gk20a_as *as, int id)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = gk20a_from_as(as);
+
+	nvgpu_log_fn(g, " ");
 	return;
 }
@@ -56,7 +60,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 	const bool userspace_managed =
 		(flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	if (big_page_size == 0) {
 		big_page_size = g->ops.mm.get_default_big_page_size();
@@ -92,7 +96,7 @@ int gk20a_as_alloc_share(struct gk20a *g,
 	struct gk20a_as_share *as_share;
 	int err = 0;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	g = gk20a_get(g);
 	if (!g)
 		return -ENODEV;
@@ -126,8 +130,9 @@ failed:
 int gk20a_vm_release_share(struct gk20a_as_share *as_share)
 {
 	struct vm_gk20a *vm = as_share->vm;
+	struct gk20a *g = gk20a_from_vm(vm);

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	vm->as_share = NULL;
 	as_share->vm = NULL;
@@ -146,7 +151,7 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
 	struct gk20a *g = as_share->vm->mm->g;
 	int err;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	err = gk20a_busy(g);


@@ -464,7 +464,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
 			new_data = cde_ctx->user_param_values[user_id];
 		}

-		gk20a_dbg(gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx",
+		nvgpu_log(g, gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx",
 			  i, param->id, param->target_buf,
 			  param->target_byte_offset, new_data,
 			  param->data_offset, param->type, param->shift,
@@ -790,8 +790,9 @@ __acquires(&cde_app->mutex)
 __releases(&cde_app->mutex)
 {
 	struct gk20a_cde_app *cde_app = &cde_ctx->l->cde_app;
+	struct gk20a *g = &cde_ctx->l->g;

-	gk20a_dbg(gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx);
+	nvgpu_log(g, gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx);
 	trace_gk20a_cde_release(cde_ctx);

 	nvgpu_mutex_acquire(&cde_app->mutex);
@@ -801,7 +802,7 @@ __releases(&cde_app->mutex)
 		nvgpu_list_move(&cde_ctx->list, &cde_app->free_contexts);
 		cde_app->ctx_usecount--;
 	} else {
-		gk20a_dbg_info("double release cde context %p", cde_ctx);
+		nvgpu_log_info(g, "double release cde context %p", cde_ctx);
 	}

 	nvgpu_mutex_release(&cde_app->mutex);
@@ -823,7 +824,7 @@ __releases(&cde_app->mutex)
 	if (cde_ctx->in_use || !cde_app->initialised)
 		return;

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
 			"cde: attempting to delete temporary %p", cde_ctx);

 	err = gk20a_busy(g);
@@ -837,7 +838,7 @@ __releases(&cde_app->mutex)
 	nvgpu_mutex_acquire(&cde_app->mutex);
 	if (cde_ctx->in_use || !cde_app->initialised) {
-		gk20a_dbg(gpu_dbg_cde_ctx,
+		nvgpu_log(g, gpu_dbg_cde_ctx,
 				"cde: context use raced, not deleting %p",
 				cde_ctx);
 		goto out;
@@ -847,7 +848,7 @@ __releases(&cde_app->mutex)
 		 "double pending %p", cde_ctx);

 	gk20a_cde_remove_ctx(cde_ctx);
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
 			"cde: destroyed %p count=%d use=%d max=%d",
 			cde_ctx, cde_app->ctx_count, cde_app->ctx_usecount,
 			cde_app->ctx_count_top);
@@ -874,7 +875,7 @@ __must_hold(&cde_app->mutex)
 	if (!nvgpu_list_empty(&cde_app->free_contexts)) {
 		cde_ctx = nvgpu_list_first_entry(&cde_app->free_contexts,
 				gk20a_cde_ctx, list);
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
 				"cde: got free %p count=%d use=%d max=%d",
 				cde_ctx, cde_app->ctx_count,
 				cde_app->ctx_usecount,
@@ -893,7 +894,7 @@ __must_hold(&cde_app->mutex)
 	/* no free contexts, get a temporary one */

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
 			"cde: no free contexts, count=%d",
 			cde_app->ctx_count);
@@ -967,7 +968,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct nvgpu_os_linux *l
 	INIT_DELAYED_WORK(&cde_ctx->ctx_deleter_work,
 			  gk20a_cde_ctx_deleter_fn);

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx);
 	trace_gk20a_cde_allocate_context(cde_ctx);
return cde_ctx; return cde_ctx;
} }
@@ -1005,7 +1006,7 @@ __releases(&l->cde_app->mutex)
u32 submit_op; u32 submit_op;
struct dma_buf_attachment *attachment; struct dma_buf_attachment *attachment;
gk20a_dbg(gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu", nvgpu_log(g, gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu",
compbits_byte_offset, scatterbuffer_byte_offset); compbits_byte_offset, scatterbuffer_byte_offset);
/* scatter buffer must be after compbits buffer */ /* scatter buffer must be after compbits buffer */
@@ -1055,11 +1056,11 @@ __releases(&l->cde_app->mutex)
compbits_byte_offset; compbits_byte_offset;
} }
gk20a_dbg(gpu_dbg_cde, "map_offset=%llu map_size=%llu", nvgpu_log(g, gpu_dbg_cde, "map_offset=%llu map_size=%llu",
map_offset, map_size); map_offset, map_size);
gk20a_dbg(gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu", nvgpu_log(g, gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu",
mapped_compbits_offset, compbits_size); mapped_compbits_offset, compbits_size);
gk20a_dbg(gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu", nvgpu_log(g, gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu",
mapped_scatterbuffer_offset, scatterbuffer_size); mapped_scatterbuffer_offset, scatterbuffer_size);
@@ -1096,7 +1097,7 @@ __releases(&l->cde_app->mutex)
scatter_buffer = surface + scatterbuffer_byte_offset; scatter_buffer = surface + scatterbuffer_byte_offset;
gk20a_dbg(gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p", nvgpu_log(g, gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p",
surface, scatter_buffer); surface, scatter_buffer);
sgt = gk20a_mm_pin(dev_from_gk20a(g), compbits_scatter_buf, sgt = gk20a_mm_pin(dev_from_gk20a(g), compbits_scatter_buf,
&attachment); &attachment);
@@ -1163,11 +1164,11 @@ __releases(&l->cde_app->mutex)
goto exit_unmap_surface; goto exit_unmap_surface;
} }
gk20a_dbg(gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n", nvgpu_log(g, gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n",
g->gr.compbit_store.mem.size, cde_ctx->backing_store_vaddr); g->gr.compbit_store.mem.size, cde_ctx->backing_store_vaddr);
gk20a_dbg(gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n", nvgpu_log(g, gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n",
cde_ctx->compbit_size, cde_ctx->compbit_vaddr); cde_ctx->compbit_size, cde_ctx->compbit_vaddr);
gk20a_dbg(gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n", nvgpu_log(g, gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n",
cde_ctx->scatterbuffer_size, cde_ctx->scatterbuffer_vaddr); cde_ctx->scatterbuffer_size, cde_ctx->scatterbuffer_vaddr);
/* take always the postfence as it is needed for protecting the /* take always the postfence as it is needed for protecting the
@@ -1234,9 +1235,9 @@ __releases(&cde_app->mutex)
return; return;
trace_gk20a_cde_finished_ctx_cb(cde_ctx); trace_gk20a_cde_finished_ctx_cb(cde_ctx);
gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx);
if (!cde_ctx->in_use) if (!cde_ctx->in_use)
gk20a_dbg_info("double finish cde context %p on channel %p", nvgpu_log_info(g, "double finish cde context %p on channel %p",
cde_ctx, ch); cde_ctx, ch);
if (ch->has_timedout) { if (ch->has_timedout) {
@@ -1406,12 +1407,13 @@ __acquires(&cde_app->mutex)
__releases(&cde_app->mutex) __releases(&cde_app->mutex)
{ {
struct gk20a_cde_app *cde_app = &l->cde_app; struct gk20a_cde_app *cde_app = &l->cde_app;
struct gk20a *g = &l->g;
int err; int err;
if (cde_app->initialised) if (cde_app->initialised)
return 0; return 0;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init"); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");
err = nvgpu_mutex_init(&cde_app->mutex); err = nvgpu_mutex_init(&cde_app->mutex);
if (err) if (err)
@@ -1430,7 +1432,7 @@ __releases(&cde_app->mutex)
cde_app->initialised = true; cde_app->initialised = true;
nvgpu_mutex_release(&cde_app->mutex); nvgpu_mutex_release(&cde_app->mutex);
gk20a_dbg(gpu_dbg_cde_ctx, "cde: init finished: %d", err); nvgpu_log(g, gpu_dbg_cde_ctx, "cde: init finished: %d", err);
if (err) if (err)
nvgpu_mutex_destroy(&cde_app->mutex); nvgpu_mutex_destroy(&cde_app->mutex);
@@ -1528,14 +1530,14 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)", nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
xtiles, ytiles); xtiles, ytiles);
gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx", nvgpu_log(g, gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
width, height, block_height_log2, width, height, block_height_log2,
compbits_hoffset, compbits_voffset, scatterbuffer_offset); compbits_hoffset, compbits_voffset, scatterbuffer_offset);
gk20a_dbg(gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)", nvgpu_log(g, gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)",
width, height, xtiles, ytiles); width, height, xtiles, ytiles);
gk20a_dbg(gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)", nvgpu_log(g, gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)",
wgx, wgy, gridw_h, gridh_h, gridw_v, gridh_v); wgx, wgy, gridw_h, gridh_h, gridw_v, gridh_v);
gk20a_dbg(gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d", nvgpu_log(g, gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d",
hprog, hprog,
l->cde_app.arrays[ARRAY_PROGRAM_OFFSET][hprog], l->cde_app.arrays[ARRAY_PROGRAM_OFFSET][hprog],
l->cde_app.arrays[ARRAY_REGISTER_COUNT][hprog], l->cde_app.arrays[ARRAY_REGISTER_COUNT][hprog],
@@ -1634,7 +1636,7 @@ static int gk20a_buffer_convert_gpu_to_cde(
if (!l->cde_app.initialised) if (!l->cde_app.initialised)
return -ENOSYS; return -ENOSYS;
gk20a_dbg(gpu_dbg_cde, "firmware version = %d\n", nvgpu_log(g, gpu_dbg_cde, "firmware version = %d\n",
l->cde_app.firmware_version); l->cde_app.firmware_version);
if (l->cde_app.firmware_version == 1) { if (l->cde_app.firmware_version == 1) {

View File

@@ -1,7 +1,7 @@
/* /*
* GP10B CDE * GP10B CDE
* *
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -117,7 +117,7 @@ int gp10b_populate_scatter_buffer(struct gk20a *g,
u64 surf_pa = sg_phys(sg); u64 surf_pa = sg_phys(sg);
unsigned int n = (int)(sg->length >> page_size_log2); unsigned int n = (int)(sg->length >> page_size_log2);
gk20a_dbg(gpu_dbg_cde, "surfPA=0x%llx + %d pages", surf_pa, n); nvgpu_log(g, gpu_dbg_cde, "surfPA=0x%llx + %d pages", surf_pa, n);
for (j=0; j < n && pages_left > 0; j++, surf_pa += page_size) { for (j=0; j < n && pages_left > 0; j++, surf_pa += page_size) {
u32 addr = (((u32)(surf_pa>>7)) & getSliceMaskGP10B) >> page_size_shift; u32 addr = (((u32)(surf_pa>>7)) & getSliceMaskGP10B) >> page_size_shift;
@@ -143,9 +143,9 @@ int gp10b_populate_scatter_buffer(struct gk20a *g,
scatter_buffer[page >> 3] = d; scatter_buffer[page >> 3] = d;
if (nvgpu_log_mask_enabled(g, gpu_dbg_cde)) { if (nvgpu_log_mask_enabled(g, gpu_dbg_cde)) {
gk20a_dbg(gpu_dbg_cde, "scatterBuffer content:"); nvgpu_log(g, gpu_dbg_cde, "scatterBuffer content:");
for (i = 0; i < page >> 3; i++) { for (i = 0; i < page >> 3; i++) {
gk20a_dbg(gpu_dbg_cde, " %x", scatter_buffer[i]); nvgpu_log(g, gpu_dbg_cde, " %x", scatter_buffer[i]);
} }
} }
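
A related idiom visible in the hunk above: when a dump would emit one line per word, the whole loop is gated on nvgpu_log_mask_enabled() so it costs nothing when gpu_dbg_cde logging is off. A minimal sketch of that guard follows (illustrative only; buffer length handling is simplified and the helper name is hypothetical).

/*
 * Sketch only: skip the verbose dump entirely unless gpu_dbg_cde
 * logging has been enabled for this GPU.
 */
static void example_dump_scatter_buffer(struct gk20a *g,
					const u32 *scatter_buffer,
					unsigned int words)
{
	unsigned int i;

	if (!nvgpu_log_mask_enabled(g, gpu_dbg_cde))
		return;

	nvgpu_log(g, gpu_dbg_cde, "scatterBuffer content:");
	for (i = 0; i < words; i++)
		nvgpu_log(g, gpu_dbg_cde, " %x", scatter_buffer[i]);
}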

View File

@@ -834,7 +834,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
/* update debug settings */ /* update debug settings */
nvgpu_ltc_sync_enabled(g); nvgpu_ltc_sync_enabled(g);
gk20a_dbg_info("channel %d", c->chid); nvgpu_log_info(g, "channel %d", c->chid);
/* /*
* Job tracking is necessary for any of the following conditions: * Job tracking is necessary for any of the following conditions:
@@ -943,7 +943,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
fence ? fence->id : 0, fence ? fence->id : 0,
fence ? fence->value : 0); fence ? fence->value : 0);
gk20a_dbg_info("pre-submit put %d, get %d, size %d", nvgpu_log_info(g, "pre-submit put %d, get %d, size %d",
c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num); c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
/* /*
@@ -1023,18 +1023,18 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
post_fence ? post_fence->syncpt_id : 0, post_fence ? post_fence->syncpt_id : 0,
post_fence ? post_fence->syncpt_value : 0); post_fence ? post_fence->syncpt_value : 0);
gk20a_dbg_info("post-submit put %d, get %d, size %d", nvgpu_log_info(g, "post-submit put %d, get %d, size %d",
c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num); c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
if (profile) if (profile)
profile->timestamp[PROFILE_END] = sched_clock(); profile->timestamp[PROFILE_END] = sched_clock();
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return err; return err;
clean_up_job: clean_up_job:
channel_gk20a_free_job(c, job); channel_gk20a_free_job(c, job);
clean_up: clean_up:
gk20a_dbg_fn("fail"); nvgpu_log_fn(g, "fail");
gk20a_fence_put(post_fence); gk20a_fence_put(post_fence);
if (c->deterministic) if (c->deterministic)
nvgpu_rwsem_up_read(&g->deterministic_busy); nvgpu_rwsem_up_read(&g->deterministic_busy);

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -79,13 +79,14 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
loff_t *off) loff_t *off)
{ {
struct gk20a_ctxsw_dev *dev = filp->private_data; struct gk20a_ctxsw_dev *dev = filp->private_data;
struct gk20a *g = dev->g;
struct nvgpu_ctxsw_ring_header *hdr = dev->hdr; struct nvgpu_ctxsw_ring_header *hdr = dev->hdr;
struct nvgpu_ctxsw_trace_entry __user *entry = struct nvgpu_ctxsw_trace_entry __user *entry =
(struct nvgpu_ctxsw_trace_entry *) buf; (struct nvgpu_ctxsw_trace_entry *) buf;
size_t copied = 0; size_t copied = 0;
int err; int err;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
"filp=%p buf=%p size=%zu", filp, buf, size); "filp=%p buf=%p size=%zu", filp, buf, size);
nvgpu_mutex_acquire(&dev->write_lock); nvgpu_mutex_acquire(&dev->write_lock);
@@ -119,7 +120,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
size -= sizeof(*entry); size -= sizeof(*entry);
} }
gk20a_dbg(gpu_dbg_ctxsw, "copied=%zu read_idx=%d", copied, nvgpu_log(g, gpu_dbg_ctxsw, "copied=%zu read_idx=%d", copied,
hdr->read_idx); hdr->read_idx);
*off = hdr->read_idx; *off = hdr->read_idx;
@@ -130,7 +131,9 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev) static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev)
{ {
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled"); struct gk20a *g = dev->g;
nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled");
nvgpu_mutex_acquire(&dev->write_lock); nvgpu_mutex_acquire(&dev->write_lock);
dev->write_enabled = true; dev->write_enabled = true;
nvgpu_mutex_release(&dev->write_lock); nvgpu_mutex_release(&dev->write_lock);
@@ -140,7 +143,9 @@ static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev)
static int gk20a_ctxsw_dev_ioctl_trace_disable(struct gk20a_ctxsw_dev *dev) static int gk20a_ctxsw_dev_ioctl_trace_disable(struct gk20a_ctxsw_dev *dev)
{ {
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled"); struct gk20a *g = dev->g;
nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled");
dev->g->ops.fecs_trace.disable(dev->g); dev->g->ops.fecs_trace.disable(dev->g);
nvgpu_mutex_acquire(&dev->write_lock); nvgpu_mutex_acquire(&dev->write_lock);
dev->write_enabled = false; dev->write_enabled = false;
@@ -168,7 +173,7 @@ static int gk20a_ctxsw_dev_alloc_buffer(struct gk20a_ctxsw_dev *dev,
dev->size = size; dev->size = size;
dev->num_ents = dev->hdr->num_ents; dev->num_ents = dev->hdr->num_ents;
gk20a_dbg(gpu_dbg_ctxsw, "size=%zu hdr=%p ents=%p num_ents=%d", nvgpu_log(g, gpu_dbg_ctxsw, "size=%zu hdr=%p ents=%p num_ents=%d",
dev->size, dev->hdr, dev->ents, dev->hdr->num_ents); dev->size, dev->hdr, dev->ents, dev->hdr->num_ents);
return 0; return 0;
} }
@@ -208,10 +213,11 @@ int gk20a_ctxsw_dev_ring_free(struct gk20a *g)
static int gk20a_ctxsw_dev_ioctl_ring_setup(struct gk20a_ctxsw_dev *dev, static int gk20a_ctxsw_dev_ioctl_ring_setup(struct gk20a_ctxsw_dev *dev,
struct nvgpu_ctxsw_ring_setup_args *args) struct nvgpu_ctxsw_ring_setup_args *args)
{ {
struct gk20a *g = dev->g;
size_t size = args->size; size_t size = args->size;
int ret; int ret;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "size=%zu", size); nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "size=%zu", size);
if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE) if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE)
return -EINVAL; return -EINVAL;
@@ -252,7 +258,7 @@ static int gk20a_ctxsw_dev_ioctl_poll(struct gk20a_ctxsw_dev *dev)
struct gk20a *g = dev->g; struct gk20a *g = dev->g;
int err; int err;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
err = gk20a_busy(g); err = gk20a_busy(g);
if (err) if (err)
@@ -286,7 +292,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
if (!g) if (!g)
return -ENODEV; return -ENODEV;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p", g); nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p", g);
if (!capable(CAP_SYS_ADMIN)) { if (!capable(CAP_SYS_ADMIN)) {
err = -EPERM; err = -EPERM;
@@ -322,13 +328,13 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
size = sizeof(struct nvgpu_ctxsw_ring_header) + size = sizeof(struct nvgpu_ctxsw_ring_header) +
n * sizeof(struct nvgpu_ctxsw_trace_entry); n * sizeof(struct nvgpu_ctxsw_trace_entry);
gk20a_dbg(gpu_dbg_ctxsw, "size=%zu entries=%d ent_size=%zu", nvgpu_log(g, gpu_dbg_ctxsw, "size=%zu entries=%d ent_size=%zu",
size, n, sizeof(struct nvgpu_ctxsw_trace_entry)); size, n, sizeof(struct nvgpu_ctxsw_trace_entry));
err = gk20a_ctxsw_dev_alloc_buffer(dev, size); err = gk20a_ctxsw_dev_alloc_buffer(dev, size);
if (!err) { if (!err) {
filp->private_data = dev; filp->private_data = dev;
gk20a_dbg(gpu_dbg_ctxsw, "filp=%p dev=%p size=%zu", nvgpu_log(g, gpu_dbg_ctxsw, "filp=%p dev=%p size=%zu",
filp, dev, size); filp, dev, size);
} }
@@ -348,7 +354,7 @@ int gk20a_ctxsw_dev_release(struct inode *inode, struct file *filp)
struct gk20a_ctxsw_dev *dev = filp->private_data; struct gk20a_ctxsw_dev *dev = filp->private_data;
struct gk20a *g = dev->g; struct gk20a *g = dev->g;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "dev: %p", dev); nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "dev: %p", dev);
g->ops.fecs_trace.disable(g); g->ops.fecs_trace.disable(g);
@@ -372,7 +378,7 @@ long gk20a_ctxsw_dev_ioctl(struct file *filp, unsigned int cmd,
u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE]; u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE];
int err = 0; int err = 0;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "nr=%d", _IOC_NR(cmd)); nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "nr=%d", _IOC_NR(cmd));
if ((_IOC_TYPE(cmd) != NVGPU_CTXSW_IOCTL_MAGIC) || if ((_IOC_TYPE(cmd) != NVGPU_CTXSW_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) || (_IOC_NR(cmd) == 0) ||
@@ -423,10 +429,11 @@ long gk20a_ctxsw_dev_ioctl(struct file *filp, unsigned int cmd,
unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait) unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait)
{ {
struct gk20a_ctxsw_dev *dev = filp->private_data; struct gk20a_ctxsw_dev *dev = filp->private_data;
struct gk20a *g = dev->g;
struct nvgpu_ctxsw_ring_header *hdr = dev->hdr; struct nvgpu_ctxsw_ring_header *hdr = dev->hdr;
unsigned int mask = 0; unsigned int mask = 0;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
nvgpu_mutex_acquire(&dev->write_lock); nvgpu_mutex_acquire(&dev->write_lock);
poll_wait(filp, &dev->readout_wq.wq, wait); poll_wait(filp, &dev->readout_wq.wq, wait);
@@ -440,18 +447,20 @@ unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait)
static void gk20a_ctxsw_dev_vma_open(struct vm_area_struct *vma) static void gk20a_ctxsw_dev_vma_open(struct vm_area_struct *vma)
{ {
struct gk20a_ctxsw_dev *dev = vma->vm_private_data; struct gk20a_ctxsw_dev *dev = vma->vm_private_data;
struct gk20a *g = dev->g;
nvgpu_atomic_inc(&dev->vma_ref); nvgpu_atomic_inc(&dev->vma_ref);
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d",
nvgpu_atomic_read(&dev->vma_ref)); nvgpu_atomic_read(&dev->vma_ref));
} }
static void gk20a_ctxsw_dev_vma_close(struct vm_area_struct *vma) static void gk20a_ctxsw_dev_vma_close(struct vm_area_struct *vma)
{ {
struct gk20a_ctxsw_dev *dev = vma->vm_private_data; struct gk20a_ctxsw_dev *dev = vma->vm_private_data;
struct gk20a *g = dev->g;
nvgpu_atomic_dec(&dev->vma_ref); nvgpu_atomic_dec(&dev->vma_ref);
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d",
nvgpu_atomic_read(&dev->vma_ref)); nvgpu_atomic_read(&dev->vma_ref));
} }
@@ -469,9 +478,10 @@ int gk20a_ctxsw_dev_mmap_buffer(struct gk20a *g,
int gk20a_ctxsw_dev_mmap(struct file *filp, struct vm_area_struct *vma) int gk20a_ctxsw_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{ {
struct gk20a_ctxsw_dev *dev = filp->private_data; struct gk20a_ctxsw_dev *dev = filp->private_data;
struct gk20a *g = dev->g;
int ret; int ret;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vm_start=%lx vm_end=%lx", nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vm_start=%lx vm_end=%lx",
vma->vm_start, vma->vm_end); vma->vm_start, vma->vm_end);
ret = dev->g->ops.fecs_trace.mmap_user_buffer(dev->g, vma); ret = dev->g->ops.fecs_trace.mmap_user_buffer(dev->g, vma);
@@ -513,7 +523,7 @@ int gk20a_ctxsw_trace_init(struct gk20a *g)
struct gk20a_ctxsw_trace *trace = g->ctxsw_trace; struct gk20a_ctxsw_trace *trace = g->ctxsw_trace;
int err; int err;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p trace=%p", g, trace); nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p trace=%p", g, trace);
/* if tracing is not supported, skip this */ /* if tracing is not supported, skip this */
if (!g->ops.fecs_trace.init) if (!g->ops.fecs_trace.init)
@@ -590,7 +600,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
dev = &g->ctxsw_trace->devs[entry->vmid]; dev = &g->ctxsw_trace->devs[entry->vmid];
hdr = dev->hdr; hdr = dev->hdr;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
"dev=%p hdr=%p", dev, hdr); "dev=%p hdr=%p", dev, hdr);
nvgpu_mutex_acquire(&dev->write_lock); nvgpu_mutex_acquire(&dev->write_lock);
@@ -630,7 +640,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
goto filter; goto filter;
} }
gk20a_dbg(gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_ctxsw,
"seqno=%d context_id=%08x pid=%lld tag=%x timestamp=%llx", "seqno=%d context_id=%08x pid=%lld tag=%x timestamp=%llx",
entry->seqno, entry->context_id, entry->pid, entry->seqno, entry->context_id, entry->pid,
entry->tag, entry->timestamp); entry->tag, entry->timestamp);
@@ -644,7 +654,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
if (unlikely(write_idx >= hdr->num_ents)) if (unlikely(write_idx >= hdr->num_ents))
write_idx = 0; write_idx = 0;
hdr->write_idx = write_idx; hdr->write_idx = write_idx;
gk20a_dbg(gpu_dbg_ctxsw, "added: read=%d write=%d len=%d", nvgpu_log(g, gpu_dbg_ctxsw, "added: read=%d write=%d len=%d",
hdr->read_idx, hdr->write_idx, ring_len(hdr)); hdr->read_idx, hdr->write_idx, ring_len(hdr));
nvgpu_mutex_release(&dev->write_lock); nvgpu_mutex_release(&dev->write_lock);
@@ -657,7 +667,7 @@ drop:
hdr->drop_count++; hdr->drop_count++;
filter: filter:
gk20a_dbg(gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_ctxsw,
"dropping seqno=%d context_id=%08x pid=%lld " "dropping seqno=%d context_id=%08x pid=%lld "
"tag=%x time=%llx (%s)", "tag=%x time=%llx (%s)",
entry->seqno, entry->context_id, entry->pid, entry->seqno, entry->context_id, entry->pid,

View File

@@ -307,10 +307,6 @@ void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink)
debugfs_create_u32("disable_syncpoints", S_IRUGO, debugfs_create_u32("disable_syncpoints", S_IRUGO,
l->debugfs, &g->disable_syncpoints); l->debugfs, &g->disable_syncpoints);
/* Legacy debugging API. */
debugfs_create_u64("dbg_mask", S_IRUGO|S_IWUSR,
l->debugfs, &nvgpu_dbg_mask);
/* New debug logging API. */ /* New debug logging API. */
debugfs_create_u64("log_mask", S_IRUGO|S_IWUSR, debugfs_create_u64("log_mask", S_IRUGO|S_IWUSR,
l->debugfs, &g->log_mask); l->debugfs, &g->log_mask);
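
For context on why every call now needs g: the hunk above keeps only the per-GPU log_mask debugfs node, and the new logging API checks that per-instance mask. The macro below is a rough sketch of that idea under stated assumptions; it is not the actual nvgpu_log() implementation, just what the per-GPU mask check amounts to conceptually.

/*
 * Assumption-labelled sketch, NOT the real nvgpu_log() definition:
 * conceptually, the enabled-mask moved from the removed global
 * (nvgpu_dbg_mask / the dbg_mask node) into each struct gk20a
 * (g->log_mask), so a message is emitted only when the bit is set
 * for this particular GPU instance.
 */
#define example_nvgpu_log(g, mask, fmt, ...)				\
	do {								\
		if ((g)->log_mask & (mask))				\
			pr_info("nvgpu: " fmt "\n", ##__VA_ARGS__);	\
	} while (0)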

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2017 NVIDIA Corporation. All rights reserved. * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
* *
* This software is licensed under the terms of the GNU General Public * This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and * License version 2, as published by the Free Software Foundation, and
@@ -108,6 +108,7 @@ static const struct seq_operations gk20a_fifo_sched_debugfs_seq_ops = {
static int gk20a_fifo_sched_debugfs_open(struct inode *inode, static int gk20a_fifo_sched_debugfs_open(struct inode *inode,
struct file *file) struct file *file)
{ {
struct gk20a *g = inode->i_private;
int err; int err;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
@@ -117,7 +118,7 @@ static int gk20a_fifo_sched_debugfs_open(struct inode *inode,
if (err) if (err)
return err; return err;
gk20a_dbg(gpu_dbg_info, "i_private=%p", inode->i_private); nvgpu_log(g, gpu_dbg_info, "i_private=%p", inode->i_private);
((struct seq_file *)file->private_data)->private = inode->i_private; ((struct seq_file *)file->private_data)->private = inode->i_private;
return 0; return 0;
@@ -301,7 +302,7 @@ void gk20a_fifo_debugfs_init(struct gk20a *g)
if (IS_ERR_OR_NULL(fifo_root)) if (IS_ERR_OR_NULL(fifo_root))
return; return;
gk20a_dbg(gpu_dbg_info, "g=%p", g); nvgpu_log(g, gpu_dbg_info, "g=%p", g);
debugfs_create_file("sched", 0600, fifo_root, g, debugfs_create_file("sched", 0600, fifo_root, g,
&gk20a_fifo_sched_debugfs_fops); &gk20a_fifo_sched_debugfs_fops);

View File

@@ -87,7 +87,7 @@ static void nvgpu_init_gr_vars(struct gk20a *g)
{ {
gk20a_init_gr(g); gk20a_init_gr(g);
gk20a_dbg_info("total ram pages : %lu", totalram_pages); nvgpu_log_info(g, "total ram pages : %lu", totalram_pages);
g->gr.max_comptag_mem = totalram_pages g->gr.max_comptag_mem = totalram_pages
>> (10 - (PAGE_SHIFT - 10)); >> (10 - (PAGE_SHIFT - 10));
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -50,7 +50,7 @@ irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
int hw_irq_count; int hw_irq_count;
gk20a_dbg(gpu_dbg_intr, "interrupt thread launched"); nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched");
trace_mc_gk20a_intr_thread_stall(g->name); trace_mc_gk20a_intr_thread_stall(g->name);

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -23,11 +23,11 @@ void nvgpu_writel(struct gk20a *g, u32 r, u32 v)
if (unlikely(!l->regs)) { if (unlikely(!l->regs)) {
__gk20a_warn_on_no_regs(); __gk20a_warn_on_no_regs();
gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
} else { } else {
writel_relaxed(v, l->regs + r); writel_relaxed(v, l->regs + r);
nvgpu_wmb(); nvgpu_wmb();
gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v);
} }
} }
@@ -48,10 +48,10 @@ u32 __nvgpu_readl(struct gk20a *g, u32 r)
if (unlikely(!l->regs)) { if (unlikely(!l->regs)) {
__gk20a_warn_on_no_regs(); __gk20a_warn_on_no_regs();
gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
} else { } else {
v = readl(l->regs + r); v = readl(l->regs + r);
gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v);
} }
return v; return v;
@@ -63,13 +63,13 @@ void nvgpu_writel_check(struct gk20a *g, u32 r, u32 v)
if (unlikely(!l->regs)) { if (unlikely(!l->regs)) {
__gk20a_warn_on_no_regs(); __gk20a_warn_on_no_regs();
gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
} else { } else {
nvgpu_wmb(); nvgpu_wmb();
do { do {
writel_relaxed(v, l->regs + r); writel_relaxed(v, l->regs + r);
} while (readl(l->regs + r) != v); } while (readl(l->regs + r) != v);
gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v);
} }
} }
@@ -79,11 +79,11 @@ void nvgpu_bar1_writel(struct gk20a *g, u32 b, u32 v)
if (unlikely(!l->bar1)) { if (unlikely(!l->bar1)) {
__gk20a_warn_on_no_regs(); __gk20a_warn_on_no_regs();
gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v);
} else { } else {
nvgpu_wmb(); nvgpu_wmb();
writel_relaxed(v, l->bar1 + b); writel_relaxed(v, l->bar1 + b);
gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x", b, v); nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x", b, v);
} }
} }
@@ -94,10 +94,10 @@ u32 nvgpu_bar1_readl(struct gk20a *g, u32 b)
if (unlikely(!l->bar1)) { if (unlikely(!l->bar1)) {
__gk20a_warn_on_no_regs(); __gk20a_warn_on_no_regs();
gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v);
} else { } else {
v = readl(l->bar1 + b); v = readl(l->bar1 + b);
gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x", b, v); nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x", b, v);
} }
return v; return v;

View File

@@ -25,5 +25,5 @@ void nvgpu_usermode_writel(struct gk20a *g, u32 r, u32 v)
void __iomem *reg = l->usermode_regs + (r - usermode_cfg0_r()); void __iomem *reg = l->usermode_regs + (r - usermode_cfg0_r());
writel_relaxed(v, reg); writel_relaxed(v, reg);
gk20a_dbg(gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v); nvgpu_log(g, gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v);
} }

View File

@@ -1,7 +1,7 @@
/* /*
* NVGPU IOCTLs * NVGPU IOCTLs
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -139,8 +139,9 @@ static int gk20a_create_device(
{ {
struct device *subdev; struct device *subdev;
int err; int err;
struct gk20a *g = gk20a_from_dev(dev);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
cdev_init(cdev, ops); cdev_init(cdev, ops);
cdev->owner = THIS_MODULE; cdev->owner = THIS_MODULE;

View File

@@ -50,8 +50,9 @@ static int gk20a_as_ioctl_bind_channel(
{ {
int err = 0; int err = 0;
struct channel_gk20a *ch; struct channel_gk20a *ch;
struct gk20a *g = gk20a_from_vm(as_share->vm);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
ch = gk20a_get_channel_from_file(args->channel_fd); ch = gk20a_get_channel_from_file(args->channel_fd);
if (!ch) if (!ch)
@@ -76,7 +77,7 @@ static int gk20a_as_ioctl_alloc_space(
{ {
struct gk20a *g = gk20a_from_vm(as_share->vm); struct gk20a *g = gk20a_from_vm(as_share->vm);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size, return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
&args->o_a.offset, &args->o_a.offset,
gk20a_as_translate_linux_flags(g, gk20a_as_translate_linux_flags(g,
@@ -87,7 +88,9 @@ static int gk20a_as_ioctl_free_space(
struct gk20a_as_share *as_share, struct gk20a_as_share *as_share,
struct nvgpu_as_free_space_args *args) struct nvgpu_as_free_space_args *args)
{ {
gk20a_dbg_fn(""); struct gk20a *g = gk20a_from_vm(as_share->vm);
nvgpu_log_fn(g, " ");
return nvgpu_vm_area_free(as_share->vm, args->offset); return nvgpu_vm_area_free(as_share->vm, args->offset);
} }
@@ -95,7 +98,9 @@ static int gk20a_as_ioctl_map_buffer_ex(
struct gk20a_as_share *as_share, struct gk20a_as_share *as_share,
struct nvgpu_as_map_buffer_ex_args *args) struct nvgpu_as_map_buffer_ex_args *args)
{ {
gk20a_dbg_fn(""); struct gk20a *g = gk20a_from_vm(as_share->vm);
nvgpu_log_fn(g, " ");
/* unsupported, direct kind control must be used */ /* unsupported, direct kind control must be used */
if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) { if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) {
@@ -117,7 +122,9 @@ static int gk20a_as_ioctl_unmap_buffer(
struct gk20a_as_share *as_share, struct gk20a_as_share *as_share,
struct nvgpu_as_unmap_buffer_args *args) struct nvgpu_as_unmap_buffer_args *args)
{ {
gk20a_dbg_fn(""); struct gk20a *g = gk20a_from_vm(as_share->vm);
nvgpu_log_fn(g, " ");
nvgpu_vm_unmap(as_share->vm, args->offset, NULL); nvgpu_vm_unmap(as_share->vm, args->offset, NULL);
@@ -128,6 +135,7 @@ static int gk20a_as_ioctl_map_buffer_batch(
struct gk20a_as_share *as_share, struct gk20a_as_share *as_share,
struct nvgpu_as_map_buffer_batch_args *args) struct nvgpu_as_map_buffer_batch_args *args)
{ {
struct gk20a *g = gk20a_from_vm(as_share->vm);
u32 i; u32 i;
int err = 0; int err = 0;
@@ -140,7 +148,7 @@ static int gk20a_as_ioctl_map_buffer_batch(
struct vm_gk20a_mapping_batch batch; struct vm_gk20a_mapping_batch batch;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT || if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT ||
args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT) args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT)
@@ -220,9 +228,10 @@ static int gk20a_as_ioctl_get_va_regions(
unsigned int write_entries; unsigned int write_entries;
struct nvgpu_as_va_region __user *user_region_ptr; struct nvgpu_as_va_region __user *user_region_ptr;
struct vm_gk20a *vm = as_share->vm; struct vm_gk20a *vm = as_share->vm;
struct gk20a *g = gk20a_from_vm(vm);
unsigned int page_sizes = gmmu_page_size_kernel; unsigned int page_sizes = gmmu_page_size_kernel;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!vm->big_pages) if (!vm->big_pages)
page_sizes--; page_sizes--;
@@ -293,14 +302,14 @@ int gk20a_as_dev_open(struct inode *inode, struct file *filp)
struct gk20a *g; struct gk20a *g;
int err; int err;
gk20a_dbg_fn("");
l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev); l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev);
g = &l->g; g = &l->g;
nvgpu_log_fn(g, " ");
err = gk20a_as_alloc_share(g, 0, 0, &as_share); err = gk20a_as_alloc_share(g, 0, 0, &as_share);
if (err) { if (err) {
gk20a_dbg_fn("failed to alloc share"); nvgpu_log_fn(g, "failed to alloc share");
return err; return err;
} }
@@ -312,8 +321,6 @@ int gk20a_as_dev_release(struct inode *inode, struct file *filp)
{ {
struct gk20a_as_share *as_share = filp->private_data; struct gk20a_as_share *as_share = filp->private_data;
gk20a_dbg_fn("");
if (!as_share) if (!as_share)
return 0; return 0;
@@ -328,7 +335,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE]; u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];
gk20a_dbg_fn("start %d", _IOC_NR(cmd)); nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) || if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) || (_IOC_NR(cmd) == 0) ||

View File

@@ -476,7 +476,7 @@ static int __gk20a_channel_open(struct gk20a *g,
struct channel_gk20a *ch; struct channel_gk20a *ch;
struct channel_priv *priv; struct channel_priv *priv;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
g = gk20a_get(g); g = gk20a_get(g);
if (!g) if (!g)
@@ -529,10 +529,10 @@ int gk20a_channel_open(struct inode *inode, struct file *filp)
struct gk20a *g = &l->g; struct gk20a *g = &l->g;
int ret; int ret;
gk20a_dbg_fn("start"); nvgpu_log_fn(g, "start");
ret = __gk20a_channel_open(g, filp, -1); ret = __gk20a_channel_open(g, filp, -1);
gk20a_dbg_fn("end"); nvgpu_log_fn(g, "end");
return ret; return ret;
} }
@@ -676,7 +676,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
int remain, ret = 0; int remain, ret = 0;
u64 end; u64 end;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (ch->has_timedout) if (ch->has_timedout)
return -ETIMEDOUT; return -ETIMEDOUT;
@@ -760,7 +760,7 @@ static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct gr_gk20a *gr = &g->gr; struct gr_gk20a *gr = &g->gr;
gk20a_dbg_fn(""); nvgpu_log_fn(gr->g, " ");
return g->ops.gr.bind_ctxsw_zcull(g, gr, ch, return g->ops.gr.bind_ctxsw_zcull(g, gr, ch,
args->gpu_va, args->mode); args->gpu_va, args->mode);
@@ -775,9 +775,10 @@ static int gk20a_ioctl_channel_submit_gpfifo(
struct fifo_profile_gk20a *profile = NULL; struct fifo_profile_gk20a *profile = NULL;
u32 submit_flags = 0; u32 submit_flags = 0;
int fd = -1; int fd = -1;
struct gk20a *g = ch->g;
int ret = 0; int ret = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
profile = gk20a_fifo_profile_acquire(ch->g); profile = gk20a_fifo_profile_acquire(ch->g);
@@ -1064,8 +1065,9 @@ long gk20a_channel_ioctl(struct file *filp,
struct device *dev = dev_from_gk20a(ch->g); struct device *dev = dev_from_gk20a(ch->g);
u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0}; u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0};
int err = 0; int err = 0;
struct gk20a *g = ch->g;
gk20a_dbg_fn("start %d", _IOC_NR(cmd)); nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) || if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) || (_IOC_NR(cmd) == 0) ||
@@ -1224,7 +1226,7 @@ long gk20a_channel_ioctl(struct file *filp,
{ {
u32 timeout = u32 timeout =
(u32)((struct nvgpu_set_timeout_args *)buf)->timeout; (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
timeout, ch->chid); timeout, ch->chid);
ch->timeout_ms_max = timeout; ch->timeout_ms_max = timeout;
gk20a_channel_trace_sched_param( gk20a_channel_trace_sched_param(
@@ -1238,7 +1240,7 @@ long gk20a_channel_ioctl(struct file *filp,
bool timeout_debug_dump = !((u32) bool timeout_debug_dump = !((u32)
((struct nvgpu_set_timeout_ex_args *)buf)->flags & ((struct nvgpu_set_timeout_ex_args *)buf)->flags &
(1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP)); (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
timeout, ch->chid); timeout, ch->chid);
ch->timeout_ms_max = timeout; ch->timeout_ms_max = timeout;
ch->timeout_debug_dump = timeout_debug_dump; ch->timeout_debug_dump = timeout_debug_dump;
@@ -1367,7 +1369,7 @@ long gk20a_channel_ioctl(struct file *filp,
gk20a_channel_put(ch); gk20a_channel_put(ch);
gk20a_dbg_fn("end"); nvgpu_log_fn(g, "end");
return err; return err;
} }

View File

@@ -209,9 +209,10 @@ static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf,
static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev, static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev,
struct nvgpu_gpu_set_event_filter_args *args) struct nvgpu_gpu_set_event_filter_args *args)
{ {
struct gk20a *g = dev->session->g;
u32 mask; u32 mask;
gk20a_dbg(gpu_dbg_fn, ""); nvgpu_log(g, gpu_dbg_fn, " ");
if (args->flags) if (args->flags)
return -EINVAL; return -EINVAL;
@@ -237,7 +238,7 @@ static long nvgpu_clk_arb_ioctl_event_dev(struct file *filp, unsigned int cmd,
u8 buf[NVGPU_EVENT_IOCTL_MAX_ARG_SIZE]; u8 buf[NVGPU_EVENT_IOCTL_MAX_ARG_SIZE];
int err = 0; int err = 0;
gk20a_dbg(gpu_dbg_fn, "nr=%d", _IOC_NR(cmd)); nvgpu_log(g, gpu_dbg_fn, "nr=%d", _IOC_NR(cmd));
if ((_IOC_TYPE(cmd) != NVGPU_EVENT_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) if ((_IOC_TYPE(cmd) != NVGPU_EVENT_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0)
|| (_IOC_NR(cmd) > NVGPU_EVENT_IOCTL_LAST)) || (_IOC_NR(cmd) > NVGPU_EVENT_IOCTL_LAST))
@@ -681,7 +682,7 @@ int nvgpu_clk_arb_debugfs_init(struct gk20a *g)
struct dentry *gpu_root = l->debugfs; struct dentry *gpu_root = l->debugfs;
struct dentry *d; struct dentry *d;
gk20a_dbg(gpu_dbg_info, "g=%p", g); nvgpu_log(g, gpu_dbg_info, "g=%p", g);
d = debugfs_create_file( d = debugfs_create_file(
"arb_stats", "arb_stats",

View File

@@ -62,14 +62,14 @@ int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
struct gk20a_ctrl_priv *priv; struct gk20a_ctrl_priv *priv;
int err = 0; int err = 0;
gk20a_dbg_fn("");
l = container_of(inode->i_cdev, l = container_of(inode->i_cdev,
struct nvgpu_os_linux, ctrl.cdev); struct nvgpu_os_linux, ctrl.cdev);
g = gk20a_get(&l->g); g = gk20a_get(&l->g);
if (!g) if (!g)
return -ENODEV; return -ENODEV;
nvgpu_log_fn(g, " ");
priv = nvgpu_kzalloc(g, sizeof(struct gk20a_ctrl_priv)); priv = nvgpu_kzalloc(g, sizeof(struct gk20a_ctrl_priv));
if (!priv) { if (!priv) {
err = -ENOMEM; err = -ENOMEM;
@@ -102,7 +102,7 @@ int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp)
struct gk20a_ctrl_priv *priv = filp->private_data; struct gk20a_ctrl_priv *priv = filp->private_data;
struct gk20a *g = priv->g; struct gk20a *g = priv->g;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (priv->clk_session) if (priv->clk_session)
nvgpu_clk_arb_release_session(g, priv->clk_session); nvgpu_clk_arb_release_session(g, priv->clk_session);
@@ -684,7 +684,7 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
/* Copy to user space - pointed by "args->pwarpstate" */ /* Copy to user space - pointed by "args->pwarpstate" */
if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate, if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate,
w_state, ioctl_size)) { w_state, ioctl_size)) {
gk20a_dbg_fn("copy_to_user failed!"); nvgpu_log_fn(g, "copy_to_user failed!");
err = -EFAULT; err = -EFAULT;
} }
@@ -901,7 +901,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
u32 align = args->in.alignment ? args->in.alignment : SZ_4K; u32 align = args->in.alignment ? args->in.alignment : SZ_4K;
int fd; int fd;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* not yet supported */ /* not yet supported */
if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK)) if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK))
@@ -933,7 +933,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
args->out.dmabuf_fd = fd; args->out.dmabuf_fd = fd;
gk20a_dbg_fn("done, fd=%d", fd); nvgpu_log_fn(g, "done, fd=%d", fd);
return 0; return 0;
} }
@@ -943,7 +943,7 @@ static int nvgpu_gpu_get_memory_state(struct gk20a *g,
{ {
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (args->reserved[0] || args->reserved[1] || if (args->reserved[0] || args->reserved[1] ||
args->reserved[2] || args->reserved[3]) args->reserved[2] || args->reserved[3])
@@ -951,7 +951,7 @@ static int nvgpu_gpu_get_memory_state(struct gk20a *g,
err = nvgpu_vidmem_get_space(g, &args->total_free_bytes); err = nvgpu_vidmem_get_space(g, &args->total_free_bytes);
gk20a_dbg_fn("done, err=%d, bytes=%lld", err, args->total_free_bytes); nvgpu_log_fn(g, "done, err=%d, bytes=%lld", err, args->total_free_bytes);
return err; return err;
} }
@@ -973,7 +973,7 @@ static int nvgpu_gpu_clk_get_vf_points(struct gk20a *g,
u16 min_mhz; u16 min_mhz;
u16 max_mhz; u16 max_mhz;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!session || args->flags) if (!session || args->flags)
return -EINVAL; return -EINVAL;
@@ -1059,7 +1059,7 @@ static int nvgpu_gpu_clk_get_range(struct gk20a *g,
int err; int err;
u16 min_mhz, max_mhz; u16 min_mhz, max_mhz;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!session) if (!session)
return -EINVAL; return -EINVAL;
@@ -1138,7 +1138,7 @@ static int nvgpu_gpu_clk_set_info(struct gk20a *g,
int i; int i;
int ret; int ret;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!session || args->flags) if (!session || args->flags)
return -EINVAL; return -EINVAL;
@@ -1201,7 +1201,7 @@ static int nvgpu_gpu_clk_get_info(struct gk20a *g,
int err; int err;
int bit; int bit;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!session) if (!session)
return -EINVAL; return -EINVAL;
@@ -1287,7 +1287,7 @@ static int nvgpu_gpu_get_event_fd(struct gk20a *g,
{ {
struct nvgpu_clk_session *session = priv->clk_session; struct nvgpu_clk_session *session = priv->clk_session;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!session) if (!session)
return -EINVAL; return -EINVAL;
@@ -1301,7 +1301,7 @@ static int nvgpu_gpu_get_voltage(struct gk20a *g,
{ {
int err = -EINVAL; int err = -EINVAL;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (args->reserved) if (args->reserved)
return -EINVAL; return -EINVAL;
@@ -1337,7 +1337,7 @@ static int nvgpu_gpu_get_current(struct gk20a *g,
{ {
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (args->reserved[0] || args->reserved[1] || args->reserved[2]) if (args->reserved[0] || args->reserved[1] || args->reserved[2])
return -EINVAL; return -EINVAL;
@@ -1361,7 +1361,7 @@ static int nvgpu_gpu_get_power(struct gk20a *g,
{ {
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (args->reserved[0] || args->reserved[1] || args->reserved[2]) if (args->reserved[0] || args->reserved[1] || args->reserved[2])
return -EINVAL; return -EINVAL;
@@ -1386,7 +1386,7 @@ static int nvgpu_gpu_get_temperature(struct gk20a *g,
int err; int err;
u32 temp_f24_8; u32 temp_f24_8;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (args->reserved[0] || args->reserved[1] || args->reserved[2]) if (args->reserved[0] || args->reserved[1] || args->reserved[2])
return -EINVAL; return -EINVAL;
@@ -1415,7 +1415,7 @@ static int nvgpu_gpu_set_therm_alert_limit(struct gk20a *g,
{ {
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (args->reserved[0] || args->reserved[1] || args->reserved[2]) if (args->reserved[0] || args->reserved[1] || args->reserved[2])
return -EINVAL; return -EINVAL;
@@ -1491,7 +1491,7 @@ static int nvgpu_gpu_set_deterministic_opts(struct gk20a *g,
u32 i = 0; u32 i = 0;
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
user_channels = (int __user *)(uintptr_t)args->channels; user_channels = (int __user *)(uintptr_t)args->channels;
@@ -1556,7 +1556,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
struct zbc_query_params *zbc_tbl; struct zbc_query_params *zbc_tbl;
int i, err = 0; int i, err = 0;
gk20a_dbg_fn("start %d", _IOC_NR(cmd)); nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) || if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) || (_IOC_NR(cmd) == 0) ||
@@ -1855,7 +1855,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
break; break;
default: default:
gk20a_dbg_info("unrecognized gpu ioctl cmd: 0x%x", cmd); nvgpu_log_info(g, "unrecognized gpu ioctl cmd: 0x%x", cmd);
err = -ENOTTY; err = -ENOTTY;
break; break;
} }
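
The ioctl/open paths above repeat one more pattern worth calling out: character-device entry points have no g at hand, so they recover the nvgpu_os_linux container from the inode's cdev and log through its embedded gk20a. A hedged sketch of that shape follows (illustrative only; error-path details such as dropping the reference taken by gk20a_get() are omitted).

/*
 * Sketch only: recover g in a chardev open handler, matching the
 * container_of() pattern used by the converted open functions above.
 */
static int example_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l = container_of(inode->i_cdev,
					struct nvgpu_os_linux, ctrl.cdev);
	struct gk20a *g = gk20a_get(&l->g);

	if (!g)
		return -ENODEV;

	nvgpu_log_fn(g, " ");
	/* Real handlers go on to allocate their per-fd private data. */
	return 0;
}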

View File

@@ -56,7 +56,7 @@ static int alloc_profiler(struct gk20a *g,
struct dbg_profiler_object_data *prof; struct dbg_profiler_object_data *prof;
*_prof = NULL; *_prof = NULL;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
prof = nvgpu_kzalloc(g, sizeof(*prof)); prof = nvgpu_kzalloc(g, sizeof(*prof));
if (!prof) if (!prof)
@@ -72,7 +72,7 @@ static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_
struct dbg_session_gk20a_linux *dbg_s_linux; struct dbg_session_gk20a_linux *dbg_s_linux;
*_dbg_s_linux = NULL; *_dbg_s_linux = NULL;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux)); dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux));
if (!dbg_s_linux) if (!dbg_s_linux)
@@ -142,8 +142,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
unsigned int mask = 0; unsigned int mask = 0;
struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data; struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data;
struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s; struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
struct gk20a *g = dbg_s->g;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait); poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
@@ -151,9 +152,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
if (dbg_s->dbg_events.events_enabled && if (dbg_s->dbg_events.events_enabled &&
dbg_s->dbg_events.num_pending_events > 0) { dbg_s->dbg_events.num_pending_events > 0) {
gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d", nvgpu_log(g, gpu_dbg_gpu_dbg, "found pending event on session id %d",
dbg_s->id); dbg_s->id);
gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending", nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending",
dbg_s->dbg_events.num_pending_events); dbg_s->dbg_events.num_pending_events);
mask = (POLLPRI | POLLIN); mask = (POLLPRI | POLLIN);
} }
@@ -170,7 +171,7 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
struct gk20a *g = dbg_s->g; struct gk20a *g = dbg_s->g;
struct dbg_profiler_object_data *prof_obj, *tmp_obj; struct dbg_profiler_object_data *prof_obj, *tmp_obj;
gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name); nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);
/* unbind channels */ /* unbind channels */
dbg_unbind_all_channels_gk20a(dbg_s); dbg_unbind_all_channels_gk20a(dbg_s);
@@ -213,7 +214,11 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp) int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); struct nvgpu_os_linux *l = container_of(inode->i_cdev,
struct nvgpu_os_linux, prof.cdev);
struct gk20a *g = &l->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */); return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
} }
@@ -223,7 +228,7 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
int err; int err;
struct gk20a *g = dbg_s->g; struct gk20a *g = dbg_s->g;
gk20a_dbg_fn("powergate mode = %d", args->enable); nvgpu_log_fn(g, "powergate mode = %d", args->enable);
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
err = nvgpu_dbg_timeout_enable(dbg_s, args->enable); err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
@@ -356,7 +361,9 @@ static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
struct dbg_session_gk20a *dbg_s, struct dbg_session_gk20a *dbg_s,
struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args) struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); struct gk20a *g = dbg_s->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
@@ -373,7 +380,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
struct gk20a *g = dbg_s->g; struct gk20a *g = dbg_s->g;
int err = 0; int err = 0;
gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d", nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
timeout_mode); timeout_mode);
switch (timeout_mode) { switch (timeout_mode) {
@@ -401,7 +408,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
break; break;
} }
gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s", nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
g->timeouts_enabled ? "Yes" : "No"); g->timeouts_enabled ? "Yes" : "No");
return err; return err;
@@ -431,7 +438,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
dev = dev_from_gk20a(g); dev = dev_from_gk20a(g);
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);
err = alloc_session(g, &dbg_session_linux); err = alloc_session(g, &dbg_session_linux);
if (err) if (err)
@@ -482,7 +489,7 @@ static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
struct dbg_profiler_object_data *prof_obj, *tmp_obj; struct dbg_profiler_object_data *prof_obj, *tmp_obj;
struct dbg_session_channel_data_linux *ch_data_linux; struct dbg_session_channel_data_linux *ch_data_linux;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
chid = ch_data->chid; chid = ch_data->chid;
@@ -527,7 +534,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
struct dbg_session_data *session_data; struct dbg_session_data *session_data;
int err = 0; int err = 0;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
g->name, args->channel_fd); g->name, args->channel_fd);
/* /*
@@ -541,12 +548,12 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
ch = gk20a_get_channel_from_file(args->channel_fd); ch = gk20a_get_channel_from_file(args->channel_fd);
if (!ch) { if (!ch) {
gk20a_dbg_fn("no channel found for fd"); nvgpu_log_fn(g, "no channel found for fd");
err = -EINVAL; err = -EINVAL;
goto out_fput; goto out_fput;
} }
gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid); nvgpu_log_fn(g, "%s hwchid=%d", g->name, ch->chid);
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
nvgpu_mutex_acquire(&ch->dbg_s_lock); nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -818,7 +825,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
struct gk20a *g = dbg_s->g; struct gk20a *g = dbg_s->g;
struct channel_gk20a *ch; struct channel_gk20a *ch;
gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops); nvgpu_log_fn(g, "%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) { if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) {
nvgpu_err(g, "regops limit exceeded"); nvgpu_err(g, "regops limit exceeded");
@@ -890,10 +897,10 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
(args->ops + (args->ops +
ops_offset * sizeof(struct nvgpu_dbg_gpu_reg_op)); ops_offset * sizeof(struct nvgpu_dbg_gpu_reg_op));
gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu", nvgpu_log_fn(g, "Regops fragment: start_op=%llu ops=%llu",
ops_offset, num_ops); ops_offset, num_ops);
gk20a_dbg_fn("Copying regops from userspace"); nvgpu_log_fn(g, "Copying regops from userspace");
if (copy_from_user(linux_fragment, if (copy_from_user(linux_fragment,
fragment, fragment_size)) { fragment, fragment_size)) {
@@ -917,7 +924,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
if (err) if (err)
break; break;
gk20a_dbg_fn("Copying result to userspace"); nvgpu_log_fn(g, "Copying result to userspace");
if (copy_to_user(fragment, linux_fragment, if (copy_to_user(fragment, linux_fragment,
fragment_size)) { fragment_size)) {
@@ -955,7 +962,7 @@ static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
{ {
int err; int err;
struct gk20a *g = dbg_s->g; struct gk20a *g = dbg_s->g;
gk20a_dbg_fn("%s powergate mode = %d", nvgpu_log_fn(g, "%s powergate mode = %d",
g->name, args->mode); g->name, args->mode);
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -978,7 +985,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
struct gk20a *g = dbg_s->g; struct gk20a *g = dbg_s->g;
struct channel_gk20a *ch_gk20a; struct channel_gk20a *ch_gk20a;
gk20a_dbg_fn("%s smpc ctxsw mode = %d", nvgpu_log_fn(g, "%s smpc ctxsw mode = %d",
g->name, args->mode); g->name, args->mode);
err = gk20a_busy(g); err = gk20a_busy(g);
@@ -1075,7 +1082,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
struct channel_gk20a *ch; struct channel_gk20a *ch;
int err = 0, action = args->mode; int err = 0, action = args->mode;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
if (!ch) if (!ch)
@@ -1127,7 +1134,7 @@ static int nvgpu_ioctl_allocate_profiler_object(
struct gk20a *g = get_gk20a(dbg_session_linux->dev); struct gk20a *g = get_gk20a(dbg_session_linux->dev);
struct dbg_profiler_object_data *prof_obj; struct dbg_profiler_object_data *prof_obj;
gk20a_dbg_fn("%s", g->name); nvgpu_log_fn(g, "%s", g->name);
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -1171,7 +1178,7 @@ static int nvgpu_ioctl_free_profiler_object(
struct dbg_profiler_object_data *prof_obj, *tmp_obj; struct dbg_profiler_object_data *prof_obj, *tmp_obj;
bool obj_found = false; bool obj_found = false;
gk20a_dbg_fn("%s session_id = %d profiler_handle = %x", nvgpu_log_fn(g, "%s session_id = %d profiler_handle = %x",
g->name, dbg_s->id, args->profiler_handle); g->name, dbg_s->id, args->profiler_handle);
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -1253,7 +1260,9 @@ static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_
static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s) static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); struct gk20a *g = dbg_s->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
@@ -1265,7 +1274,9 @@ static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s) static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); struct gk20a *g = dbg_s->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
@@ -1277,7 +1288,9 @@ static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s) static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); struct gk20a *g = dbg_s->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
@@ -1294,13 +1307,13 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
{ {
int ret = 0; int ret = 0;
struct channel_gk20a *ch; struct channel_gk20a *ch;
struct gk20a *g = dbg_s->g;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
if (!ch) { if (!ch) {
nvgpu_err(dbg_s->g, nvgpu_err(g, "no channel bound to dbg session");
"no channel bound to dbg session");
return -EINVAL; return -EINVAL;
} }
@@ -1318,8 +1331,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
break; break;
default: default:
nvgpu_err(dbg_s->g, nvgpu_err(g, "unrecognized dbg gpu events ctrl cmd: 0x%x",
"unrecognized dbg gpu events ctrl cmd: 0x%x",
args->cmd); args->cmd);
ret = -EINVAL; ret = -EINVAL;
break; break;
@@ -1422,7 +1434,7 @@ static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
if (!ch) if (!ch)
return -EINVAL; return -EINVAL;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
return g->ops.gr.update_pc_sampling ? return g->ops.gr.update_pc_sampling ?
g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL; g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
@@ -1646,7 +1658,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
struct dbg_profiler_object_data *prof_obj; struct dbg_profiler_object_data *prof_obj;
int err = 0; int err = 0;
gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -1678,7 +1690,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
struct dbg_profiler_object_data *prof_obj, *my_prof_obj; struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
int err = 0; int err = 0;
gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
if (g->profiler_reservation_count < 0) { if (g->profiler_reservation_count < 0) {
nvgpu_err(g, "Negative reservation count!"); nvgpu_err(g, "Negative reservation count!");
@@ -1782,12 +1794,12 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
struct channel_gk20a *ch; struct channel_gk20a *ch;
int err; int err;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
g->name, args->channel_fd); g->name, args->channel_fd);
ch = gk20a_get_channel_from_file(args->channel_fd); ch = gk20a_get_channel_from_file(args->channel_fd);
if (!ch) { if (!ch) {
gk20a_dbg_fn("no channel found for fd"); nvgpu_log_fn(g, "no channel found for fd");
return -EINVAL; return -EINVAL;
} }
@@ -1802,7 +1814,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
nvgpu_mutex_release(&dbg_s->ch_list_lock); nvgpu_mutex_release(&dbg_s->ch_list_lock);
if (!channel_found) { if (!channel_found) {
gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd); nvgpu_log_fn(g, "channel not bounded, fd=%d\n", args->channel_fd);
err = -EINVAL; err = -EINVAL;
goto out; goto out;
} }
@@ -1820,7 +1832,11 @@ out:
int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp) int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); struct nvgpu_os_linux *l = container_of(inode->i_cdev,
struct nvgpu_os_linux, dbg.cdev);
struct gk20a *g = &l->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */); return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
} }
@@ -1833,7 +1849,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE]; u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
int err = 0; int err = 0;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) || if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) || (_IOC_NR(cmd) == 0) ||
@@ -1979,7 +1995,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
nvgpu_mutex_release(&dbg_s->ioctl_lock); nvgpu_mutex_release(&dbg_s->ioctl_lock);
gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err);
if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
err = copy_to_user((void __user *)arg, err = copy_to_user((void __user *)arg,
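Every hunk in this file is the same mechanical rewrite: gk20a_dbg()/gk20a_dbg_fn(), which consulted the global nvgpu_dbg_mask (removed in the log.c hunk further down), becomes nvgpu_log()/nvgpu_log_fn() taking the struct gk20a pointer, with g pulled from whatever owner is already in scope (dbg_s->g on the session paths, container_of() on the cdev in gk20a_dbg_gpu_dev_open()). The empty "" entry markers become " " to match the new helpers' convention. The stand-alone sketch below only models the call shape; the struct layout and the *_stub helpers are invented for illustration and are not the driver's definitions.

/*
 * Hypothetical user-space mock-up of the converted call shape; everything
 * here is a stub, not the driver's definitions.
 */
#include <stdio.h>

struct gk20a { const char *name; };

struct dbg_session {                    /* stand-in for struct dbg_session_gk20a */
        struct gk20a *g;
        int id;
};

/* stand-in for nvgpu_log_fn(): the GPU pointer is now part of every call */
static void nvgpu_log_fn_stub(struct gk20a *g, const char *msg)
{
        printf("nvgpu: %s %s\n", g->name, msg);
}

static int dbg_bind_channel_sketch(struct dbg_session *dbg_s, int channel_fd)
{
        struct gk20a *g = dbg_s->g;     /* the session already carries the gk20a pointer */

        nvgpu_log_fn_stub(g, "bind channel");
        return channel_fd < 0 ? -1 : 0;
}

int main(void)
{
        struct gk20a g = { .name = "gp10b" };
        struct dbg_session dbg_s = { .g = &g, .id = 1 };

        return dbg_bind_channel_sketch(&dbg_s, 3);
}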


@@ -175,6 +175,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
struct gk20a_event_id_data *event_id_data; struct gk20a_event_id_data *event_id_data;
u32 event_id; u32 event_id;
int err = 0; int err = 0;
struct gk20a *g = tsg->g;
event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id); event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
@@ -187,7 +188,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
nvgpu_mutex_acquire(&event_id_data->lock); nvgpu_mutex_acquire(&event_id_data->lock);
gk20a_dbg_info( nvgpu_log_info(g,
"posting event for event_id=%d on tsg=%d\n", "posting event for event_id=%d on tsg=%d\n",
event_id, tsg->tsgid); event_id, tsg->tsgid);
event_id_data->event_posted = true; event_id_data->event_posted = true;
@@ -205,14 +206,14 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
u32 event_id = event_id_data->event_id; u32 event_id = event_id_data->event_id;
struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id; struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, " ");
poll_wait(filep, &event_id_data->event_id_wq.wq, wait); poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
nvgpu_mutex_acquire(&event_id_data->lock); nvgpu_mutex_acquire(&event_id_data->lock);
if (event_id_data->event_posted) { if (event_id_data->event_posted) {
gk20a_dbg_info( nvgpu_log_info(g,
"found pending event_id=%d on TSG=%d\n", "found pending event_id=%d on TSG=%d\n",
event_id, tsg->tsgid); event_id, tsg->tsgid);
mask = (POLLPRI | POLLIN); mask = (POLLPRI | POLLIN);
@@ -363,7 +364,7 @@ int nvgpu_ioctl_tsg_open(struct gk20a *g, struct file *filp)
dev = dev_from_gk20a(g); dev = dev_from_gk20a(g);
gk20a_dbg(gpu_dbg_fn, "tsg: %s", dev_name(dev)); nvgpu_log(g, gpu_dbg_fn, "tsg: %s", dev_name(dev));
priv = nvgpu_kmalloc(g, sizeof(*priv)); priv = nvgpu_kmalloc(g, sizeof(*priv));
if (!priv) { if (!priv) {
@@ -397,12 +398,12 @@ int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
struct gk20a *g; struct gk20a *g;
int ret; int ret;
gk20a_dbg_fn("");
l = container_of(inode->i_cdev, l = container_of(inode->i_cdev,
struct nvgpu_os_linux, tsg.cdev); struct nvgpu_os_linux, tsg.cdev);
g = &l->g; g = &l->g;
nvgpu_log_fn(g, " ");
ret = gk20a_busy(g); ret = gk20a_busy(g);
if (ret) { if (ret) {
nvgpu_err(g, "failed to power on, %d", ret); nvgpu_err(g, "failed to power on, %d", ret);
@@ -412,7 +413,7 @@ int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
ret = nvgpu_ioctl_tsg_open(&l->g, filp); ret = nvgpu_ioctl_tsg_open(&l->g, filp);
gk20a_idle(g); gk20a_idle(g);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return ret; return ret;
} }
@@ -445,7 +446,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
u32 level = arg->level; u32 level = arg->level;
int err; int err;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
nvgpu_mutex_acquire(&sched->control_lock); nvgpu_mutex_acquire(&sched->control_lock);
if (sched->control_locked) { if (sched->control_locked) {
@@ -474,7 +475,7 @@ static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
struct gk20a_sched_ctrl *sched = &l->sched_ctrl; struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
int err; int err;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
nvgpu_mutex_acquire(&sched->control_lock); nvgpu_mutex_acquire(&sched->control_lock);
if (sched->control_locked) { if (sched->control_locked) {
@@ -509,7 +510,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE]; u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
int err = 0; int err = 0;
gk20a_dbg_fn("start %d", _IOC_NR(cmd)); nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) || if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) || (_IOC_NR(cmd) == 0) ||
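Two smaller adjustments show up in the TSG file: gk20a_tsg_event_id_post_event() gains a local struct gk20a *g = tsg->g, and nvgpu_ioctl_tsg_dev_open() can only emit its entry trace once it has resolved g from the character device, so the log call moves below the container_of() lookup instead of sitting at the top of the function. A rough stand-alone sketch of that ordering follows; the types and the container_of() macro are re-declared here purely for illustration.

/* Sketch of the reordered open path: log only once g has been recovered
 * from the embedded cdev. All types are stand-ins, not the driver's. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct gk20a { const char *name; };

struct cdev_stub { int minor; };            /* stand-in for struct cdev */

struct nvgpu_os_linux {                     /* Linux wrapper that embeds the gk20a */
        struct gk20a g;
        struct cdev_stub tsg_cdev;
};

static void nvgpu_log_fn_stub(struct gk20a *g, const char *msg)
{
        printf("nvgpu: %s %s\n", g->name, msg);
}

static int tsg_dev_open_sketch(struct cdev_stub *i_cdev)
{
        struct nvgpu_os_linux *l =
                container_of(i_cdev, struct nvgpu_os_linux, tsg_cdev);
        struct gk20a *g = &l->g;

        nvgpu_log_fn_stub(g, " ");          /* entry trace happens after g is known */
        return 0;
}

int main(void)
{
        struct nvgpu_os_linux l = { .g = { .name = "gp10b" } };

        return tsg_dev_open_sketch(&l.tsg_cdev);
}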


@@ -38,8 +38,6 @@
*/ */
#define LOG_FMT "nvgpu: %s %33s:%-4d [%s] %s\n" #define LOG_FMT "nvgpu: %s %33s:%-4d [%s] %s\n"
u64 nvgpu_dbg_mask = NVGPU_DEFAULT_DBG_MASK;
static const char *log_types[] = { static const char *log_types[] = {
"ERR", "ERR",
"WRN", "WRN",


@@ -218,7 +218,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_platform *platform = gk20a_get_platform(dev);
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (g->power_on) if (g->power_on)
return 0; return 0;
@@ -331,7 +331,7 @@ static int gk20a_pm_prepare_poweroff(struct device *dev)
struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_platform *platform = gk20a_get_platform(dev);
bool irqs_enabled; bool irqs_enabled;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&g->poweroff_lock); nvgpu_mutex_acquire(&g->poweroff_lock);
@@ -1013,7 +1013,7 @@ static int gk20a_pm_init(struct device *dev)
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(dev);
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* Initialise pm runtime */ /* Initialise pm runtime */
if (g->railgate_delay) { if (g->railgate_delay) {
@@ -1043,7 +1043,7 @@ void gk20a_driver_start_unload(struct gk20a *g)
{ {
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
gk20a_dbg(gpu_dbg_shutdown, "Driver is now going down!\n"); nvgpu_log(g, gpu_dbg_shutdown, "Driver is now going down!\n");
down_write(&l->busy_lock); down_write(&l->busy_lock);
__nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true); __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
@@ -1134,8 +1134,6 @@ static int gk20a_probe(struct platform_device *dev)
return -ENODATA; return -ENODATA;
} }
gk20a_dbg_fn("");
platform_set_drvdata(dev, platform); platform_set_drvdata(dev, platform);
if (gk20a_gpu_is_virtual(&dev->dev)) if (gk20a_gpu_is_virtual(&dev->dev))
@@ -1148,6 +1146,9 @@ static int gk20a_probe(struct platform_device *dev)
} }
gk20a = &l->g; gk20a = &l->g;
nvgpu_log_fn(gk20a, " ");
nvgpu_init_gk20a(gk20a); nvgpu_init_gk20a(gk20a);
set_gk20a(dev, gk20a); set_gk20a(dev, gk20a);
l->dev = &dev->dev; l->dev = &dev->dev;
@@ -1248,7 +1249,7 @@ int nvgpu_remove(struct device *dev, struct class *class)
struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_platform *platform = gk20a_get_platform(dev);
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = nvgpu_quiesce(g); err = nvgpu_quiesce(g);
WARN(err, "gpu failed to idle during driver removal"); WARN(err, "gpu failed to idle during driver removal");
@@ -1288,7 +1289,7 @@ int nvgpu_remove(struct device *dev, struct class *class)
if (platform->remove) if (platform->remove)
platform->remove(dev); platform->remove(dev);
gk20a_dbg_fn("removed"); nvgpu_log_fn(g, "removed");
return err; return err;
} }
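gk20a_probe() shows the other case: its old unconditional entry trace is deleted rather than converted, because no struct gk20a exists yet at that point, and the first nvgpu_log_fn() call now sits right after gk20a = &l->g. The sketch below mirrors that ordering with invented stand-in types; the real probe does far more than this.

/* Sketch: nothing is logged before the device structure exists. */
#include <stdio.h>
#include <stdlib.h>

struct gk20a { const char *name; };
struct nvgpu_os_linux { struct gk20a g; };     /* stand-in wrapper */

static void nvgpu_log_fn_stub(struct gk20a *g, const char *msg)
{
        printf("nvgpu: %s %s\n", g->name, msg);
}

static int probe_sketch(void)
{
        struct nvgpu_os_linux *l;
        struct gk20a *gk20a;

        /* the old gk20a_dbg_fn("") would have gone here, with no g to hand */
        l = calloc(1, sizeof(*l));
        if (!l)
                return -1;

        gk20a = &l->g;
        gk20a->name = "gk20a";
        nvgpu_log_fn_stub(gk20a, " ");         /* first trace only after g exists */

        free(l);
        return 0;
}

int main(void)
{
        return probe_sketch();
}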


@@ -140,7 +140,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
WARN_ON(!ptr); WARN_ON(!ptr);
data = ptr[w]; data = ptr[w];
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data); nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
#endif #endif
} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
u32 value; u32 value;
@@ -177,7 +177,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
memcpy(dest, src, size); memcpy(dest, src, size);
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
if (size) if (size)
gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]", nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
src, *dest, size); src, *dest, size);
#endif #endif
} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
@@ -215,7 +215,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
WARN_ON(!ptr); WARN_ON(!ptr);
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data); nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
#endif #endif
ptr[w] = data; ptr[w] = data;
} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
@@ -249,7 +249,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
WARN_ON(!mem->cpu_va); WARN_ON(!mem->cpu_va);
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
if (size) if (size)
gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]", nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
dest, *src, size); dest, *src, size);
#endif #endif
memcpy(dest, src, size); memcpy(dest, src, size);
@@ -296,7 +296,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
WARN_ON(!mem->cpu_va); WARN_ON(!mem->cpu_va);
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
if (size) if (size)
gk20a_dbg(gpu_dbg_mem, " %p = 0x%x [times %d]", nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x [times %d]",
dest, c, size); dest, c, size);
#endif #endif
memset(dest, c, size); memset(dest, c, size);


@@ -551,6 +551,7 @@ static void gk20a_tegra_scale_init(struct device *dev)
struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a_scale_profile *profile = platform->g->scale_profile; struct gk20a_scale_profile *profile = platform->g->scale_profile;
struct gk20a_emc_params *emc_params; struct gk20a_emc_params *emc_params;
struct gk20a *g = platform->g;
if (!profile) if (!profile)
return; return;
@@ -568,7 +569,7 @@ static void gk20a_tegra_scale_init(struct device *dev)
#ifdef CONFIG_TEGRA_BWMGR #ifdef CONFIG_TEGRA_BWMGR
emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU); emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU);
if (!emc_params->bwmgr_cl) { if (!emc_params->bwmgr_cl) {
gk20a_dbg_info("%s Missing GPU BWMGR client\n", __func__); nvgpu_log_info(g, "%s Missing GPU BWMGR client\n", __func__);
return; return;
} }
#endif #endif
@@ -767,6 +768,7 @@ static int gk20a_tegra_probe(struct device *dev)
struct device_node *np = dev->of_node; struct device_node *np = dev->of_node;
bool joint_xpu_rail = false; bool joint_xpu_rail = false;
int ret; int ret;
struct gk20a *g = platform->g;
#ifdef CONFIG_COMMON_CLK #ifdef CONFIG_COMMON_CLK
/* DVFS is not guaranteed to be initialized at the time of probe on /* DVFS is not guaranteed to be initialized at the time of probe on
@@ -775,13 +777,13 @@ static int gk20a_tegra_probe(struct device *dev)
if (!platform->gpu_rail) { if (!platform->gpu_rail) {
platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME); platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME);
if (!platform->gpu_rail) { if (!platform->gpu_rail) {
gk20a_dbg_info("deferring probe no gpu_rail\n"); nvgpu_log_info(g, "deferring probe no gpu_rail");
return -EPROBE_DEFER; return -EPROBE_DEFER;
} }
} }
if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) { if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) {
gk20a_dbg_info("deferring probe gpu_rail not ready\n"); nvgpu_log_info(g, "deferring probe gpu_rail not ready");
return -EPROBE_DEFER; return -EPROBE_DEFER;
} }
#endif #endif
@@ -798,7 +800,7 @@ static int gk20a_tegra_probe(struct device *dev)
#endif #endif
if (joint_xpu_rail) { if (joint_xpu_rail) {
gk20a_dbg_info("XPU rails are joint\n"); nvgpu_log_info(g, "XPU rails are joint\n");
platform->g->can_railgate = false; platform->g->can_railgate = false;
} }
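The Tegra platform file reaches g through struct gk20a_platform, which already carries a back-pointer to the GPU instance, so the deferred-probe and BWMGR messages keep their wording and only gain that context (the stray trailing "\n" is also dropped from the two EPROBE_DEFER messages). A minimal sketch of the lookup, assuming stub types:

/* Sketch of the platform back-pointer used by the Tegra probe paths. */
#include <stdio.h>

struct gk20a { const char *name; };

struct gk20a_platform {                 /* stand-in for the platform data */
        struct gk20a *g;
};

static void nvgpu_log_info_stub(struct gk20a *g, const char *msg)
{
        printf("nvgpu: %s %s\n", g->name, msg);
}

static int tegra_probe_sketch(struct gk20a_platform *platform, int rail_ready)
{
        struct gk20a *g = platform->g;

        if (!rail_ready) {
                nvgpu_log_info_stub(g, "deferring probe gpu_rail not ready");
                return -1;              /* the real code returns -EPROBE_DEFER here */
        }
        return 0;
}

int main(void)
{
        struct gk20a g = { .name = "gm20b" };
        struct gk20a_platform platform = { .g = &g };

        return tegra_probe_sketch(&platform, 1);
}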


@@ -273,11 +273,11 @@ void gp10b_tegra_prescale(struct device *dev)
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(dev);
u32 avg = 0; u32 avg = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_pmu_load_norm(g, &avg); nvgpu_pmu_load_norm(g, &avg);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
void gp10b_tegra_postscale(struct device *pdev, void gp10b_tegra_postscale(struct device *pdev,
@@ -288,7 +288,7 @@ void gp10b_tegra_postscale(struct device *pdev,
struct gk20a *g = get_gk20a(pdev); struct gk20a *g = get_gk20a(pdev);
unsigned long emc_rate; unsigned long emc_rate;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (profile && !platform->is_railgated(pdev)) { if (profile && !platform->is_railgated(pdev)) {
unsigned long emc_scale; unsigned long emc_scale;
@@ -306,7 +306,7 @@ void gp10b_tegra_postscale(struct device *pdev,
(struct tegra_bwmgr_client *)profile->private_data, (struct tegra_bwmgr_client *)profile->private_data,
emc_rate, TEGRA_BWMGR_SET_EMC_FLOOR); emc_rate, TEGRA_BWMGR_SET_EMC_FLOOR);
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
long gp10b_round_clk_rate(struct device *dev, unsigned long rate) long gp10b_round_clk_rate(struct device *dev, unsigned long rate)
@@ -328,6 +328,7 @@ int gp10b_clk_get_freqs(struct device *dev,
unsigned long **freqs, int *num_freqs) unsigned long **freqs, int *num_freqs)
{ {
struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g;
unsigned long max_rate; unsigned long max_rate;
unsigned long new_rate = 0, prev_rate = 0; unsigned long new_rate = 0, prev_rate = 0;
int i = 0, freq_counter = 0; int i = 0, freq_counter = 0;
@@ -358,7 +359,7 @@ int gp10b_clk_get_freqs(struct device *dev,
*freqs = gp10b_freq_table; *freqs = gp10b_freq_table;
*num_freqs = freq_counter; *num_freqs = freq_counter;
gk20a_dbg_info("min rate: %ld max rate: %ld num_of_freq %d\n", nvgpu_log_info(g, "min rate: %ld max rate: %ld num_of_freq %d\n",
gp10b_freq_table[0], max_rate, *num_freqs); gp10b_freq_table[0], max_rate, *num_freqs);
return 0; return 0;


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -37,10 +37,11 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
size_t size, loff_t *off) size_t size, loff_t *off)
{ {
struct gk20a_sched_ctrl *sched = filp->private_data; struct gk20a_sched_ctrl *sched = filp->private_data;
struct gk20a *g = sched->g;
struct nvgpu_sched_event_arg event = { 0 }; struct nvgpu_sched_event_arg event = { 0 };
int err; int err;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched,
"filp=%p buf=%p size=%zu", filp, buf, size); "filp=%p buf=%p size=%zu", filp, buf, size);
if (size < sizeof(event)) if (size < sizeof(event))
@@ -77,9 +78,10 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait) unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
{ {
struct gk20a_sched_ctrl *sched = filp->private_data; struct gk20a_sched_ctrl *sched = filp->private_data;
struct gk20a *g = sched->g;
unsigned int mask = 0; unsigned int mask = 0;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
nvgpu_mutex_acquire(&sched->status_lock); nvgpu_mutex_acquire(&sched->status_lock);
poll_wait(filp, &sched->readout_wq.wq, wait); poll_wait(filp, &sched->readout_wq.wq, wait);
@@ -93,7 +95,9 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched, static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
struct nvgpu_sched_get_tsgs_args *arg) struct nvgpu_sched_get_tsgs_args *arg)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", struct gk20a *g = sched->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
arg->size, arg->buffer); arg->size, arg->buffer);
if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -115,7 +119,9 @@ static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched, static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
struct nvgpu_sched_get_tsgs_args *arg) struct nvgpu_sched_get_tsgs_args *arg)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", struct gk20a *g = sched->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
arg->size, arg->buffer); arg->size, arg->buffer);
if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -139,7 +145,8 @@ static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
struct nvgpu_sched_get_tsgs_by_pid_args *arg) struct nvgpu_sched_get_tsgs_by_pid_args *arg)
{ {
struct fifo_gk20a *f = &sched->g->fifo; struct gk20a *g = sched->g;
struct fifo_gk20a *f = &g->fifo;
struct tsg_gk20a *tsg; struct tsg_gk20a *tsg;
u64 *bitmap; u64 *bitmap;
unsigned int tsgid; unsigned int tsgid;
@@ -147,7 +154,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
pid_t tgid = (pid_t)arg->pid; pid_t tgid = (pid_t)arg->pid;
int err = 0; int err = 0;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx", nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx",
(pid_t)arg->pid, arg->size, arg->buffer); (pid_t)arg->pid, arg->size, arg->buffer);
if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -186,7 +193,7 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
struct tsg_gk20a *tsg; struct tsg_gk20a *tsg;
u32 tsgid = arg->tsgid; u32 tsgid = arg->tsgid;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
if (tsgid >= f->num_channels) if (tsgid >= f->num_channels)
return -EINVAL; return -EINVAL;
@@ -221,7 +228,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
u32 tsgid = arg->tsgid; u32 tsgid = arg->tsgid;
int err; int err;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
if (tsgid >= f->num_channels) if (tsgid >= f->num_channels)
return -EINVAL; return -EINVAL;
@@ -256,7 +263,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
u32 tsgid = arg->tsgid; u32 tsgid = arg->tsgid;
int err; int err;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
if (tsgid >= f->num_channels) if (tsgid >= f->num_channels)
return -EINVAL; return -EINVAL;
@@ -283,7 +290,9 @@ done:
static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched) static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); struct gk20a *g = sched->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
nvgpu_mutex_acquire(&sched->control_lock); nvgpu_mutex_acquire(&sched->control_lock);
sched->control_locked = true; sched->control_locked = true;
@@ -293,7 +302,9 @@ static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched) static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); struct gk20a *g = sched->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
nvgpu_mutex_acquire(&sched->control_lock); nvgpu_mutex_acquire(&sched->control_lock);
sched->control_locked = false; sched->control_locked = false;
@@ -304,7 +315,9 @@ static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched, static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched,
struct nvgpu_sched_api_version_args *args) struct nvgpu_sched_api_version_args *args)
{ {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); struct gk20a *g = sched->g;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
args->version = NVGPU_SCHED_API_VERSION; args->version = NVGPU_SCHED_API_VERSION;
return 0; return 0;
@@ -318,7 +331,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
struct tsg_gk20a *tsg; struct tsg_gk20a *tsg;
u32 tsgid = arg->tsgid; u32 tsgid = arg->tsgid;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
if (tsgid >= f->num_channels) if (tsgid >= f->num_channels)
return -EINVAL; return -EINVAL;
@@ -355,7 +368,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
struct tsg_gk20a *tsg; struct tsg_gk20a *tsg;
u32 tsgid = arg->tsgid; u32 tsgid = arg->tsgid;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
if (tsgid >= f->num_channels) if (tsgid >= f->num_channels)
return -EINVAL; return -EINVAL;
@@ -390,7 +403,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
return -ENODEV; return -ENODEV;
sched = &l->sched_ctrl; sched = &l->sched_ctrl;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p", g); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p", g);
if (!sched->sw_ready) { if (!sched->sw_ready) {
err = gk20a_busy(g); err = gk20a_busy(g);
@@ -410,7 +423,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size); memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size);
filp->private_data = sched; filp->private_data = sched;
gk20a_dbg(gpu_dbg_sched, "filp=%p sched=%p", filp, sched); nvgpu_log(g, gpu_dbg_sched, "filp=%p sched=%p", filp, sched);
free_ref: free_ref:
if (err) if (err)
@@ -426,7 +439,7 @@ long gk20a_sched_dev_ioctl(struct file *filp, unsigned int cmd,
u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE]; u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE];
int err = 0; int err = 0;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd)); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd));
if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) || if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) || (_IOC_NR(cmd) == 0) ||
@@ -509,7 +522,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
struct tsg_gk20a *tsg; struct tsg_gk20a *tsg;
unsigned int tsgid; unsigned int tsgid;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);
/* release any reference to TSGs */ /* release any reference to TSGs */
for (tsgid = 0; tsgid < f->num_channels; tsgid++) { for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
@@ -535,7 +548,7 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
struct gk20a_sched_ctrl *sched = &l->sched_ctrl; struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
int err; int err;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
if (!sched->sw_ready) { if (!sched->sw_ready) {
err = gk20a_busy(g); err = gk20a_busy(g);
@@ -560,7 +573,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
struct gk20a_sched_ctrl *sched = &l->sched_ctrl; struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
nvgpu_mutex_acquire(&sched->status_lock); nvgpu_mutex_acquire(&sched->status_lock);
NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap); NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap);
@@ -592,7 +605,7 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
sched->bitmap_size = roundup(f->num_channels, 64) / 8; sched->bitmap_size = roundup(f->num_channels, 64) / 8;
sched->status = 0; sched->status = 0;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu", nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu",
g, sched, sched->bitmap_size); g, sched, sched->bitmap_size);
sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size); sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
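The scheduler ioctls follow the same back-pointer pattern: helpers that only received a struct gk20a_sched_ctrl now start with struct gk20a *g = sched->g before their trace. The sketch below assumes stub types and shows just that shape.

/* Sketch: sched-ctrl helpers derive g from their back-pointer before logging. */
#include <stdio.h>

struct gk20a { const char *name; };

struct gk20a_sched_ctrl {               /* stand-in for the scheduler control block */
        struct gk20a *g;
        int control_locked;
};

static void nvgpu_log_stub(struct gk20a *g, const char *msg)
{
        printf("nvgpu: %s %s\n", g->name, msg);
}

static int sched_lock_control_sketch(struct gk20a_sched_ctrl *sched)
{
        struct gk20a *g = sched->g;

        nvgpu_log_stub(g, "lock control");
        sched->control_locked = 1;
        return 0;
}

int main(void)
{
        struct gk20a g = { .name = "gp10b" };
        struct gk20a_sched_ctrl sched = { .g = &g };

        return sched_lock_control_sketch(&sched);
}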


@@ -33,7 +33,7 @@ static unsigned long vgpu_clk_get_rate(struct gk20a *g, u32 api_domain)
int err; int err;
unsigned long ret = 0; unsigned long ret = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
switch (api_domain) { switch (api_domain) {
case CTRL_CLK_DOMAIN_GPCCLK: case CTRL_CLK_DOMAIN_GPCCLK:
@@ -65,7 +65,7 @@ static int vgpu_clk_set_rate(struct gk20a *g,
struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate; struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
int err = -EINVAL; int err = -EINVAL;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
switch (api_domain) { switch (api_domain) {
case CTRL_CLK_DOMAIN_GPCCLK: case CTRL_CLK_DOMAIN_GPCCLK:
@@ -121,7 +121,7 @@ int vgpu_clk_get_freqs(struct device *dev,
unsigned int i; unsigned int i;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE; msg.cmd = TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE;
msg.handle = vgpu_get_handle(g); msg.handle = vgpu_get_handle(g);
@@ -152,7 +152,7 @@ int vgpu_clk_cap_rate(struct device *dev, unsigned long rate)
struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate; struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE; msg.cmd = TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE;
msg.handle = vgpu_get_handle(g); msg.handle = vgpu_get_handle(g);


@@ -86,7 +86,7 @@ static int vgpu_css_init_snapshot_buffer(struct gr_gk20a *gr)
int err; int err;
u64 size; u64 size;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (data->hw_snapshot) if (data->hw_snapshot)
return 0; return 0;
@@ -125,6 +125,7 @@ fail:
void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr) void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr)
{ {
struct gk20a_cs_snapshot *data = gr->cs_data; struct gk20a_cs_snapshot *data = gr->cs_data;
struct gk20a *g = gr->g;
if (!data->hw_snapshot) if (!data->hw_snapshot)
return; return;
@@ -135,7 +136,7 @@ void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr)
vgpu_ivm_mempool_unreserve(css_cookie); vgpu_ivm_mempool_unreserve(css_cookie);
css_cookie = NULL; css_cookie = NULL;
gk20a_dbg_info("cyclestats(vgpu): buffer for snapshots released\n"); nvgpu_log_info(g, "cyclestats(vgpu): buffer for snapshots released\n");
} }
int vgpu_css_flush_snapshots(struct channel_gk20a *ch, int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
@@ -148,7 +149,7 @@ int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
struct gk20a_cs_snapshot *data = gr->cs_data; struct gk20a_cs_snapshot *data = gr->cs_data;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
msg.handle = vgpu_get_handle(g); msg.handle = vgpu_get_handle(g);
@@ -176,7 +177,7 @@ static int vgpu_css_attach(struct channel_gk20a *ch,
&msg.params.cyclestats_snapshot; &msg.params.cyclestats_snapshot;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
msg.handle = vgpu_get_handle(g); msg.handle = vgpu_get_handle(g);
@@ -203,7 +204,7 @@ int vgpu_css_detach(struct channel_gk20a *ch,
&msg.params.cyclestats_snapshot; &msg.params.cyclestats_snapshot;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
msg.handle = vgpu_get_handle(g); msg.handle = vgpu_get_handle(g);
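In the vGPU cyclestats code the interesting hunk is vgpu_css_release_snapshot_buffer(), which previously logged with no GPU context at all and now derives one from gr->g so the "buffer for snapshots released" message survives the conversion. Sketch with stub types:

/* Sketch: the release path pulls g out of the gr structure it already owns. */
#include <stdio.h>

struct gk20a { const char *name; };

struct gr_gk20a_stub {                  /* stand-in for struct gr_gk20a */
        struct gk20a *g;
        void *hw_snapshot;
};

static void nvgpu_log_info_stub(struct gk20a *g, const char *msg)
{
        printf("nvgpu: %s %s\n", g->name, msg);
}

static void release_snapshot_sketch(struct gr_gk20a_stub *gr)
{
        struct gk20a *g = gr->g;

        if (!gr->hw_snapshot)
                return;

        gr->hw_snapshot = NULL;
        nvgpu_log_info_stub(g, "cyclestats(vgpu): buffer for snapshots released");
}

int main(void)
{
        struct gk20a g = { .name = "vgpu" };
        int dummy;
        struct gr_gk20a_stub gr = { .g = &g, .hw_snapshot = &dummy };

        release_snapshot_sketch(&gr);
        return 0;
}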


@@ -46,7 +46,7 @@ int vgpu_fecs_trace_init(struct gk20a *g)
u32 mempool; u32 mempool;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
vcst = nvgpu_kzalloc(g, sizeof(*vcst)); vcst = nvgpu_kzalloc(g, sizeof(*vcst));
if (!vcst) if (!vcst)


@@ -142,7 +142,7 @@ int vgpu_pm_prepare_poweroff(struct device *dev)
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(dev);
int ret = 0; int ret = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!g->power_on) if (!g->power_on)
return 0; return 0;
@@ -162,7 +162,7 @@ int vgpu_pm_finalize_poweron(struct device *dev)
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (g->power_on) if (g->power_on)
return 0; return 0;
@@ -227,7 +227,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
u32 max_freq; u32 max_freq;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS); max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
err = vgpu_clk_cap_rate(profile->dev, max_freq); err = vgpu_clk_cap_rate(profile->dev, max_freq);
@@ -277,7 +277,7 @@ static int vgpu_pm_init(struct device *dev)
int num_freqs; int num_freqs;
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (nvgpu_platform_is_simulation(g)) if (nvgpu_platform_is_simulation(g))
return 0; return 0;
@@ -321,14 +321,15 @@ int vgpu_probe(struct platform_device *pdev)
return -ENODATA; return -ENODATA;
} }
gk20a_dbg_fn("");
l = kzalloc(sizeof(*l), GFP_KERNEL); l = kzalloc(sizeof(*l), GFP_KERNEL);
if (!l) { if (!l) {
dev_err(dev, "couldn't allocate gk20a support"); dev_err(dev, "couldn't allocate gk20a support");
return -ENOMEM; return -ENOMEM;
} }
gk20a = &l->g; gk20a = &l->g;
nvgpu_log_fn(gk20a, " ");
nvgpu_init_gk20a(gk20a); nvgpu_init_gk20a(gk20a);
nvgpu_kmem_init(gk20a); nvgpu_kmem_init(gk20a);
@@ -428,7 +429,7 @@ int vgpu_probe(struct platform_device *pdev)
vgpu_create_sysfs(dev); vgpu_create_sysfs(dev);
gk20a_init_gr(gk20a); gk20a_init_gr(gk20a);
gk20a_dbg_info("total ram pages : %lu", totalram_pages); nvgpu_log_info(gk20a, "total ram pages : %lu", totalram_pages);
gk20a->gr.max_comptag_mem = totalram_pages gk20a->gr.max_comptag_mem = totalram_pages
>> (10 - (PAGE_SHIFT - 10)); >> (10 - (PAGE_SHIFT - 10));
@@ -442,7 +443,7 @@ int vgpu_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(dev);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
vgpu_pm_qos_remove(dev); vgpu_pm_qos_remove(dev);
if (g->remove_support) if (g->remove_support)


@@ -88,8 +88,9 @@ int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
u64 *offset) u64 *offset)
{ {
struct nvgpu_mapped_buf *mapped_buffer; struct nvgpu_mapped_buf *mapped_buffer;
struct gk20a *g = gk20a_from_vm(vm);
gk20a_dbg_fn("gpu_va=0x%llx", gpu_va); nvgpu_log_fn(g, "gpu_va=0x%llx", gpu_va);
nvgpu_mutex_acquire(&vm->update_gmmu_lock); nvgpu_mutex_acquire(&vm->update_gmmu_lock);
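nvgpu_vm_find_buf() is the one spot here that goes through an accessor rather than a direct member: gk20a_from_vm(vm) recovers g from the address-space object before the gpu_va trace. A stand-alone sketch of that accessor pattern follows; the helper below is a stub, not the driver's implementation.

/* Sketch: an accessor recovers g from the owning VM before logging. */
#include <stdio.h>

struct gk20a { const char *name; };

struct vm_gk20a_stub {                  /* stand-in for struct vm_gk20a */
        struct gk20a *g;
};

/* stub in the same spirit as the driver's gk20a_from_vm() helper */
static struct gk20a *gk20a_from_vm_stub(struct vm_gk20a_stub *vm)
{
        return vm->g;
}

static void nvgpu_log_fn_stub(struct gk20a *g, const char *msg, unsigned long long gpu_va)
{
        printf("nvgpu: %s %s gpu_va=0x%llx\n", g->name, msg, gpu_va);
}

static int vm_find_buf_sketch(struct vm_gk20a_stub *vm, unsigned long long gpu_va)
{
        struct gk20a *g = gk20a_from_vm_stub(vm);

        nvgpu_log_fn_stub(g, "find buf", gpu_va);
        return 0;
}

int main(void)
{
        struct gk20a g = { .name = "gp10b" };
        struct vm_gk20a_stub vm = { .g = &g };

        return vm_find_buf_sketch(&vm, 0x100000ULL);
}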


@@ -394,7 +394,7 @@ int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space)
{ {
struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator; struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!nvgpu_alloc_initialized(allocator)) if (!nvgpu_alloc_initialized(allocator))
return -ENOSYS; return -ENOSYS;


@@ -296,7 +296,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
struct pci_ext_data_struct *pci_ext_data; struct pci_ext_data_struct *pci_ext_data;
pci_rom = (struct pci_exp_rom *)&g->bios.data[offset]; pci_rom = (struct pci_exp_rom *)&g->bios.data[offset];
gk20a_dbg_fn("pci rom sig %04x ptr %04x block %x", nvgpu_log_fn(g, "pci rom sig %04x ptr %04x block %x",
pci_rom->sig, pci_rom->pci_data_struct_ptr, pci_rom->sig, pci_rom->pci_data_struct_ptr,
pci_rom->size_of_block); pci_rom->size_of_block);
@@ -309,7 +309,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
pci_data = pci_data =
(struct pci_data_struct *) (struct pci_data_struct *)
&g->bios.data[offset + pci_rom->pci_data_struct_ptr]; &g->bios.data[offset + pci_rom->pci_data_struct_ptr];
gk20a_dbg_fn("pci data sig %08x len %d image len %x type %x last %d max %08x", nvgpu_log_fn(g, "pci data sig %08x len %d image len %x type %x last %d max %08x",
pci_data->sig, pci_data->pci_data_struct_len, pci_data->sig, pci_data->pci_data_struct_len,
pci_data->image_len, pci_data->code_type, pci_data->image_len, pci_data->code_type,
pci_data->last_image, pci_data->last_image,
@@ -322,7 +322,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
pci_data->pci_data_struct_len + pci_data->pci_data_struct_len +
0xf) 0xf)
& ~0xf]; & ~0xf];
gk20a_dbg_fn("pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x", nvgpu_log_fn(g, "pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x",
pci_ext_data->sig, pci_ext_data->sig,
pci_ext_data->nv_pci_data_ext_rev, pci_ext_data->nv_pci_data_ext_rev,
pci_ext_data->nv_pci_data_ext_len, pci_ext_data->nv_pci_data_ext_len,
@@ -330,7 +330,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
pci_ext_data->priv_last_image, pci_ext_data->priv_last_image,
pci_ext_data->flags); pci_ext_data->flags);
gk20a_dbg_fn("expansion rom offset %x", nvgpu_log_fn(g, "expansion rom offset %x",
pci_data->image_len * 512); pci_data->image_len * 512);
g->bios.expansion_rom_offset = g->bios.expansion_rom_offset =
pci_data->image_len * 512; pci_data->image_len * 512;
@@ -342,7 +342,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
} }
} }
gk20a_dbg_info("read bios"); nvgpu_log_info(g, "read bios");
for (i = 0; i < g->bios.size - 6; i++) { for (i = 0; i < g->bios.size - 6; i++) {
if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID && if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID &&
nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) { nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) {
@@ -362,7 +362,7 @@ static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset)
struct biosdata biosdata; struct biosdata biosdata;
memcpy(&biosdata, &g->bios.data[offset], sizeof(biosdata)); memcpy(&biosdata, &g->bios.data[offset], sizeof(biosdata));
gk20a_dbg_fn("bios version %x, oem version %x", nvgpu_log_fn(g, "bios version %x, oem version %x",
biosdata.version, biosdata.version,
biosdata.oem_version); biosdata.oem_version);
@@ -375,9 +375,9 @@ static void nvgpu_bios_parse_nvinit_ptrs(struct gk20a *g, int offset)
struct nvinit_ptrs nvinit_ptrs; struct nvinit_ptrs nvinit_ptrs;
memcpy(&nvinit_ptrs, &g->bios.data[offset], sizeof(nvinit_ptrs)); memcpy(&nvinit_ptrs, &g->bios.data[offset], sizeof(nvinit_ptrs));
gk20a_dbg_fn("devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr, nvgpu_log_fn(g, "devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr,
nvinit_ptrs.devinit_tables_size); nvinit_ptrs.devinit_tables_size);
gk20a_dbg_fn("bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr, nvgpu_log_fn(g, "bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr,
nvinit_ptrs.bootscripts_size); nvinit_ptrs.bootscripts_size);
g->bios.devinit_tables = &g->bios.data[nvinit_ptrs.devinit_tables_ptr]; g->bios.devinit_tables = &g->bios.data[nvinit_ptrs.devinit_tables_ptr];
@@ -449,7 +449,7 @@ static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
struct devinit_engine_interface interface; struct devinit_engine_interface interface;
memcpy(&interface, &g->bios.devinit.dmem[dmem_offset], sizeof(interface)); memcpy(&interface, &g->bios.devinit.dmem[dmem_offset], sizeof(interface));
gk20a_dbg_fn("devinit version %x tables phys %x script phys %x size %d", nvgpu_log_fn(g, "devinit version %x tables phys %x script phys %x size %d",
interface.version, interface.version,
interface.tables_phys_base, interface.tables_phys_base,
interface.script_phys_base, interface.script_phys_base,
@@ -468,7 +468,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
memcpy(&hdr, &g->bios.data[offset], sizeof(hdr)); memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));
gk20a_dbg_fn("appInfoHdr ver %d size %d entrySize %d entryCount %d", nvgpu_log_fn(g, "appInfoHdr ver %d size %d entrySize %d entryCount %d",
hdr.version, hdr.header_size, hdr.version, hdr.header_size,
hdr.entry_size, hdr.entry_count); hdr.entry_size, hdr.entry_count);
@@ -481,7 +481,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
memcpy(&entry, &g->bios.data[offset], sizeof(entry)); memcpy(&entry, &g->bios.data[offset], sizeof(entry));
gk20a_dbg_fn("appInfo id %d dmem_offset %d", nvgpu_log_fn(g, "appInfo id %d dmem_offset %d",
entry.id, entry.dmem_offset); entry.id, entry.dmem_offset);
if (entry.id == APPINFO_ID_DEVINIT) if (entry.id == APPINFO_ID_DEVINIT)
@@ -530,26 +530,26 @@ static int nvgpu_bios_parse_falcon_ucode_desc(struct gk20a *g,
memcpy(&desc, &udesc, sizeof(udesc.v2)); memcpy(&desc, &udesc, sizeof(udesc.v2));
break; break;
default: default:
gk20a_dbg_info("invalid version"); nvgpu_log_info(g, "invalid version");
return -EINVAL; return -EINVAL;
} }
gk20a_dbg_info("falcon ucode desc version %x len %x", version, desc_size); nvgpu_log_info(g, "falcon ucode desc version %x len %x", version, desc_size);
gk20a_dbg_info("falcon ucode desc stored size %x uncompressed size %x", nvgpu_log_info(g, "falcon ucode desc stored size %x uncompressed size %x",
desc.stored_size, desc.uncompressed_size); desc.stored_size, desc.uncompressed_size);
gk20a_dbg_info("falcon ucode desc virtualEntry %x, interfaceOffset %x", nvgpu_log_info(g, "falcon ucode desc virtualEntry %x, interfaceOffset %x",
desc.virtual_entry, desc.interface_offset); desc.virtual_entry, desc.interface_offset);
gk20a_dbg_info("falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x", nvgpu_log_info(g, "falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x",
desc.imem_phys_base, desc.imem_load_size, desc.imem_phys_base, desc.imem_load_size,
desc.imem_virt_base, desc.imem_sec_base, desc.imem_virt_base, desc.imem_sec_base,
desc.imem_sec_size); desc.imem_sec_size);
gk20a_dbg_info("falcon ucode DMEM offset %x phys base %x, load size %x", nvgpu_log_info(g, "falcon ucode DMEM offset %x phys base %x, load size %x",
desc.dmem_offset, desc.dmem_phys_base, desc.dmem_offset, desc.dmem_phys_base,
desc.dmem_load_size); desc.dmem_load_size);
if (desc.stored_size != desc.uncompressed_size) { if (desc.stored_size != desc.uncompressed_size) {
gk20a_dbg_info("does not match"); nvgpu_log_info(g, "does not match");
return -EINVAL; return -EINVAL;
} }
@@ -575,7 +575,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
int i; int i;
memcpy(&hdr, &g->bios.data[offset], sizeof(hdr)); memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));
gk20a_dbg_fn("falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d", nvgpu_log_fn(g, "falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d",
hdr.version, hdr.header_size, hdr.version, hdr.header_size,
hdr.entry_size, hdr.entry_count, hdr.entry_size, hdr.entry_count,
hdr.desc_version, hdr.desc_size); hdr.desc_version, hdr.desc_size);
@@ -590,7 +590,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
memcpy(&entry, &g->bios.data[offset], sizeof(entry)); memcpy(&entry, &g->bios.data[offset], sizeof(entry));
gk20a_dbg_fn("falcon ucode table entry appid %x targetId %x descPtr %x", nvgpu_log_fn(g, "falcon ucode table entry appid %x targetId %x descPtr %x",
entry.application_id, entry.target_id, entry.application_id, entry.target_id,
entry.desc_ptr); entry.desc_ptr);
@@ -638,7 +638,7 @@ static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, int offset)
int err; int err;
memcpy(&falcon_data, &g->bios.data[offset], sizeof(falcon_data)); memcpy(&falcon_data, &g->bios.data[offset], sizeof(falcon_data));
gk20a_dbg_fn("falcon ucode table ptr %x", nvgpu_log_fn(g, "falcon ucode table ptr %x",
falcon_data.falcon_ucode_table_ptr); falcon_data.falcon_ucode_table_ptr);
err = nvgpu_bios_parse_falcon_ucode_table(g, err = nvgpu_bios_parse_falcon_ucode_table(g,
falcon_data.falcon_ucode_table_ptr); falcon_data.falcon_ucode_table_ptr);
@@ -676,7 +676,7 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
if (table_id < (ptoken->data_size/data_size)) { if (table_id < (ptoken->data_size/data_size)) {
gk20a_dbg_info("Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x", nvgpu_log_info(g, "Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x",
(ptoken->data_ptr + (ptoken->data_ptr +
(table_id * data_size)), (table_id * data_size)),
perf_table_id_offset); perf_table_id_offset);
@@ -705,18 +705,18 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
struct bit_token bit_token; struct bit_token bit_token;
int i; int i;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
memcpy(&bit, &g->bios.data[offset], sizeof(bit)); memcpy(&bit, &g->bios.data[offset], sizeof(bit));
gk20a_dbg_info("BIT header: %04x %08x", bit.id, bit.signature); nvgpu_log_info(g, "BIT header: %04x %08x", bit.id, bit.signature);
gk20a_dbg_info("tokens: %d entries * %d bytes", nvgpu_log_info(g, "tokens: %d entries * %d bytes",
bit.token_entries, bit.token_size); bit.token_entries, bit.token_size);
offset += bit.header_size; offset += bit.header_size;
for (i = 0; i < bit.token_entries; i++) { for (i = 0; i < bit.token_entries; i++) {
memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token)); memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token));
gk20a_dbg_info("BIT token id %d ptr %d size %d ver %d", nvgpu_log_info(g, "BIT token id %d ptr %d size %d ver %d",
bit_token.token_id, bit_token.data_ptr, bit_token.token_id, bit_token.data_ptr,
bit_token.data_size, bit_token.data_version); bit_token.data_size, bit_token.data_version);
@@ -753,7 +753,7 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
offset += bit.token_size; offset += bit.token_size;
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
static u32 __nvgpu_bios_readbyte(struct gk20a *g, u32 offset) static u32 __nvgpu_bios_readbyte(struct gk20a *g, u32 offset)


@@ -50,21 +50,21 @@
static u32 ce2_nonblockpipe_isr(struct gk20a *g, u32 fifo_intr) static u32 ce2_nonblockpipe_isr(struct gk20a *g, u32 fifo_intr)
{ {
gk20a_dbg(gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n"); nvgpu_log(g, gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n");
return ce2_intr_status_nonblockpipe_pending_f(); return ce2_intr_status_nonblockpipe_pending_f();
} }
static u32 ce2_blockpipe_isr(struct gk20a *g, u32 fifo_intr) static u32 ce2_blockpipe_isr(struct gk20a *g, u32 fifo_intr)
{ {
gk20a_dbg(gpu_dbg_intr, "ce2 blocking pipe interrupt\n"); nvgpu_log(g, gpu_dbg_intr, "ce2 blocking pipe interrupt\n");
return ce2_intr_status_blockpipe_pending_f(); return ce2_intr_status_blockpipe_pending_f();
} }
static u32 ce2_launcherr_isr(struct gk20a *g, u32 fifo_intr) static u32 ce2_launcherr_isr(struct gk20a *g, u32 fifo_intr)
{ {
gk20a_dbg(gpu_dbg_intr, "ce2 launch error interrupt\n"); nvgpu_log(g, gpu_dbg_intr, "ce2 launch error interrupt\n");
return ce2_intr_status_launcherr_pending_f(); return ce2_intr_status_launcherr_pending_f();
} }
@@ -74,7 +74,7 @@ void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r()); u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r());
u32 clear_intr = 0; u32 clear_intr = 0;
gk20a_dbg(gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr); nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr);
/* clear blocking interrupts: they exibit broken behavior */ /* clear blocking interrupts: they exibit broken behavior */
if (ce2_intr & ce2_intr_status_blockpipe_pending_f()) if (ce2_intr & ce2_intr_status_blockpipe_pending_f())
@@ -92,7 +92,7 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
int ops = 0; int ops = 0;
u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r()); u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r());
gk20a_dbg(gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr); nvgpu_log(g, gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr);
if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) { if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) {
gk20a_writel(g, ce2_intr_status_r(), gk20a_writel(g, ce2_intr_status_r(),
@@ -340,7 +340,7 @@ int gk20a_init_ce_support(struct gk20a *g)
return 0; return 0;
} }
gk20a_dbg(gpu_dbg_fn, "ce: init"); nvgpu_log(g, gpu_dbg_fn, "ce: init");
err = nvgpu_mutex_init(&ce_app->app_mutex); err = nvgpu_mutex_init(&ce_app->app_mutex);
if (err) if (err)
@@ -355,7 +355,7 @@ int gk20a_init_ce_support(struct gk20a *g)
ce_app->app_state = NVGPU_CE_ACTIVE; ce_app->app_state = NVGPU_CE_ACTIVE;
nvgpu_mutex_release(&ce_app->app_mutex); nvgpu_mutex_release(&ce_app->app_mutex);
gk20a_dbg(gpu_dbg_cde_ctx, "ce: init finished"); nvgpu_log(g, gpu_dbg_cde_ctx, "ce: init finished");
return 0; return 0;
} }
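The copy-engine ISR hunks show the other half of the interface: nvgpu_log() carries a verbosity class alongside the GPU pointer (gpu_dbg_intr for interrupt paths, gpu_dbg_fn and gpu_dbg_cde_ctx for init), and with the global default mask removed in the log.c hunk above, a message is presumably emitted only when its class is enabled for that particular GPU. The mock below demonstrates that filtering with invented bit values; the real class bits and mask live in the driver headers.

/* Mock of per-GPU, per-class filtering: only classes present in g->log_mask print. */
#include <stdio.h>

#define gpu_dbg_fn   (1UL << 0)         /* invented bit values, for the mock only */
#define gpu_dbg_intr (1UL << 1)

struct gk20a {
        const char *name;
        unsigned long log_mask;
};

static void nvgpu_log_stub(struct gk20a *g, unsigned long classes, const char *msg)
{
        if (!(g->log_mask & classes))
                return;                 /* class not enabled on this GPU: stay silent */
        printf("nvgpu: %s %s\n", g->name, msg);
}

int main(void)
{
        struct gk20a g = { .name = "gp10b", .log_mask = gpu_dbg_intr };

        nvgpu_log_stub(&g, gpu_dbg_intr, "ce2 non-blocking pipe interrupt"); /* printed */
        nvgpu_log_stub(&g, gpu_dbg_fn, "ce: init");                          /* filtered out */
        return 0;
}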


@@ -116,7 +116,7 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
{ {
struct gk20a *g = c->g; struct gk20a *g = c->g;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
g->ops.mm.init_inst_block(&c->inst_block, c->vm, g->ops.mm.init_inst_block(&c->inst_block, c->vm,
c->vm->gmmu_page_sizes[gmmu_page_size_big]); c->vm->gmmu_page_sizes[gmmu_page_size_big]);
@@ -208,7 +208,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt) void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(ch->g, " ");
if (gk20a_is_channel_marked_as_tsg(ch)) if (gk20a_is_channel_marked_as_tsg(ch))
return gk20a_fifo_abort_tsg(ch->g, ch->tsgid, channel_preempt); return gk20a_fifo_abort_tsg(ch->g, ch->tsgid, channel_preempt);
@@ -291,7 +291,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
struct dbg_session_channel_data *ch_data, *tmp; struct dbg_session_channel_data *ch_data, *tmp;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
WARN_ON(ch->g == NULL); WARN_ON(ch->g == NULL);
@@ -351,7 +351,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
/* if engine reset was deferred, perform it now */ /* if engine reset was deferred, perform it now */
nvgpu_mutex_acquire(&f->deferred_reset_mutex); nvgpu_mutex_acquire(&f->deferred_reset_mutex);
if (g->fifo.deferred_reset_pending) { if (g->fifo.deferred_reset_pending) {
gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was" nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
" deferred, running now"); " deferred, running now");
/* if lock is already taken, a reset is taking place /* if lock is already taken, a reset is taking place
so no need to repeat */ so no need to repeat */
@@ -365,7 +365,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
if (!gk20a_channel_as_bound(ch)) if (!gk20a_channel_as_bound(ch))
goto unbind; goto unbind;
gk20a_dbg_info("freeing bound channel context, timeout=%ld", nvgpu_log_info(g, "freeing bound channel context, timeout=%ld",
timeout); timeout);
#ifdef CONFIG_GK20A_CTXSW_TRACE #ifdef CONFIG_GK20A_CTXSW_TRACE
@@ -626,7 +626,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
runlist_id = gk20a_fifo_get_gr_runlist_id(g); runlist_id = gk20a_fifo_get_gr_runlist_id(g);
} }
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
ch = allocate_channel(f); ch = allocate_channel(f);
if (ch == NULL) { if (ch == NULL) {
@@ -765,7 +765,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
u32 free_count; u32 free_count;
u32 size = orig_size; u32 size = orig_size;
gk20a_dbg_fn("size %d", orig_size); nvgpu_log_fn(c->g, "size %d", orig_size);
if (!e) { if (!e) {
nvgpu_err(c->g, nvgpu_err(c->g,
@@ -779,7 +779,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
if (q->put + size > q->size) if (q->put + size > q->size)
size = orig_size + (q->size - q->put); size = orig_size + (q->size - q->put);
gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d", nvgpu_log_info(c->g, "ch %d: priv cmd queue get:put %d:%d",
c->chid, q->get, q->put); c->chid, q->get, q->put);
free_count = (q->size - (q->put - q->get) - 1) % q->size; free_count = (q->size - (q->put - q->get) - 1) % q->size;
@@ -812,7 +812,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
nvgpu_smp_wmb(); nvgpu_smp_wmb();
e->valid = true; e->valid = true;
gk20a_dbg_fn("done"); nvgpu_log_fn(c->g, "done");
return 0; return 0;
} }
@@ -1132,7 +1132,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
c->gpfifo.entry_num = gpfifo_size; c->gpfifo.entry_num = gpfifo_size;
c->gpfifo.get = c->gpfifo.put = 0; c->gpfifo.get = c->gpfifo.put = 0;
gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d", nvgpu_log_info(g, "channel %d : gpfifo_base 0x%016llx, size %d",
c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num); c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);
g->ops.fifo.setup_userd(c); g->ops.fifo.setup_userd(c);
@@ -1184,7 +1184,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
g->ops.fifo.bind_channel(c); g->ops.fifo.bind_channel(c);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
clean_up_priv_cmd: clean_up_priv_cmd:
@@ -1400,7 +1400,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
u64 pb_get; u64 pb_get;
u64 new_pb_get; u64 new_pb_get;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* Get status and clear the timer */ /* Get status and clear the timer */
nvgpu_raw_spinlock_acquire(&ch->timeout.lock); nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
@@ -1480,7 +1480,7 @@ static void gk20a_channel_poll_timeouts(struct gk20a *g)
*/ */
static void gk20a_channel_worker_process_ch(struct channel_gk20a *ch) static void gk20a_channel_worker_process_ch(struct channel_gk20a *ch)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(ch->g, " ");
gk20a_channel_clean_up_jobs(ch, true); gk20a_channel_clean_up_jobs(ch, true);
@@ -1499,7 +1499,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g)
{ {
int put; int put;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* /*
* Currently, the only work type is associated with a lock, which deals * Currently, the only work type is associated with a lock, which deals
@@ -1596,7 +1596,7 @@ static int gk20a_channel_poll_worker(void *arg)
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
int get = 0; int get = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_timeout_init(g, &timeout, watchdog_interval, nvgpu_timeout_init(g, &timeout, watchdog_interval,
NVGPU_TIMER_CPU_TIMER); NVGPU_TIMER_CPU_TIMER);
@@ -1699,7 +1699,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* /*
* Warn if worker thread cannot run * Warn if worker thread cannot run
@@ -2142,12 +2142,12 @@ int gk20a_channel_suspend(struct gk20a *g)
bool channels_in_use = false; bool channels_in_use = false;
u32 active_runlist_ids = 0; u32 active_runlist_ids = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
for (chid = 0; chid < f->num_channels; chid++) { for (chid = 0; chid < f->num_channels; chid++) {
struct channel_gk20a *ch = &f->channel[chid]; struct channel_gk20a *ch = &f->channel[chid];
if (gk20a_channel_get(ch)) { if (gk20a_channel_get(ch)) {
gk20a_dbg_info("suspend channel %d", chid); nvgpu_log_info(g, "suspend channel %d", chid);
/* disable channel */ /* disable channel */
gk20a_disable_channel_tsg(g, ch); gk20a_disable_channel_tsg(g, ch);
/* preempt the channel */ /* preempt the channel */
@@ -2175,7 +2175,7 @@ int gk20a_channel_suspend(struct gk20a *g)
} }
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -2186,11 +2186,11 @@ int gk20a_channel_resume(struct gk20a *g)
bool channels_in_use = false; bool channels_in_use = false;
u32 active_runlist_ids = 0; u32 active_runlist_ids = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
for (chid = 0; chid < f->num_channels; chid++) { for (chid = 0; chid < f->num_channels; chid++) {
if (gk20a_channel_get(&f->channel[chid])) { if (gk20a_channel_get(&f->channel[chid])) {
gk20a_dbg_info("resume channel %d", chid); nvgpu_log_info(g, "resume channel %d", chid);
g->ops.fifo.bind_channel(&f->channel[chid]); g->ops.fifo.bind_channel(&f->channel[chid]);
channels_in_use = true; channels_in_use = true;
active_runlist_ids |= BIT(f->channel[chid].runlist_id); active_runlist_ids |= BIT(f->channel[chid].runlist_id);
@@ -2201,7 +2201,7 @@ int gk20a_channel_resume(struct gk20a *g)
if (channels_in_use) if (channels_in_use)
gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true); gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -2210,7 +2210,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;
u32 chid; u32 chid;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* /*
* Ensure that all pending writes are actually done before trying to * Ensure that all pending writes are actually done before trying to


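The channel hunks above all follow the same mechanical substitution: gk20a_dbg_fn("") and gk20a_dbg_info(...) become nvgpu_log_fn(g, " ") and nvgpu_log_info(g, ...), with the GPU instance passed explicitly as the first argument (note the single-space format string replacing the old empty one). The self-contained sketch below only illustrates the call-site shape; the stub structures and macros are simplified stand-ins for the real nvgpu definitions, which live in the nvgpu headers.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the nvgpu logging macros. */
struct gk20a { const char *name; };
struct channel_gk20a { struct gk20a *g; int chid; };

#define nvgpu_log_fn(g, fmt, ...) \
	printf("%s: fn: %s: " fmt "\n", (g)->name, __func__, ##__VA_ARGS__)
#define nvgpu_log_info(g, fmt, ...) \
	printf("%s: info: " fmt "\n", (g)->name, ##__VA_ARGS__)

/*
 * Old style (no device pointer seen by the macro):
 *	gk20a_dbg_fn("");
 *	gk20a_dbg_info("suspend channel %d", chid);
 * New style: derive g from the object at hand and pass it explicitly.
 */
static void example_channel_suspend(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;	/* the channel carries a back-pointer to its GPU */

	nvgpu_log_fn(g, " ");
	nvgpu_log_info(g, "suspend channel %d", ch->chid);
	nvgpu_log_fn(g, "done");
}

int main(void)
{
	struct gk20a g = { .name = "gk20a" };
	struct channel_gk20a ch = { .g = &g, .chid = 3 };

	example_channel_suspend(&ch);
	return 0;
}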
@@ -1,7 +1,7 @@
/* /*
* GK20A Cycle stats snapshots support (subsystem for gr_gk20a). * GK20A Cycle stats snapshots support (subsystem for gr_gk20a).
* *
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -189,7 +189,7 @@ int css_hw_enable_snapshot(struct channel_gk20a *ch,
perf_pmasys_mem_block_valid_true_f() | perf_pmasys_mem_block_valid_true_f() |
perf_pmasys_mem_block_target_lfb_f()); perf_pmasys_mem_block_target_lfb_f());
gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n"); nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n");
return 0; return 0;
@@ -227,7 +227,7 @@ void css_hw_disable_snapshot(struct gr_gk20a *gr)
memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
data->hw_snapshot = NULL; data->hw_snapshot = NULL;
gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n"); nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n");
} }
static void css_gr_free_shared_data(struct gr_gk20a *gr) static void css_gr_free_shared_data(struct gr_gk20a *gr)


@@ -90,8 +90,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
{ {
struct dbg_session_data *session_data; struct dbg_session_data *session_data;
struct dbg_session_gk20a *dbg_s; struct dbg_session_gk20a *dbg_s;
struct gk20a *g = ch->g;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
/* guard against the session list being modified */ /* guard against the session list being modified */
nvgpu_mutex_acquire(&ch->dbg_s_lock); nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -100,9 +101,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
dbg_session_data, dbg_s_entry) { dbg_session_data, dbg_s_entry) {
dbg_s = session_data->dbg_s; dbg_s = session_data->dbg_s;
if (dbg_s->dbg_events.events_enabled) { if (dbg_s->dbg_events.events_enabled) {
gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d", nvgpu_log(g, gpu_dbg_gpu_dbg, "posting event on session id %d",
dbg_s->id); dbg_s->id);
gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending", nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending",
dbg_s->dbg_events.num_pending_events); dbg_s->dbg_events.num_pending_events);
dbg_s->dbg_events.num_pending_events++; dbg_s->dbg_events.num_pending_events++;
@@ -119,8 +120,9 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
struct dbg_session_data *session_data; struct dbg_session_data *session_data;
struct dbg_session_gk20a *dbg_s; struct dbg_session_gk20a *dbg_s;
bool broadcast = false; bool broadcast = false;
struct gk20a *g = ch->g;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
/* guard against the session list being modified */ /* guard against the session list being modified */
nvgpu_mutex_acquire(&ch->dbg_s_lock); nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -129,7 +131,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
dbg_session_data, dbg_s_entry) { dbg_session_data, dbg_s_entry) {
dbg_s = session_data->dbg_s; dbg_s = session_data->dbg_s;
if (dbg_s->broadcast_stop_trigger) { if (dbg_s->broadcast_stop_trigger) {
gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
"stop trigger broadcast enabled"); "stop trigger broadcast enabled");
broadcast = true; broadcast = true;
break; break;
@@ -145,8 +147,9 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
{ {
struct dbg_session_data *session_data; struct dbg_session_data *session_data;
struct dbg_session_gk20a *dbg_s; struct dbg_session_gk20a *dbg_s;
struct gk20a *g = ch->g;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
/* guard against the session list being modified */ /* guard against the session list being modified */
nvgpu_mutex_acquire(&ch->dbg_s_lock); nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -155,7 +158,7 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
dbg_session_data, dbg_s_entry) { dbg_session_data, dbg_s_entry) {
dbg_s = session_data->dbg_s; dbg_s = session_data->dbg_s;
if (dbg_s->broadcast_stop_trigger) { if (dbg_s->broadcast_stop_trigger) {
gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
"stop trigger broadcast disabled"); "stop trigger broadcast disabled");
dbg_s->broadcast_stop_trigger = false; dbg_s->broadcast_stop_trigger = false;
} }


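The dbg_gpu hunks above show the second recurring change: functions that previously logged without any device context now declare a local struct gk20a *g = ch->g so the mask-based nvgpu_log(g, gpu_dbg_* ...) call has an instance to work against. The rough sketch below illustrates that idea under the assumption that each device carries its own enabled-category mask; the gpu_dbg_* names are taken from the diff, but the filtering logic shown here is an illustration, not the actual nvgpu implementation.

#include <stdio.h>

/* Debug categories as bit flags (names taken from the hunks above). */
enum {
	gpu_dbg_fn      = 1u << 0,
	gpu_dbg_intr    = 1u << 1,
	gpu_dbg_gpu_dbg = 1u << 2,
};

/* Assumed per-device state: each GPU instance has its own log mask. */
struct gk20a { const char *name; unsigned int log_mask; };
struct channel_gk20a { struct gk20a *g; };

/* Hypothetical filter: emit only if a requested category is enabled. */
#define nvgpu_log(g, mask, fmt, ...) \
	do { \
		if ((g)->log_mask & (mask)) \
			printf("%s: " fmt "\n", (g)->name, ##__VA_ARGS__); \
	} while (0)

static void example_post_events(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;	/* local g, as added in the hunks above */

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
	nvgpu_log(g, gpu_dbg_gpu_dbg, "posting event on session id %d", 7);
}

int main(void)
{
	struct gk20a g = { .name = "gk20a", .log_mask = gpu_dbg_fn };
	struct channel_gk20a ch = { .g = &g };

	example_post_events(&ch);	/* only the first call prints: gpu_dbg_gpu_dbg is not enabled */
	return 0;
}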
@@ -1,7 +1,7 @@
/* /*
* GK20A memory interface * GK20A memory interface
* *
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@ void fb_gk20a_reset(struct gk20a *g)
{ {
u32 val; u32 val;
gk20a_dbg_info("reset gk20a fb"); nvgpu_log_info(g, "reset gk20a fb");
g->ops.mc.reset(g, mc_enable_pfb_enabled_f() | g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
mc_enable_l2_enabled_f() | mc_enable_l2_enabled_f() |
@@ -63,7 +63,7 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
u32 addr_lo; u32 addr_lo;
u32 data; u32 data;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* pagetables are considered sw states which are preserved after /* pagetables are considered sw states which are preserved after
prepare_poweroff. When gk20a deinit releases those pagetables, prepare_poweroff. When gk20a deinit releases those pagetables,


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -137,7 +137,7 @@ static int gk20a_fecs_trace_get_write_index(struct gk20a *g)
static int gk20a_fecs_trace_set_read_index(struct gk20a *g, int index) static int gk20a_fecs_trace_set_read_index(struct gk20a *g, int index)
{ {
gk20a_dbg(gpu_dbg_ctxsw, "set read=%d", index); nvgpu_log(g, gpu_dbg_ctxsw, "set read=%d", index);
return gr_gk20a_elpg_protected_call(g, return gr_gk20a_elpg_protected_call(g,
(gk20a_writel(g, gr_fecs_mailbox1_r(), index), 0)); (gk20a_writel(g, gr_fecs_mailbox1_r(), index), 0));
} }
@@ -148,12 +148,12 @@ void gk20a_fecs_trace_hash_dump(struct gk20a *g)
struct gk20a_fecs_trace_hash_ent *ent; struct gk20a_fecs_trace_hash_ent *ent;
struct gk20a_fecs_trace *trace = g->fecs_trace; struct gk20a_fecs_trace *trace = g->fecs_trace;
gk20a_dbg(gpu_dbg_ctxsw, "dumping hash table"); nvgpu_log(g, gpu_dbg_ctxsw, "dumping hash table");
nvgpu_mutex_acquire(&trace->hash_lock); nvgpu_mutex_acquire(&trace->hash_lock);
hash_for_each(trace->pid_hash_table, bkt, ent, node) hash_for_each(trace->pid_hash_table, bkt, ent, node)
{ {
gk20a_dbg(gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d", nvgpu_log(g, gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d",
ent, bkt, ent->context_ptr, ent->pid); ent, bkt, ent->context_ptr, ent->pid);
} }
@@ -165,7 +165,7 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid
struct gk20a_fecs_trace_hash_ent *he; struct gk20a_fecs_trace_hash_ent *he;
struct gk20a_fecs_trace *trace = g->fecs_trace; struct gk20a_fecs_trace *trace = g->fecs_trace;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
"adding hash entry context_ptr=%x -> pid=%d", context_ptr, pid); "adding hash entry context_ptr=%x -> pid=%d", context_ptr, pid);
he = nvgpu_kzalloc(g, sizeof(*he)); he = nvgpu_kzalloc(g, sizeof(*he));
@@ -190,7 +190,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr)
struct gk20a_fecs_trace_hash_ent *ent; struct gk20a_fecs_trace_hash_ent *ent;
struct gk20a_fecs_trace *trace = g->fecs_trace; struct gk20a_fecs_trace *trace = g->fecs_trace;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
"freeing hash entry context_ptr=%x", context_ptr); "freeing hash entry context_ptr=%x", context_ptr);
nvgpu_mutex_acquire(&trace->hash_lock); nvgpu_mutex_acquire(&trace->hash_lock);
@@ -198,7 +198,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr)
context_ptr) { context_ptr) {
if (ent->context_ptr == context_ptr) { if (ent->context_ptr == context_ptr) {
hash_del(&ent->node); hash_del(&ent->node);
gk20a_dbg(gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_ctxsw,
"freed hash entry=%p context_ptr=%x", ent, "freed hash entry=%p context_ptr=%x", ent,
ent->context_ptr); ent->context_ptr);
nvgpu_kfree(g, ent); nvgpu_kfree(g, ent);
@@ -215,7 +215,7 @@ static void gk20a_fecs_trace_free_hash_table(struct gk20a *g)
struct gk20a_fecs_trace_hash_ent *ent; struct gk20a_fecs_trace_hash_ent *ent;
struct gk20a_fecs_trace *trace = g->fecs_trace; struct gk20a_fecs_trace *trace = g->fecs_trace;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace);
nvgpu_mutex_acquire(&trace->hash_lock); nvgpu_mutex_acquire(&trace->hash_lock);
hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) { hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) {
@@ -235,7 +235,7 @@ static pid_t gk20a_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr)
nvgpu_mutex_acquire(&trace->hash_lock); nvgpu_mutex_acquire(&trace->hash_lock);
hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) { hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) {
if (ent->context_ptr == context_ptr) { if (ent->context_ptr == context_ptr) {
gk20a_dbg(gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_ctxsw,
"found context_ptr=%x -> pid=%d", "found context_ptr=%x -> pid=%d",
ent->context_ptr, ent->pid); ent->context_ptr, ent->pid);
pid = ent->pid; pid = ent->pid;
@@ -265,7 +265,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
struct gk20a_fecs_trace_record *r = gk20a_fecs_trace_get_record( struct gk20a_fecs_trace_record *r = gk20a_fecs_trace_get_record(
trace, index); trace, index);
gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
"consuming record trace=%p read=%d record=%p", trace, index, r); "consuming record trace=%p read=%d record=%p", trace, index, r);
if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) { if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) {
@@ -284,7 +284,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
cur_pid = gk20a_fecs_trace_find_pid(g, r->context_ptr); cur_pid = gk20a_fecs_trace_find_pid(g, r->context_ptr);
new_pid = gk20a_fecs_trace_find_pid(g, r->new_context_ptr); new_pid = gk20a_fecs_trace_find_pid(g, r->new_context_ptr);
gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
"context_ptr=%x (pid=%d) new_context_ptr=%x (pid=%d)", "context_ptr=%x (pid=%d) new_context_ptr=%x (pid=%d)",
r->context_ptr, cur_pid, r->new_context_ptr, new_pid); r->context_ptr, cur_pid, r->new_context_ptr, new_pid);
@@ -298,7 +298,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
entry.timestamp = gk20a_fecs_trace_record_ts_timestamp_v(r->ts[i]); entry.timestamp = gk20a_fecs_trace_record_ts_timestamp_v(r->ts[i]);
entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT; entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT;
gk20a_dbg(gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_ctxsw,
"tag=%x timestamp=%llx context_id=%08x new_context_id=%08x", "tag=%x timestamp=%llx context_id=%08x new_context_id=%08x",
entry.tag, entry.timestamp, r->context_id, entry.tag, entry.timestamp, r->context_id,
r->new_context_id); r->new_context_id);
@@ -327,7 +327,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
continue; continue;
} }
gk20a_dbg(gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld", nvgpu_log(g, gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld",
entry.tag, entry.context_id, entry.pid); entry.tag, entry.context_id, entry.pid);
if (!entry.context_id) if (!entry.context_id)
@@ -368,7 +368,7 @@ int gk20a_fecs_trace_poll(struct gk20a *g)
if (!cnt) if (!cnt)
goto done; goto done;
gk20a_dbg(gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_ctxsw,
"circular buffer: read=%d (mailbox=%d) write=%d cnt=%d", "circular buffer: read=%d (mailbox=%d) write=%d cnt=%d",
read, gk20a_fecs_trace_get_read_index(g), write, cnt); read, gk20a_fecs_trace_get_read_index(g), write, cnt);
@@ -633,7 +633,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
pid_t pid; pid_t pid;
u32 aperture; u32 aperture;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
"chid=%d context_ptr=%x inst_block=%llx", "chid=%d context_ptr=%x inst_block=%llx",
ch->chid, context_ptr, ch->chid, context_ptr,
nvgpu_inst_block_addr(g, &ch->inst_block)); nvgpu_inst_block_addr(g, &ch->inst_block));
@@ -662,7 +662,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
lo = u64_lo32(pa); lo = u64_lo32(pa);
hi = u64_hi32(pa); hi = u64_hi32(pa);
gk20a_dbg(gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi, nvgpu_log(g, gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi,
lo, GK20A_FECS_TRACE_NUM_RECORDS); lo, GK20A_FECS_TRACE_NUM_RECORDS);
nvgpu_mem_wr(g, mem, nvgpu_mem_wr(g, mem,
@@ -696,7 +696,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch)
u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch); u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch);
if (g->fecs_trace) { if (g->fecs_trace) {
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
"ch=%p context_ptr=%x", ch, context_ptr); "ch=%p context_ptr=%x", ch, context_ptr);
if (g->ops.fecs_trace.is_enabled(g)) { if (g->ops.fecs_trace.is_enabled(g)) {
@@ -711,7 +711,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch)
int gk20a_fecs_trace_reset(struct gk20a *g) int gk20a_fecs_trace_reset(struct gk20a *g)
{ {
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
if (!g->ops.fecs_trace.is_enabled(g)) if (!g->ops.fecs_trace.is_enabled(g))
return 0; return 0;


@@ -94,7 +94,7 @@ u32 gk20a_fifo_get_engine_ids(struct gk20a *g,
engine_id[instance_cnt] = active_engine_id; engine_id[instance_cnt] = active_engine_id;
++instance_cnt; ++instance_cnt;
} else { } else {
gk20a_dbg_info("warning engine_id table sz is small %d", nvgpu_log_info(g, "warning engine_id table sz is small %d",
engine_id_sz); engine_id_sz);
} }
} }
@@ -320,7 +320,7 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
{ {
int ret = ENGINE_INVAL_GK20A; int ret = ENGINE_INVAL_GK20A;
gk20a_dbg_info("engine type %d", engine_type); nvgpu_log_info(g, "engine type %d", engine_type);
if (engine_type == top_device_info_type_enum_graphics_v()) if (engine_type == top_device_info_type_enum_graphics_v())
ret = ENGINE_GR_GK20A; ret = ENGINE_GR_GK20A;
else if ((engine_type >= top_device_info_type_enum_copy0_v()) && else if ((engine_type >= top_device_info_type_enum_copy0_v()) &&
@@ -354,7 +354,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
u32 gr_runlist_id = ~0; u32 gr_runlist_id = ~0;
bool found_pbdma_for_runlist = false; bool found_pbdma_for_runlist = false;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
f->num_engines = 0; f->num_engines = 0;
@@ -367,7 +367,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
if (top_device_info_engine_v(table_entry)) { if (top_device_info_engine_v(table_entry)) {
engine_id = engine_id =
top_device_info_engine_enum_v(table_entry); top_device_info_engine_enum_v(table_entry);
gk20a_dbg_info("info: engine_id %d", nvgpu_log_info(g, "info: engine_id %d",
top_device_info_engine_enum_v(table_entry)); top_device_info_engine_enum_v(table_entry));
} }
@@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
if (top_device_info_runlist_v(table_entry)) { if (top_device_info_runlist_v(table_entry)) {
runlist_id = runlist_id =
top_device_info_runlist_enum_v(table_entry); top_device_info_runlist_enum_v(table_entry);
gk20a_dbg_info("gr info: runlist_id %d", runlist_id); nvgpu_log_info(g, "gr info: runlist_id %d", runlist_id);
runlist_bit = BIT(runlist_id); runlist_bit = BIT(runlist_id);
@@ -384,7 +384,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
pbdma_id++) { pbdma_id++) {
if (f->pbdma_map[pbdma_id] & if (f->pbdma_map[pbdma_id] &
runlist_bit) { runlist_bit) {
gk20a_dbg_info( nvgpu_log_info(g,
"gr info: pbdma_map[%d]=%d", "gr info: pbdma_map[%d]=%d",
pbdma_id, pbdma_id,
f->pbdma_map[pbdma_id]); f->pbdma_map[pbdma_id]);
@@ -402,13 +402,13 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
if (top_device_info_intr_v(table_entry)) { if (top_device_info_intr_v(table_entry)) {
intr_id = intr_id =
top_device_info_intr_enum_v(table_entry); top_device_info_intr_enum_v(table_entry);
gk20a_dbg_info("gr info: intr_id %d", intr_id); nvgpu_log_info(g, "gr info: intr_id %d", intr_id);
} }
if (top_device_info_reset_v(table_entry)) { if (top_device_info_reset_v(table_entry)) {
reset_id = reset_id =
top_device_info_reset_enum_v(table_entry); top_device_info_reset_enum_v(table_entry);
gk20a_dbg_info("gr info: reset_id %d", nvgpu_log_info(g, "gr info: reset_id %d",
reset_id); reset_id);
} }
} else if (entry == top_device_info_entry_engine_type_v()) { } else if (entry == top_device_info_entry_engine_type_v()) {
@@ -538,7 +538,7 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
struct gk20a *g = f->g; struct gk20a *g = f->g;
unsigned int i = 0; unsigned int i = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_channel_worker_deinit(g); nvgpu_channel_worker_deinit(g);
/* /*
@@ -616,7 +616,7 @@ static void fifo_pbdma_exception_status(struct gk20a *g,
get_exception_pbdma_info(g, eng_info); get_exception_pbdma_info(g, eng_info);
e = &eng_info->pbdma_exception_info; e = &eng_info->pbdma_exception_info;
gk20a_dbg_fn("pbdma_id %d, " nvgpu_log_fn(g, "pbdma_id %d, "
"id_type %s, id %d, chan_status %d, " "id_type %s, id %d, chan_status %d, "
"next_id_type %s, next_id %d, " "next_id_type %s, next_id %d, "
"chsw_in_progress %d", "chsw_in_progress %d",
@@ -657,7 +657,7 @@ static void fifo_engine_exception_status(struct gk20a *g,
get_exception_engine_info(g, eng_info); get_exception_engine_info(g, eng_info);
e = &eng_info->engine_exception_info; e = &eng_info->engine_exception_info;
gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, " nvgpu_log_fn(g, "engine_id %d, id_type %s, id %d, ctx_status %d, "
"faulted %d, idle %d, ctxsw_in_progress %d, ", "faulted %d, idle %d, ctxsw_in_progress %d, ",
eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid", eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid",
e->id, e->ctx_status_v, e->id, e->ctx_status_v,
@@ -745,7 +745,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
clean_up_runlist: clean_up_runlist:
gk20a_fifo_delete_runlist(f); gk20a_fifo_delete_runlist(f);
gk20a_dbg_fn("fail"); nvgpu_log_fn(g, "fail");
return -ENOMEM; return -ENOMEM;
} }
@@ -784,7 +784,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
unsigned int i; unsigned int i;
u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* enable pmc pfifo */ /* enable pmc pfifo */
g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());
@@ -805,7 +805,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
timeout = gk20a_readl(g, fifo_fb_timeout_r()); timeout = gk20a_readl(g, fifo_fb_timeout_r());
timeout = set_field(timeout, fifo_fb_timeout_period_m(), timeout = set_field(timeout, fifo_fb_timeout_period_m(),
fifo_fb_timeout_period_max_f()); fifo_fb_timeout_period_max_f());
gk20a_dbg_info("fifo_fb_timeout reg val = 0x%08x", timeout); nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout);
gk20a_writel(g, fifo_fb_timeout_r(), timeout); gk20a_writel(g, fifo_fb_timeout_r(), timeout);
/* write pbdma timeout value */ /* write pbdma timeout value */
@@ -813,7 +813,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
timeout = gk20a_readl(g, pbdma_timeout_r(i)); timeout = gk20a_readl(g, pbdma_timeout_r(i));
timeout = set_field(timeout, pbdma_timeout_period_m(), timeout = set_field(timeout, pbdma_timeout_period_m(),
pbdma_timeout_period_max_f()); pbdma_timeout_period_max_f());
gk20a_dbg_info("pbdma_timeout reg val = 0x%08x", timeout); nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x", timeout);
gk20a_writel(g, pbdma_timeout_r(i), timeout); gk20a_writel(g, pbdma_timeout_r(i), timeout);
} }
if (g->ops.fifo.apply_pb_timeout) if (g->ops.fifo.apply_pb_timeout)
@@ -837,10 +837,10 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f(); intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall); gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i,
~pbdma_intr_en_0_lbreq_enabled_f()); ~pbdma_intr_en_0_lbreq_enabled_f());
gk20a_writel(g, pbdma_intr_en_1_r(i), gk20a_writel(g, pbdma_intr_en_1_r(i),
~pbdma_intr_en_0_lbreq_enabled_f()); ~pbdma_intr_en_0_lbreq_enabled_f());
@@ -852,12 +852,12 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
/* clear and enable pfifo interrupt */ /* clear and enable pfifo interrupt */
gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
mask = gk20a_fifo_intr_0_en_mask(g); mask = gk20a_fifo_intr_0_en_mask(g);
gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
gk20a_writel(g, fifo_intr_en_0_r(), mask); gk20a_writel(g, fifo_intr_en_0_r(), mask);
gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -868,7 +868,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
unsigned int chid, i; unsigned int chid, i;
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
f->g = g; f->g = g;
@@ -945,7 +945,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
goto clean_up; goto clean_up;
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
clean_up: clean_up:
@@ -972,10 +972,10 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
u64 userd_base; u64 userd_base;
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (f->sw_ready) { if (f->sw_ready) {
gk20a_dbg_fn("skip init"); nvgpu_log_fn(g, "skip init");
return 0; return 0;
} }
@@ -997,7 +997,7 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
nvgpu_err(g, "userd memory allocation failed"); nvgpu_err(g, "userd memory allocation failed");
goto clean_up; goto clean_up;
} }
gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); nvgpu_log(g, gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
userd_base = nvgpu_mem_get_addr(g, &f->userd); userd_base = nvgpu_mem_get_addr(g, &f->userd);
for (chid = 0; chid < f->num_channels; chid++) { for (chid = 0; chid < f->num_channels; chid++) {
@@ -1013,11 +1013,11 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
f->sw_ready = true; f->sw_ready = true;
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
clean_up: clean_up:
gk20a_dbg_fn("fail"); nvgpu_log_fn(g, "fail");
if (nvgpu_mem_is_valid(&f->userd)) { if (nvgpu_mem_is_valid(&f->userd)) {
if (g->ops.mm.is_bar1_supported(g)) if (g->ops.mm.is_bar1_supported(g))
nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd); nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd);
@@ -1032,7 +1032,7 @@ void gk20a_fifo_handle_runlist_event(struct gk20a *g)
{ {
u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r()); u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
gk20a_dbg(gpu_dbg_intr, "runlist event %08x", nvgpu_log(g, gpu_dbg_intr, "runlist event %08x",
runlist_event); runlist_event);
gk20a_writel(g, fifo_intr_runlist_r(), runlist_event); gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
@@ -1042,7 +1042,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
{ {
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* test write, read through bar1 @ userd region before /* test write, read through bar1 @ userd region before
* turning on the snooping */ * turning on the snooping */
@@ -1053,7 +1053,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
u32 bar1_vaddr = f->userd.gpu_va; u32 bar1_vaddr = f->userd.gpu_va;
volatile u32 *cpu_vaddr = f->userd.cpu_va; volatile u32 *cpu_vaddr = f->userd.cpu_va;
gk20a_dbg_info("test bar1 @ vaddr 0x%x", nvgpu_log_info(g, "test bar1 @ vaddr 0x%x",
bar1_vaddr); bar1_vaddr);
v = gk20a_bar1_readl(g, bar1_vaddr); v = gk20a_bar1_readl(g, bar1_vaddr);
@@ -1093,7 +1093,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) | fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
fifo_bar1_base_valid_true_f()); fifo_bar1_base_valid_true_f());
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -1261,7 +1261,7 @@ void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
u32 fault_info; u32 fault_info;
u32 addr_lo, addr_hi; u32 addr_lo, addr_hi;
gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id);
memset(mmfault, 0, sizeof(*mmfault)); memset(mmfault, 0, sizeof(*mmfault));
@@ -1291,7 +1291,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
u32 engine_enum = ENGINE_INVAL_GK20A; u32 engine_enum = ENGINE_INVAL_GK20A;
struct fifo_engine_info_gk20a *engine_info; struct fifo_engine_info_gk20a *engine_info;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!g) if (!g)
return; return;
@@ -1489,7 +1489,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid]; struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid];
struct channel_gk20a *ch; struct channel_gk20a *ch;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
g->ops.fifo.disable_tsg(tsg); g->ops.fifo.disable_tsg(tsg);
@@ -1556,7 +1556,7 @@ static bool gk20a_fifo_handle_mmu_fault(
bool verbose = true; bool verbose = true;
u32 grfifo_ctl; u32 grfifo_ctl;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
g->fifo.deferred_reset_pending = false; g->fifo.deferred_reset_pending = false;
@@ -1693,7 +1693,7 @@ static bool gk20a_fifo_handle_mmu_fault(
/* handled during channel free */ /* handled during channel free */
g->fifo.deferred_reset_pending = true; g->fifo.deferred_reset_pending = true;
gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
"sm debugger attached," "sm debugger attached,"
" deferring channel recovery to channel free"); " deferring channel recovery to channel free");
} else { } else {
@@ -2196,6 +2196,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
struct channel_gk20a *ch; struct channel_gk20a *ch;
bool recover = false; bool recover = false;
bool progress = false; bool progress = false;
struct gk20a *g = tsg->g;
*verbose = false; *verbose = false;
*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
@@ -2221,7 +2222,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
* this resets timeout for channels that already completed their work * this resets timeout for channels that already completed their work
*/ */
if (progress) { if (progress) {
gk20a_dbg_info("progress on tsg=%d ch=%d", nvgpu_log_info(g, "progress on tsg=%d ch=%d",
tsg->tsgid, ch->chid); tsg->tsgid, ch->chid);
gk20a_channel_put(ch); gk20a_channel_put(ch);
*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
@@ -2239,7 +2240,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
* caused the problem, so set timeout error notifier for all channels. * caused the problem, so set timeout error notifier for all channels.
*/ */
if (recover) { if (recover) {
gk20a_dbg_info("timeout on tsg=%d ch=%d", nvgpu_log_info(g, "timeout on tsg=%d ch=%d",
tsg->tsgid, ch->chid); tsg->tsgid, ch->chid);
*ms = ch->timeout_accumulated_ms; *ms = ch->timeout_accumulated_ms;
gk20a_channel_put(ch); gk20a_channel_put(ch);
@@ -2311,7 +2312,7 @@ bool gk20a_fifo_handle_sched_error(struct gk20a *g)
is_tsg, true, verbose, is_tsg, true, verbose,
RC_TYPE_CTXSW_TIMEOUT); RC_TYPE_CTXSW_TIMEOUT);
} else { } else {
gk20a_dbg_info( nvgpu_log_info(g,
"fifo is waiting for ctx switch for %d ms, " "fifo is waiting for ctx switch for %d ms, "
"%s=%d", ms, is_tsg ? "tsg" : "ch", id); "%s=%d", ms, is_tsg ? "tsg" : "ch", id);
} }
@@ -2330,7 +2331,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
bool print_channel_reset_log = false; bool print_channel_reset_log = false;
u32 handled = 0; u32 handled = 0;
gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr); nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr);
if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
/* pio mode is unused. this shouldn't happen, ever. */ /* pio mode is unused. this shouldn't happen, ever. */
@@ -2381,7 +2382,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
engine_id++) { engine_id++) {
u32 active_engine_id = g->fifo.active_engines_list[engine_id]; u32 active_engine_id = g->fifo.active_engines_list[engine_id];
u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_enum, nvgpu_log_fn(g, "enum:%d -> engine_id:%d", engine_enum,
active_engine_id); active_engine_id);
fifo_pbdma_exception_status(g, fifo_pbdma_exception_status(g,
&g->fifo.engine_info[active_engine_id]); &g->fifo.engine_info[active_engine_id]);
@@ -2632,7 +2633,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
for (i = 0; i < host_num_pbdma; i++) { for (i = 0; i < host_num_pbdma; i++) {
if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) { if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) {
gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i); nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending", i);
clear_intr |= clear_intr |=
gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES); gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES);
} }
@@ -2653,7 +2654,7 @@ void gk20a_fifo_isr(struct gk20a *g)
* in a threaded interrupt context... */ * in a threaded interrupt context... */
nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex); nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex);
gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
/* handle runlist update */ /* handle runlist update */
if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) { if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
@@ -2681,7 +2682,7 @@ int gk20a_fifo_nonstall_isr(struct gk20a *g)
u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r()); u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
u32 clear_intr = 0; u32 clear_intr = 0;
gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) if (fifo_intr & fifo_intr_0_channel_intr_pending_f())
clear_intr = fifo_intr_0_channel_intr_pending_f(); clear_intr = fifo_intr_0_channel_intr_pending_f();
@@ -2769,7 +2770,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
int ret; int ret;
unsigned int id_type; unsigned int id_type;
gk20a_dbg_fn("%d", id); nvgpu_log_fn(g, "%d", id);
/* issue preempt */ /* issue preempt */
gk20a_fifo_issue_preempt(g, id, is_tsg); gk20a_fifo_issue_preempt(g, id, is_tsg);
@@ -2794,7 +2795,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
u32 mutex_ret = 0; u32 mutex_ret = 0;
u32 i; u32 i;
gk20a_dbg_fn("%d", chid); nvgpu_log_fn(g, "%d", chid);
/* we have no idea which runlist we are using. lock all */ /* we have no idea which runlist we are using. lock all */
for (i = 0; i < g->fifo.max_runlists; i++) for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2821,7 +2822,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
u32 mutex_ret = 0; u32 mutex_ret = 0;
u32 i; u32 i;
gk20a_dbg_fn("%d", tsgid); nvgpu_log_fn(g, "%d", tsgid);
/* we have no idea which runlist we are using. lock all */ /* we have no idea which runlist we are using. lock all */
for (i = 0; i < g->fifo.max_runlists; i++) for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2938,7 +2939,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
u32 mutex_ret; u32 mutex_ret;
u32 err = 0; u32 err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gr_stat = gr_stat =
gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
@@ -2988,12 +2989,12 @@ clean_up:
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
if (err) { if (err) {
gk20a_dbg_fn("failed"); nvgpu_log_fn(g, "failed");
if (gk20a_fifo_enable_engine_activity(g, eng_info)) if (gk20a_fifo_enable_engine_activity(g, eng_info))
nvgpu_err(g, nvgpu_err(g,
"failed to enable gr engine activity"); "failed to enable gr engine activity");
} else { } else {
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
return err; return err;
} }
@@ -3129,8 +3130,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
bool skip_next = false; bool skip_next = false;
u32 tsgid, count = 0; u32 tsgid, count = 0;
u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32); u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32);
struct gk20a *g = f->g;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* for each TSG, T, on this level, insert all higher-level channels /* for each TSG, T, on this level, insert all higher-level channels
and TSGs before inserting T. */ and TSGs before inserting T. */
@@ -3156,9 +3158,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
return NULL; return NULL;
/* add TSG entry */ /* add TSG entry */
gk20a_dbg_info("add TSG %d to runlist", tsg->tsgid); nvgpu_log_info(g, "add TSG %d to runlist", tsg->tsgid);
f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry); f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry);
gk20a_dbg_info("tsg runlist count %d runlist [0] %x [1] %x\n", nvgpu_log_info(g, "tsg runlist count %d runlist [0] %x [1] %x\n",
count, runlist_entry[0], runlist_entry[1]); count, runlist_entry[0], runlist_entry[1]);
runlist_entry += runlist_entry_words; runlist_entry += runlist_entry_words;
count++; count++;
@@ -3177,10 +3179,10 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
return NULL; return NULL;
} }
gk20a_dbg_info("add channel %d to runlist", nvgpu_log_info(g, "add channel %d to runlist",
ch->chid); ch->chid);
f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry); f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry);
gk20a_dbg_info( nvgpu_log_info(g,
"run list count %d runlist [0] %x [1] %x\n", "run list count %d runlist [0] %x [1] %x\n",
count, runlist_entry[0], runlist_entry[1]); count, runlist_entry[0], runlist_entry[1]);
count++; count++;
@@ -3222,7 +3224,7 @@ int gk20a_fifo_set_runlist_interleave(struct gk20a *g,
u32 runlist_id, u32 runlist_id,
u32 new_level) u32 new_level)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
g->fifo.tsg[id].interleave_level = new_level; g->fifo.tsg[id].interleave_level = new_level;
@@ -3313,7 +3315,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]); runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]);
gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx", nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx",
runlist_id, (u64)runlist_iova); runlist_id, (u64)runlist_iova);
if (!runlist_iova) { if (!runlist_iova) {
@@ -3445,7 +3447,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
u32 mutex_ret; u32 mutex_ret;
u32 ret = 0; u32 ret = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
runlist = &f->runlist_info[runlist_id]; runlist = &f->runlist_info[runlist_id];
@@ -3465,7 +3467,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
int gk20a_fifo_suspend(struct gk20a *g) int gk20a_fifo_suspend(struct gk20a *g)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* stop bar1 snooping */ /* stop bar1 snooping */
if (g->ops.mm.is_bar1_supported(g)) if (g->ops.mm.is_bar1_supported(g))
@@ -3476,7 +3478,7 @@ int gk20a_fifo_suspend(struct gk20a *g)
gk20a_writel(g, fifo_intr_en_0_r(), 0); gk20a_writel(g, fifo_intr_en_0_r(), 0);
gk20a_writel(g, fifo_intr_en_1_r(), 0); gk20a_writel(g, fifo_intr_en_1_r(), 0);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -3511,7 +3513,7 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g)
int ret = -ETIMEDOUT; int ret = -ETIMEDOUT;
u32 i, host_num_engines; u32 i, host_num_engines;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
host_num_engines = host_num_engines =
nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
@@ -3533,12 +3535,12 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g)
} while (!nvgpu_timeout_expired(&timeout)); } while (!nvgpu_timeout_expired(&timeout));
if (ret) { if (ret) {
gk20a_dbg_info("cannot idle engine %u", i); nvgpu_log_info(g, "cannot idle engine %u", i);
break; break;
} }
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return ret; return ret;
} }
@@ -3839,7 +3841,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a)
{ {
struct gk20a *g = ch_gk20a->g; struct gk20a *g = ch_gk20a->g;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) { if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid), gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid),
@@ -3854,12 +3856,12 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c)
u32 addr_hi; u32 addr_hi;
struct gk20a *g = c->g; struct gk20a *g = c->g;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
addr_hi = u64_hi32(c->userd_iova); addr_hi = u64_hi32(c->userd_iova);
gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx",
c->chid, (u64)c->userd_iova); c->chid, (u64)c->userd_iova);
nvgpu_mem_wr32(g, &c->inst_block, nvgpu_mem_wr32(g, &c->inst_block,
@@ -3885,7 +3887,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
struct gk20a *g = c->g; struct gk20a *g = c->g;
struct nvgpu_mem *mem = &c->inst_block; struct nvgpu_mem *mem = &c->inst_block;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
@@ -3946,7 +3948,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
struct gk20a *g = c->g; struct gk20a *g = c->g;
struct nvgpu_mem *mem = &c->inst_block; struct nvgpu_mem *mem = &c->inst_block;
gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid); nvgpu_log_info(g, "channel %d : set ramfc privileged_channel", c->chid);
/* Enable HCE priv mode for phys mode transfer */ /* Enable HCE priv mode for phys mode transfer */
nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(), nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
@@ -3959,7 +3961,7 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c)
struct nvgpu_mem *mem; struct nvgpu_mem *mem;
u32 offset; u32 offset;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (nvgpu_mem_is_valid(&c->usermode_userd)) { if (nvgpu_mem_is_valid(&c->usermode_userd)) {
mem = &c->usermode_userd; mem = &c->usermode_userd;
@@ -3987,16 +3989,16 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
{ {
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = g->ops.mm.alloc_inst_block(g, &ch->inst_block); err = g->ops.mm.alloc_inst_block(g, &ch->inst_block);
if (err) if (err)
return err; return err;
gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx", nvgpu_log_info(g, "channel %d inst block physical addr: 0x%16llx",
ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block)); ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block));
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -4086,7 +4088,7 @@ void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g,
struct priv_cmd_entry *cmd, u32 off, struct priv_cmd_entry *cmd, u32 off,
u32 id, u32 thresh, u64 gpu_va) u32 id, u32 thresh, u64 gpu_va)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
off = cmd->off + off; off = cmd->off + off;
/* syncpoint_a */ /* syncpoint_a */
@@ -4115,7 +4117,7 @@ void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g,
{ {
u32 off = cmd->off; u32 off = cmd->off;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (wfi_cmd) { if (wfi_cmd) {
/* wfi */ /* wfi */
nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E); nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E);


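The fifo hunks repeat the same pattern one level further out: helpers that only receive a struct fifo_gk20a * or struct tsg_gk20a * (for example the runlist construction and ctxsw-timeout checks above) now pull the GPU instance out of those structures before logging. A minimal sketch of that shape, assuming both structures keep a back-pointer named g as they do in the diff; the stub macros are again simplified stand-ins.

#include <stdio.h>

struct gk20a { const char *name; };

/* Assumed back-pointers, mirroring the f->g and tsg->g uses above. */
struct fifo_gk20a { struct gk20a *g; };
struct tsg_gk20a  { struct gk20a *g; unsigned int tsgid; };

#define nvgpu_log_fn(g, fmt, ...) \
	printf("%s: fn: %s: " fmt "\n", (g)->name, __func__, ##__VA_ARGS__)
#define nvgpu_log_info(g, fmt, ...) \
	printf("%s: info: " fmt "\n", (g)->name, ##__VA_ARGS__)

static void example_runlist_add_tsg(struct fifo_gk20a *f,
				    struct tsg_gk20a *tsg)
{
	struct gk20a *g = f->g;	/* derive g from the fifo, as in the hunks above */

	nvgpu_log_fn(g, " ");
	nvgpu_log_info(g, "add TSG %u to runlist", tsg->tsgid);
}

int main(void)
{
	struct gk20a g = { .name = "gk20a" };
	struct fifo_gk20a f = { .g = &g };
	struct tsg_gk20a tsg = { .g = &g, .tsgid = 1 };

	example_runlist_add_tsg(&f, &tsg);
	return 0;
}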
@@ -77,7 +77,7 @@ int gk20a_detect_chip(struct gk20a *g)
gk20a_mc_boot_0(g, &p->gpu_arch, &p->gpu_impl, &p->gpu_rev); gk20a_mc_boot_0(g, &p->gpu_arch, &p->gpu_impl, &p->gpu_rev);
gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n", nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n",
g->params.gpu_arch, g->params.gpu_arch,
g->params.gpu_impl, g->params.gpu_impl,
g->params.gpu_rev); g->params.gpu_rev);
@@ -89,7 +89,7 @@ int gk20a_prepare_poweroff(struct gk20a *g)
{ {
int ret = 0; int ret = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (g->ops.fifo.channel_suspend) { if (g->ops.fifo.channel_suspend) {
ret = g->ops.fifo.channel_suspend(g); ret = g->ops.fifo.channel_suspend(g);
@@ -126,7 +126,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
u32 nr_pages; u32 nr_pages;
#endif #endif
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (g->power_on) if (g->power_on)
return 0; return 0;
@@ -434,7 +434,7 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount)
struct gk20a *g = container_of(refcount, struct gk20a *g = container_of(refcount,
struct gk20a, refcount); struct gk20a, refcount);
gk20a_dbg(gpu_dbg_shutdown, "Freeing GK20A struct!"); nvgpu_log(g, gpu_dbg_shutdown, "Freeing GK20A struct!");
gk20a_ce_destroy(g); gk20a_ce_destroy(g);
@@ -465,7 +465,7 @@ struct gk20a * __must_check gk20a_get(struct gk20a *g)
*/ */
success = nvgpu_ref_get_unless_zero(&g->refcount); success = nvgpu_ref_get_unless_zero(&g->refcount);
gk20a_dbg(gpu_dbg_shutdown, "GET: refs currently %d %s", nvgpu_log(g, gpu_dbg_shutdown, "GET: refs currently %d %s",
nvgpu_atomic_read(&g->refcount.refcount), nvgpu_atomic_read(&g->refcount.refcount),
success ? "" : "(FAILED)"); success ? "" : "(FAILED)");
@@ -490,7 +490,7 @@ void gk20a_put(struct gk20a *g)
* ... PUT: refs currently 2 * ... PUT: refs currently 2
* ... Freeing GK20A struct! * ... Freeing GK20A struct!
*/ */
gk20a_dbg(gpu_dbg_shutdown, "PUT: refs currently %d", nvgpu_log(g, gpu_dbg_shutdown, "PUT: refs currently %d",
nvgpu_atomic_read(&g->refcount.refcount)); nvgpu_atomic_read(&g->refcount.refcount));
nvgpu_ref_put(&g->refcount, gk20a_free_cb); nvgpu_ref_put(&g->refcount, gk20a_free_cb);


@@ -1,9 +1,7 @@
/* /*
* drivers/video/tegra/host/gk20a/gr_ctx_gk20a.c
*
* GK20A Graphics Context * GK20A Graphics Context
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -79,7 +77,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
u32 i, major_v = ~0, major_v_hw, netlist_num; u32 i, major_v = ~0, major_v_hw, netlist_num;
int net, max, err = -ENOENT; int net, max, err = -ENOENT;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (g->ops.gr_ctx.is_fw_defined()) { if (g->ops.gr_ctx.is_fw_defined()) {
net = NETLIST_FINAL; net = NETLIST_FINAL;
@@ -114,63 +112,63 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
switch (netlist->regions[i].region_id) { switch (netlist->regions[i].region_id) {
case NETLIST_REGIONID_FECS_UCODE_DATA: case NETLIST_REGIONID_FECS_UCODE_DATA:
gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA"); nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_DATA");
err = gr_gk20a_alloc_load_netlist_u32(g, err = gr_gk20a_alloc_load_netlist_u32(g,
src, size, &g->gr.ctx_vars.ucode.fecs.data); src, size, &g->gr.ctx_vars.ucode.fecs.data);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_FECS_UCODE_INST: case NETLIST_REGIONID_FECS_UCODE_INST:
gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST"); nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_INST");
err = gr_gk20a_alloc_load_netlist_u32(g, err = gr_gk20a_alloc_load_netlist_u32(g,
src, size, &g->gr.ctx_vars.ucode.fecs.inst); src, size, &g->gr.ctx_vars.ucode.fecs.inst);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_GPCCS_UCODE_DATA: case NETLIST_REGIONID_GPCCS_UCODE_DATA:
gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA"); nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_DATA");
err = gr_gk20a_alloc_load_netlist_u32(g, err = gr_gk20a_alloc_load_netlist_u32(g,
src, size, &g->gr.ctx_vars.ucode.gpccs.data); src, size, &g->gr.ctx_vars.ucode.gpccs.data);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_GPCCS_UCODE_INST: case NETLIST_REGIONID_GPCCS_UCODE_INST:
gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST"); nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_INST");
err = gr_gk20a_alloc_load_netlist_u32(g, err = gr_gk20a_alloc_load_netlist_u32(g,
src, size, &g->gr.ctx_vars.ucode.gpccs.inst); src, size, &g->gr.ctx_vars.ucode.gpccs.inst);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_SW_BUNDLE_INIT: case NETLIST_REGIONID_SW_BUNDLE_INIT:
gk20a_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT"); nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE_INIT");
err = gr_gk20a_alloc_load_netlist_av(g, err = gr_gk20a_alloc_load_netlist_av(g,
src, size, &g->gr.ctx_vars.sw_bundle_init); src, size, &g->gr.ctx_vars.sw_bundle_init);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_SW_METHOD_INIT: case NETLIST_REGIONID_SW_METHOD_INIT:
gk20a_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT"); nvgpu_log_info(g, "NETLIST_REGIONID_SW_METHOD_INIT");
err = gr_gk20a_alloc_load_netlist_av(g, err = gr_gk20a_alloc_load_netlist_av(g,
src, size, &g->gr.ctx_vars.sw_method_init); src, size, &g->gr.ctx_vars.sw_method_init);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_SW_CTX_LOAD: case NETLIST_REGIONID_SW_CTX_LOAD:
gk20a_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD"); nvgpu_log_info(g, "NETLIST_REGIONID_SW_CTX_LOAD");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.sw_ctx_load); src, size, &g->gr.ctx_vars.sw_ctx_load);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_SW_NON_CTX_LOAD: case NETLIST_REGIONID_SW_NON_CTX_LOAD:
gk20a_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD"); nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOAD");
err = gr_gk20a_alloc_load_netlist_av(g, err = gr_gk20a_alloc_load_netlist_av(g,
src, size, &g->gr.ctx_vars.sw_non_ctx_load); src, size, &g->gr.ctx_vars.sw_non_ctx_load);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_SWVEIDBUNDLEINIT: case NETLIST_REGIONID_SWVEIDBUNDLEINIT:
gk20a_dbg_info( nvgpu_log_info(g,
"NETLIST_REGIONID_SW_VEID_BUNDLE_INIT"); "NETLIST_REGIONID_SW_VEID_BUNDLE_INIT");
err = gr_gk20a_alloc_load_netlist_av(g, err = gr_gk20a_alloc_load_netlist_av(g,
src, size, src, size,
@@ -179,56 +177,56 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_SYS: case NETLIST_REGIONID_CTXREG_SYS:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_SYS"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.sys); src, size, &g->gr.ctx_vars.ctxsw_regs.sys);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_GPC: case NETLIST_REGIONID_CTXREG_GPC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_GPC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.gpc); src, size, &g->gr.ctx_vars.ctxsw_regs.gpc);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_TPC: case NETLIST_REGIONID_CTXREG_TPC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_TPC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.tpc); src, size, &g->gr.ctx_vars.ctxsw_regs.tpc);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_ZCULL_GPC: case NETLIST_REGIONID_CTXREG_ZCULL_GPC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ZCULL_GPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc); src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_PPC: case NETLIST_REGIONID_CTXREG_PPC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PPC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.ppc); src, size, &g->gr.ctx_vars.ctxsw_regs.ppc);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_PM_SYS: case NETLIST_REGIONID_CTXREG_PM_SYS:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_SYS");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys); src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_PM_GPC: case NETLIST_REGIONID_CTXREG_PM_GPC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_GPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc); src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_PM_TPC: case NETLIST_REGIONID_CTXREG_PM_TPC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_TPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc); src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
if (err) if (err)
@@ -236,110 +234,110 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
break; break;
case NETLIST_REGIONID_BUFFER_SIZE: case NETLIST_REGIONID_BUFFER_SIZE:
g->gr.ctx_vars.buffer_size = *src; g->gr.ctx_vars.buffer_size = *src;
gk20a_dbg_info("NETLIST_REGIONID_BUFFER_SIZE : %d", nvgpu_log_info(g, "NETLIST_REGIONID_BUFFER_SIZE : %d",
g->gr.ctx_vars.buffer_size); g->gr.ctx_vars.buffer_size);
break; break;
case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX: case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX:
g->gr.ctx_vars.regs_base_index = *src; g->gr.ctx_vars.regs_base_index = *src;
gk20a_dbg_info("NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u", nvgpu_log_info(g, "NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u",
g->gr.ctx_vars.regs_base_index); g->gr.ctx_vars.regs_base_index);
break; break;
case NETLIST_REGIONID_MAJORV: case NETLIST_REGIONID_MAJORV:
major_v = *src; major_v = *src;
gk20a_dbg_info("NETLIST_REGIONID_MAJORV : %d", nvgpu_log_info(g, "NETLIST_REGIONID_MAJORV : %d",
major_v); major_v);
break; break;
case NETLIST_REGIONID_NETLIST_NUM: case NETLIST_REGIONID_NETLIST_NUM:
netlist_num = *src; netlist_num = *src;
gk20a_dbg_info("NETLIST_REGIONID_NETLIST_NUM : %d", nvgpu_log_info(g, "NETLIST_REGIONID_NETLIST_NUM : %d",
netlist_num); netlist_num);
break; break;
case NETLIST_REGIONID_CTXREG_PMPPC: case NETLIST_REGIONID_CTXREG_PMPPC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMPPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc); src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_NVPERF_CTXREG_SYS: case NETLIST_REGIONID_NVPERF_CTXREG_SYS:
gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_SYS"); nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_SYS");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys); src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_NVPERF_FBP_CTXREGS: case NETLIST_REGIONID_NVPERF_FBP_CTXREGS:
gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_CTXREGS");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.fbp); src, size, &g->gr.ctx_vars.ctxsw_regs.fbp);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_NVPERF_CTXREG_GPC: case NETLIST_REGIONID_NVPERF_CTXREG_GPC:
gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_GPC"); nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_GPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc); src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_NVPERF_FBP_ROUTER: case NETLIST_REGIONID_NVPERF_FBP_ROUTER:
gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_ROUTER"); nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_ROUTER");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router); src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_NVPERF_GPC_ROUTER: case NETLIST_REGIONID_NVPERF_GPC_ROUTER:
gk20a_dbg_info("NETLIST_REGIONID_NVPERF_GPC_ROUTER"); nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_GPC_ROUTER");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router); src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_PMLTC: case NETLIST_REGIONID_CTXREG_PMLTC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMLTC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMLTC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc); src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_PMFBPA: case NETLIST_REGIONID_CTXREG_PMFBPA:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMFBPA"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMFBPA");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa); src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_NVPERF_SYS_ROUTER: case NETLIST_REGIONID_NVPERF_SYS_ROUTER:
gk20a_dbg_info("NETLIST_REGIONID_NVPERF_SYS_ROUTER"); nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_SYS_ROUTER");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router); src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_NVPERF_PMA: case NETLIST_REGIONID_NVPERF_PMA:
gk20a_dbg_info("NETLIST_REGIONID_NVPERF_PMA"); nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMA");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma); src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_PMROP: case NETLIST_REGIONID_CTXREG_PMROP:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMROP"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMROP");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop); src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_PMUCGPC: case NETLIST_REGIONID_CTXREG_PMUCGPC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMUCGPC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMUCGPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc); src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc);
if (err) if (err)
goto clean_up; goto clean_up;
break; break;
case NETLIST_REGIONID_CTXREG_ETPC: case NETLIST_REGIONID_CTXREG_ETPC:
gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ETPC"); nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC");
err = gr_gk20a_alloc_load_netlist_aiv(g, err = gr_gk20a_alloc_load_netlist_aiv(g,
src, size, &g->gr.ctx_vars.ctxsw_regs.etpc); src, size, &g->gr.ctx_vars.ctxsw_regs.etpc);
if (err) if (err)
@@ -347,13 +345,13 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
break; break;
default: default:
gk20a_dbg_info("unrecognized region %d skipped", i); nvgpu_log_info(g, "unrecognized region %d skipped", i);
break; break;
} }
} }
if (net != NETLIST_FINAL && major_v != major_v_hw) { if (net != NETLIST_FINAL && major_v != major_v_hw) {
gk20a_dbg_info("skip %s: major_v 0x%08x doesn't match hw 0x%08x", nvgpu_log_info(g, "skip %s: major_v 0x%08x doesn't match hw 0x%08x",
name, major_v, major_v_hw); name, major_v, major_v_hw);
goto clean_up; goto clean_up;
} }
@@ -362,7 +360,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
g->gr.netlist = net; g->gr.netlist = net;
nvgpu_release_firmware(g, netlist_fw); nvgpu_release_firmware(g, netlist_fw);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
goto done; goto done;
clean_up: clean_up:
@@ -403,7 +401,7 @@ clean_up:
done: done:
if (g->gr.ctx_vars.valid) { if (g->gr.ctx_vars.valid) {
gk20a_dbg_info("netlist image %s loaded", name); nvgpu_log_info(g, "netlist image %s loaded", name);
return 0; return 0;
} else { } else {
nvgpu_err(g, "failed to load netlist image!!"); nvgpu_err(g, "failed to load netlist image!!");

View File

@@ -1,9 +1,7 @@
/* /*
* drivers/video/tegra/host/gk20a/gr_ctx_sim_gk20a.c
*
* GK20A Graphics Context for Simulation * GK20A Graphics Context for Simulation
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -35,7 +33,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
int err = 0; int err = 0;
u32 i, temp; u32 i, temp;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info,
"querying grctx info from chiplib"); "querying grctx info from chiplib");
g->gr.ctx_vars.dynamic = true; g->gr.ctx_vars.dynamic = true;
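
Note: nvgpu_log() takes the struct gk20a pointer, a mask of gpu_dbg_* flags, and a printf-style format, so categories can be OR'ed together as above. A minimal sketch of the three forms used throughout this change, assuming the nvgpu driver headers are in scope; query_example() is a hypothetical caller, not a function from the patch:

    /* Sketch only: how the logging forms in this change relate.
     * query_example() is illustrative and not part of the driver. */
    static void query_example(struct gk20a *g)
    {
        /* explicit flag mask, several categories OR'ed together */
        nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, "querying grctx info");

        /* info-level form used for most converted messages */
        nvgpu_log_info(g, "netlist region parsed");

        /* function-trace form; the patch passes " " when there is
         * no extra message to print */
        nvgpu_log_fn(g, " ");
    }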
@@ -250,7 +248,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
i, &l[i].value); i, &l[i].value);
} }
gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC"); nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC");
for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) { for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) {
struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.etpc.l; struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.etpc.l;
g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:ADDR", g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:ADDR",
@@ -259,7 +257,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
i, &l[i].index); i, &l[i].index);
g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:VALUE", g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:VALUE",
i, &l[i].value); i, &l[i].value);
gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn,
"addr:0x%#08x index:0x%08x value:0x%08x", "addr:0x%#08x index:0x%08x value:0x%08x",
l[i].addr, l[i].index, l[i].value); l[i].addr, l[i].index, l[i].value);
} }
@@ -269,7 +267,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
g->sim->esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0, g->sim->esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0,
&g->gr.ctx_vars.regs_base_index); &g->gr.ctx_vars.regs_base_index);
gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib");
return 0; return 0;
fail: fail:
nvgpu_err(g, "failed querying grctx info from chiplib"); nvgpu_err(g, "failed querying grctx info from chiplib");

View File

File diff suppressed because it is too large

View File

@@ -41,7 +41,7 @@ int gpu_init_hal(struct gk20a *g)
switch (ver) { switch (ver) {
case GK20A_GPUID_GM20B: case GK20A_GPUID_GM20B:
case GK20A_GPUID_GM20B_B: case GK20A_GPUID_GM20B_B:
gk20a_dbg_info("gm20b detected"); nvgpu_log_info(g, "gm20b detected");
if (gm20b_init_hal(g)) if (gm20b_init_hal(g))
return -ENODEV; return -ENODEV;
break; break;

View File

@@ -1,7 +1,7 @@
/* /*
* GK20A Master Control * GK20A Master Control
* *
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -40,7 +40,7 @@ void mc_gk20a_isr_stall(struct gk20a *g)
mc_intr_0 = g->ops.mc.intr_stall(g); mc_intr_0 = g->ops.mc.intr_stall(g);
gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); nvgpu_log(g, gpu_dbg_intr, "stall intr %08x\n", mc_intr_0);
for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
active_engine_id = g->fifo.active_engines_list[engine_id_idx]; active_engine_id = g->fifo.active_engines_list[engine_id_idx];
@@ -200,7 +200,7 @@ void gk20a_mc_disable(struct gk20a *g, u32 units)
{ {
u32 pmc; u32 pmc;
gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units); nvgpu_log(g, gpu_dbg_info, "pmc disable: %08x\n", units);
nvgpu_spinlock_acquire(&g->mc_enable_lock); nvgpu_spinlock_acquire(&g->mc_enable_lock);
pmc = gk20a_readl(g, mc_enable_r()); pmc = gk20a_readl(g, mc_enable_r());
@@ -213,7 +213,7 @@ void gk20a_mc_enable(struct gk20a *g, u32 units)
{ {
u32 pmc; u32 pmc;
gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units); nvgpu_log(g, gpu_dbg_info, "pmc enable: %08x\n", units);
nvgpu_spinlock_acquire(&g->mc_enable_lock); nvgpu_spinlock_acquire(&g->mc_enable_lock);
pmc = gk20a_readl(g, mc_enable_r()); pmc = gk20a_readl(g, mc_enable_r());

View File

@@ -91,7 +91,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g)
struct mm_gk20a *mm = &g->mm; struct mm_gk20a *mm = &g->mm;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
g->ops.fb.set_mmu_page_size(g); g->ops.fb.set_mmu_page_size(g);
if (g->ops.fb.set_use_full_comp_tag_line) if (g->ops.fb.set_use_full_comp_tag_line)
@@ -112,7 +112,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g)
if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g))
return -EBUSY; return -EBUSY;
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -336,7 +336,7 @@ int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
{ {
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(ch->g, " ");
nvgpu_vm_get(vm); nvgpu_vm_get(vm);
ch->vm = vm; ch->vm = vm;
@@ -357,7 +357,7 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
u32 pdb_addr_hi = u64_hi32(pdb_addr); u32 pdb_addr_hi = u64_hi32(pdb_addr);
gk20a_dbg_info("pde pa=0x%llx", pdb_addr); nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr);
nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
nvgpu_aperture_mask(g, vm->pdb.mem, nvgpu_aperture_mask(g, vm->pdb.mem,
@@ -376,7 +376,7 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
{ {
struct gk20a *g = gk20a_from_vm(vm); struct gk20a *g = gk20a_from_vm(vm);
gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p",
nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va);
g->ops.mm.init_pdb(g, inst_block, vm); g->ops.mm.init_pdb(g, inst_block, vm);
@@ -395,7 +395,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
{ {
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
if (err) { if (err) {
@@ -403,7 +403,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
return err; return err;
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -415,7 +415,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
u32 retries; u32 retries;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gk20a_busy_noresume(g); gk20a_busy_noresume(g);
if (!g->power_on) { if (!g->power_on) {
@@ -448,7 +448,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
flush_fb_flush_outstanding_true_v() || flush_fb_flush_outstanding_true_v() ||
flush_fb_flush_pending_v(data) == flush_fb_flush_pending_v(data) ==
flush_fb_flush_pending_busy_v()) { flush_fb_flush_pending_busy_v()) {
gk20a_dbg_info("fb_flush 0x%x", data); nvgpu_log_info(g, "fb_flush 0x%x", data);
nvgpu_udelay(5); nvgpu_udelay(5);
} else } else
break; break;
@@ -494,7 +494,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
flush_l2_system_invalidate_outstanding_true_v() || flush_l2_system_invalidate_outstanding_true_v() ||
flush_l2_system_invalidate_pending_v(data) == flush_l2_system_invalidate_pending_v(data) ==
flush_l2_system_invalidate_pending_busy_v()) { flush_l2_system_invalidate_pending_busy_v()) {
gk20a_dbg_info("l2_system_invalidate 0x%x", nvgpu_log_info(g, "l2_system_invalidate 0x%x",
data); data);
nvgpu_udelay(5); nvgpu_udelay(5);
} else } else
@@ -526,7 +526,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
u32 retries = 2000; u32 retries = 2000;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gk20a_busy_noresume(g); gk20a_busy_noresume(g);
if (!g->power_on) if (!g->power_on)
@@ -553,7 +553,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
flush_l2_flush_dirty_outstanding_true_v() || flush_l2_flush_dirty_outstanding_true_v() ||
flush_l2_flush_dirty_pending_v(data) == flush_l2_flush_dirty_pending_v(data) ==
flush_l2_flush_dirty_pending_busy_v()) { flush_l2_flush_dirty_pending_busy_v()) {
gk20a_dbg_info("l2_flush_dirty 0x%x", data); nvgpu_log_info(g, "l2_flush_dirty 0x%x", data);
nvgpu_udelay(5); nvgpu_udelay(5);
} else } else
break; break;
@@ -578,7 +578,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
u32 retries = 200; u32 retries = 200;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gk20a_busy_noresume(g); gk20a_busy_noresume(g);
if (!g->power_on) if (!g->power_on)
@@ -602,7 +602,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
flush_l2_clean_comptags_outstanding_true_v() || flush_l2_clean_comptags_outstanding_true_v() ||
flush_l2_clean_comptags_pending_v(data) == flush_l2_clean_comptags_pending_v(data) ==
flush_l2_clean_comptags_pending_busy_v()) { flush_l2_clean_comptags_pending_busy_v()) {
gk20a_dbg_info("l2_clean_comptags 0x%x", data); nvgpu_log_info(g, "l2_clean_comptags 0x%x", data);
nvgpu_udelay(5); nvgpu_udelay(5);
} else } else
break; break;

View File

@@ -39,8 +39,8 @@
#include <nvgpu/hw/gk20a/hw_pwr_gk20a.h> #include <nvgpu/hw/gk20a/hw_pwr_gk20a.h>
#include <nvgpu/hw/gk20a/hw_top_gk20a.h> #include <nvgpu/hw/gk20a/hw_top_gk20a.h>
#define gk20a_dbg_pmu(fmt, arg...) \ #define gk20a_dbg_pmu(g, fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
{ {
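
Note: the redefined gk20a_dbg_pmu() above now expands to nvgpu_log(g, gpu_dbg_pmu, ...), so every call site must pass the struct gk20a pointer first. A minimal before/after sketch; pmu_log_example() and its arguments are hypothetical and only illustrate the calling convention:

    /* Sketch of a converted call site. 'g' must be a valid
     * struct gk20a *; pmu_log_example() is not part of the patch. */
    static void pmu_log_example(struct gk20a *g, u32 token)
    {
        /* old form: gk20a_dbg_pmu("mutex token 0x%x", token); */
        gk20a_dbg_pmu(g, "mutex token 0x%x", token);
    }

Messages remain gated by the gpu_dbg_pmu flag; only the argument list changes.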
@@ -139,7 +139,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
u32 intr_mask; u32 intr_mask;
u32 intr_dest; u32 intr_dest;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true, g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true,
mc_intr_mask_0_pmu_enabled_f()); mc_intr_mask_0_pmu_enabled_f());
@@ -166,7 +166,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
mc_intr_mask_0_pmu_enabled_f()); mc_intr_mask_0_pmu_enabled_f());
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
@@ -179,7 +179,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
u64 addr_code, addr_data, addr_load; u64 addr_code, addr_data, addr_load;
u32 i, blocks, addr_args; u32 i, blocks, addr_args;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_writel(g, pwr_falcon_itfen_r(),
gk20a_readl(g, pwr_falcon_itfen_r()) | gk20a_readl(g, pwr_falcon_itfen_r()) |
@@ -286,7 +286,7 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) { if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) {
BUG_ON(mutex->ref_cnt == 0); BUG_ON(mutex->ref_cnt == 0);
gk20a_dbg_pmu("already acquired by owner : 0x%08x", *token); gk20a_dbg_pmu(g, "already acquired by owner : 0x%08x", *token);
mutex->ref_cnt++; mutex->ref_cnt++;
return 0; return 0;
} }
@@ -313,12 +313,12 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
if (owner == data) { if (owner == data) {
mutex->ref_cnt = 1; mutex->ref_cnt = 1;
gk20a_dbg_pmu("mutex acquired: id=%d, token=0x%x", gk20a_dbg_pmu(g, "mutex acquired: id=%d, token=0x%x",
mutex->index, *token); mutex->index, *token);
*token = owner; *token = owner;
return 0; return 0;
} else { } else {
gk20a_dbg_info("fail to acquire mutex idx=0x%08x", nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x",
mutex->index); mutex->index);
data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
@@ -370,7 +370,7 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
pwr_pmu_mutex_id_release_value_f(owner)); pwr_pmu_mutex_id_release_value_f(owner));
gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data); gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
gk20a_dbg_pmu("mutex released: id=%d, token=0x%x", gk20a_dbg_pmu(g, "mutex released: id=%d, token=0x%x",
mutex->index, *token); mutex->index, *token);
return 0; return 0;
@@ -475,7 +475,7 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g)
struct nvgpu_pmu *pmu = &g->pmu; struct nvgpu_pmu *pmu = &g->pmu;
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_mutex_acquire(&pmu->isr_mutex);
nvgpu_flcn_reset(pmu->flcn); nvgpu_flcn_reset(pmu->flcn);
@@ -554,7 +554,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status) void *param, u32 handle, u32 status)
{ {
struct nvgpu_pmu *pmu = param; struct nvgpu_pmu *pmu = param;
gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE");
pmu->zbc_save_done = 1; pmu->zbc_save_done = 1;
} }
@@ -575,7 +575,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
pmu->zbc_save_done = 0; pmu->zbc_save_done = 0;
gk20a_dbg_pmu("cmd post ZBC_TABLE_UPDATE"); gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE");
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_zbc_msg, pmu, &seq, ~0); pmu_handle_zbc_msg, pmu, &seq, ~0);
pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
@@ -587,18 +587,20 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu, int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
struct nv_pmu_therm_msg *msg) struct nv_pmu_therm_msg *msg)
{ {
gk20a_dbg_fn(""); struct gk20a *g = gk20a_from_pmu(pmu);
nvgpu_log_fn(g, " ");
switch (msg->msg_type) { switch (msg->msg_type) {
case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION: case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION:
if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1))
nvgpu_clk_arb_send_thermal_alarm(pmu->g); nvgpu_clk_arb_send_thermal_alarm(pmu->g);
else else
gk20a_dbg_pmu("Unwanted/Unregistered thermal event received %d", gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d",
msg->hw_slct_msg.mask); msg->hw_slct_msg.mask);
break; break;
default: default:
gk20a_dbg_pmu("unkown therm event received %d", msg->msg_type); gk20a_dbg_pmu(g, "unkown therm event received %d", msg->msg_type);
break; break;
} }
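
Note: since nvgpu_log*() cannot work without the struct gk20a pointer, functions that previously logged without one now derive it from whatever context object they already hold, as the therm handler above does with gk20a_from_pmu(). A minimal sketch of the pattern; handler_example() is hypothetical, while gk20a_from_pmu(), ch->g and clk->g are the accessors used elsewhere in this change:

    /* Sketch: fetch the gk20a pointer from the surrounding context
     * object before logging. handler_example() is illustrative only. */
    static int handler_example(struct nvgpu_pmu *pmu)
    {
        struct gk20a *g = gk20a_from_pmu(pmu);

        nvgpu_log_fn(g, " ");
        /* channels use ch->g and clocks use clk->g the same way */
        return 0;
    }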
@@ -609,22 +611,22 @@ void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
{ {
struct gk20a *g = gk20a_from_pmu(pmu); struct gk20a *g = gk20a_from_pmu(pmu);
gk20a_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x", gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_supp_r(3): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3))); gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3)));
gk20a_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x", gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3))); gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3)));
gk20a_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x", gk20a_dbg_pmu(g, "pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3))); gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3)));
gk20a_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x", gk20a_dbg_pmu(g, "pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0))); gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0)));
gk20a_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x", gk20a_dbg_pmu(g, "pwr_pmu_pg_intren_r(0): 0x%08x",
gk20a_readl(g, pwr_pmu_pg_intren_r(0))); gk20a_readl(g, pwr_pmu_pg_intren_r(0)));
gk20a_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x", gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(3): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_count_r(3))); gk20a_readl(g, pwr_pmu_idle_count_r(3)));
gk20a_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x", gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(4): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_count_r(4))); gk20a_readl(g, pwr_pmu_idle_count_r(4)));
gk20a_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x", gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(7): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_count_r(7))); gk20a_readl(g, pwr_pmu_idle_count_r(7)));
} }
@@ -693,7 +695,7 @@ void gk20a_pmu_isr(struct gk20a *g)
u32 intr, mask; u32 intr, mask;
bool recheck = false; bool recheck = false;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_mutex_acquire(&pmu->isr_mutex);
if (!pmu->isr_enabled) { if (!pmu->isr_enabled) {
@@ -706,7 +708,7 @@ void gk20a_pmu_isr(struct gk20a *g)
intr = gk20a_readl(g, pwr_falcon_irqstat_r()); intr = gk20a_readl(g, pwr_falcon_irqstat_r());
gk20a_dbg_pmu("received falcon interrupt: 0x%08x", intr); gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr);
intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask; intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask;
if (!intr || pmu->pmu_state == PMU_STATE_OFF) { if (!intr || pmu->pmu_state == PMU_STATE_OFF) {

View File

@@ -45,7 +45,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
bus_bar0_window_target_vid_mem_f()) | bus_bar0_window_target_vid_mem_f()) |
bus_bar0_window_base_f(hi); bus_bar0_window_base_f(hi);
gk20a_dbg(gpu_dbg_mem, nvgpu_log(g, gpu_dbg_mem,
"0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)", "0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)",
hi, lo, mem, sgl, bufbase, hi, lo, mem, sgl, bufbase,
bufbase + nvgpu_sgt_get_phys(g, sgt, sgl), bufbase + nvgpu_sgt_get_phys(g, sgt, sgl),
@@ -67,7 +67,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem, void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
struct nvgpu_sgl *sgl) struct nvgpu_sgl *sgl)
{ {
gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl); nvgpu_log(g, gpu_dbg_mem, "end for %p,%p", mem, sgl);
nvgpu_spinlock_release(&g->mm.pramin_window_lock); nvgpu_spinlock_release(&g->mm.pramin_window_lock);
} }

View File

@@ -1,7 +1,7 @@
/* /*
* GK20A priv ring * GK20A priv ring
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -66,11 +66,11 @@ void gk20a_priv_ring_isr(struct gk20a *g)
status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r()); status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r()); status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());
gk20a_dbg(gpu_dbg_intr, "ringmaster intr status0: 0x%08x," nvgpu_log(g, gpu_dbg_intr, "ringmaster intr status0: 0x%08x,"
"status1: 0x%08x", status0, status1); "status1: 0x%08x", status0, status1);
if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) { if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) {
gk20a_dbg(gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", nvgpu_log(g, gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x",
gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()), gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()),
gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()), gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()),
gk20a_readl(g, pri_ringstation_sys_priv_error_info_r()), gk20a_readl(g, pri_ringstation_sys_priv_error_info_r()),
@@ -79,7 +79,7 @@ void gk20a_priv_ring_isr(struct gk20a *g)
for (gpc = 0; gpc < g->gr.gpc_count; gpc++) { for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
if (status1 & BIT(gpc)) { if (status1 & BIT(gpc)) {
gk20a_dbg(gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc, nvgpu_log(g, gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc,
gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride), gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride),
gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride), gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride),
gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_info_r() + gpc * gpc_priv_stride), gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_info_r() + gpc * gpc_priv_stride),

View File

@@ -1,7 +1,7 @@
/* /*
* Tegra GK20A GPU Debugger Driver Register Ops * Tegra GK20A GPU Debugger Driver Register Ops
* *
* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -72,7 +72,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
bool skip_read_lo, skip_read_hi; bool skip_read_lo, skip_read_hi;
bool ok; bool ok;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
@@ -108,7 +108,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
case REGOP(READ_32): case REGOP(READ_32):
ops[i].value_hi = 0; ops[i].value_hi = 0;
ops[i].value_lo = gk20a_readl(g, ops[i].offset); ops[i].value_lo = gk20a_readl(g, ops[i].offset);
gk20a_dbg(gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x", nvgpu_log(g, gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x",
ops[i].value_lo, ops[i].offset); ops[i].value_lo, ops[i].offset);
break; break;
@@ -118,7 +118,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
ops[i].value_hi = ops[i].value_hi =
gk20a_readl(g, ops[i].offset + 4); gk20a_readl(g, ops[i].offset + 4);
gk20a_dbg(gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x", nvgpu_log(g, gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x",
ops[i].value_hi, ops[i].value_lo, ops[i].value_hi, ops[i].value_lo,
ops[i].offset); ops[i].offset);
break; break;
@@ -157,12 +157,12 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
/* now update first 32bits */ /* now update first 32bits */
gk20a_writel(g, ops[i].offset, data32_lo); gk20a_writel(g, ops[i].offset, data32_lo);
gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
data32_lo, ops[i].offset); data32_lo, ops[i].offset);
/* if desired, update second 32bits */ /* if desired, update second 32bits */
if (ops[i].op == REGOP(WRITE_64)) { if (ops[i].op == REGOP(WRITE_64)) {
gk20a_writel(g, ops[i].offset + 4, data32_hi); gk20a_writel(g, ops[i].offset + 4, data32_hi);
gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
data32_hi, ops[i].offset + 4); data32_hi, ops[i].offset + 4);
} }
@@ -189,7 +189,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
} }
clean_up: clean_up:
gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err);
return err; return err;
} }
@@ -395,7 +395,7 @@ static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
} }
} }
gk20a_dbg(gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d", nvgpu_log(g, gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d",
*ctx_wr_count, *ctx_rd_count); *ctx_wr_count, *ctx_rd_count);
return ok; return ok;

View File

@@ -1,7 +1,7 @@
/* /*
* GK20A Therm * GK20A Therm
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ int gk20a_init_therm_support(struct gk20a *g)
{ {
u32 err; u32 err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = gk20a_init_therm_reset_enable_hw(g); err = gk20a_init_therm_reset_enable_hw(g);
if (err) if (err)
@@ -73,7 +73,7 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g)
u32 active_engine_id = 0; u32 active_engine_id = 0;
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
for (engine_id = 0; engine_id < f->num_engines; engine_id++) { for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
active_engine_id = f->active_engines_list[engine_id]; active_engine_id = f->active_engines_list[engine_id];
@@ -104,6 +104,6 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g)
idle_filter &= ~therm_hubmmu_idle_filter_value_m(); idle_filter &= ~therm_hubmmu_idle_filter_value_m();
gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }

View File

@@ -107,7 +107,9 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
struct channel_gk20a *ch) struct channel_gk20a *ch)
{ {
gk20a_dbg_fn(""); struct gk20a *g = ch->g;
nvgpu_log_fn(g, " ");
/* check if channel is already bound to some TSG */ /* check if channel is already bound to some TSG */
if (gk20a_is_channel_marked_as_tsg(ch)) { if (gk20a_is_channel_marked_as_tsg(ch)) {
@@ -137,10 +139,10 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
nvgpu_ref_get(&tsg->refcount); nvgpu_ref_get(&tsg->refcount);
gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n", nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
tsg->tsgid, ch->chid); tsg->tsgid, ch->chid);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -167,7 +169,7 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
ch->tsgid = NVGPU_INVALID_TSG_ID; ch->tsgid = NVGPU_INVALID_TSG_ID;
gk20a_dbg(gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n", nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n",
tsg->tsgid, ch->chid); tsg->tsgid, ch->chid);
return 0; return 0;
@@ -204,7 +206,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
int ret; int ret;
gk20a_dbg(gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level); nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level);
switch (level) { switch (level) {
case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW: case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW:
@@ -227,7 +229,7 @@ int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
gk20a_dbg(gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice); nvgpu_log(g, gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice);
return g->ops.fifo.tsg_set_timeslice(tsg, timeslice); return g->ops.fifo.tsg_set_timeslice(tsg, timeslice);
} }
@@ -300,7 +302,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
} }
} }
gk20a_dbg(gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid); nvgpu_log(g, gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid);
return tsg; return tsg;
@@ -343,7 +345,7 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
tsg->runlist_id = ~0; tsg->runlist_id = ~0;
gk20a_dbg(gpu_dbg_fn, "tsg released %d\n", tsg->tsgid); nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);
} }
struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch) struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)

View File

@@ -42,8 +42,8 @@
#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h> #include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
/*Defines*/ /*Defines*/
#define gm20b_dbg_pmu(fmt, arg...) \ #define gm20b_dbg_pmu(g, fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata); typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
@@ -101,16 +101,16 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
struct nvgpu_pmu *pmu = &g->pmu; struct nvgpu_pmu *pmu = &g->pmu;
struct lsf_ucode_desc *lsf_desc; struct lsf_ucode_desc *lsf_desc;
int err; int err;
gm20b_dbg_pmu("requesting PMU ucode in GM20B\n"); gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B\n");
pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0); pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0);
if (!pmu_fw) { if (!pmu_fw) {
nvgpu_err(g, "failed to load pmu ucode!!"); nvgpu_err(g, "failed to load pmu ucode!!");
return -ENOENT; return -ENOENT;
} }
g->acr.pmu_fw = pmu_fw; g->acr.pmu_fw = pmu_fw;
gm20b_dbg_pmu("Loaded PMU ucode in for blob preparation"); gm20b_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
gm20b_dbg_pmu("requesting PMU ucode desc in GM20B\n"); gm20b_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0); pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0);
if (!pmu_desc) { if (!pmu_desc) {
nvgpu_err(g, "failed to load pmu ucode desc!!"); nvgpu_err(g, "failed to load pmu ucode desc!!");
@@ -129,7 +129,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
err = nvgpu_init_pmu_fw_support(pmu); err = nvgpu_init_pmu_fw_support(pmu);
if (err) { if (err) {
gm20b_dbg_pmu("failed to set function pointers\n"); gm20b_dbg_pmu(g, "failed to set function pointers\n");
goto release_sig; goto release_sig;
} }
@@ -148,7 +148,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
p_img->header = NULL; p_img->header = NULL;
p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
gm20b_dbg_pmu("requesting PMU ucode in GM20B exit\n"); gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
nvgpu_release_firmware(g, pmu_sig); nvgpu_release_firmware(g, pmu_sig);
return 0; return 0;
release_sig: release_sig:
@@ -221,7 +221,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
p_img->header = NULL; p_img->header = NULL;
p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
gm20b_dbg_pmu("fecs fw loaded\n"); gm20b_dbg_pmu(g, "fecs fw loaded\n");
nvgpu_release_firmware(g, fecs_sig); nvgpu_release_firmware(g, fecs_sig);
return 0; return 0;
free_lsf_desc: free_lsf_desc:
@@ -292,7 +292,7 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
p_img->header = NULL; p_img->header = NULL;
p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
gm20b_dbg_pmu("gpccs fw loaded\n"); gm20b_dbg_pmu(g, "gpccs fw loaded\n");
nvgpu_release_firmware(g, gpccs_sig); nvgpu_release_firmware(g, gpccs_sig);
return 0; return 0;
free_lsf_desc: free_lsf_desc:
@@ -361,24 +361,24 @@ int prepare_ucode_blob(struct gk20a *g)
non WPR blob of ucodes*/ non WPR blob of ucodes*/
err = nvgpu_init_pmu_fw_support(pmu); err = nvgpu_init_pmu_fw_support(pmu);
if (err) { if (err) {
gm20b_dbg_pmu("failed to set function pointers\n"); gm20b_dbg_pmu(g, "failed to set function pointers\n");
return err; return err;
} }
return 0; return 0;
} }
plsfm = &lsfm_l; plsfm = &lsfm_l;
memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr)); memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr));
gm20b_dbg_pmu("fetching GMMU regs\n"); gm20b_dbg_pmu(g, "fetching GMMU regs\n");
g->ops.fb.vpr_info_fetch(g); g->ops.fb.vpr_info_fetch(g);
gr_gk20a_init_ctxsw_ucode(g); gr_gk20a_init_ctxsw_ucode(g);
g->ops.pmu.get_wpr(g, &wpr_inf); g->ops.pmu.get_wpr(g, &wpr_inf);
gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base); gm20b_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
gm20b_dbg_pmu("wpr carveout size :%llx\n", wpr_inf.size); gm20b_dbg_pmu(g, "wpr carveout size :%llx\n", wpr_inf.size);
/* Discover all managed falcons*/ /* Discover all managed falcons*/
err = lsfm_discover_ucode_images(g, plsfm); err = lsfm_discover_ucode_images(g, plsfm);
gm20b_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
if (err) if (err)
goto free_sgt; goto free_sgt;
@@ -394,13 +394,13 @@ int prepare_ucode_blob(struct gk20a *g)
if (err) if (err)
goto free_sgt; goto free_sgt;
gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n", gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
plsfm->managed_flcn_cnt, plsfm->wpr_size); plsfm->managed_flcn_cnt, plsfm->wpr_size);
lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob); lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
} else { } else {
gm20b_dbg_pmu("LSFM is managing no falcons.\n"); gm20b_dbg_pmu(g, "LSFM is managing no falcons.\n");
} }
gm20b_dbg_pmu("prepare ucode blob return 0\n"); gm20b_dbg_pmu(g, "prepare ucode blob return 0\n");
free_acr_resources(g, plsfm); free_acr_resources(g, plsfm);
free_sgt: free_sgt:
return err; return err;
@@ -444,13 +444,13 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
plsfm->managed_flcn_cnt++; plsfm->managed_flcn_cnt++;
} else { } else {
gm20b_dbg_pmu("id not managed %d\n", gm20b_dbg_pmu(g, "id not managed %d\n",
ucode_img.lsf_desc->falcon_id); ucode_img.lsf_desc->falcon_id);
} }
/*Free any ucode image resources if not managing this falcon*/ /*Free any ucode image resources if not managing this falcon*/
if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) { if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
gm20b_dbg_pmu("pmu is not LSFM managed\n"); gm20b_dbg_pmu(g, "pmu is not LSFM managed\n");
lsfm_free_ucode_img_res(g, &ucode_img); lsfm_free_ucode_img_res(g, &ucode_img);
} }
@@ -481,7 +481,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
== 0) == 0)
plsfm->managed_flcn_cnt++; plsfm->managed_flcn_cnt++;
} else { } else {
gm20b_dbg_pmu("not managed %d\n", gm20b_dbg_pmu(g, "not managed %d\n",
ucode_img.lsf_desc->falcon_id); ucode_img.lsf_desc->falcon_id);
lsfm_free_nonpmu_ucode_img_res(g, lsfm_free_nonpmu_ucode_img_res(g,
&ucode_img); &ucode_img);
@@ -489,7 +489,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
} }
} else { } else {
/* Consumed all available falcon objects */ /* Consumed all available falcon objects */
gm20b_dbg_pmu("Done checking for ucodes %d\n", i); gm20b_dbg_pmu(g, "Done checking for ucodes %d\n", i);
break; break;
} }
} }
@@ -526,26 +526,26 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
addr_base = p_lsfm->lsb_header.ucode_off; addr_base = p_lsfm->lsb_header.ucode_off;
g->ops.pmu.get_wpr(g, &wpr_inf); g->ops.pmu.get_wpr(g, &wpr_inf);
addr_base += wpr_inf.wpr_base; addr_base += wpr_inf.wpr_base;
gm20b_dbg_pmu("pmu loader cfg u32 addrbase %x\n", (u32)addr_base); gm20b_dbg_pmu(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
/*From linux*/ /*From linux*/
addr_code = u64_lo32((addr_base + addr_code = u64_lo32((addr_base +
desc->app_start_offset + desc->app_start_offset +
desc->app_resident_code_offset) >> 8); desc->app_resident_code_offset) >> 8);
gm20b_dbg_pmu("app start %d app res code off %d\n", gm20b_dbg_pmu(g, "app start %d app res code off %d\n",
desc->app_start_offset, desc->app_resident_code_offset); desc->app_start_offset, desc->app_resident_code_offset);
addr_data = u64_lo32((addr_base + addr_data = u64_lo32((addr_base +
desc->app_start_offset + desc->app_start_offset +
desc->app_resident_data_offset) >> 8); desc->app_resident_data_offset) >> 8);
gm20b_dbg_pmu("app res data offset%d\n", gm20b_dbg_pmu(g, "app res data offset%d\n",
desc->app_resident_data_offset); desc->app_resident_data_offset);
gm20b_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset); gm20b_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
addr_args = ((pwr_falcon_hwcfg_dmem_size_v( addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
gk20a_readl(g, pwr_falcon_hwcfg_r()))) gk20a_readl(g, pwr_falcon_hwcfg_r())))
<< GK20A_PMU_DMEM_BLKSIZE2); << GK20A_PMU_DMEM_BLKSIZE2);
addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
gm20b_dbg_pmu("addr_args %x\n", addr_args); gm20b_dbg_pmu(g, "addr_args %x\n", addr_args);
/* Populate the loader_config state*/ /* Populate the loader_config state*/
ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE; ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE;
@@ -599,7 +599,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
g->ops.pmu.get_wpr(g, &wpr_inf); g->ops.pmu.get_wpr(g, &wpr_inf);
addr_base += wpr_inf.wpr_base; addr_base += wpr_inf.wpr_base;
gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base, gm20b_dbg_pmu(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
p_lsfm->wpr_header.falcon_id); p_lsfm->wpr_header.falcon_id);
addr_code = u64_lo32((addr_base + addr_code = u64_lo32((addr_base +
desc->app_start_offset + desc->app_start_offset +
@@ -608,7 +608,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
desc->app_start_offset + desc->app_start_offset +
desc->app_resident_data_offset) >> 8); desc->app_resident_data_offset) >> 8);
gm20b_dbg_pmu("gen cfg %x u32 addrcode %x & data %x load offset %xID\n", gm20b_dbg_pmu(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
(u32)addr_code, (u32)addr_data, desc->bootloader_start_offset, (u32)addr_code, (u32)addr_data, desc->bootloader_start_offset,
p_lsfm->wpr_header.falcon_id); p_lsfm->wpr_header.falcon_id);
@@ -631,7 +631,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
struct nvgpu_pmu *pmu = &g->pmu; struct nvgpu_pmu *pmu = &g->pmu;
if (pnode->wpr_header.falcon_id != pmu->falcon_id) { if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
gm20b_dbg_pmu("non pmu. write flcn bl gen desc\n"); gm20b_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
g->ops.pmu.flcn_populate_bl_dmem_desc(g, g->ops.pmu.flcn_populate_bl_dmem_desc(g,
pnode, &pnode->bl_gen_desc_size, pnode, &pnode->bl_gen_desc_size,
pnode->wpr_header.falcon_id); pnode->wpr_header.falcon_id);
@@ -639,7 +639,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
} }
if (pmu->pmu_mode & PMU_LSFM_MANAGED) { if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
gm20b_dbg_pmu("pmu write flcn bl gen desc\n"); gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n");
if (pnode->wpr_header.falcon_id == pmu->falcon_id) if (pnode->wpr_header.falcon_id == pmu->falcon_id)
return g->ops.pmu.pmu_populate_loader_cfg(g, pnode, return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
&pnode->bl_gen_desc_size); &pnode->bl_gen_desc_size);
@@ -672,46 +672,46 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
&pnode->wpr_header, sizeof(pnode->wpr_header)); &pnode->wpr_header, sizeof(pnode->wpr_header));
gm20b_dbg_pmu("wpr header"); gm20b_dbg_pmu(g, "wpr header");
gm20b_dbg_pmu("falconid :%d", gm20b_dbg_pmu(g, "falconid :%d",
pnode->wpr_header.falcon_id); pnode->wpr_header.falcon_id);
gm20b_dbg_pmu("lsb_offset :%x", gm20b_dbg_pmu(g, "lsb_offset :%x",
pnode->wpr_header.lsb_offset); pnode->wpr_header.lsb_offset);
gm20b_dbg_pmu("bootstrap_owner :%d", gm20b_dbg_pmu(g, "bootstrap_owner :%d",
pnode->wpr_header.bootstrap_owner); pnode->wpr_header.bootstrap_owner);
gm20b_dbg_pmu("lazy_bootstrap :%d", gm20b_dbg_pmu(g, "lazy_bootstrap :%d",
pnode->wpr_header.lazy_bootstrap); pnode->wpr_header.lazy_bootstrap);
gm20b_dbg_pmu("status :%d", gm20b_dbg_pmu(g, "status :%d",
pnode->wpr_header.status); pnode->wpr_header.status);
/*Flush LSB header to memory*/ /*Flush LSB header to memory*/
nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
&pnode->lsb_header, sizeof(pnode->lsb_header)); &pnode->lsb_header, sizeof(pnode->lsb_header));
gm20b_dbg_pmu("lsb header"); gm20b_dbg_pmu(g, "lsb header");
gm20b_dbg_pmu("ucode_off :%x", gm20b_dbg_pmu(g, "ucode_off :%x",
pnode->lsb_header.ucode_off); pnode->lsb_header.ucode_off);
gm20b_dbg_pmu("ucode_size :%x", gm20b_dbg_pmu(g, "ucode_size :%x",
pnode->lsb_header.ucode_size); pnode->lsb_header.ucode_size);
gm20b_dbg_pmu("data_size :%x", gm20b_dbg_pmu(g, "data_size :%x",
pnode->lsb_header.data_size); pnode->lsb_header.data_size);
gm20b_dbg_pmu("bl_code_size :%x", gm20b_dbg_pmu(g, "bl_code_size :%x",
pnode->lsb_header.bl_code_size); pnode->lsb_header.bl_code_size);
gm20b_dbg_pmu("bl_imem_off :%x", gm20b_dbg_pmu(g, "bl_imem_off :%x",
pnode->lsb_header.bl_imem_off); pnode->lsb_header.bl_imem_off);
gm20b_dbg_pmu("bl_data_off :%x", gm20b_dbg_pmu(g, "bl_data_off :%x",
pnode->lsb_header.bl_data_off); pnode->lsb_header.bl_data_off);
gm20b_dbg_pmu("bl_data_size :%x", gm20b_dbg_pmu(g, "bl_data_size :%x",
pnode->lsb_header.bl_data_size); pnode->lsb_header.bl_data_size);
gm20b_dbg_pmu("app_code_off :%x", gm20b_dbg_pmu(g, "app_code_off :%x",
pnode->lsb_header.app_code_off); pnode->lsb_header.app_code_off);
gm20b_dbg_pmu("app_code_size :%x", gm20b_dbg_pmu(g, "app_code_size :%x",
pnode->lsb_header.app_code_size); pnode->lsb_header.app_code_size);
gm20b_dbg_pmu("app_data_off :%x", gm20b_dbg_pmu(g, "app_data_off :%x",
pnode->lsb_header.app_data_off); pnode->lsb_header.app_data_off);
gm20b_dbg_pmu("app_data_size :%x", gm20b_dbg_pmu(g, "app_data_size :%x",
pnode->lsb_header.app_data_size); pnode->lsb_header.app_data_size);
gm20b_dbg_pmu("flags :%x", gm20b_dbg_pmu(g, "flags :%x",
pnode->lsb_header.flags); pnode->lsb_header.flags);
/*If this falcon has a boot loader and related args, /*If this falcon has a boot loader and related args,
@@ -1028,7 +1028,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
start = nvgpu_mem_get_addr(g, &acr->ucode_blob); start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
size = acr->ucode_blob.size; size = acr->ucode_blob.size;
gm20b_dbg_pmu(""); gm20b_dbg_pmu(g, " ");
if (!acr_fw) { if (!acr_fw) {
/*First time init case*/ /*First time init case*/
@@ -1141,14 +1141,14 @@ int acr_ucode_patch_sig(struct gk20a *g,
unsigned int *p_patch_ind) unsigned int *p_patch_ind)
{ {
unsigned int i, *p_sig; unsigned int i, *p_sig;
gm20b_dbg_pmu(""); gm20b_dbg_pmu(g, " ");
if (!pmu_is_debug_mode_en(g)) { if (!pmu_is_debug_mode_en(g)) {
p_sig = p_prod_sig; p_sig = p_prod_sig;
gm20b_dbg_pmu("PRODUCTION MODE\n"); gm20b_dbg_pmu(g, "PRODUCTION MODE\n");
} else { } else {
p_sig = p_dbg_sig; p_sig = p_dbg_sig;
gm20b_dbg_pmu("DEBUG MODE\n"); gm20b_dbg_pmu(g, "DEBUG MODE\n");
} }
/* Patching logic:*/ /* Patching logic:*/
@@ -1171,7 +1171,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc; struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
u32 dst; u32 dst;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_writel(g, pwr_falcon_itfen_r(),
gk20a_readl(g, pwr_falcon_itfen_r()) | gk20a_readl(g, pwr_falcon_itfen_r()) |
pwr_falcon_itfen_ctxen_enable_f()); pwr_falcon_itfen_ctxen_enable_f());
@@ -1193,7 +1193,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
pmu_bl_gm10x_desc->bl_start_tag); pmu_bl_gm10x_desc->bl_start_tag);
gm20b_dbg_pmu("Before starting falcon with BL\n"); gm20b_dbg_pmu(g, "Before starting falcon with BL\n");
virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8; virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;
@@ -1207,7 +1207,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
struct nvgpu_pmu *pmu = &g->pmu; struct nvgpu_pmu *pmu = &g->pmu;
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_mutex_acquire(&pmu->isr_mutex);
nvgpu_flcn_reset(pmu->flcn); nvgpu_flcn_reset(pmu->flcn);
@@ -1279,7 +1279,7 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
struct nvgpu_pmu *pmu = &g->pmu; struct nvgpu_pmu *pmu = &g->pmu;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_mutex_acquire(&pmu->isr_mutex);
nvgpu_flcn_reset(pmu->flcn); nvgpu_flcn_reset(pmu->flcn);
@@ -1324,7 +1324,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw; struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw;
struct hsflcn_bl_desc *pmu_bl_gm10x_desc; struct hsflcn_bl_desc *pmu_bl_gm10x_desc;
u32 *pmu_bl_gm10x = NULL; u32 *pmu_bl_gm10x = NULL;
gm20b_dbg_pmu(""); gm20b_dbg_pmu(g, " ");
if (!hsbl_fw) { if (!hsbl_fw) {
hsbl_fw = nvgpu_request_firmware(g, hsbl_fw = nvgpu_request_firmware(g,
@@ -1343,7 +1343,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size, bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size,
256); 256);
acr->hsbl_ucode.size = bl_sz; acr->hsbl_ucode.size = bl_sz;
gm20b_dbg_pmu("Executing Generic Bootloader\n"); gm20b_dbg_pmu(g, "Executing Generic Bootloader\n");
/*TODO in code verify that enable PMU is done, /*TODO in code verify that enable PMU is done,
scrubbing etc is done*/ scrubbing etc is done*/
@@ -1366,7 +1366,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
} }
nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz); nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz);
gm20b_dbg_pmu("Copied bl ucode to bl_cpuva\n"); gm20b_dbg_pmu(g, "Copied bl ucode to bl_cpuva\n");
} }
/* /*
* Disable interrupts to avoid kernel hitting breakpoint due * Disable interrupts to avoid kernel hitting breakpoint due
@@ -1377,9 +1377,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
gk20a_get_gr_idle_timeout(g))) gk20a_get_gr_idle_timeout(g)))
goto err_unmap_bl; goto err_unmap_bl;
gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g, gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
pwr_falcon_mmu_phys_sec_r())); pwr_falcon_mmu_phys_sec_r()));
gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size); g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size);
@@ -1396,10 +1396,10 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
else else
goto err_unmap_bl; goto err_unmap_bl;
} }
gm20b_dbg_pmu("after waiting for halt, err %x\n", err); gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err);
gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g, gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
pwr_falcon_mmu_phys_sec_r())); pwr_falcon_mmu_phys_sec_r()));
gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
start_gm20b_pmu(g); start_gm20b_pmu(g);
return 0; return 0;
err_unmap_bl: err_unmap_bl:
@@ -1430,7 +1430,7 @@ int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
} }
g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r()); g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r());
gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
data = gk20a_readl(g, pwr_falcon_mailbox0_r()); data = gk20a_readl(g, pwr_falcon_mailbox0_r());
if (data) { if (data) {
nvgpu_err(g, "ACR boot failed, err %x", data); nvgpu_err(g, "ACR boot failed, err %x", data);

View File

@@ -1,7 +1,7 @@
/* /*
* GM20B MMU * GM20B MMU
* *
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -40,7 +40,7 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
u64 iova = nvgpu_inst_block_addr(g, bar1_inst); u64 iova = nvgpu_inst_block_addr(g, bar1_inst);
u32 ptr_v = (u32)(iova >> bus_bar1_block_ptr_shift_v()); u32 ptr_v = (u32)(iova >> bus_bar1_block_ptr_shift_v());
gk20a_dbg_info("bar1 inst block ptr: 0x%08x", ptr_v); nvgpu_log_info(g, "bar1 inst block ptr: 0x%08x", ptr_v);
gk20a_writel(g, bus_bar1_block_r(), gk20a_writel(g, bus_bar1_block_r(),
nvgpu_aperture_mask(g, bar1_inst, nvgpu_aperture_mask(g, bar1_inst,

View File

@@ -1,7 +1,7 @@
/* /*
* GM20B Clocks * GM20B Clocks
* *
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -36,8 +36,8 @@
#include <nvgpu/hw/gm20b/hw_therm_gm20b.h> #include <nvgpu/hw/gm20b/hw_therm_gm20b.h>
#include <nvgpu/hw/gm20b/hw_fuse_gm20b.h> #include <nvgpu/hw/gm20b/hw_fuse_gm20b.h>
#define gk20a_dbg_clk(fmt, arg...) \ #define gk20a_dbg_clk(g, fmt, arg...) \
gk20a_dbg(gpu_dbg_clk, fmt, ##arg) nvgpu_log(g, gpu_dbg_clk, fmt, ##arg)
#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */ #define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */
#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */ #define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
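The per-file debug wrappers such as gk20a_dbg_clk above are redefined to take the struct gk20a pointer as their first argument and forward it to nvgpu_log() together with a debug-category mask. A minimal, self-contained sketch of that pattern follows; the struct layout, mask value and printf backend are stand-ins for illustration, not the driver's actual definitions.

#include <stdio.h>

/* Stand-in definitions for illustration only -- not the nvgpu ones. */
struct gk20a {
	const char *name;
	unsigned long log_mask;		/* enabled debug categories */
};

#define gpu_dbg_clk	(1UL << 0)	/* hypothetical category bit */

/* Core logger: emits the message only when the category is enabled. */
#define nvgpu_log(g, mask, fmt, arg...) \
	do { \
		if ((g)->log_mask & (mask)) \
			printf("%s: " fmt "\n", (g)->name, ##arg); \
	} while (0)

/* Per-file wrapper in the new style: callers must pass the gk20a pointer. */
#define gk20a_dbg_clk(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_clk, fmt, ##arg)

int main(void)
{
	struct gk20a g = { .name = "gm20b", .log_mask = gpu_dbg_clk };

	gk20a_dbg_clk(&g, "request target freq %d MHz", 998);
	return 0;
}

The same shape appears later for gm20b_dbg_pmu and gp106_dbg_pmu: only the wrapper name and the gpu_dbg_* category differ.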
@@ -138,6 +138,7 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl)
static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
struct pll_parms *pll_params, u32 *target_freq, bool best_fit) struct pll_parms *pll_params, u32 *target_freq, bool best_fit)
{ {
struct gk20a *g = clk->g;
u32 min_vco_f, max_vco_f; u32 min_vco_f, max_vco_f;
u32 best_M, best_N; u32 best_M, best_N;
u32 low_PL, high_PL, best_PL; u32 low_PL, high_PL, best_PL;
@@ -149,7 +150,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
BUG_ON(target_freq == NULL); BUG_ON(target_freq == NULL);
gk20a_dbg_fn("request target freq %d MHz", *target_freq); nvgpu_log_fn(g, "request target freq %d MHz", *target_freq);
ref_clk_f = pll->clk_in; ref_clk_f = pll->clk_in;
target_clk_f = *target_freq; target_clk_f = *target_freq;
@@ -172,7 +173,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
low_PL = min(low_PL, pll_params->max_PL); low_PL = min(low_PL, pll_params->max_PL);
low_PL = max(low_PL, pll_params->min_PL); low_PL = max(low_PL, pll_params->min_PL);
gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)", nvgpu_log_info(g, "low_PL %d(div%d), high_PL %d(div%d)",
low_PL, nvgpu_pl_to_div(low_PL), high_PL, nvgpu_pl_to_div(high_PL)); low_PL, nvgpu_pl_to_div(low_PL), high_PL, nvgpu_pl_to_div(high_PL));
for (pl = low_PL; pl <= high_PL; pl++) { for (pl = low_PL; pl <= high_PL; pl++) {
@@ -217,7 +218,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
goto found_match; goto found_match;
} }
gk20a_dbg_info("delta %d @ M %d, N %d, PL %d", nvgpu_log_info(g, "delta %d @ M %d, N %d, PL %d",
delta, m, n, pl); delta, m, n, pl);
} }
} }
@@ -229,7 +230,7 @@ found_match:
BUG_ON(best_delta == ~0U); BUG_ON(best_delta == ~0U);
if (best_fit && best_delta != 0) if (best_fit && best_delta != 0)
gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll", gk20a_dbg_clk(g, "no best match for target @ %dMHz on gpc_pll",
target_clk_f); target_clk_f);
pll->M = best_M; pll->M = best_M;
@@ -241,10 +242,10 @@ found_match:
*target_freq = pll->freq; *target_freq = pll->freq;
gk20a_dbg_clk("actual target freq %d kHz, M %d, N %d, PL %d(div%d)", gk20a_dbg_clk(g, "actual target freq %d kHz, M %d, N %d, PL %d(div%d)",
*target_freq, pll->M, pll->N, pll->PL, nvgpu_pl_to_div(pll->PL)); *target_freq, pll->M, pll->N, pll->PL, nvgpu_pl_to_div(pll->PL));
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -810,7 +811,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
if (gpll->mode == GPC_PLL_MODE_DVFS) { if (gpll->mode == GPC_PLL_MODE_DVFS) {
gk20a_readl(g, trim_sys_gpcpll_cfg_r()); gk20a_readl(g, trim_sys_gpcpll_cfg_r());
nvgpu_udelay(gpc_pll_params.na_lock_delay); nvgpu_udelay(gpc_pll_params.na_lock_delay);
gk20a_dbg_clk("NA config_pll under bypass: %u (%u) kHz %d mV", gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV",
gpll->freq, gpll->freq / 2, gpll->freq, gpll->freq / 2,
(trim_sys_gpcpll_cfg3_dfs_testout_v( (trim_sys_gpcpll_cfg3_dfs_testout_v(
gk20a_readl(g, trim_sys_gpcpll_cfg3_r())) gk20a_readl(g, trim_sys_gpcpll_cfg3_r()))
@@ -843,7 +844,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
return -EBUSY; return -EBUSY;
pll_locked: pll_locked:
gk20a_dbg_clk("locked config_pll under bypass r=0x%x v=0x%x", gk20a_dbg_clk(g, "locked config_pll under bypass r=0x%x v=0x%x",
trim_sys_gpcpll_cfg_r(), cfg); trim_sys_gpcpll_cfg_r(), cfg);
/* set SYNC_MODE for glitchless switch out of bypass */ /* set SYNC_MODE for glitchless switch out of bypass */
@@ -878,7 +879,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
bool can_slide, pldiv_only; bool can_slide, pldiv_only;
struct pll gpll; struct pll gpll;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!nvgpu_platform_is_silicon(g)) if (!nvgpu_platform_is_silicon(g))
return 0; return 0;
@@ -1028,7 +1029,7 @@ static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll)
gpll->N = nsafe; gpll->N = nsafe;
clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs); clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs);
gk20a_dbg_clk("safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", gk20a_dbg_clk(g, "safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
gpll->freq, gpll->M, gpll->N, gpll->PL, nvgpu_pl_to_div(gpll->PL), gpll->freq, gpll->M, gpll->N, gpll->PL, nvgpu_pl_to_div(gpll->PL),
gpll->dvfs.mv, gpll->dvfs.uv_cal / 1000, gpll->dvfs.dfs_coeff); gpll->dvfs.mv, gpll->dvfs.uv_cal / 1000, gpll->dvfs.dfs_coeff);
} }
@@ -1103,7 +1104,7 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal); clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal);
clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff); clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff);
gk20a_dbg_clk("config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", gk20a_dbg_clk(g, "config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
gpll_new->freq, gpll_new->M, gpll_new->N, gpll_new->PL, gpll_new->freq, gpll_new->M, gpll_new->N, gpll_new->PL,
nvgpu_pl_to_div(gpll_new->PL), nvgpu_pl_to_div(gpll_new->PL),
max(gpll_new->dvfs.mv, gpll_old->dvfs.mv), max(gpll_new->dvfs.mv, gpll_old->dvfs.mv),
@@ -1168,14 +1169,14 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
unsigned long safe_rate; unsigned long safe_rate;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = nvgpu_mutex_init(&clk->clk_mutex); err = nvgpu_mutex_init(&clk->clk_mutex);
if (err) if (err)
return err; return err;
if (clk->sw_ready) { if (clk->sw_ready) {
gk20a_dbg_fn("skip init"); nvgpu_log_fn(g, "skip init");
return 0; return 0;
} }
@@ -1229,7 +1230,7 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
clk->sw_ready = true; clk->sw_ready = true;
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
nvgpu_info(g, nvgpu_info(g,
"GPCPLL initial settings:%s M=%u, N=%u, P=%u (id = %u)", "GPCPLL initial settings:%s M=%u, N=%u, P=%u (id = %u)",
clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? " NA mode," : "", clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? " NA mode," : "",
@@ -1321,7 +1322,7 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g)
{ {
u32 data; u32 data;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* LDIV: Div4 mode (required); both bypass and vco ratios 1:1 */ /* LDIV: Div4 mode (required); both bypass and vco ratios 1:1 */
data = gk20a_readl(g, trim_sys_gpc2clk_out_r()); data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
@@ -1394,7 +1395,7 @@ static int set_pll_freq(struct gk20a *g, int allow_slide)
struct clk_gk20a *clk = &g->clk; struct clk_gk20a *clk = &g->clk;
int err = 0; int err = 0;
gk20a_dbg_fn("last freq: %dMHz, target freq %dMHz", nvgpu_log_fn(g, "last freq: %dMHz, target freq %dMHz",
clk->gpc_pll_last.freq, clk->gpc_pll.freq); clk->gpc_pll_last.freq, clk->gpc_pll.freq);
/* If programming with dynamic sliding failed, re-try under bypass */ /* If programming with dynamic sliding failed, re-try under bypass */
@@ -1427,7 +1428,7 @@ int gm20b_init_clk_support(struct gk20a *g)
struct clk_gk20a *clk = &g->clk; struct clk_gk20a *clk = &g->clk;
u32 err; u32 err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&clk->clk_mutex); nvgpu_mutex_acquire(&clk->clk_mutex);
clk->clk_hw_on = true; clk->clk_hw_on = true;
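Because nvgpu_log_fn() and friends need a struct gk20a, the clk_config_pll() hunk above adds a local struct gk20a *g = clk->g rather than threading the pointer through as a new parameter. A trimmed sketch of that back-pointer pattern, with printf standing in for the real log backend and hypothetical field names:

#include <stdio.h>

/* Trimmed stand-ins; the real nvgpu structs carry far more state. */
struct gk20a {
	const char *name;
};

struct clk_gk20a {
	struct gk20a *g;	/* back-pointer to the owning GPU */
	unsigned int freq_mhz;
};

/* Stub entry/exit trace tagged with the GPU and function names. */
#define nvgpu_log_fn(g, fmt, arg...) \
	printf("%s: %s: " fmt "\n", (g)->name, __func__, ##arg)

static int clk_config_example(struct clk_gk20a *clk, unsigned int target_mhz)
{
	struct gk20a *g = clk->g;	/* recover g just for logging */

	nvgpu_log_fn(g, "request target freq %u MHz", target_mhz);
	clk->freq_mhz = target_mhz;
	nvgpu_log_fn(g, "done");
	return 0;
}

int main(void)
{
	struct gk20a g = { .name = "gm20b" };
	struct clk_gk20a clk = { .g = &g, .freq_mhz = 0 };

	return clk_config_example(&clk, 998);
}

Where only a channel is in scope, the pointer is fetched inline instead, as in the nvgpu_log_fn(c->g, " ") calls in gr_gm20b_update_pc_sampling() further down.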

View File

@@ -38,7 +38,7 @@
void fb_gm20b_init_fs_state(struct gk20a *g) void fb_gm20b_init_fs_state(struct gk20a *g)
{ {
gk20a_dbg_info("initialize gm20b fb"); nvgpu_log_info(g, "initialize gm20b fb");
gk20a_writel(g, fb_fbhub_num_active_ltcs_r(), gk20a_writel(g, fb_fbhub_num_active_ltcs_r(),
g->ltc_count); g->ltc_count);

View File

@@ -47,7 +47,7 @@ void channel_gm20b_bind(struct channel_gk20a *c)
u32 inst_ptr = nvgpu_inst_block_addr(g, &c->inst_block) u32 inst_ptr = nvgpu_inst_block_addr(g, &c->inst_block)
>> ram_in_base_shift_v(); >> ram_in_base_shift_v();
gk20a_dbg_info("bind channel %d inst ptr 0x%08x", nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x",
c->chid, inst_ptr); c->chid, inst_ptr);

View File

@@ -47,7 +47,7 @@ void gr_gm20b_init_gpc_mmu(struct gk20a *g)
{ {
u32 temp; u32 temp;
gk20a_dbg_info("initialize gpc mmu"); nvgpu_log_info(g, "initialize gpc mmu");
if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) { if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
/* Bypass MMU check for non-secure boot. For /* Bypass MMU check for non-secure boot. For
@@ -168,7 +168,7 @@ void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
data = min_t(u32, data, g->gr.min_gpm_fifo_depth); data = min_t(u32, data, g->gr.min_gpm_fifo_depth);
gk20a_dbg_info("bundle cb token limit : %d, state limit : %d", nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d",
g->gr.bundle_cb_token_limit, data); g->gr.bundle_cb_token_limit, data);
gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(), gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(),
@@ -193,7 +193,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
u32 num_pes_per_gpc = nvgpu_get_litter_value(g, u32 num_pes_per_gpc = nvgpu_get_litter_value(g,
GPU_LIT_NUM_PES_PER_GPC); GPU_LIT_NUM_PES_PER_GPC);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(c); tsg = tsg_gk20a_from_ch(c);
if (!tsg) if (!tsg)
@@ -280,20 +280,20 @@ void gr_gm20b_set_rd_coalesce(struct gk20a *g, u32 data)
{ {
u32 val; u32 val;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
val = gk20a_readl(g, gr_gpcs_tpcs_tex_m_dbg2_r()); val = gk20a_readl(g, gr_gpcs_tpcs_tex_m_dbg2_r());
val = set_field(val, gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(), val = set_field(val, gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(),
gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(data)); gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(data));
gk20a_writel(g, gr_gpcs_tpcs_tex_m_dbg2_r(), val); gk20a_writel(g, gr_gpcs_tpcs_tex_m_dbg2_r(), val);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr, int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data) u32 class_num, u32 offset, u32 data)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (class_num == MAXWELL_COMPUTE_B) { if (class_num == MAXWELL_COMPUTE_B) {
switch (offset << 2) { switch (offset << 2) {
@@ -341,7 +341,7 @@ void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF) /* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF)
return; */ return; */
@@ -390,7 +390,7 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (cb_size > gr->attrib_cb_size) if (cb_size > gr->attrib_cb_size)
cb_size = gr->attrib_cb_size; cb_size = gr->attrib_cb_size;
@@ -665,7 +665,7 @@ int gr_gm20b_init_fs_state(struct gk20a *g)
{ {
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = gr_gk20a_init_fs_state(g); err = gr_gk20a_init_fs_state(g);
if (err) if (err)
@@ -762,7 +762,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
gr_fecs_falcon_hwcfg_r(); gr_fecs_falcon_hwcfg_r();
u8 falcon_id_mask = 0; u8 falcon_id_mask = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
@@ -829,7 +829,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(6), 0xffffffff); gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(6), 0xffffffff);
gk20a_writel(g, gr_fecs_cpuctl_alias_r(), gk20a_writel(g, gr_fecs_cpuctl_alias_r(),
gr_fecs_cpuctl_startcpu_f(1)); gr_fecs_cpuctl_startcpu_f(1));
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -858,7 +858,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
{ {
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
if (err) if (err)
@@ -867,7 +867,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
if (class == MAXWELL_COMPUTE_B) if (class == MAXWELL_COMPUTE_B)
gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA; gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -881,7 +881,7 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
u32 cta_preempt_option = u32 cta_preempt_option =
ctxsw_prog_main_image_preemption_options_control_cta_enabled_f(); ctxsw_prog_main_image_preemption_options_control_cta_enabled_f();
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(c); tsg = tsg_gk20a_from_ch(c);
if (!tsg) if (!tsg)
@@ -889,13 +889,13 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
gr_ctx = &tsg->gr_ctx; gr_ctx = &tsg->gr_ctx;
if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
gk20a_dbg_info("CTA: %x", cta_preempt_option); nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
nvgpu_mem_wr(g, mem, nvgpu_mem_wr(g, mem,
ctxsw_prog_main_image_preemption_options_o(), ctxsw_prog_main_image_preemption_options_o(),
cta_preempt_option); cta_preempt_option);
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
int gr_gm20b_dump_gr_status_regs(struct gk20a *g, int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
@@ -1044,7 +1044,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
struct nvgpu_mem *mem; struct nvgpu_mem *mem;
u32 v; u32 v;
gk20a_dbg_fn(""); nvgpu_log_fn(c->g, " ");
tsg = tsg_gk20a_from_ch(c); tsg = tsg_gk20a_from_ch(c);
if (!tsg) if (!tsg)
@@ -1066,7 +1066,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
nvgpu_mem_end(c->g, mem); nvgpu_mem_end(c->g, mem);
gk20a_dbg_fn("done"); nvgpu_log_fn(c->g, "done");
return 0; return 0;
} }
@@ -1220,19 +1220,19 @@ void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
/* Only for debug purpose */ /* Only for debug purpose */
for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n", nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n",
sm_id, w_state[sm_id].valid_warps[0]); sm_id, w_state[sm_id].valid_warps[0]);
gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n", nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n",
sm_id, w_state[sm_id].valid_warps[1]); sm_id, w_state[sm_id].valid_warps[1]);
gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n", nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n",
sm_id, w_state[sm_id].trapped_warps[0]); sm_id, w_state[sm_id].trapped_warps[0]);
gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n", nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n",
sm_id, w_state[sm_id].trapped_warps[1]); sm_id, w_state[sm_id].trapped_warps[1]);
gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n", nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n",
sm_id, w_state[sm_id].paused_warps[0]); sm_id, w_state[sm_id].paused_warps[0]);
gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n", nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n",
sm_id, w_state[sm_id].paused_warps[1]); sm_id, w_state[sm_id].paused_warps[1]);
} }
} }

View File

@@ -61,7 +61,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (max_comptag_lines == 0U) if (max_comptag_lines == 0U)
return 0; return 0;
@@ -87,9 +87,9 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
if (max_comptag_lines > hw_max_comptag_lines) if (max_comptag_lines > hw_max_comptag_lines)
max_comptag_lines = hw_max_comptag_lines; max_comptag_lines = hw_max_comptag_lines;
gk20a_dbg_info("compbit backing store size : %d", nvgpu_log_info(g, "compbit backing store size : %d",
compbit_backing_size); compbit_backing_size);
gk20a_dbg_info("max comptag lines : %d", nvgpu_log_info(g, "max comptag lines : %d",
max_comptag_lines); max_comptag_lines);
err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
@@ -121,7 +121,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
const u32 max_lines = 16384U; const u32 max_lines = 16384U;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
@@ -134,7 +134,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
nvgpu_mutex_acquire(&g->mm.l2_op_lock); nvgpu_mutex_acquire(&g->mm.l2_op_lock);
gk20a_dbg_info("clearing CBC lines %u..%u", min, iter_max); nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max);
if (op == gk20a_cbc_op_clear) { if (op == gk20a_cbc_op_clear) {
gk20a_writel( gk20a_writel(
@@ -205,11 +205,11 @@ void gm20b_ltc_init_fs_state(struct gk20a *g)
{ {
u32 reg; u32 reg;
gk20a_dbg_info("initialize gm20b l2"); nvgpu_log_info(g, "initialize gm20b l2");
g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
gk20a_dbg_info("%d ltcs out of %d", g->ltc_count, g->max_ltc_count); nvgpu_log_info(g, "%d ltcs out of %d", g->ltc_count, g->max_ltc_count);
gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(), gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(),
g->ltc_count); g->ltc_count);
@@ -459,7 +459,7 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(), gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
compbit_base_post_divide); compbit_base_post_divide);
gk20a_dbg(gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte, nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
"compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n", "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
(u32)(compbit_store_iova >> 32), (u32)(compbit_store_iova >> 32),
(u32)(compbit_store_iova & 0xffffffff), (u32)(compbit_store_iova & 0xffffffff),
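The compbit base message above passes several categories OR'ed together (gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte). The enable test assumed here is a plain bitwise AND against the GPU's mask, so the line prints if any one of those categories is switched on; a small sketch with made-up mask values:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical category bits; the real values live in the nvgpu log header. */
#define gpu_dbg_info	(1u << 0)
#define gpu_dbg_map_v	(1u << 1)
#define gpu_dbg_pte	(1u << 2)

/* Assumed predicate: any overlap between the message's categories and the
 * enabled mask lets the message through. */
static bool log_enabled(unsigned int enabled_mask, unsigned int msg_mask)
{
	return (enabled_mask & msg_mask) != 0u;
}

int main(void)
{
	unsigned int enabled = gpu_dbg_pte;	/* only PTE tracing enabled */

	printf("info|map_v|pte prints: %s\n",
	       log_enabled(enabled, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte) ?
	       "yes" : "no");
	printf("info alone prints: %s\n",
	       log_enabled(enabled, gpu_dbg_info) ? "yes" : "no");
	return 0;
}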

View File

@@ -1,7 +1,7 @@
/* /*
* GM20B MMU * GM20B MMU
* *
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -36,9 +36,9 @@ void gm20b_mm_set_big_page_size(struct gk20a *g,
{ {
u32 val; u32 val;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gk20a_dbg_info("big page size %d\n", size); nvgpu_log_info(g, "big page size %d\n", size);
val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w()); val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w());
val &= ~ram_in_big_page_size_m(); val &= ~ram_in_big_page_size_m();
@@ -48,7 +48,7 @@ void gm20b_mm_set_big_page_size(struct gk20a *g,
val |= ram_in_big_page_size_128kb_f(); val |= ram_in_big_page_size_128kb_f();
nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val); nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
u32 gm20b_mm_get_big_page_sizes(void) u32 gm20b_mm_get_big_page_sizes(void)

View File

@@ -1,7 +1,7 @@
/* /*
* GM20B PMU * GM20B PMU
* *
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -37,8 +37,8 @@
#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h> #include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
#include <nvgpu/hw/gm20b/hw_fuse_gm20b.h> #include <nvgpu/hw/gm20b/hw_fuse_gm20b.h>
#define gm20b_dbg_pmu(fmt, arg...) \ #define gm20b_dbg_pmu(g, fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
/* PROD settings for ELPG sequencing registers*/ /* PROD settings for ELPG sequencing registers*/
@@ -108,7 +108,7 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
u32 reg_writes; u32 reg_writes;
u32 index; u32 index;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (g->elpg_enabled) { if (g->elpg_enabled) {
reg_writes = ((sizeof(_pginitseq_gm20b) / reg_writes = ((sizeof(_pginitseq_gm20b) /
@@ -120,20 +120,20 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
} }
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return ret; return ret;
} }
static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg, static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status) void *param, u32 handle, u32 status)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_INIT_WPR_REGION"); gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_INIT_WPR_REGION");
if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS) if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS)
g->pmu_lsf_pmu_wpr_init_done = 1; g->pmu_lsf_pmu_wpr_init_done = 1;
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
@@ -143,7 +143,7 @@ int gm20b_pmu_init_acr(struct gk20a *g)
struct pmu_cmd cmd; struct pmu_cmd cmd;
u32 seq; u32 seq;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* init ACR */ /* init ACR */
memset(&cmd, 0, sizeof(struct pmu_cmd)); memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -153,11 +153,11 @@ int gm20b_pmu_init_acr(struct gk20a *g)
cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION; cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION;
cmd.cmd.acr.init_wpr.regionid = 0x01; cmd.cmd.acr.init_wpr.regionid = 0x01;
cmd.cmd.acr.init_wpr.wproffset = 0x00; cmd.cmd.acr.init_wpr.wproffset = 0x00;
gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION"); gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION");
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_acr_init_wpr_msg, pmu, &seq, ~0); pmu_handle_acr_init_wpr_msg, pmu, &seq, ~0);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -165,14 +165,14 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status) void *param, u32 handle, u32 status)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON"); gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
gm20b_dbg_pmu("response code = %x\n", msg->msg.acr.acrmsg.falconid); gm20b_dbg_pmu(g, "response code = %x\n", msg->msg.acr.acrmsg.falconid);
g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid; g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid;
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms, static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,
@@ -182,7 +182,7 @@ static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,
u32 reg; u32 reg;
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0)); reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER); nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
@@ -203,9 +203,9 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
struct pmu_cmd cmd; struct pmu_cmd cmd;
u32 seq; u32 seq;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gm20b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); gm20b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
if (g->pmu_lsf_pmu_wpr_init_done) { if (g->pmu_lsf_pmu_wpr_init_done) {
/* send message to load FECS falcon */ /* send message to load FECS falcon */
memset(&cmd, 0, sizeof(struct pmu_cmd)); memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -216,13 +216,13 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
PMU_ACR_CMD_ID_BOOTSTRAP_FALCON; PMU_ACR_CMD_ID_BOOTSTRAP_FALCON;
cmd.cmd.acr.bootstrap_falcon.flags = flags; cmd.cmd.acr.bootstrap_falcon.flags = flags;
cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id; cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id;
gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n", gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n",
falcon_id); falcon_id);
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return; return;
} }

View File

@@ -1,7 +1,7 @@
/* /*
* GM20B THERMAL * GM20B THERMAL
* *
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ int gm20b_init_therm_setup_hw(struct gk20a *g)
{ {
u32 v; u32 v;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* program NV_THERM registers */ /* program NV_THERM registers */
gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |

View File

@@ -43,8 +43,8 @@
#include <nvgpu/hw/gp106/hw_pwr_gp106.h> #include <nvgpu/hw/gp106/hw_pwr_gp106.h>
/*Defines*/ /*Defines*/
#define gp106_dbg_pmu(fmt, arg...) \ #define gp106_dbg_pmu(g, fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
typedef int (*get_ucode_details)(struct gk20a *g, typedef int (*get_ucode_details)(struct gk20a *g,
struct flcn_ucode_img_v1 *udata); struct flcn_ucode_img_v1 *udata);
@@ -113,7 +113,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
struct lsf_ucode_desc_v1 *lsf_desc; struct lsf_ucode_desc_v1 *lsf_desc;
int err; int err;
gp106_dbg_pmu("requesting PMU ucode in gp106\n"); gp106_dbg_pmu(g, "requesting PMU ucode in gp106\n");
pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE,
NVGPU_REQUEST_FIRMWARE_NO_SOC); NVGPU_REQUEST_FIRMWARE_NO_SOC);
if (!pmu_fw) { if (!pmu_fw) {
@@ -121,9 +121,9 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
return -ENOENT; return -ENOENT;
} }
g->acr.pmu_fw = pmu_fw; g->acr.pmu_fw = pmu_fw;
gp106_dbg_pmu("Loaded PMU ucode in for blob preparation"); gp106_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
gp106_dbg_pmu("requesting PMU ucode desc in GM20B\n"); gp106_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC,
NVGPU_REQUEST_FIRMWARE_NO_SOC); NVGPU_REQUEST_FIRMWARE_NO_SOC);
if (!pmu_desc) { if (!pmu_desc) {
@@ -164,7 +164,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
p_img->header = NULL; p_img->header = NULL;
p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
gp106_dbg_pmu("requesting PMU ucode in GM20B exit\n"); gp106_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
nvgpu_release_firmware(g, pmu_sig); nvgpu_release_firmware(g, pmu_sig);
return 0; return 0;
@@ -262,7 +262,7 @@ int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
p_img->header = NULL; p_img->header = NULL;
p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
gp106_dbg_pmu("fecs fw loaded\n"); gp106_dbg_pmu(g, "fecs fw loaded\n");
nvgpu_release_firmware(g, fecs_sig); nvgpu_release_firmware(g, fecs_sig);
return 0; return 0;
free_lsf_desc: free_lsf_desc:
@@ -358,7 +358,7 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
p_img->header = NULL; p_img->header = NULL;
p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
gp106_dbg_pmu("gpccs fw loaded\n"); gp106_dbg_pmu(g, "gpccs fw loaded\n");
nvgpu_release_firmware(g, gpccs_sig); nvgpu_release_firmware(g, gpccs_sig);
return 0; return 0;
free_lsf_desc: free_lsf_desc:
@@ -381,7 +381,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
non WPR blob of ucodes*/ non WPR blob of ucodes*/
err = nvgpu_init_pmu_fw_support(pmu); err = nvgpu_init_pmu_fw_support(pmu);
if (err) { if (err) {
gp106_dbg_pmu("failed to set function pointers\n"); gp106_dbg_pmu(g, "failed to set function pointers\n");
return err; return err;
} }
return 0; return 0;
@@ -391,12 +391,12 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
gr_gk20a_init_ctxsw_ucode(g); gr_gk20a_init_ctxsw_ucode(g);
g->ops.pmu.get_wpr(g, &wpr_inf); g->ops.pmu.get_wpr(g, &wpr_inf);
gp106_dbg_pmu("wpr carveout base:%llx\n", (wpr_inf.wpr_base)); gp106_dbg_pmu(g, "wpr carveout base:%llx\n", (wpr_inf.wpr_base));
gp106_dbg_pmu("wpr carveout size :%x\n", (u32)wpr_inf.size); gp106_dbg_pmu(g, "wpr carveout size :%x\n", (u32)wpr_inf.size);
/* Discover all managed falcons*/ /* Discover all managed falcons*/
err = lsfm_discover_ucode_images(g, plsfm); err = lsfm_discover_ucode_images(g, plsfm);
gp106_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
if (err) if (err)
goto exit_err; goto exit_err;
@@ -412,14 +412,14 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
if (err) if (err)
goto exit_err; goto exit_err;
gp106_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n", gp106_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
plsfm->managed_flcn_cnt, plsfm->wpr_size); plsfm->managed_flcn_cnt, plsfm->wpr_size);
lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob); lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
} else { } else {
gp106_dbg_pmu("LSFM is managing no falcons.\n"); gp106_dbg_pmu(g, "LSFM is managing no falcons.\n");
} }
gp106_dbg_pmu("prepare ucode blob return 0\n"); gp106_dbg_pmu(g, "prepare ucode blob return 0\n");
free_acr_resources(g, plsfm); free_acr_resources(g, plsfm);
exit_err: exit_err:
@@ -465,14 +465,14 @@ int lsfm_discover_ucode_images(struct gk20a *g,
plsfm->managed_flcn_cnt++; plsfm->managed_flcn_cnt++;
} else { } else {
gp106_dbg_pmu("id not managed %d\n", gp106_dbg_pmu(g, "id not managed %d\n",
ucode_img.lsf_desc->falcon_id); ucode_img.lsf_desc->falcon_id);
} }
} }
/*Free any ucode image resources if not managing this falcon*/ /*Free any ucode image resources if not managing this falcon*/
if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) { if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
gp106_dbg_pmu("pmu is not LSFM managed\n"); gp106_dbg_pmu(g, "pmu is not LSFM managed\n");
lsfm_free_ucode_img_res(g, &ucode_img); lsfm_free_ucode_img_res(g, &ucode_img);
} }
@@ -503,7 +503,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
== 0) == 0)
plsfm->managed_flcn_cnt++; plsfm->managed_flcn_cnt++;
} else { } else {
gp106_dbg_pmu("not managed %d\n", gp106_dbg_pmu(g, "not managed %d\n",
ucode_img.lsf_desc->falcon_id); ucode_img.lsf_desc->falcon_id);
lsfm_free_nonpmu_ucode_img_res(g, lsfm_free_nonpmu_ucode_img_res(g,
&ucode_img); &ucode_img);
@@ -511,7 +511,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
} }
} else { } else {
/* Consumed all available falcon objects */ /* Consumed all available falcon objects */
gp106_dbg_pmu("Done checking for ucodes %d\n", i); gp106_dbg_pmu(g, "Done checking for ucodes %d\n", i);
break; break;
} }
} }
@@ -549,19 +549,19 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
g->ops.pmu.get_wpr(g, &wpr_inf); g->ops.pmu.get_wpr(g, &wpr_inf);
addr_base += (wpr_inf.wpr_base); addr_base += (wpr_inf.wpr_base);
gp106_dbg_pmu("pmu loader cfg addrbase 0x%llx\n", addr_base); gp106_dbg_pmu(g, "pmu loader cfg addrbase 0x%llx\n", addr_base);
/*From linux*/ /*From linux*/
addr_code = addr_base + addr_code = addr_base +
desc->app_start_offset + desc->app_start_offset +
desc->app_resident_code_offset; desc->app_resident_code_offset;
gp106_dbg_pmu("app start %d app res code off %d\n", gp106_dbg_pmu(g, "app start %d app res code off %d\n",
desc->app_start_offset, desc->app_resident_code_offset); desc->app_start_offset, desc->app_resident_code_offset);
addr_data = addr_base + addr_data = addr_base +
desc->app_start_offset + desc->app_start_offset +
desc->app_resident_data_offset; desc->app_resident_data_offset;
gp106_dbg_pmu("app res data offset%d\n", gp106_dbg_pmu(g, "app res data offset%d\n",
desc->app_resident_data_offset); desc->app_resident_data_offset);
gp106_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset); gp106_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
addr_args = ((pwr_falcon_hwcfg_dmem_size_v( addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
gk20a_readl(g, pwr_falcon_hwcfg_r()))) gk20a_readl(g, pwr_falcon_hwcfg_r())))
@@ -569,7 +569,7 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
gp106_dbg_pmu("addr_args %x\n", addr_args); gp106_dbg_pmu(g, "addr_args %x\n", addr_args);
/* Populate the LOADER_CONFIG state */ /* Populate the LOADER_CONFIG state */
memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v1)); memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v1));
@@ -621,8 +621,8 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
g->ops.pmu.get_wpr(g, &wpr_inf); g->ops.pmu.get_wpr(g, &wpr_inf);
addr_base += wpr_inf.wpr_base; addr_base += wpr_inf.wpr_base;
gp106_dbg_pmu("falcon ID %x", p_lsfm->wpr_header.falcon_id); gp106_dbg_pmu(g, "falcon ID %x", p_lsfm->wpr_header.falcon_id);
gp106_dbg_pmu("gen loader cfg addrbase %llx ", addr_base); gp106_dbg_pmu(g, "gen loader cfg addrbase %llx ", addr_base);
addr_code = addr_base + addr_code = addr_base +
desc->app_start_offset + desc->app_start_offset +
desc->app_resident_code_offset; desc->app_resident_code_offset;
@@ -630,7 +630,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
desc->app_start_offset + desc->app_start_offset +
desc->app_resident_data_offset; desc->app_resident_data_offset;
gp106_dbg_pmu("gen cfg addrcode %llx data %llx load offset %x", gp106_dbg_pmu(g, "gen cfg addrcode %llx data %llx load offset %x",
addr_code, addr_data, desc->bootloader_start_offset); addr_code, addr_data, desc->bootloader_start_offset);
/* Populate the LOADER_CONFIG state */ /* Populate the LOADER_CONFIG state */
@@ -653,7 +653,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
struct nvgpu_pmu *pmu = &g->pmu; struct nvgpu_pmu *pmu = &g->pmu;
if (pnode->wpr_header.falcon_id != pmu->falcon_id) { if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
gp106_dbg_pmu("non pmu. write flcn bl gen desc\n"); gp106_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
g->ops.pmu.flcn_populate_bl_dmem_desc(g, g->ops.pmu.flcn_populate_bl_dmem_desc(g,
pnode, &pnode->bl_gen_desc_size, pnode, &pnode->bl_gen_desc_size,
pnode->wpr_header.falcon_id); pnode->wpr_header.falcon_id);
@@ -661,7 +661,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
} }
if (pmu->pmu_mode & PMU_LSFM_MANAGED) { if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
gp106_dbg_pmu("pmu write flcn bl gen desc\n"); gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n");
if (pnode->wpr_header.falcon_id == pmu->falcon_id) if (pnode->wpr_header.falcon_id == pmu->falcon_id)
return g->ops.pmu.pmu_populate_loader_cfg(g, pnode, return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
&pnode->bl_gen_desc_size); &pnode->bl_gen_desc_size);
@@ -694,46 +694,46 @@ void lsfm_init_wpr_contents(struct gk20a *g,
nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
&pnode->wpr_header, sizeof(pnode->wpr_header)); &pnode->wpr_header, sizeof(pnode->wpr_header));
gp106_dbg_pmu("wpr header"); gp106_dbg_pmu(g, "wpr header");
gp106_dbg_pmu("falconid :%d", gp106_dbg_pmu(g, "falconid :%d",
pnode->wpr_header.falcon_id); pnode->wpr_header.falcon_id);
gp106_dbg_pmu("lsb_offset :%x", gp106_dbg_pmu(g, "lsb_offset :%x",
pnode->wpr_header.lsb_offset); pnode->wpr_header.lsb_offset);
gp106_dbg_pmu("bootstrap_owner :%d", gp106_dbg_pmu(g, "bootstrap_owner :%d",
pnode->wpr_header.bootstrap_owner); pnode->wpr_header.bootstrap_owner);
gp106_dbg_pmu("lazy_bootstrap :%d", gp106_dbg_pmu(g, "lazy_bootstrap :%d",
pnode->wpr_header.lazy_bootstrap); pnode->wpr_header.lazy_bootstrap);
gp106_dbg_pmu("status :%d", gp106_dbg_pmu(g, "status :%d",
pnode->wpr_header.status); pnode->wpr_header.status);
/*Flush LSB header to memory*/ /*Flush LSB header to memory*/
nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
&pnode->lsb_header, sizeof(pnode->lsb_header)); &pnode->lsb_header, sizeof(pnode->lsb_header));
gp106_dbg_pmu("lsb header"); gp106_dbg_pmu(g, "lsb header");
gp106_dbg_pmu("ucode_off :%x", gp106_dbg_pmu(g, "ucode_off :%x",
pnode->lsb_header.ucode_off); pnode->lsb_header.ucode_off);
gp106_dbg_pmu("ucode_size :%x", gp106_dbg_pmu(g, "ucode_size :%x",
pnode->lsb_header.ucode_size); pnode->lsb_header.ucode_size);
gp106_dbg_pmu("data_size :%x", gp106_dbg_pmu(g, "data_size :%x",
pnode->lsb_header.data_size); pnode->lsb_header.data_size);
gp106_dbg_pmu("bl_code_size :%x", gp106_dbg_pmu(g, "bl_code_size :%x",
pnode->lsb_header.bl_code_size); pnode->lsb_header.bl_code_size);
gp106_dbg_pmu("bl_imem_off :%x", gp106_dbg_pmu(g, "bl_imem_off :%x",
pnode->lsb_header.bl_imem_off); pnode->lsb_header.bl_imem_off);
gp106_dbg_pmu("bl_data_off :%x", gp106_dbg_pmu(g, "bl_data_off :%x",
pnode->lsb_header.bl_data_off); pnode->lsb_header.bl_data_off);
gp106_dbg_pmu("bl_data_size :%x", gp106_dbg_pmu(g, "bl_data_size :%x",
pnode->lsb_header.bl_data_size); pnode->lsb_header.bl_data_size);
gp106_dbg_pmu("app_code_off :%x", gp106_dbg_pmu(g, "app_code_off :%x",
pnode->lsb_header.app_code_off); pnode->lsb_header.app_code_off);
gp106_dbg_pmu("app_code_size :%x", gp106_dbg_pmu(g, "app_code_size :%x",
pnode->lsb_header.app_code_size); pnode->lsb_header.app_code_size);
gp106_dbg_pmu("app_data_off :%x", gp106_dbg_pmu(g, "app_data_off :%x",
pnode->lsb_header.app_data_off); pnode->lsb_header.app_data_off);
gp106_dbg_pmu("app_data_size :%x", gp106_dbg_pmu(g, "app_data_size :%x",
pnode->lsb_header.app_data_size); pnode->lsb_header.app_data_size);
gp106_dbg_pmu("flags :%x", gp106_dbg_pmu(g, "flags :%x",
pnode->lsb_header.flags); pnode->lsb_header.flags);
/*If this falcon has a boot loader and related args, /*If this falcon has a boot loader and related args,
@@ -1049,7 +1049,7 @@ int gp106_bootstrap_hs_flcn(struct gk20a *g)
u32 *acr_ucode_data_t210_load; u32 *acr_ucode_data_t210_load;
struct wpr_carveout_info wpr_inf; struct wpr_carveout_info wpr_inf;
gp106_dbg_pmu(""); gp106_dbg_pmu(g, " ");
if (!acr_fw) { if (!acr_fw) {
/*First time init case*/ /*First time init case*/

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -56,13 +56,13 @@ static void upload_data(struct gk20a *g, u32 dst, u8 *src, u32 size, u8 port)
u32 *src_u32 = (u32 *)src; u32 *src_u32 = (u32 *)src;
u32 blk; u32 blk;
gk20a_dbg_info("upload %d bytes to %x", size, dst); nvgpu_log_info(g, "upload %d bytes to %x", size, dst);
words = DIV_ROUND_UP(size, 4); words = DIV_ROUND_UP(size, 4);
blk = dst >> 8; blk = dst >> 8;
gk20a_dbg_info("upload %d words to %x blk %d", nvgpu_log_info(g, "upload %d words to %x blk %d",
words, dst, blk); words, dst, blk);
gk20a_writel(g, pwr_falcon_dmemc_r(port), gk20a_writel(g, pwr_falcon_dmemc_r(port),
pwr_falcon_dmemc_offs_f(dst >> 2) | pwr_falcon_dmemc_offs_f(dst >> 2) |
@@ -79,7 +79,7 @@ static int gp106_bios_devinit(struct gk20a *g)
int devinit_completed; int devinit_completed;
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (nvgpu_flcn_reset(g->pmu.flcn)) { if (nvgpu_flcn_reset(g->pmu.flcn)) {
err = -ETIMEDOUT; err = -ETIMEDOUT;
@@ -128,7 +128,7 @@ static int gp106_bios_devinit(struct gk20a *g)
gk20a_get_gr_idle_timeout(g)); gk20a_get_gr_idle_timeout(g));
out: out:
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return err; return err;
} }
@@ -146,7 +146,7 @@ static int gp106_bios_preos(struct gk20a *g)
{ {
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (nvgpu_flcn_reset(g->pmu.flcn)) { if (nvgpu_flcn_reset(g->pmu.flcn)) {
err = -ETIMEDOUT; err = -ETIMEDOUT;
@@ -177,7 +177,7 @@ static int gp106_bios_preos(struct gk20a *g)
gk20a_get_gr_idle_timeout(g)); gk20a_get_gr_idle_timeout(g));
out: out:
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return err; return err;
} }
@@ -186,12 +186,12 @@ int gp106_bios_init(struct gk20a *g)
unsigned int i; unsigned int i;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (g->bios_is_init) if (g->bios_is_init)
return 0; return 0;
gk20a_dbg_info("reading bios from EEPROM"); nvgpu_log_info(g, "reading bios from EEPROM");
g->bios.size = BIOS_SIZE; g->bios.size = BIOS_SIZE;
g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE); g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE);
if (!g->bios.data) if (!g->bios.data)
@@ -218,7 +218,7 @@ int gp106_bios_init(struct gk20a *g)
goto free_firmware; goto free_firmware;
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
err = gp106_bios_devinit(g); err = gp106_bios_devinit(g);
if (err) { if (err) {

View File

@@ -36,9 +36,6 @@
#include <nvgpu/hw/gp106/hw_trim_gp106.h> #include <nvgpu/hw/gp106/hw_trim_gp106.h>
#define gk20a_dbg_clk(fmt, arg...) \
gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
static int clk_gp106_debugfs_init(struct gk20a *g); static int clk_gp106_debugfs_init(struct gk20a *g);
#endif #endif
@@ -82,7 +79,7 @@ int gp106_init_clk_support(struct gk20a *g)
struct clk_gk20a *clk = &g->clk; struct clk_gk20a *clk = &g->clk;
u32 err = 0; u32 err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = nvgpu_mutex_init(&clk->clk_mutex); err = nvgpu_mutex_init(&clk->clk_mutex);
if (err) if (err)
@@ -374,7 +371,7 @@ static int clk_gp106_debugfs_init(struct gk20a *g)
d = debugfs_create_file("gpc", S_IRUGO | S_IWUSR, clk_freq_ctlr_root, d = debugfs_create_file("gpc", S_IRUGO | S_IWUSR, clk_freq_ctlr_root,
g, &gpc_cfc_fops); g, &gpc_cfc_fops);
gk20a_dbg(gpu_dbg_info, "g=%p", g); nvgpu_log(g, gpu_dbg_info, "g=%p", g);
for (i = 0; i < g->clk.namemap_num; i++) { for (i = 0; i < g->clk.namemap_num; i++) {
if (g->clk.clk_namemap[i].is_enable) { if (g->clk.clk_namemap[i].is_enable) {

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -39,7 +39,7 @@ void gp106_fb_reset(struct gk20a *g)
do { do {
u32 w = gk20a_readl(g, fb_niso_scrub_status_r()); u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
if (fb_niso_scrub_status_flag_v(w)) { if (fb_niso_scrub_status_flag_v(w)) {
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
break; break;
} }
nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT); nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);

View File

@@ -58,7 +58,7 @@ bool gr_gp106_is_valid_class(struct gk20a *g, u32 class_num)
default: default:
break; break;
} }
gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
return valid; return valid;
} }
@@ -75,7 +75,7 @@ static void gr_gp106_set_go_idle_timeout(struct gk20a *g, u32 data)
int gr_gp106_handle_sw_method(struct gk20a *g, u32 addr, int gr_gp106_handle_sw_method(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data) u32 class_num, u32 offset, u32 data)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (class_num == PASCAL_COMPUTE_B) { if (class_num == PASCAL_COMPUTE_B) {
switch (offset << 2) { switch (offset << 2) {
@@ -177,9 +177,9 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
g->gr.max_tpc_count; g->gr.max_tpc_count;
attrib_cb_size = ALIGN(attrib_cb_size, 128); attrib_cb_size = ALIGN(attrib_cb_size, 128);
gk20a_dbg_info("gfxp context spill_size=%d", spill_size); nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
gk20a_dbg_info("gfxp context attrib_cb_size=%d", nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
attrib_cb_size); attrib_cb_size);
err = gr_gp10b_alloc_buffer(vm, err = gr_gp10b_alloc_buffer(vm,

View File

@@ -765,7 +765,7 @@ int gp106_init_hal(struct gk20a *g)
{ {
struct gpu_ops *gops = &g->ops; struct gpu_ops *gops = &g->ops;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gops->bios = gp106_ops.bios; gops->bios = gp106_ops.bios;
gops->ltc = gp106_ops.ltc; gops->ltc = gp106_ops.ltc;
@@ -828,7 +828,7 @@ int gp106_init_hal(struct gk20a *g)
g->name = "gp10x"; g->name = "gp10x";
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -2998,7 +2998,7 @@ static void mclk_seq_pmucmdhandler(struct gk20a *g, struct pmu_msg *_msg,
struct nv_pmu_seq_msg_run_script *seq_msg; struct nv_pmu_seq_msg_run_script *seq_msg;
u32 msg_status = 0; u32 msg_status = 0;
gk20a_dbg_info(""); nvgpu_log_info(g, " ");
if (status != 0) { if (status != 0) {
nvgpu_err(g, "mclk seq_script cmd aborted"); nvgpu_err(g, "mclk seq_script cmd aborted");
@@ -3041,7 +3041,7 @@ static int mclk_get_memclk_table(struct gk20a *g)
u8 *mem_entry_ptr = NULL; u8 *mem_entry_ptr = NULL;
int index; int index;
gk20a_dbg_info(""); nvgpu_log_info(g, " ");
mem_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, mem_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
g->bios.perf_token, g->bios.perf_token,
@@ -3213,7 +3213,7 @@ int gp106_mclk_init(struct gk20a *g)
u32 index; u32 index;
struct memory_config *m; struct memory_config *m;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
mclk = &g->clk_pmu.clk_mclk; mclk = &g->clk_pmu.clk_mclk;
@@ -3316,7 +3316,7 @@ int gp106_mclk_change(struct gk20a *g, u16 val)
#endif #endif
u32 speed; u32 speed;
gk20a_dbg_info(""); nvgpu_log_info(g, " ");
memset(&payload, 0, sizeof(struct pmu_payload)); memset(&payload, 0, sizeof(struct pmu_payload));
@@ -3508,7 +3508,7 @@ static int mclk_debugfs_init(struct gk20a *g)
struct dentry *gpu_root = l->debugfs; struct dentry *gpu_root = l->debugfs;
struct dentry *d; struct dentry *d;
gk20a_dbg(gpu_dbg_info, "g=%p", g); nvgpu_log(g, gpu_dbg_info, "g=%p", g);
d = debugfs_create_file( d = debugfs_create_file(
"mclk_speed_set", "mclk_speed_set",

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -98,14 +98,14 @@ u32 gp106_pmu_pg_engines_list(struct gk20a *g)
static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg, static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status) void *param, u32 handle, u32 status)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (status != 0) { if (status != 0) {
nvgpu_err(g, "PG PARAM cmd aborted"); nvgpu_err(g, "PG PARAM cmd aborted");
return; return;
} }
gp106_dbg_pmu("PG PARAM is acknowledged from PMU %x", gp106_dbg_pmu(g, "PG PARAM is acknowledged from PMU %x",
msg->msg.pg.msg_type); msg->msg.pg.msg_type);
} }
@@ -135,7 +135,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
cmd.cmd.pg.gr_init_param.featuremask = cmd.cmd.pg.gr_init_param.featuremask =
NVGPU_PMU_GR_FEATURE_MASK_RPPG; NVGPU_PMU_GR_FEATURE_MASK_RPPG;
gp106_dbg_pmu("cmd post GR PMU_PG_CMD_ID_PG_PARAM"); gp106_dbg_pmu(g, "cmd post GR PMU_PG_CMD_ID_PG_PARAM");
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_param_msg, pmu, &seq, ~0); pmu_handle_param_msg, pmu, &seq, ~0);
} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
@@ -152,7 +152,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
NVGPU_PMU_MS_FEATURE_MASK_RPPG | NVGPU_PMU_MS_FEATURE_MASK_RPPG |
NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING; NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING;
gp106_dbg_pmu("cmd post MS PMU_PG_CMD_ID_PG_PARAM"); gp106_dbg_pmu(g, "cmd post MS PMU_PG_CMD_ID_PG_PARAM");
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_param_msg, pmu, &seq, ~0); pmu_handle_param_msg, pmu, &seq, ~0);
} }
@@ -240,9 +240,9 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
struct pmu_cmd cmd; struct pmu_cmd cmd;
u32 seq; u32 seq;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gp106_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); gp106_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
if (g->pmu_lsf_pmu_wpr_init_done) { if (g->pmu_lsf_pmu_wpr_init_done) {
/* send message to load FECS falcon */ /* send message to load FECS falcon */
memset(&cmd, 0, sizeof(struct pmu_cmd)); memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -258,13 +258,13 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0; cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0;
cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0; cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0;
gp106_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", gp106_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
falconidmask); falconidmask);
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask) int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -23,8 +23,8 @@
#ifndef __PMU_GP106_H_ #ifndef __PMU_GP106_H_
#define __PMU_GP106_H_ #define __PMU_GP106_H_
#define gp106_dbg_pmu(fmt, arg...) \ #define gp106_dbg_pmu(g, fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
struct gk20a; struct gk20a;

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -31,8 +31,8 @@
#include <nvgpu/hw/gp106/hw_psec_gp106.h> #include <nvgpu/hw/gp106/hw_psec_gp106.h>
/*Defines*/ /*Defines*/
#define gm20b_dbg_pmu(fmt, arg...) \ #define gm20b_dbg_pmu(g, fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout) int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout)
{ {
@@ -56,7 +56,7 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
} }
g->acr.capabilities = gk20a_readl(g, psec_falcon_mailbox1_r()); g->acr.capabilities = gk20a_readl(g, psec_falcon_mailbox1_r());
gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
data = gk20a_readl(g, psec_falcon_mailbox0_r()); data = gk20a_readl(g, psec_falcon_mailbox0_r());
if (data) { if (data) {
nvgpu_err(g, "ACR boot failed, err %x", data); nvgpu_err(g, "ACR boot failed, err %x", data);
@@ -87,7 +87,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
u32 data = 0; u32 data = 0;
u32 dst; u32 dst;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* SEC2 Config */ /* SEC2 Config */
gk20a_writel(g, psec_falcon_itfen_r(), gk20a_writel(g, psec_falcon_itfen_r(),
@@ -123,7 +123,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
pmu_bl_gm10x_desc->bl_start_tag); pmu_bl_gm10x_desc->bl_start_tag);
gm20b_dbg_pmu("Before starting falcon with BL\n"); gm20b_dbg_pmu(g, "Before starting falcon with BL\n");
gk20a_writel(g, psec_falcon_mailbox0_r(), 0xDEADA5A5); gk20a_writel(g, psec_falcon_mailbox0_r(), 0xDEADA5A5);
View File
@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -97,7 +97,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g)
u32 active_engine_id = 0; u32 active_engine_id = 0;
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
for (engine_id = 0; engine_id < f->num_engines; engine_id++) { for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
active_engine_id = f->active_engines_list[engine_id]; active_engine_id = f->active_engines_list[engine_id];
@@ -124,7 +124,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g)
idle_filter &= ~therm_hubmmu_idle_filter_value_m(); idle_filter &= ~therm_hubmmu_idle_filter_value_m();
gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
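The same substitution applies to the function-tracing macros: gk20a_dbg_fn("") becomes nvgpu_log_fn(g, " ") on entry and nvgpu_log_fn(g, "done") on exit. A minimal sketch of the resulting pattern (the function name is hypothetical and the snippet assumes the usual nvgppu... nvgpu headers that declare nvgpu_log_fn()):

static int example_init_hw(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");		/* trace entry, tied to this GPU instance */

	/* ... hardware programming elided ... */

	nvgpu_log_fn(g, "done");	/* trace exit */
	return 0;
}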
View File
@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -204,19 +204,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
int attempts = 10, err_status = 0; int attempts = 10, err_status = 0;
g->ops.xve.get_speed(g, &current_link_speed); g->ops.xve.get_speed(g, &current_link_speed);
xv_sc_dbg(PRE_CHANGE, "Executing PCIe link change."); xv_sc_dbg(g, PRE_CHANGE, "Executing PCIe link change.");
xv_sc_dbg(PRE_CHANGE, " Current speed: %s", xv_sc_dbg(g, PRE_CHANGE, " Current speed: %s",
xve_speed_to_str(current_link_speed)); xve_speed_to_str(current_link_speed));
xv_sc_dbg(PRE_CHANGE, " Next speed: %s", xv_sc_dbg(g, PRE_CHANGE, " Next speed: %s",
xve_speed_to_str(next_link_speed)); xve_speed_to_str(next_link_speed));
xv_sc_dbg(PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x", xv_sc_dbg(g, PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x",
gk20a_readl(g, xp_pl_link_config_r(0))); gk20a_readl(g, xp_pl_link_config_r(0)));
xv_sc_dbg(DISABLE_ASPM, "Disabling ASPM..."); xv_sc_dbg(g, DISABLE_ASPM, "Disabling ASPM...");
disable_aspm_gp106(g); disable_aspm_gp106(g);
xv_sc_dbg(DISABLE_ASPM, " Done!"); xv_sc_dbg(g, DISABLE_ASPM, " Done!");
xv_sc_dbg(DL_SAFE_MODE, "Putting DL in safe mode..."); xv_sc_dbg(g, DL_SAFE_MODE, "Putting DL in safe mode...");
saved_dl_mgr = gk20a_readl(g, xp_dl_mgr_r(0)); saved_dl_mgr = gk20a_readl(g, xp_dl_mgr_r(0));
/* /*
@@ -225,12 +225,12 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
dl_mgr = saved_dl_mgr; dl_mgr = saved_dl_mgr;
dl_mgr |= xp_dl_mgr_safe_timing_f(1); dl_mgr |= xp_dl_mgr_safe_timing_f(1);
gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr); gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr);
xv_sc_dbg(DL_SAFE_MODE, " Done!"); xv_sc_dbg(g, DL_SAFE_MODE, " Done!");
nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS, nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
NVGPU_TIMER_CPU_TIMER); NVGPU_TIMER_CPU_TIMER);
xv_sc_dbg(CHECK_LINK, "Checking for link idle..."); xv_sc_dbg(g, CHECK_LINK, "Checking for link idle...");
do { do {
pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0)); pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
if ((xp_pl_link_config_ltssm_status_f(pl_link_config) == if ((xp_pl_link_config_ltssm_status_f(pl_link_config) ==
@@ -245,9 +245,9 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
goto done; goto done;
} }
xv_sc_dbg(CHECK_LINK, " Done"); xv_sc_dbg(g, CHECK_LINK, " Done");
xv_sc_dbg(LINK_SETTINGS, "Preparing next link settings"); xv_sc_dbg(g, LINK_SETTINGS, "Preparing next link settings");
pl_link_config &= ~xp_pl_link_config_max_link_rate_m(); pl_link_config &= ~xp_pl_link_config_max_link_rate_m();
switch (next_link_speed) { switch (next_link_speed) {
case GPU_XVE_SPEED_2P5: case GPU_XVE_SPEED_2P5:
@@ -297,10 +297,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
else else
BUG(); BUG();
xv_sc_dbg(LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config); xv_sc_dbg(g, LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config);
xv_sc_dbg(LINK_SETTINGS, " Done"); xv_sc_dbg(g, LINK_SETTINGS, " Done");
xv_sc_dbg(EXEC_CHANGE, "Running link speed change..."); xv_sc_dbg(g, EXEC_CHANGE, "Running link speed change...");
nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS, nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
NVGPU_TIMER_CPU_TIMER); NVGPU_TIMER_CPU_TIMER);
@@ -316,7 +316,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
goto done; goto done;
} }
xv_sc_dbg(EXEC_CHANGE, " Wrote PL_LINK_CONFIG."); xv_sc_dbg(g, EXEC_CHANGE, " Wrote PL_LINK_CONFIG.");
pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0)); pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
@@ -326,7 +326,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
xp_pl_link_config_ltssm_directive_f( xp_pl_link_config_ltssm_directive_f(
xp_pl_link_config_ltssm_directive_change_speed_v())); xp_pl_link_config_ltssm_directive_change_speed_v()));
xv_sc_dbg(EXEC_CHANGE, " Executing change (0x%08x)!", xv_sc_dbg(g, EXEC_CHANGE, " Executing change (0x%08x)!",
pl_link_config); pl_link_config);
gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config); gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);
@@ -348,11 +348,11 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
if (nvgpu_timeout_peek_expired(&timeout)) { if (nvgpu_timeout_peek_expired(&timeout)) {
err_status = -ETIMEDOUT; err_status = -ETIMEDOUT;
xv_sc_dbg(EXEC_CHANGE, " timeout; pl_link_config = 0x%x", xv_sc_dbg(g, EXEC_CHANGE, " timeout; pl_link_config = 0x%x",
pl_link_config); pl_link_config);
} }
xv_sc_dbg(EXEC_CHANGE, " Change done... Checking status"); xv_sc_dbg(g, EXEC_CHANGE, " Change done... Checking status");
if (pl_link_config == 0xffffffff) { if (pl_link_config == 0xffffffff) {
WARN(1, "GPU fell off PCI bus!?"); WARN(1, "GPU fell off PCI bus!?");
@@ -366,19 +366,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
link_control_status = link_control_status =
g->ops.xve.xve_readl(g, xve_link_control_status_r()); g->ops.xve.xve_readl(g, xve_link_control_status_r());
xv_sc_dbg(EXEC_CHANGE, " target %d vs current %d", xv_sc_dbg(g, EXEC_CHANGE, " target %d vs current %d",
link_speed_setting, link_speed_setting,
xve_link_control_status_link_speed_v(link_control_status)); xve_link_control_status_link_speed_v(link_control_status));
if (err_status == -ETIMEDOUT) { if (err_status == -ETIMEDOUT) {
xv_sc_dbg(EXEC_CHANGE, " Oops timed out?"); xv_sc_dbg(g, EXEC_CHANGE, " Oops timed out?");
break; break;
} }
} while (attempts-- > 0 && } while (attempts-- > 0 &&
link_speed_setting != link_speed_setting !=
xve_link_control_status_link_speed_v(link_control_status)); xve_link_control_status_link_speed_v(link_control_status));
xv_sc_dbg(EXEC_VERIF, "Verifying speed change..."); xv_sc_dbg(g, EXEC_VERIF, "Verifying speed change...");
/* /*
* Check that the new link speed is actually active. If we failed to * Check that the new link speed is actually active. If we failed to
@@ -390,10 +390,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
if (link_speed_setting != new_link_speed) { if (link_speed_setting != new_link_speed) {
u32 link_config = gk20a_readl(g, xp_pl_link_config_r(0)); u32 link_config = gk20a_readl(g, xp_pl_link_config_r(0));
xv_sc_dbg(EXEC_VERIF, " Current and target speeds mismatch!"); xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds mismatch!");
xv_sc_dbg(EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x", xv_sc_dbg(g, EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x",
g->ops.xve.xve_readl(g, xve_link_control_status_r())); g->ops.xve.xve_readl(g, xve_link_control_status_r()));
xv_sc_dbg(EXEC_VERIF, " Link speed is %s - should be %s", xv_sc_dbg(g, EXEC_VERIF, " Link speed is %s - should be %s",
xve_speed_to_str(new_link_speed), xve_speed_to_str(new_link_speed),
xve_speed_to_str(link_speed_setting)); xve_speed_to_str(link_speed_setting));
@@ -417,19 +417,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
gk20a_writel(g, xp_pl_link_config_r(0), link_config); gk20a_writel(g, xp_pl_link_config_r(0), link_config);
err_status = -ENODEV; err_status = -ENODEV;
} else { } else {
xv_sc_dbg(EXEC_VERIF, " Current and target speeds match!"); xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds match!");
err_status = 0; err_status = 0;
} }
done: done:
/* Restore safe timings. */ /* Restore safe timings. */
xv_sc_dbg(CLEANUP, "Restoring saved DL settings..."); xv_sc_dbg(g, CLEANUP, "Restoring saved DL settings...");
gk20a_writel(g, xp_dl_mgr_r(0), saved_dl_mgr); gk20a_writel(g, xp_dl_mgr_r(0), saved_dl_mgr);
xv_sc_dbg(CLEANUP, " Done"); xv_sc_dbg(g, CLEANUP, " Done");
xv_sc_dbg(CLEANUP, "Re-enabling ASPM settings..."); xv_sc_dbg(g, CLEANUP, "Re-enabling ASPM settings...");
enable_aspm_gp106(g); enable_aspm_gp106(g);
xv_sc_dbg(CLEANUP, " Done"); xv_sc_dbg(g, CLEANUP, " Done");
return err_status; return err_status;
} }
View File
@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -49,11 +49,11 @@ enum xv_speed_change_steps {
CLEANUP CLEANUP
}; };
#define xv_dbg(fmt, args...) \ #define xv_dbg(g, fmt, args...) \
gk20a_dbg(gpu_dbg_xv, fmt, ##args) nvgpu_log(g, gpu_dbg_xv, fmt, ##args)
#define xv_sc_dbg(step, fmt, args...) \ #define xv_sc_dbg(g, step, fmt, args...) \
xv_dbg("[%d] %15s | " fmt, step, __stringify(step), ##args) xv_dbg(g, "[%d] %15s | " fmt, step, __stringify(step), ##args)
void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val); void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val);
u32 xve_xve_readl_gp106(struct gk20a *g, u32 reg); u32 xve_xve_readl_gp106(struct gk20a *g, u32 reg);
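With g threaded through xv_dbg()/xv_sc_dbg(), a speed-change step is logged as sketched below (illustrative only; the step values such as PRE_CHANGE and CLEANUP come from the enum above, and the snippet assumes the surrounding xve_gp106 sources):

static void example_trace_speed_change(struct gk20a *g)
{
	/* old style: xv_sc_dbg(PRE_CHANGE, "Executing PCIe link change."); */
	xv_sc_dbg(g, PRE_CHANGE, "Executing PCIe link change.");
	xv_sc_dbg(g, CLEANUP, " Done");
}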
View File
@@ -1,7 +1,7 @@
/* /*
* Pascal GPU series Copy Engine. * Pascal GPU series Copy Engine.
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -30,14 +30,14 @@
static u32 ce_blockpipe_isr(struct gk20a *g, u32 fifo_intr) static u32 ce_blockpipe_isr(struct gk20a *g, u32 fifo_intr)
{ {
gk20a_dbg(gpu_dbg_intr, "ce blocking pipe interrupt\n"); nvgpu_log(g, gpu_dbg_intr, "ce blocking pipe interrupt\n");
return ce_intr_status_blockpipe_pending_f(); return ce_intr_status_blockpipe_pending_f();
} }
static u32 ce_launcherr_isr(struct gk20a *g, u32 fifo_intr) static u32 ce_launcherr_isr(struct gk20a *g, u32 fifo_intr)
{ {
gk20a_dbg(gpu_dbg_intr, "ce launch error interrupt\n"); nvgpu_log(g, gpu_dbg_intr, "ce launch error interrupt\n");
return ce_intr_status_launcherr_pending_f(); return ce_intr_status_launcherr_pending_f();
} }
@@ -47,7 +47,7 @@ void gp10b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id));
u32 clear_intr = 0; u32 clear_intr = 0;
gk20a_dbg(gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id); nvgpu_log(g, gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id);
/* clear blocking interrupts: they exhibit broken behavior */ /* clear blocking interrupts: they exhibit broken behavior */
if (ce_intr & ce_intr_status_blockpipe_pending_f()) if (ce_intr & ce_intr_status_blockpipe_pending_f())
@@ -65,7 +65,7 @@ int gp10b_ce_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
int ops = 0; int ops = 0;
u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id));
gk20a_dbg(gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id); nvgpu_log(g, gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id);
if (ce_intr & ce_intr_status_nonblockpipe_pending_f()) { if (ce_intr & ce_intr_status_nonblockpipe_pending_f()) {
gk20a_writel(g, ce_intr_status_r(inst_id), gk20a_writel(g, ce_intr_status_r(inst_id),
View File
@@ -1,7 +1,7 @@
/* /*
* GP10B GPU FECS traces * GP10B GPU FECS traces
* *
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ int gp10b_fecs_trace_flush(struct gk20a *g)
}; };
int err; int err;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
err = gr_gk20a_elpg_protected_call(g, err = gr_gk20a_elpg_protected_call(g,
gr_gk20a_submit_fecs_method_op(g, op, false)); gr_gk20a_submit_fecs_method_op(g, op, false));
View File
@@ -43,7 +43,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
{ {
u32 val; u32 val;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
val = nvgpu_mem_rd32(g, mem, val = nvgpu_mem_rd32(g, mem,
ram_in_page_dir_base_fault_replay_tex_w()); ram_in_page_dir_base_fault_replay_tex_w());
@@ -59,7 +59,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
nvgpu_mem_wr32(g, mem, nvgpu_mem_wr32(g, mem,
ram_in_page_dir_base_fault_replay_gcc_w(), val); ram_in_page_dir_base_fault_replay_gcc_w(), val);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
int channel_gp10b_commit_userd(struct channel_gk20a *c) int channel_gp10b_commit_userd(struct channel_gk20a *c)
@@ -68,12 +68,12 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c)
u32 addr_hi; u32 addr_hi;
struct gk20a *g = c->g; struct gk20a *g = c->g;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
addr_hi = u64_hi32(c->userd_iova); addr_hi = u64_hi32(c->userd_iova);
gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx",
c->chid, (u64)c->userd_iova); c->chid, (u64)c->userd_iova);
nvgpu_mem_wr32(g, &c->inst_block, nvgpu_mem_wr32(g, &c->inst_block,
@@ -98,7 +98,7 @@ int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
struct gk20a *g = c->g; struct gk20a *g = c->g;
struct nvgpu_mem *mem = &c->inst_block; struct nvgpu_mem *mem = &c->inst_block;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
@@ -167,8 +167,9 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
{ {
u32 new_syncpt = 0, old_syncpt; u32 new_syncpt = 0, old_syncpt;
u32 v; u32 v;
struct gk20a *g = c->g;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
v = nvgpu_mem_rd32(c->g, &c->inst_block, v = nvgpu_mem_rd32(c->g, &c->inst_block,
ram_fc_allowed_syncpoints_w()); ram_fc_allowed_syncpoints_w());
@@ -185,7 +186,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
v = pbdma_allowed_syncpoints_0_valid_f(1); v = pbdma_allowed_syncpoints_0_valid_f(1);
gk20a_dbg_info("Channel %d, syncpt id %d\n", nvgpu_log_info(g, "Channel %d, syncpt id %d\n",
c->chid, new_syncpt); c->chid, new_syncpt);
v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt); v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt);
@@ -197,7 +198,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
/* enable channel */ /* enable channel */
gk20a_enable_channel_tsg(c->g, c); gk20a_enable_channel_tsg(c->g, c);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
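Because nvgpu_log*() does not work without a struct gk20a pointer, functions that previously logged without one now derive it from an object they already hold, as the hunk above does with the channel (struct gk20a *g = c->g;) and as gr_gp10b_alloc_buffer() does further down with gk20a_from_vm(vm). A minimal sketch of that pattern (hypothetical function, assuming the nvgpu channel definitions):

static int example_channel_op(struct channel_gk20a *c)
{
	struct gk20a *g = c->g;	/* pipe the GPU pointer in just for logging */

	nvgpu_log_fn(g, " ");
	nvgpu_log_info(g, "channel %d", c->chid);
	nvgpu_log_fn(g, "done");
	return 0;
}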
@@ -207,7 +208,7 @@ int gp10b_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
{ {
int ret = ENGINE_INVAL_GK20A; int ret = ENGINE_INVAL_GK20A;
gk20a_dbg_info("engine type %d", engine_type); nvgpu_log_info(g, "engine type %d", engine_type);
if (engine_type == top_device_info_type_enum_graphics_v()) if (engine_type == top_device_info_type_enum_graphics_v())
ret = ENGINE_GR_GK20A; ret = ENGINE_GR_GK20A;
else if (engine_type == top_device_info_type_enum_lce_v()) { else if (engine_type == top_device_info_type_enum_lce_v()) {
@@ -229,13 +230,13 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
*pri_base = *pri_base =
(top_device_info_data_pri_base_v(table_entry) (top_device_info_data_pri_base_v(table_entry)
<< top_device_info_data_pri_base_align_v()); << top_device_info_data_pri_base_align_v());
gk20a_dbg_info("device info: pri_base: %d", *pri_base); nvgpu_log_info(g, "device info: pri_base: %d", *pri_base);
} }
if (fault_id && (top_device_info_data_fault_id_v(table_entry) == if (fault_id && (top_device_info_data_fault_id_v(table_entry) ==
top_device_info_data_fault_id_valid_v())) { top_device_info_data_fault_id_valid_v())) {
*fault_id = *fault_id =
g->ops.fifo.device_info_fault_id(table_entry); g->ops.fifo.device_info_fault_id(table_entry);
gk20a_dbg_info("device info: fault_id: %d", *fault_id); nvgpu_log_info(g, "device info: fault_id: %d", *fault_id);
} }
} else } else
nvgpu_err(g, "unknown device_info_data %d", nvgpu_err(g, "unknown device_info_data %d",
@@ -293,7 +294,7 @@ void gp10b_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
u32 fault_info; u32 fault_info;
u32 addr_lo, addr_hi; u32 addr_lo, addr_hi;
gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id);
memset(mmfault, 0, sizeof(*mmfault)); memset(mmfault, 0, sizeof(*mmfault));
View File
@@ -69,7 +69,7 @@ bool gr_gp10b_is_valid_class(struct gk20a *g, u32 class_num)
default: default:
break; break;
} }
gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
return valid; return valid;
} }
@@ -169,7 +169,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset, gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset,
0); 0);
if (lrf_ecc_sed_status) { if (lrf_ecc_sed_status) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
"Single bit error detected in SM LRF!"); "Single bit error detected in SM LRF!");
gr_gp10b_sm_lrf_ecc_overcount_war(1, gr_gp10b_sm_lrf_ecc_overcount_war(1,
@@ -181,7 +181,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
lrf_single_count_delta; lrf_single_count_delta;
} }
if (lrf_ecc_ded_status) { if (lrf_ecc_ded_status) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
"Double bit error detected in SM LRF!"); "Double bit error detected in SM LRF!");
gr_gp10b_sm_lrf_ecc_overcount_war(0, gr_gp10b_sm_lrf_ecc_overcount_war(0,
@@ -208,7 +208,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) { gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) {
u32 ecc_stats_reg_val; u32 ecc_stats_reg_val;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
"Single bit error detected in SM SHM!"); "Single bit error detected in SM SHM!");
ecc_stats_reg_val = ecc_stats_reg_val =
@@ -230,7 +230,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) { gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) {
u32 ecc_stats_reg_val; u32 ecc_stats_reg_val;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
"Double bit error detected in SM SHM!"); "Double bit error detected in SM SHM!");
ecc_stats_reg_val = ecc_stats_reg_val =
@@ -260,14 +260,14 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
u32 esr; u32 esr;
u32 ecc_stats_reg_val; u32 ecc_stats_reg_val;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
esr = gk20a_readl(g, esr = gk20a_readl(g,
gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); gr_gpc0_tpc0_tex_m_hww_esr_r() + offset);
gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr);
if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) { if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
"Single bit error detected in TEX!"); "Single bit error detected in TEX!");
/* Pipe 0 counters */ /* Pipe 0 counters */
@@ -323,7 +323,7 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f()); gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f());
} }
if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) { if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
"Double bit error detected in TEX!"); "Double bit error detected in TEX!");
/* Pipe 0 counters */ /* Pipe 0 counters */
@@ -403,7 +403,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC); u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(c); tsg = tsg_gk20a_from_ch(c);
if (!tsg) if (!tsg)
@@ -660,21 +660,21 @@ static void gr_gp10b_set_coalesce_buffer_size(struct gk20a *g, u32 data)
{ {
u32 val; u32 val;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); val = gk20a_readl(g, gr_gpcs_tc_debug0_r());
val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(),
gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data));
gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); gk20a_writel(g, gr_gpcs_tc_debug0_r(), val);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data) void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data)
{ {
u32 val; u32 val;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
val = gk20a_readl(g, gr_bes_crop_debug3_r()); val = gk20a_readl(g, gr_bes_crop_debug3_r());
if ((data & 1)) { if ((data & 1)) {
@@ -722,7 +722,7 @@ void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data)
int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr, int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data) u32 class_num, u32 offset, u32 data)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (class_num == PASCAL_COMPUTE_A) { if (class_num == PASCAL_COMPUTE_A) {
switch (offset << 2) { switch (offset << 2) {
@@ -800,7 +800,7 @@ void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (alpha_cb_size > gr->alpha_cb_size) if (alpha_cb_size > gr->alpha_cb_size)
alpha_cb_size = gr->alpha_cb_size; alpha_cb_size = gr->alpha_cb_size;
@@ -853,7 +853,7 @@ void gr_gp10b_set_circular_buffer_size(struct gk20a *g, u32 data)
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (cb_size_steady > gr->attrib_cb_size) if (cb_size_steady > gr->attrib_cb_size)
cb_size_steady = gr->attrib_cb_size; cb_size_steady = gr->attrib_cb_size;
@@ -923,7 +923,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
}; };
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = gr_gk20a_init_ctx_state(g); err = gr_gk20a_init_ctx_state(g);
if (err) if (err)
@@ -940,10 +940,10 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
} }
} }
gk20a_dbg_info("preempt image size: %u", nvgpu_log_info(g, "preempt image size: %u",
g->gr.ctx_vars.preempt_image_size); g->gr.ctx_vars.preempt_image_size);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -952,8 +952,9 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
struct nvgpu_mem *mem) struct nvgpu_mem *mem)
{ {
int err; int err;
struct gk20a *g = gk20a_from_vm(vm);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
if (err) if (err)
@@ -1029,9 +1030,9 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
g->gr.max_tpc_count; g->gr.max_tpc_count;
attrib_cb_size = ALIGN(attrib_cb_size, 128); attrib_cb_size = ALIGN(attrib_cb_size, 128);
gk20a_dbg_info("gfxp context spill_size=%d", spill_size); nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
gk20a_dbg_info("gfxp context attrib_cb_size=%d", nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
attrib_cb_size); attrib_cb_size);
err = gr_gp10b_alloc_buffer(vm, err = gr_gp10b_alloc_buffer(vm,
@@ -1112,7 +1113,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
u32 graphics_preempt_mode = 0; u32 graphics_preempt_mode = 0;
u32 compute_preempt_mode = 0; u32 compute_preempt_mode = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
if (err) if (err)
@@ -1137,7 +1138,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
goto fail_free_gk20a_ctx; goto fail_free_gk20a_ctx;
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
@@ -1215,7 +1216,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); ctxsw_prog_main_image_compute_preemption_options_control_cta_f();
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(c); tsg = tsg_gk20a_from_ch(c);
if (!tsg) if (!tsg)
@@ -1224,21 +1225,21 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
gr_ctx = &tsg->gr_ctx; gr_ctx = &tsg->gr_ctx;
if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option);
nvgpu_mem_wr(g, mem, nvgpu_mem_wr(g, mem,
ctxsw_prog_main_image_graphics_preemption_options_o(), ctxsw_prog_main_image_graphics_preemption_options_o(),
gfxp_preempt_option); gfxp_preempt_option);
} }
if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
gk20a_dbg_info("CILP: %x", cilp_preempt_option); nvgpu_log_info(g, "CILP: %x", cilp_preempt_option);
nvgpu_mem_wr(g, mem, nvgpu_mem_wr(g, mem,
ctxsw_prog_main_image_compute_preemption_options_o(), ctxsw_prog_main_image_compute_preemption_options_o(),
cilp_preempt_option); cilp_preempt_option);
} }
if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
gk20a_dbg_info("CTA: %x", cta_preempt_option); nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
nvgpu_mem_wr(g, mem, nvgpu_mem_wr(g, mem,
ctxsw_prog_main_image_compute_preemption_options_o(), ctxsw_prog_main_image_compute_preemption_options_o(),
cta_preempt_option); cta_preempt_option);
@@ -1269,7 +1270,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
(u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) <<
(32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
gk20a_dbg_info("attrib cb addr : 0x%016x", addr); nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr);
g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true);
addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >>
@@ -1315,7 +1316,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
} }
out: out:
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
} }
int gr_gp10b_dump_gr_status_regs(struct gk20a *g, int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
@@ -1475,7 +1476,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
u32 activity0, activity1, activity2, activity4; u32 activity0, activity1, activity2, activity4;
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);
@@ -1500,7 +1501,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
gr_activity_empty_or_preempted(activity4)); gr_activity_empty_or_preempted(activity4));
if (!gr_enabled || (!gr_busy && !ctxsw_active)) { if (!gr_enabled || (!gr_busy && !ctxsw_active)) {
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -1569,7 +1570,7 @@ void gr_gp10b_commit_global_bundle_cb(struct gk20a *g,
data = min_t(u32, data, g->gr.min_gpm_fifo_depth); data = min_t(u32, data, g->gr.min_gpm_fifo_depth);
gk20a_dbg_info("bundle cb token limit : %d, state limit : %d", nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d",
g->gr.bundle_cb_token_limit, data); g->gr.bundle_cb_token_limit, data);
gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg2_r(), gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg2_r(),
@@ -1626,7 +1627,7 @@ int gr_gp10b_init_fs_state(struct gk20a *g)
{ {
u32 data; u32 data;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r());
data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(),
@@ -1705,7 +1706,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
{ {
int ret = 0; int ret = 0;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
ret = gk20a_disable_channel_tsg(g, fault_ch); ret = gk20a_disable_channel_tsg(g, fault_ch);
if (ret) { if (ret) {
@@ -1721,18 +1722,18 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
return ret; return ret;
} }
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist"); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist");
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP: tsgid: 0x%x", fault_ch->tsgid); "CILP: tsgid: 0x%x", fault_ch->tsgid);
if (gk20a_is_channel_marked_as_tsg(fault_ch)) { if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true); gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true);
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP: preempted tsg"); "CILP: preempted tsg");
} else { } else {
gk20a_fifo_issue_preempt(g, fault_ch->chid, false); gk20a_fifo_issue_preempt(g, fault_ch->chid, false);
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP: preempted channel"); "CILP: preempted channel");
} }
@@ -1746,7 +1747,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
struct tsg_gk20a *tsg; struct tsg_gk20a *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
tsg = tsg_gk20a_from_ch(fault_ch); tsg = tsg_gk20a_from_ch(fault_ch);
if (!tsg) if (!tsg)
@@ -1755,7 +1756,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
gr_ctx = &tsg->gr_ctx; gr_ctx = &tsg->gr_ctx;
if (gr_ctx->cilp_preempt_pending) { if (gr_ctx->cilp_preempt_pending) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP is already pending for chid %d", "CILP is already pending for chid %d",
fault_ch->chid); fault_ch->chid);
return 0; return 0;
@@ -1763,7 +1764,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
/* get ctx_id from the ucode image */ /* get ctx_id from the ucode image */
if (!gr_ctx->ctx_id_valid) { if (!gr_ctx->ctx_id_valid) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP: looking up ctx id"); "CILP: looking up ctx id");
ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id); ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id);
if (ret) { if (ret) {
@@ -1773,7 +1774,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
gr_ctx->ctx_id_valid = true; gr_ctx->ctx_id_valid = true;
} }
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP: ctx id is 0x%x", gr_ctx->ctx_id); "CILP: ctx id is 0x%x", gr_ctx->ctx_id);
/* send ucode method to set ctxsw interrupt */ /* send ucode method to set ctxsw interrupt */
@@ -1795,10 +1796,10 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
return ret; return ret;
} }
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP: enabled ctxsw completion interrupt"); "CILP: enabled ctxsw completion interrupt");
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP: disabling channel %d", "CILP: disabling channel %d",
fault_ch->chid); fault_ch->chid);
@@ -1826,7 +1827,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
struct tsg_gk20a *tsg; struct tsg_gk20a *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
tsg = tsg_gk20a_from_ch(fault_ch); tsg = tsg_gk20a_from_ch(fault_ch);
if (!tsg) if (!tsg)
@@ -1837,7 +1838,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
/* The ucode is self-clearing, so all we need to do here is /* The ucode is self-clearing, so all we need to do here is
to clear cilp_preempt_pending. */ to clear cilp_preempt_pending. */
if (!gr_ctx->cilp_preempt_pending) { if (!gr_ctx->cilp_preempt_pending) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP is already cleared for chid %d\n", "CILP is already cleared for chid %d\n",
fault_ch->chid); fault_ch->chid);
return 0; return 0;
@@ -1878,7 +1879,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
NVGPU_PREEMPTION_MODE_COMPUTE_CILP); NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
} }
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n", nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n",
gpc, tpc, global_esr); gpc, tpc, global_esr);
if (cilp_enabled && sm_debugger_attached) { if (cilp_enabled && sm_debugger_attached) {
@@ -1900,19 +1901,19 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
if (warp_esr != 0 || (global_esr & global_mask) != 0) { if (warp_esr != 0 || (global_esr & global_mask) != 0) {
*ignore_debugger = true; *ignore_debugger = true;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
"CILP: starting wait for LOCKED_DOWN on gpc %d tpc %d\n", "CILP: starting wait for LOCKED_DOWN on gpc %d tpc %d\n",
gpc, tpc); gpc, tpc);
if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
"CILP: Broadcasting STOP_TRIGGER from gpc %d tpc %d\n", "CILP: Broadcasting STOP_TRIGGER from gpc %d tpc %d\n",
gpc, tpc); gpc, tpc);
g->ops.gr.suspend_all_sms(g, global_mask, false); g->ops.gr.suspend_all_sms(g, global_mask, false);
gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch);
} else { } else {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
"CILP: STOP_TRIGGER from gpc %d tpc %d\n", "CILP: STOP_TRIGGER from gpc %d tpc %d\n",
gpc, tpc); gpc, tpc);
g->ops.gr.suspend_single_sm(g, gpc, tpc, sm, global_mask, true); g->ops.gr.suspend_single_sm(g, gpc, tpc, sm, global_mask, true);
@@ -1923,11 +1924,11 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
gpc, tpc, sm); gpc, tpc, sm);
g->ops.gr.clear_sm_hww(g, g->ops.gr.clear_sm_hww(g,
gpc, tpc, sm, global_esr_copy); gpc, tpc, sm, global_esr_copy);
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
"CILP: HWWs cleared for gpc %d tpc %d\n", "CILP: HWWs cleared for gpc %d tpc %d\n",
gpc, tpc); gpc, tpc);
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
if (ret) { if (ret) {
nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
@@ -1936,7 +1937,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset);
if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) { if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
"CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n", "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n",
gpc, tpc); gpc, tpc);
dbgr_control0 = set_field(dbgr_control0, dbgr_control0 = set_field(dbgr_control0,
@@ -1945,13 +1946,13 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0);
} }
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
"CILP: resume for gpc %d tpc %d\n", "CILP: resume for gpc %d tpc %d\n",
gpc, tpc); gpc, tpc);
g->ops.gr.resume_single_sm(g, gpc, tpc, sm); g->ops.gr.resume_single_sm(g, gpc, tpc, sm);
*ignore_debugger = true; *ignore_debugger = true;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc);
} }
*early_exit = true; *early_exit = true;
@@ -1999,14 +2000,14 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
int ret = 0; int ret = 0;
struct tsg_gk20a *tsg; struct tsg_gk20a *tsg;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
/* /*
* INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR) * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR)
* indicates that a CILP ctxsw save has finished * indicates that a CILP ctxsw save has finished
*/ */
if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) { if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) {
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP: ctxsw save completed!\n"); "CILP: ctxsw save completed!\n");
/* now clear the interrupt */ /* now clear the interrupt */
@@ -2162,7 +2163,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP preempt pending, waiting %lu msecs for preemption", "CILP preempt pending, waiting %lu msecs for preemption",
gk20a_get_gr_idle_timeout(g)); gk20a_get_gr_idle_timeout(g));
@@ -2285,7 +2286,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
if (g->ops.gr.set_ctxsw_preemption_mode) { if (g->ops.gr.set_ctxsw_preemption_mode) {
gk20a_dbg(gpu_dbg_sched, "chid=%d tsgid=%d pid=%d " nvgpu_log(g, gpu_dbg_sched, "chid=%d tsgid=%d pid=%d "
"graphics_preempt=%d compute_preempt=%d", "graphics_preempt=%d compute_preempt=%d",
ch->chid, ch->chid,
ch->tsgid, ch->tsgid,
View File
@@ -41,7 +41,7 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g)
u32 tmp; u32 tmp;
int ret; int ret;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r()); tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r());
@@ -49,9 +49,9 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g)
ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 * ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 *
ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp); ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp);
gk20a_dbg(gpu_dbg_info, "L2 size: %d\n", ret); nvgpu_log(g, gpu_dbg_info, "L2 size: %d\n", ret);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return ret; return ret;
} }
@@ -83,7 +83,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (max_comptag_lines == 0U) if (max_comptag_lines == 0U)
return 0; return 0;
@@ -109,11 +109,11 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
/* must be a multiple of 64KB */ /* must be a multiple of 64KB */
compbit_backing_size = roundup(compbit_backing_size, 64*1024); compbit_backing_size = roundup(compbit_backing_size, 64*1024);
gk20a_dbg_info("compbit backing store size : %d", nvgpu_log_info(g, "compbit backing store size : %d",
compbit_backing_size); compbit_backing_size);
gk20a_dbg_info("max comptag lines : %d", nvgpu_log_info(g, "max comptag lines : %d",
max_comptag_lines); max_comptag_lines);
gk20a_dbg_info("gobs_per_comptagline_per_slice: %d", nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d",
gobs_per_comptagline_per_slice); gobs_per_comptagline_per_slice);
err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
View File
@@ -87,7 +87,7 @@ void mc_gp10b_isr_stall(struct gk20a *g)
mc_intr_0 = gk20a_readl(g, mc_intr_r(0)); mc_intr_0 = gk20a_readl(g, mc_intr_r(0));
gk20a_dbg(gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); nvgpu_log(g, gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0);
for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
active_engine_id = g->fifo.active_engines_list[engine_id_idx]; active_engine_id = g->fifo.active_engines_list[engine_id_idx];
@@ -126,7 +126,7 @@ void mc_gp10b_isr_stall(struct gk20a *g)
g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0))
g->ops.nvlink.isr(g); g->ops.nvlink.isr(g);
gk20a_dbg(gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0);
} }
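nvgpu_log() keeps the same per-category verbosity masks as gk20a_dbg(); only the GPU pointer is new. Interrupt paths pass gpu_dbg_intr, and masks can still be OR-ed together as the gr_gp10b hunks above do with gpu_dbg_fn | gpu_dbg_intr. A small, hypothetical sketch of the ISR-style usage (assumes the nvgpu headers providing nvgpu_log() and the gpu_dbg_* flags):

static void example_isr_trace(struct gk20a *g, u32 intr_status)
{
	nvgpu_log(g, gpu_dbg_intr, "stall intr 0x%08x", intr_status);
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "handled on this GPU instance");
}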
View File
@@ -1,7 +1,7 @@
/* /*
* GP10B MMU * GP10B MMU
* *
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -53,7 +53,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g)
struct nvgpu_mem *inst_block = &mm->bar1.inst_block; struct nvgpu_mem *inst_block = &mm->bar1.inst_block;
int err = 0; int err = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
g->ops.fb.set_mmu_page_size(g); g->ops.fb.set_mmu_page_size(g);
@@ -73,7 +73,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g)
err = gp10b_replayable_pagefault_buffer_init(g); err = gp10b_replayable_pagefault_buffer_init(g);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return err; return err;
} }
@@ -87,7 +87,7 @@ int gp10b_init_bar2_vm(struct gk20a *g)
/* BAR2 aperture size is 32MB */ /* BAR2 aperture size is 32MB */
mm->bar2.aperture_size = 32 << 20; mm->bar2.aperture_size = 32 << 20;
gk20a_dbg_info("bar2 vm size = 0x%x", mm->bar2.aperture_size); nvgpu_log_info(g, "bar2 vm size = 0x%x", mm->bar2.aperture_size);
mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K, mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
mm->bar2.aperture_size - SZ_4K, mm->bar2.aperture_size - SZ_4K,
@@ -115,12 +115,12 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g)
struct nvgpu_mem *inst_block = &mm->bar2.inst_block; struct nvgpu_mem *inst_block = &mm->bar2.inst_block;
u64 inst_pa = nvgpu_inst_block_addr(g, inst_block); u64 inst_pa = nvgpu_inst_block_addr(g, inst_block);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
g->ops.fb.set_mmu_page_size(g); g->ops.fb.set_mmu_page_size(g);
inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v()); inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v());
gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa); nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", (u32)inst_pa);
gk20a_writel(g, bus_bar2_block_r(), gk20a_writel(g, bus_bar2_block_r(),
nvgpu_aperture_mask(g, inst_block, nvgpu_aperture_mask(g, inst_block,
@@ -130,7 +130,7 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g)
bus_bar2_block_mode_virtual_f() | bus_bar2_block_mode_virtual_f() |
bus_bar2_block_ptr_f(inst_pa)); bus_bar2_block_ptr_f(inst_pa));
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -433,7 +433,7 @@ void gp10b_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
u32 pdb_addr_hi = u64_hi32(pdb_addr); u32 pdb_addr_hi = u64_hi32(pdb_addr);
gk20a_dbg_info("pde pa=0x%llx", pdb_addr); nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr);
nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
nvgpu_aperture_mask(g, vm->pdb.mem, nvgpu_aperture_mask(g, vm->pdb.mem,
View File
@@ -37,8 +37,8 @@
#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h> #include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
#include <nvgpu/hw/gp10b/hw_fuse_gp10b.h> #include <nvgpu/hw/gp10b/hw_fuse_gp10b.h>
#define gp10b_dbg_pmu(fmt, arg...) \ #define gp10b_dbg_pmu(g, fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
/* PROD settings for ELPG sequencing registers*/ /* PROD settings for ELPG sequencing registers*/
static struct pg_init_sequence_list _pginitseq_gp10b[] = { static struct pg_init_sequence_list _pginitseq_gp10b[] = {
@@ -147,9 +147,9 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
struct pmu_cmd cmd; struct pmu_cmd cmd;
u32 seq; u32 seq;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gp10b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); gp10b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
if (g->pmu_lsf_pmu_wpr_init_done) { if (g->pmu_lsf_pmu_wpr_init_done) {
/* send message to load FECS falcon */ /* send message to load FECS falcon */
memset(&cmd, 0, sizeof(struct pmu_cmd)); memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -164,13 +164,13 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
cmd.cmd.acr.boot_falcons.usevamask = 0; cmd.cmd.acr.boot_falcons.usevamask = 0;
cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0; cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0;
cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0; cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0;
gp10b_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", gp10b_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
falconidmask); falconidmask);
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return; return;
} }
@@ -209,7 +209,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status) void *param, u32 handle, u32 status)
{ {
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (status != 0) { if (status != 0) {
nvgpu_err(g, "GR PARAM cmd aborted"); nvgpu_err(g, "GR PARAM cmd aborted");
@@ -217,7 +217,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
return; return;
} }
gp10b_dbg_pmu("GR PARAM is acknowledged from PMU %x \n", gp10b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x \n",
msg->msg.pg.msg_type); msg->msg.pg.msg_type);
return; return;
@@ -243,7 +243,7 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor = cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor =
g->ldiv_slowdown_factor; g->ldiv_slowdown_factor;
gp10b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM "); gp10b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM ");
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_gr_param_msg, pmu, &seq, ~0); pmu_handle_gr_param_msg, pmu, &seq, ~0);
@@ -276,7 +276,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g)
u32 reg_writes; u32 reg_writes;
u32 index; u32 index;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (g->elpg_enabled) { if (g->elpg_enabled) {
reg_writes = ((sizeof(_pginitseq_gp10b) / reg_writes = ((sizeof(_pginitseq_gp10b) /
@@ -288,7 +288,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g)
} }
} }
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return ret; return ret;
} }
@@ -305,7 +305,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g)
struct nvgpu_pmu *pmu = &g->pmu; struct nvgpu_pmu *pmu = &g->pmu;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_mutex_acquire(&pmu->isr_mutex);
nvgpu_flcn_reset(pmu->flcn); nvgpu_flcn_reset(pmu->flcn);
@@ -333,7 +333,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g)
if (err) if (err)
return err; return err;
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
View File
@@ -1,7 +1,7 @@
/* /*
* GP10B RPFB * GP10B RPFB
* *
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -42,7 +42,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
size_t rbfb_size = NV_UVM_FAULT_BUF_SIZE * size_t rbfb_size = NV_UVM_FAULT_BUF_SIZE *
fifo_replay_fault_buffer_size_hw_entries_v(); fifo_replay_fault_buffer_size_hw_entries_v();
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (!g->mm.bar2_desc.gpu_va) { if (!g->mm.bar2_desc.gpu_va) {
err = nvgpu_dma_alloc_map_sys(vm, rbfb_size, err = nvgpu_dma_alloc_map_sys(vm, rbfb_size,
@@ -60,7 +60,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
gk20a_writel(g, fifo_replay_fault_buffer_lo_r(), gk20a_writel(g, fifo_replay_fault_buffer_lo_r(),
fifo_replay_fault_buffer_lo_base_f(addr_lo) | fifo_replay_fault_buffer_lo_base_f(addr_lo) |
fifo_replay_fault_buffer_lo_enable_true_v()); fifo_replay_fault_buffer_lo_enable_true_v());
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -75,14 +75,14 @@ u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g)
{ {
u32 get_idx = 0; u32 get_idx = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r()); get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r());
if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
nvgpu_err(g, "Error in replayable fault buffer"); nvgpu_err(g, "Error in replayable fault buffer");
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return get_idx; return get_idx;
} }
@@ -90,13 +90,13 @@ u32 gp10b_replayable_pagefault_buffer_put_index(struct gk20a *g)
{ {
u32 put_idx = 0; u32 put_idx = 0;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r()); put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r());
if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
nvgpu_err(g, "Error in UVM"); nvgpu_err(g, "Error in UVM");
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return put_idx; return put_idx;
} }
View File
@@ -1,7 +1,7 @@
/* /*
* GP10B Therm * GP10B Therm
* *
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ int gp10b_init_therm_setup_hw(struct gk20a *g)
{ {
u32 v; u32 v;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* program NV_THERM registers */ /* program NV_THERM registers */
gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
@@ -96,7 +96,7 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g)
u32 active_engine_id = 0; u32 active_engine_id = 0;
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
for (engine_id = 0; engine_id < f->num_engines; engine_id++) { for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
active_engine_id = f->active_engines_list[engine_id]; active_engine_id = f->active_engines_list[engine_id];
@@ -130,6 +130,6 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g)
idle_filter &= ~therm_hubmmu_idle_filter_value_m(); idle_filter &= ~therm_hubmmu_idle_filter_value_m();
gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
View File
@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -43,8 +43,8 @@
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h> #include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
/*Defines*/ /*Defines*/
#define gv11b_dbg_pmu(fmt, arg...) \ #define gv11b_dbg_pmu(g, fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value) static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
{ {
@@ -60,7 +60,7 @@ int gv11b_alloc_blob_space(struct gk20a *g,
{ {
int err; int err;
gv11b_dbg_pmu("alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS"); gv11b_dbg_pmu(g, "alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS");
err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS, err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
size, mem); size, mem);
@@ -87,10 +87,10 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
start = nvgpu_mem_get_addr(g, &acr->ucode_blob); start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
size = acr->ucode_blob.size; size = acr->ucode_blob.size;
gv11b_dbg_pmu("acr ucode blob start %llx\n", start); gv11b_dbg_pmu(g, "acr ucode blob start %llx\n", start);
gv11b_dbg_pmu("acr ucode blob size %x\n", size); gv11b_dbg_pmu(g, "acr ucode blob size %x\n", size);
gv11b_dbg_pmu(""); gv11b_dbg_pmu(g, " ");
if (!acr_fw) { if (!acr_fw) {
/*First time init case*/ /*First time init case*/
@@ -110,17 +110,17 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
acr->fw_hdr->hdr_offset); acr->fw_hdr->hdr_offset);
img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256); img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);
gv11b_dbg_pmu("sig dbg offset %u\n", gv11b_dbg_pmu(g, "sig dbg offset %u\n",
acr->fw_hdr->sig_dbg_offset); acr->fw_hdr->sig_dbg_offset);
gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size); gv11b_dbg_pmu(g, "sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
gv11b_dbg_pmu("sig prod offset %u\n", gv11b_dbg_pmu(g, "sig prod offset %u\n",
acr->fw_hdr->sig_prod_offset); acr->fw_hdr->sig_prod_offset);
gv11b_dbg_pmu("sig prod size %u\n", gv11b_dbg_pmu(g, "sig prod size %u\n",
acr->fw_hdr->sig_prod_size); acr->fw_hdr->sig_prod_size);
gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc); gv11b_dbg_pmu(g, "patch loc %u\n", acr->fw_hdr->patch_loc);
gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig); gv11b_dbg_pmu(g, "patch sig %u\n", acr->fw_hdr->patch_sig);
gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset); gv11b_dbg_pmu(g, "header offset %u\n", acr->fw_hdr->hdr_offset);
gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size); gv11b_dbg_pmu(g, "header size %u\n", acr->fw_hdr->hdr_size);
/* Lets patch the signatures first.. */ /* Lets patch the signatures first.. */
if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load, if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
@@ -144,7 +144,7 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
} }
for (index = 0; index < 9; index++) for (index = 0; index < 9; index++)
gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n", gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n",
acr_ucode_header_t210_load[index]); acr_ucode_header_t210_load[index]);
acr_dmem = (u64 *) acr_dmem = (u64 *)
@@ -212,7 +212,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc; struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
u32 dst; u32 dst;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_writel(g, pwr_falcon_itfen_r(),
gk20a_readl(g, pwr_falcon_itfen_r()) | gk20a_readl(g, pwr_falcon_itfen_r()) |
@@ -237,7 +237,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
pmu_bl_gm10x_desc->bl_start_tag); pmu_bl_gm10x_desc->bl_start_tag);
gv11b_dbg_pmu("Before starting falcon with BL\n"); gv11b_dbg_pmu(g, "Before starting falcon with BL\n");
virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8; virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;
@@ -281,7 +281,7 @@ int gv11b_init_pmu_setup_hw1(struct gk20a *g,
struct nvgpu_pmu *pmu = &g->pmu; struct nvgpu_pmu *pmu = &g->pmu;
int err; int err;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_mutex_acquire(&pmu->isr_mutex);
nvgpu_flcn_reset(pmu->flcn); nvgpu_flcn_reset(pmu->flcn);
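Note that the reworked gv11b_dbg_pmu() macro above now takes the gk20a pointer as its first argument, which is why every call site in this file gains a leading g. A minimal usage sketch, assuming the macro definition introduced above (the helper function itself is hypothetical):

#define gv11b_dbg_pmu(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

static void report_acr_blob(struct gk20a *g, u64 start, u32 size)
{
	gv11b_dbg_pmu(g, "acr ucode blob start %llx", start);
	gv11b_dbg_pmu(g, "acr ucode blob size %x", size);
}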

View File

@@ -1,7 +1,7 @@
/* /*
* GV11B Cycle stats snapshots support * GV11B Cycle stats snapshots support
* *
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -148,7 +148,7 @@ int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch,
perf_pmasys_mem_block_target_lfb_f())); perf_pmasys_mem_block_target_lfb_f()));
gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n"); nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n");
return 0; return 0;
@@ -186,7 +186,7 @@ void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr)
memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
data->hw_snapshot = NULL; data->hw_snapshot = NULL;
gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n"); nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n");
} }
int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending,

View File

@@ -57,7 +57,7 @@ int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
u32 inst_pa_page; u32 inst_pa_page;
int err; int err;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
err = gk20a_busy(g); err = gk20a_busy(g);
if (err) { if (err) {
nvgpu_err(g, "failed to poweron"); nvgpu_err(g, "failed to poweron");
@@ -100,7 +100,7 @@ int gv11b_perfbuf_disable_locked(struct gk20a *g)
{ {
int err; int err;
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
err = gk20a_busy(g); err = gk20a_busy(g);
if (err) { if (err) {
nvgpu_err(g, "failed to poweron"); nvgpu_err(g, "failed to poweron");

View File

@@ -1427,7 +1427,7 @@ static int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
u32 reg_val; u32 reg_val;
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_mutex_acquire(&g->mm.tlb_lock); nvgpu_mutex_acquire(&g->mm.tlb_lock);

View File

@@ -60,7 +60,7 @@
void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist) void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
{ {
struct gk20a *g = tsg->g;
u32 runlist_entry_0 = ram_rl_entry_type_tsg_v(); u32 runlist_entry_0 = ram_rl_entry_type_tsg_v();
if (tsg->timeslice_timeout) if (tsg->timeslice_timeout)
@@ -79,7 +79,7 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid); runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid);
runlist[3] = 0; runlist[3] = 0;
gk20a_dbg_info("gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n", nvgpu_log_info(g, "gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n",
runlist[0], runlist[1], runlist[2], runlist[3]); runlist[0], runlist[1], runlist[2], runlist[3]);
} }
@@ -119,7 +119,7 @@ void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist)
ram_rl_entry_chid_f(c->chid); ram_rl_entry_chid_f(c->chid);
runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi); runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi);
gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n", nvgpu_log_info(g, "gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
runlist[0], runlist[1], runlist[2], runlist[3]); runlist[0], runlist[1], runlist[2], runlist[3]);
} }
@@ -139,7 +139,7 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
struct nvgpu_mem *mem = &c->inst_block; struct nvgpu_mem *mem = &c->inst_block;
u32 data; u32 data;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
@@ -211,10 +211,11 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
void gv11b_ring_channel_doorbell(struct channel_gk20a *c) void gv11b_ring_channel_doorbell(struct channel_gk20a *c)
{ {
struct fifo_gk20a *f = &c->g->fifo; struct gk20a *g = c->g;
struct fifo_gk20a *f = &g->fifo;
u32 hw_chid = f->channel_base + c->chid; u32 hw_chid = f->channel_base + c->chid;
gk20a_dbg_info("channel ring door bell %d\n", c->chid); nvgpu_log_info(g, "channel ring door bell %d\n", c->chid);
nvgpu_usermode_writel(c->g, usermode_notify_channel_pending_r(), nvgpu_usermode_writel(c->g, usermode_notify_channel_pending_r(),
usermode_notify_channel_pending_id_f(hw_chid)); usermode_notify_channel_pending_id_f(hw_chid));
@@ -256,7 +257,7 @@ void channel_gv11b_unbind(struct channel_gk20a *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) { if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
gk20a_writel(g, ccsr_channel_inst_r(ch->chid), gk20a_writel(g, ccsr_channel_inst_r(ch->chid),
@@ -729,7 +730,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id, func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id,
timeout_rc_type); timeout_rc_type);
if (func_ret != 0) { if (func_ret != 0) {
gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id); nvgpu_log_info(g, "preempt timeout pbdma %d", pbdma_id);
ret |= func_ret; ret |= func_ret;
} }
} }
@@ -743,7 +744,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
timeout_rc_type); timeout_rc_type);
if (func_ret != 0) { if (func_ret != 0) {
gk20a_dbg_info("preempt timeout engine %d", act_eng_id); nvgpu_log_info(g, "preempt timeout engine %d", act_eng_id);
ret |= func_ret; ret |= func_ret;
} }
} }
@@ -812,10 +813,10 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
u32 mutex_ret = 0; u32 mutex_ret = 0;
u32 runlist_id; u32 runlist_id;
gk20a_dbg_fn("%d", tsgid); nvgpu_log_fn(g, "%d", tsgid);
runlist_id = f->tsg[tsgid].runlist_id; runlist_id = f->tsg[tsgid].runlist_id;
gk20a_dbg_fn("runlist_id %d", runlist_id); nvgpu_log_fn(g, "runlist_id %d", runlist_id);
nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock); nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
@@ -839,7 +840,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
u32 mutex_ret = 0; u32 mutex_ret = 0;
u32 runlist_id; u32 runlist_id;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) { for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id)) if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
@@ -910,11 +911,11 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
return -EINVAL; return -EINVAL;
if (runlist_id >= g->fifo.max_runlists) { if (runlist_id >= g->fifo.max_runlists) {
gk20a_dbg_info("runlist_id = %d", runlist_id); nvgpu_log_info(g, "runlist_id = %d", runlist_id);
return -EINVAL; return -EINVAL;
} }
gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id); nvgpu_log_fn(g, "preempt id = %d, runlist_id = %d", id, runlist_id);
nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock); nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
@@ -1155,7 +1156,7 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
unsigned int i; unsigned int i;
u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* enable pmc pfifo */ /* enable pmc pfifo */
g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());
@@ -1208,11 +1209,11 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF); gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i)); intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i));
gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, intr_stall); nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall); gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall);
} }
@@ -1246,12 +1247,12 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
/* clear and enable pfifo interrupt */ /* clear and enable pfifo interrupt */
gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
mask = gv11b_fifo_intr_0_en_mask(g); mask = gv11b_fifo_intr_0_en_mask(g);
gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
gk20a_writel(g, fifo_intr_en_0_r(), mask); gk20a_writel(g, fifo_intr_en_0_r(), mask);
gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
gk20a_dbg_fn("done"); nvgpu_log_fn(g, "done");
return 0; return 0;
} }
@@ -1350,7 +1351,7 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info); tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
} }
gk20a_dbg_info("ctxsw timeout info: tsgid = %d", tsgid); nvgpu_log_info(g, "ctxsw timeout info: tsgid = %d", tsgid);
/* /*
* STATUS indicates whether the context request ack was eventually * STATUS indicates whether the context request ack was eventually
@@ -1391,14 +1392,14 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
if (*info_status == if (*info_status ==
fifo_intr_ctxsw_timeout_info_status_ack_received_v()) { fifo_intr_ctxsw_timeout_info_status_ack_received_v()) {
gk20a_dbg_info("ctxsw timeout info : ack received"); nvgpu_log_info(g, "ctxsw timeout info : ack received");
/* no need to recover */ /* no need to recover */
tsgid = FIFO_INVAL_TSG_ID; tsgid = FIFO_INVAL_TSG_ID;
} else if (*info_status == } else if (*info_status ==
fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) { fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) {
gk20a_dbg_info("ctxsw timeout info : dropped timeout"); nvgpu_log_info(g, "ctxsw timeout info : dropped timeout");
/* no need to recover */ /* no need to recover */
tsgid = FIFO_INVAL_TSG_ID; tsgid = FIFO_INVAL_TSG_ID;
@@ -1429,7 +1430,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r()); timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r());
timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val); timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val);
gk20a_dbg_info("eng ctxsw timeout period = 0x%x", timeout_val); nvgpu_log_info(g, "eng ctxsw timeout period = 0x%x", timeout_val);
for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) { for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
active_eng_id = g->fifo.active_engines_list[engine_id]; active_eng_id = g->fifo.active_engines_list[engine_id];
@@ -1469,7 +1470,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
true, true, verbose, true, true, verbose,
RC_TYPE_CTXSW_TIMEOUT); RC_TYPE_CTXSW_TIMEOUT);
} else { } else {
gk20a_dbg_info( nvgpu_log_info(g,
"fifo is waiting for ctx switch: " "fifo is waiting for ctx switch: "
"for %d ms, %s=%d", ms, "tsg", tsgid); "for %d ms, %s=%d", ms, "tsg", tsgid);
} }
@@ -1490,7 +1491,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
pbdma_intr_0, handled, error_notifier); pbdma_intr_0, handled, error_notifier);
if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) {
gk20a_dbg(gpu_dbg_intr, "clear faulted error on pbdma id %d", nvgpu_log(g, gpu_dbg_intr, "clear faulted error on pbdma id %d",
pbdma_id); pbdma_id);
gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
*handled |= pbdma_intr_0_clear_faulted_error_pending_f(); *handled |= pbdma_intr_0_clear_faulted_error_pending_f();
@@ -1498,7 +1499,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
} }
if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) {
gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d", nvgpu_log(g, gpu_dbg_intr, "eng reset intr on pbdma id %d",
pbdma_id); pbdma_id);
*handled |= pbdma_intr_0_eng_reset_pending_f(); *handled |= pbdma_intr_0_eng_reset_pending_f();
rc_type = RC_TYPE_PBDMA_FAULT; rc_type = RC_TYPE_PBDMA_FAULT;
@@ -1545,7 +1546,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g,
return RC_TYPE_NO_RC; return RC_TYPE_NO_RC;
if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) { if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) {
gk20a_dbg(gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d", nvgpu_log(g, gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d",
pbdma_id); pbdma_id);
nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ", nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ",
pbdma_id, pbdma_intr_1); pbdma_id, pbdma_intr_1);
@@ -1753,7 +1754,7 @@ void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g,
u64 gpu_va = gpu_va_base + u64 gpu_va = gpu_va_base +
nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id); nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id);
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
off = cmd->off + off; off = cmd->off + off;
@@ -1792,7 +1793,7 @@ void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g,
{ {
u32 off = cmd->off; u32 off = cmd->off;
gk20a_dbg_fn(""); nvgpu_log_fn(g, " ");
/* semaphore_a */ /* semaphore_a */
nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004); nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004);
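Where a converted function has no gk20a pointer in scope, the hunks above derive one from a structure that is already available (tsg->g or c->g) rather than adding a parameter just for logging. A rough sketch of that pattern, using a hypothetical function name and only calls that appear in the hunks above:

static void example_ring_doorbell(struct channel_gk20a *c)
{
	struct gk20a *g = c->g;			/* derive g from the channel */
	struct fifo_gk20a *f = &g->fifo;
	u32 hw_chid = f->channel_base + c->chid;

	nvgpu_log_info(g, "channel ring door bell %d", c->chid);
	nvgpu_usermode_writel(g, usermode_notify_channel_pending_r(),
			usermode_notify_channel_pending_id_f(hw_chid));
}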

Some files were not shown because too many files have changed in this diff.