mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Updated RPC to support copyback & callback
- Updated nvgpu_pmu_rpc_execute() with a new parameter "bool is_copy_back" to
  support copying the processed RPC request back from the PMU to the caller
  when the parameter is true; in that case the call blocks until an ACK is
  received from the PMU for the requested RPC.
- Added "struct rpc_handler_payload" to hold the info required by the RPC
  handler, such as the RPC buffer address, and to free the memory if copy
  back is not requested.
- Added define PMU_RPC_EXECUTE_CPB to support copying the processed RPC
  request back from the PMU to the caller.
- Updated RPC callback handler support: memory is allocated and the default
  handler assigned if no callback is requested, otherwise the callback
  parameters are used for the request to the PMU.
- Added define PMU_RPC_EXECUTE_CB to support callbacks.
- Updated pmu_wait_message_cond() to restrict the condition check to 8-bit
  instead of 32-bit.

Change-Id: Ic05289b074954979fd0102daf5ab806bf1f07b62
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1664962
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 1d986dc33e
Commit: 76ad9e8366
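A minimal usage sketch of the updated interface, assuming an RPC request struct
`rpc` with the usual hdr/scratch members (header setup and error handling
omitted):

/* Fire-and-forget: the default pmu_rpc_handler owns and frees the internal copy. */
status = nvgpu_pmu_rpc_execute(pmu, &rpc.hdr,
		sizeof(rpc) - sizeof(rpc.scratch), 0,
		NULL, NULL, false);

/* Blocking copy-back: waits for the PMU ACK, then the processed request is
 * copied back into rpc so the caller can read the reply fields directly. */
status = nvgpu_pmu_rpc_execute(pmu, &rpc.hdr,
		sizeof(rpc) - sizeof(rpc.scratch), 0,
		NULL, NULL, true);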
@@ -977,7 +977,7 @@ static u32 boardobjgrp_pmucmdsend_rpc(struct gk20a *g,
 	status = nvgpu_pmu_rpc_execute(pmu, &(rpc.hdr),
 			(sizeof(rpc) - sizeof(rpc.scratch)),
 			pcmd->dmem_buffer_size,
-			NULL, NULL);
+			NULL, NULL, copy_out);
 
 	if (status) {
 		nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
@@ -953,7 +953,7 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
 }
 
 int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
-			u32 *var, u32 val)
+			void *var, u8 val)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct nvgpu_timeout timeout;
@@ -962,7 +962,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
 	nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
 
 	do {
-		if (*var == val)
+		if (*(u8 *)var == val)
 			return 0;
 
 		if (gk20a_pmu_is_interrupted(pmu))
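The switch to a void pointer with an 8-bit compare lets the copy-back path
further down poll the one-byte function field of the RPC header; a sketch of
that call pattern, assuming rpc_buff points at the header the PMU writes back:

/* Poll an 8-bit header field until the PMU echoes the requested function id. */
struct nv_pmu_rpc_header *hdr = rpc_buff;

pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
		&hdr->function, rpc->function);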
@@ -980,11 +980,12 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 {
 	struct nv_pmu_rpc_header rpc;
 	struct nvgpu_pmu *pmu = &g->pmu;
-	struct nv_pmu_rpc_struct_perfmon_query *rpc_param;
+	struct rpc_handler_payload *rpc_payload =
+		(struct rpc_handler_payload *)param;
+	struct nv_pmu_rpc_struct_perfmon_query *rpc_param;
 
 	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
-	if (param)
-		memcpy(&rpc, param, sizeof(struct nv_pmu_rpc_header));
+	memcpy(&rpc, rpc_payload->rpc_buff, sizeof(struct nv_pmu_rpc_header));
 
 	if (rpc.flcn_status) {
 		nvgpu_err(g, " failed RPC response, status=0x%x, func=0x%x",
@@ -1026,7 +1027,8 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 	case NV_PMU_RPC_ID_PERFMON_T18X_QUERY:
 		nvgpu_pmu_dbg(g,
 			"reply NV_PMU_RPC_ID_PERFMON_QUERY");
-		rpc_param = (struct nv_pmu_rpc_struct_perfmon_query *)param;
+		rpc_param = (struct nv_pmu_rpc_struct_perfmon_query *)
+			rpc_payload->rpc_buff;
 		pmu->load = rpc_param->sample_buffer[0];
 		pmu->perfmon_query = 1;
 		/* set perfmon_query to 1 after load is copied */
@@ -1042,32 +1044,62 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 
 exit:
 	/* free allocated memory */
-	if (param)
-		nvgpu_kfree(g, param);
+	if (rpc_payload->is_mem_free_set)
+		nvgpu_kfree(g, rpc_payload);
 }
 
 int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
 	u16 size_rpc, u16 size_scratch, pmu_callback caller_cb,
-	void *caller_cb_param)
+	void *caller_cb_param, bool is_copy_back)
 {
 	struct gk20a *g = pmu->g;
 	struct pmu_cmd cmd;
 	struct pmu_payload payload;
-	pmu_callback callback = caller_cb;
+	struct rpc_handler_payload *rpc_payload = NULL;
+	pmu_callback callback = NULL;
 	void *rpc_buff = NULL;
-	void *cb_param = caller_cb_param;
 	u32 seq = 0;
 	int status = 0;
 
 	if (!pmu->pmu_ready) {
 		nvgpu_warn(g, "PMU is not ready to process RPC");
-		return -EINVAL;
+		status = EINVAL;
+		goto exit;
 	}
 
-	rpc_buff = nvgpu_kzalloc(g, size_rpc);
-	if (!rpc_buff)
-		return -ENOMEM;
+	if (caller_cb == NULL) {
+		rpc_payload = nvgpu_kzalloc(g,
+			sizeof(struct rpc_handler_payload) + size_rpc);
+		if (!rpc_payload) {
+			status = ENOMEM;
+			goto exit;
+		}
+
+		rpc_payload->rpc_buff = (u8 *)rpc_payload +
+			sizeof(struct rpc_handler_payload);
+		rpc_payload->is_mem_free_set =
+			is_copy_back ? false : true;
+
+		/* assign default RPC handler */
+		callback = pmu_rpc_handler;
+	} else {
+		if (caller_cb_param == NULL) {
+			nvgpu_err(g, "Invalid cb param addr");
+			status = EINVAL;
+			goto exit;
+		}
+		rpc_payload = nvgpu_kzalloc(g,
+			sizeof(struct rpc_handler_payload));
+		if (!rpc_payload) {
+			status = ENOMEM;
+			goto exit;
+		}
+		rpc_payload->rpc_buff = caller_cb_param;
+		rpc_payload->is_mem_free_set = true;
+		callback = caller_cb;
+	}
+
+	rpc_buff = rpc_payload->rpc_buff;
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
 	memset(&payload, 0, sizeof(struct pmu_payload));
 
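When no caller callback is given, the handler payload and the RPC buffer come
from a single allocation; a sketch of the resulting layout and ownership rules,
using the fields added in this change:

/*
 * One nvgpu_kzalloc() of sizeof(struct rpc_handler_payload) + size_rpc:
 *
 *   +-----------------------------+---------------------------+
 *   | struct rpc_handler_payload  | RPC request copy          |
 *   |   .rpc_buff  ---------------+--> size_rpc bytes         |
 *   |   .is_mem_free_set          |                           |
 *   +-----------------------------+---------------------------+
 *
 * is_mem_free_set == true  : pmu_rpc_handler frees the block in its exit path;
 * is_mem_free_set == false : (copy-back case) nvgpu_pmu_rpc_execute() frees it
 *                            after the blocking wait completes.
 */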
@@ -1081,24 +1113,38 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
 	payload.rpc.size_rpc = size_rpc;
 	payload.rpc.size_scratch = size_scratch;
 
-	/* assign default RPC handler & buffer */
-	if (!callback && !cb_param) {
-		callback = pmu_rpc_handler;
-		cb_param = rpc_buff;
-	}
-
 	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
-			PMU_COMMAND_QUEUE_LPQ, pmu_rpc_handler,
-			cb_param, &seq, ~0);
+			PMU_COMMAND_QUEUE_LPQ, callback,
+			rpc_payload, &seq, ~0);
 	if (status) {
 		nvgpu_err(g, "Failed to execute RPC status=0x%x, func=0x%x",
			status, rpc->function);
 		goto exit;
 	}
 
-	/* if caller passed buff then free allocated RPC buffer */
-	if (caller_cb_param)
-		nvgpu_kfree(g, rpc_buff);
+	/*
+	 * Option act like blocking call, which waits till RPC request
+	 * executes on PMU & copy back processed data to rpc_buff
+	 * to read data back in nvgpu
+	 */
+	if (is_copy_back) {
+		/* clear buff */
+		memset(rpc_buff, 0, size_rpc);
+		/* wait till RPC execute in PMU & ACK */
+		pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
+			&((struct nv_pmu_rpc_header *)rpc_buff)->function,
+			rpc->function);
+		/* copy back data to caller */
+		memcpy(rpc, rpc_buff, size_rpc);
+		/* free allocated memory */
+		nvgpu_kfree(g, rpc_payload);
+	}
 
 exit:
+	if (status) {
+		if (rpc_payload)
+			nvgpu_kfree(g, rpc_payload);
+	}
+
 	return status;
 }
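A sketch of the caller-supplied callback path, using the pmu_callback typedef
from this change; the handler name and the rpc variable are hypothetical:

/* Hypothetical completion handler matching the pmu_callback typedef. */
static void my_rpc_done(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	/* param is the rpc_handler_payload; rpc_buff is the caller's RPC
	 * struct passed as caller_cb_param below. */
	struct rpc_handler_payload *payload = param;
	struct nv_pmu_rpc_header *hdr = payload->rpc_buff;

	nvgpu_pmu_dbg(g, "RPC func 0x%x done, flcn_status 0x%x",
		hdr->function, hdr->flcn_status);
}

/* Caller side: pass both the callback and the RPC struct it should see. */
status = nvgpu_pmu_rpc_execute(pmu, &rpc.hdr,
		sizeof(rpc) - sizeof(rpc.scratch), 0,
		my_rpc_done, &rpc, false);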
@@ -3,7 +3,7 @@
  *
  * GK20A PMU (aka. gPMU outside gk20a context)
  *
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -69,7 +69,7 @@ void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
 
 void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable);
 int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
-			u32 *var, u32 val);
+			void *var, u8 val);
 void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
 			void *param, u32 handle, u32 status);
 void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -154,12 +154,45 @@ enum {
 								\
 	_stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr),	\
 		(sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
-		(_size), NULL, NULL); \
+		(_size), NULL, NULL, false); \
 } while (0)
 
+/* RPC blocking call to copy back data from PMU to _prpc */
+#define PMU_RPC_EXECUTE_CPB(_stat, _pmu, _unit, _func, _prpc, _size)\
+do {								\
+	memset(&((_prpc)->hdr), 0, sizeof((_prpc)->hdr));\
+								\
+	(_prpc)->hdr.unit_id = PMU_UNIT_##_unit;	\
+	(_prpc)->hdr.function = NV_PMU_RPC_ID_##_unit##_##_func;\
+	(_prpc)->hdr.flags = 0x0;	\
+								\
+	_stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr),	\
+		(sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
+		(_size), NULL, NULL, true); \
+} while (0)
+
+/* RPC non-blocking with call_back handler option */
+#define PMU_RPC_EXECUTE_CB(_stat, _pmu, _unit, _func, _prpc, _size, _cb, _cbp)\
+do {								\
+	memset(&((_prpc)->hdr), 0, sizeof((_prpc)->hdr));\
+								\
+	(_prpc)->hdr.unit_id = PMU_UNIT_##_unit;	\
+	(_prpc)->hdr.function = NV_PMU_RPC_ID_##_unit##_##_func;\
+	(_prpc)->hdr.flags = 0x0;	\
+								\
+	_stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr),	\
+		(sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
+		(_size), _cb, _cbp, false); \
+} while (0)
+
 typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32,
 	u32);
 
+struct rpc_handler_payload {
+	void *rpc_buff;
+	bool is_mem_free_set;
+};
+
 struct pmu_rpc_desc {
 	void *prpc;
 	u16 size_rpc;
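A sketch of how the new blocking macro might drive the perfmon query handled
earlier; the PERFMON_T18X unit/function tokens are assumed to expand to
existing PMU_UNIT_/NV_PMU_RPC_ID_ defines, and g/pmu come from the caller:

/* Blocking perfmon query via the copy-back macro (sketch). */
struct nv_pmu_rpc_struct_perfmon_query rpc;
int status;

memset(&rpc, 0, sizeof(rpc));
PMU_RPC_EXECUTE_CPB(status, pmu, PERFMON_T18X, QUERY, &rpc, 0);
if (status)
	nvgpu_err(g, "perfmon query RPC failed, status=0x%x", status);
else
	/* copy-back already happened: the reply is in the local struct */
	pmu->load = rpc.sample_buffer[0];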
@@ -500,6 +533,7 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);
 
 /* PMU RPC */
 int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
-	u16 size_rpc, u16 size_scratch, pmu_callback callback, void *cb_param);
+	u16 size_rpc, u16 size_scratch, pmu_callback callback, void *cb_param,
+	bool is_copy_back);
 
 #endif /* __NVGPU_PMU_H__ */