Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: clk: fix MISRA 10.3 issues in clk_arb
MISRA Rule 10.3 prohibits assigning a value to an object of a different or
narrower essential type. This change addresses a number of miscellaneous
violations in clk_arb.

JIRA NVGPU-1008

Change-Id: Iac21ee0c658d55b0c9f7b2d8ea0e134d6fc3c6c5
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2001231
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: a2ce1dfd37
Commit: 182aadfd71
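For readers who don't have the rule at hand: MISRA C:2012 Rule 10.3 forbids assigning the value of an expression to an object with a narrower essential type or of a different essential type category. The first hunks below apply the simplest form of the fix: the queue index becomes a u32 instead of an int, and the constant in (index + 1U) carries a U suffix so the arithmetic stays essentially unsigned. The stand-alone sketch below illustrates that pattern with standard C types instead of the nvgpu typedefs; the queue, names, and values are illustrative only.

#include <stddef.h>
#include <stdint.h>

#define QUEUE_SIZE 8U

static uint32_t queue[QUEUE_SIZE];

/*
 * Non-compliant sketch:
 *      int index;                     // signed, but assigned from unsigned values
 *      ... queue[(index + 1) % size]  // plain 1 is essentially signed
 *
 * Compliant sketch, mirroring the diff: the index is essentially unsigned
 * and the constant carries a U suffix.
 */
static uint32_t peek_next(uint32_t index, size_t size)
{
        return queue[(index + 1U) % size];
}

int main(void)
{
        queue[0] = 42U;

        /* Index 7 wraps back to slot 0 in an 8-entry queue. */
        return (peek_next(7U, (size_t)QUEUE_SIZE) == 42U) ? 0 : 1;
}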
@@ -256,10 +256,9 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
         u32 enabled_mask = 0;
         u32 new_alarms_reported = 0;
         u32 poll_mask = 0;
-        u32 tail, head;
+        u32 tail, head, index;
         u32 queue_index;
         size_t size;
-        int index;
 
         enabled_mask = (u32)nvgpu_atomic_read(&dev->enabled_mask);
         size = arb->notification_queue.size;
@@ -277,7 +276,7 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
                 u32 alarm_detected;
 
                 notification = &arb->notification_queue.
-                                notifications[(index+1) % size];
+                                notifications[(index+1U) % size];
                 alarm_detected =
                         NV_ACCESS_ONCE(notification->notification);
 
@@ -526,7 +525,7 @@ void nvgpu_clk_arb_worker_enqueue(struct gk20a *g,
         /*
          * Warn if worker thread cannot run
          */
-        if (WARN_ON(__nvgpu_clk_arb_worker_start(g))) {
+        if (WARN_ON(__nvgpu_clk_arb_worker_start(g) != 0)) {
                 nvgpu_warn(g, "clk arb worker cannot run!");
                 return;
         }
@@ -595,7 +594,7 @@ bool nvgpu_clk_arb_has_active_req(struct gk20a *g)
 void nvgpu_clk_arb_send_thermal_alarm(struct gk20a *g)
 {
         nvgpu_clk_arb_schedule_alarm(g,
-                (0x1UL << NVGPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD));
+                BIT32(NVGPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD));
 }
 
 void nvgpu_clk_arb_schedule_alarm(struct gk20a *g, u32 alarm)
@@ -887,22 +886,27 @@ unsigned long nvgpu_clk_measure_freq(struct gk20a *g, u32 api_domain)
 int nvgpu_clk_arb_get_arbiter_effective_mhz(struct gk20a *g,
                 u32 api_domain, u16 *freq_mhz)
 {
+        u64 freq_mhz_u64;
         if (!nvgpu_clk_arb_is_valid_domain(g, api_domain)) {
                 return -EINVAL;
         }
 
         switch (api_domain) {
         case NVGPU_CLK_DOMAIN_MCLK:
-                *freq_mhz = g->ops.clk.measure_freq(g, CTRL_CLK_DOMAIN_MCLK) /
-                        1000000ULL;
-                return 0;
+                freq_mhz_u64 = g->ops.clk.measure_freq(g,
+                                CTRL_CLK_DOMAIN_MCLK) / 1000000ULL;
+                break;
 
         case NVGPU_CLK_DOMAIN_GPCCLK:
-                *freq_mhz = g->ops.clk.measure_freq(g, CTRL_CLK_DOMAIN_GPCCLK) /
-                        1000000ULL;
-                return 0;
+                freq_mhz_u64 = g->ops.clk.measure_freq(g,
+                                CTRL_CLK_DOMAIN_GPCCLK) / 1000000ULL;
+                break;
 
         default:
                 return -EINVAL;
         }
+
+        nvgpu_assert(freq_mhz_u64 <= (u64)U16_MAX);
+        *freq_mhz = (u16)freq_mhz_u64;
+        return 0;
 }
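The hunk above is the most involved change: g->ops.clk.measure_freq() reports the frequency in Hz in a wide integer type, and the old code assigned the divided result straight into the u16 output, a narrowing assignment under Rule 10.3. The new code keeps the intermediate value in a u64, asserts that it fits, and only then narrows with an explicit cast. A minimal stand-alone sketch of that bounded-narrowing pattern, using assert() and UINT16_MAX in place of nvgpu's nvgpu_assert() and U16_MAX, and a made-up frequency source:

#include <assert.h>
#include <stdint.h>

/* Hypothetical frequency source in Hz, standing in for measure_freq(). */
static uint64_t measure_freq_hz(void)
{
        return 1607000000ULL;   /* 1607 MHz */
}

static int get_effective_mhz(uint16_t *freq_mhz)
{
        uint64_t freq_mhz_u64;

        /*
         * Non-compliant with Rule 10.3:
         *      *freq_mhz = measure_freq_hz() / 1000000ULL;
         * (a 64-bit expression assigned to a 16-bit object).
         */

        /* Compliant: keep the wide type, range-check, then cast explicitly. */
        freq_mhz_u64 = measure_freq_hz() / 1000000ULL;
        assert(freq_mhz_u64 <= (uint64_t)UINT16_MAX);
        *freq_mhz = (uint16_t)freq_mhz_u64;
        return 0;
}

int main(void)
{
        uint16_t mhz = 0U;

        (void)get_effective_mhz(&mhz);
        return (mhz == 1607U) ? 0 : 1;
}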
@@ -48,7 +48,7 @@ struct nvgpu_clk_notification_queue;
 struct nvgpu_clk_session;
 
 #define VF_POINT_INVALID_PSTATE ~0U
-#define VF_POINT_SET_PSTATE_SUPPORTED(a, b) ((a)->pstates |= (1UL << (b)))
+#define VF_POINT_SET_PSTATE_SUPPORTED(a, b) ((a)->pstates |= (BIT16(b)))
 #define VF_POINT_GET_PSTATE(a) (((a)->pstates) ?\
                 __fls((a)->pstates) :\
                 VF_POINT_INVALID_PSTATE)
@@ -85,7 +85,7 @@ struct nvgpu_clk_session;
 #define NVGPU_EVENT_LAST NVGPU_EVENT_ALARM_GPU_LOST
 
 /* Local Alarms */
-#define EVENT(alarm)    (0x1UL << NVGPU_EVENT_##alarm)
+#define EVENT(alarm)    (BIT32(NVGPU_EVENT_##alarm))
 
 #define LOCAL_ALARM_MASK (EVENT(ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE) | \
                 EVENT(VF_UPDATE))
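The two header hunks above (the VF_POINT and EVENT macros) replace open-coded shifts such as 0x1UL << n and 1UL << (b) with nvgpu's BIT32()/BIT16() helpers, so the resulting constants match the width and essential type of the u32 alarm masks and the 16-bit pstate bitmap instead of following unsigned long. The exact nvgpu macro definitions are not part of this diff; the sketch below uses stand-in macros and a hypothetical bit position purely to show why a fixed-width helper sidesteps the essential-type mismatch:

#include <stdint.h>

/*
 * Stand-in helpers in the style of nvgpu's BIT32()/BIT16(); the real nvgpu
 * definitions are not shown in this diff and may differ in detail.  Unlike
 * 0x1UL << n, whose type follows unsigned long, these always yield a value
 * of the intended fixed width and unsigned essential type.
 */
#define SKETCH_BIT16(i) ((uint16_t)((uint16_t)1U << (i)))
#define SKETCH_BIT32(i) ((uint32_t)1U << (i))

/* Hypothetical alarm bit position, for illustration only. */
#define SKETCH_ALARM_BIT 3U

int main(void)
{
        /* Old style: unsigned long, 32 or 64 bits depending on the ABI. */
        unsigned long legacy = 0x1UL << SKETCH_ALARM_BIT;

        /* New style: always an essentially unsigned 32-bit value. */
        uint32_t alarm = SKETCH_BIT32(SKETCH_ALARM_BIT);

        return (alarm == (uint32_t)legacy) ? 0 : 1;
}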