mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: use READ_ONCE/WRITE_ONCE
In the upstream kernel, ACCESS_ONCE() is deprecated, for the reasons
given in the following commit:
commit 381f20fceba8e ("security: use READ_ONCE instead of deprecated
ACCESS_ONCE")
ACCESS_ONCE() does not work reliably on non-scalar types. For
example gcc 4.6 and 4.7 might remove the volatile tag for such
accesses during the SRA (scalar replacement of aggregates) step.
Replace usages of ACCESS_ONCE with READ_ONCE and WRITE_ONCE in nvgpu.
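
For illustration, the intended pattern looks like the following minimal,
standalone sketch. The ILLUSTRATIVE_* macros are simplified stand-ins for
the kernel helpers (the real READ_ONCE()/WRITE_ONCE() also handle
non-scalar sizes by copying byte-wise through volatile accesses, which is
what survives SRA):

#define ILLUSTRATIVE_READ_ONCE(x) \
	(*(const volatile __typeof__(x) *)&(x))
#define ILLUSTRATIVE_WRITE_ONCE(x, val) \
	do { *(volatile __typeof__(x) *)&(x) = (val); } while (0)

static int flag;	/* shared with an interrupt handler or other CPU */

static void wait_for_flag(void)
{
	/*
	 * Without the volatile access the compiler may hoist the load
	 * out of the loop and spin forever on a stale value.
	 */
	while (ILLUSTRATIVE_READ_ONCE(flag) == 0)
		;
}

static void set_flag(void)
{
	/* A single, non-torn store that the reader can safely observe. */
	ILLUSTRATIVE_WRITE_ONCE(flag, 1);
}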
Bug 2834141
Change-Id: I9904c49e1a4d7b17ed2fe54360051d08595a2982
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2294096
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: Jonathan Hunter <jonathanh@nvidia.com>
Reviewed-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit d0d8ef79d1
parent 029da0437e
committed by Alex Waterman
@@ -120,7 +120,7 @@ int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb)
 
 	struct clk_set_info *p0_info;
 
-	table = NV_ACCESS_ONCE(arb->current_vf_table);
+	table = NV_READ_ONCE(arb->current_vf_table);
 	/* make flag visible when all data has resolved in the tables */
 	nvgpu_smp_rmb();
 	table = (table == &arb->vf_table_pool[0]) ? &arb->vf_table_pool[1] :
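
The hunk above is one half of a double-buffer handoff: the arbiter fills
whichever vf_table_pool entry is not current, then flips the pointer, while
readers load the pointer once and pair it with nvgpu_smp_rmb() before
trusting the table contents. A hedged sketch of that pairing — pool, cur,
publish, and consume are illustrative names, not nvgpu identifiers:

struct table { int nr_points; /* ... VF points ... */ };

static struct table pool[2];
static struct table *cur;	/* only the arbiter writes this */

static void publish(struct table *next)
{
	/* Fill *next first, then order those stores... */
	nvgpu_smp_wmb();
	/* ...before flipping the pointer readers will see. */
	NV_WRITE_ONCE(cur, next);
}

static struct table *consume(void)
{
	struct table *t = NV_READ_ONCE(cur);

	/* Order the pointer load before loads from *t, as the hunk does. */
	nvgpu_smp_rmb();
	return t;
}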
@@ -279,7 +279,7 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 
 		l_notification = &arb->notification_queue.
			clk_q_notifications[((u64)index + 1ULL) % size];
-		alarm_detected = NV_ACCESS_ONCE(
+		alarm_detected = NV_READ_ONCE(
			l_notification->clk_notification);
 
 		if ((enabled_mask & alarm_detected) == 0U) {

@@ -289,7 +289,7 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 			queue_index++;
 			dev->queue.clk_q_notifications[
				queue_index % dev->queue.size].timestamp =
-				NV_ACCESS_ONCE(l_notification->timestamp);
+				NV_READ_ONCE(l_notification->timestamp);
 
 			dev->queue.clk_q_notifications[queue_index %
				dev->queue.size].clk_notification =

@@ -628,7 +628,7 @@ void nvgpu_clk_arb_schedule_vf_table_update(struct gk20a *g)
  */
 u32 nvgpu_clk_arb_get_current_pstate(struct gk20a *g)
 {
-	return NV_ACCESS_ONCE(g->clk_arb->actual->pstate);
+	return NV_READ_ONCE(g->clk_arb->actual->pstate);
 }
 
 void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock)

@@ -344,7 +344,7 @@ void gp10b_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
 		goto exit_arb;
 	}
 
-	actual = ((NV_ACCESS_ONCE(arb->actual)) == &arb->actual_pool[0] ?
+	actual = ((NV_READ_ONCE(arb->actual)) == &arb->actual_pool[0] ?
			&arb->actual_pool[1] : &arb->actual_pool[0]);
 
 	/* do not reorder this pointer */

@@ -429,7 +429,7 @@ void gv100_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
 		goto exit_arb;
 	}
 
-	actual = NV_ACCESS_ONCE(arb->actual) == &arb->actual_pool[0] ?
+	actual = NV_READ_ONCE(arb->actual) == &arb->actual_pool[0] ?
			&arb->actual_pool[1] : &arb->actual_pool[0];
 
	/* do not reorder this pointer */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -68,9 +68,9 @@ static u64 nvgpu_lockless_alloc(struct nvgpu_allocator *a, u64 len)
 		return 0;
 	}
 
-	head = NV_ACCESS_ONCE(pa->head);
+	head = NV_READ_ONCE(pa->head);
 	while (head >= 0) {
-		new_head = NV_ACCESS_ONCE(pa->next[head]);
+		new_head = NV_READ_ONCE(pa->next[head]);
 		ret = cmpxchg(&pa->head, head, new_head);
 		if (ret == head) {
 			addr = pa->base + U64(head) * pa->blk_size;

@@ -79,7 +79,7 @@ static u64 nvgpu_lockless_alloc(struct nvgpu_allocator *a, u64 len)
				addr);
 			break;
 		}
-		head = NV_ACCESS_ONCE(pa->head);
+		head = NV_READ_ONCE(pa->head);
 	}
 
 	if (addr != 0ULL) {

@@ -102,8 +102,8 @@ static void nvgpu_lockless_free(struct nvgpu_allocator *a, u64 addr)
 	alloc_dbg(a, "Free node # %llu @ addr 0x%llx", cur_idx, addr);
 
 	while (true) {
-		head = NV_ACCESS_ONCE(pa->head);
-		NV_ACCESS_ONCE(pa->next[cur_idx]) = head;
+		head = NV_READ_ONCE(pa->head);
+		NV_WRITE_ONCE(pa->next[cur_idx], head);
 		nvgpu_assert(cur_idx <= U64(INT_MAX));
 		ret = cmpxchg(&pa->head, head, (int)cur_idx);
 		if (ret == head) {
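
nvgpu_lockless_alloc()/nvgpu_lockless_free() form a Treiber-style lock-free
stack of block indices, racing through cmpxchg() on pa->head; that is why
every unsynchronized access to head and next[] above needs a
READ_ONCE/WRITE_ONCE. A standalone sketch of the same loops in C11 atomics
(the names and the C11 mapping are illustrative, not the nvgpu code):

#include <stdatomic.h>

#define NR_BLOCKS 64

static _Atomic int head;			/* first free index, -1 == empty */
static _Atomic int next_idx[NR_BLOCKS];		/* per-block "next free" link */

static int pop_block(void)			/* cf. nvgpu_lockless_alloc() */
{
	int old = atomic_load(&head);

	while (old >= 0) {
		int new_head = atomic_load(&next_idx[old]);

		/* Plays the role of cmpxchg(&pa->head, head, new_head). */
		if (atomic_compare_exchange_weak(&head, &old, new_head))
			return old;		/* this thread owns block 'old' */
		/* A failed CAS reloaded 'old'; loop and retry. */
	}
	return -1;				/* free list exhausted */
}

static void push_block(int idx)			/* cf. nvgpu_lockless_free() */
{
	int old = atomic_load(&head);

	do {					/* link, then try to become head */
		atomic_store(&next_idx[idx], old);
	} while (!atomic_compare_exchange_weak(&head, &old, idx));
}

Like most index-based Treiber stacks, the pop path is ABA-sensitive in
principle; the sketch keeps the original's retry shape rather than adding
generation counters.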
@@ -399,7 +399,7 @@ int nvgpu_clk_arb_find_slave_points(struct nvgpu_clk_arb *arb,
 	do {
 		gpc2clk_target = vf_point->gpc_mhz;
 
-		table = NV_ACCESS_ONCE(arb->current_vf_table);
+		table = NV_READ_ONCE(arb->current_vf_table);
 		/* pointer to table can be updated by callback */
 		nvgpu_smp_rmb();
 

@@ -436,7 +436,7 @@ int nvgpu_clk_arb_find_slave_points(struct nvgpu_clk_arb *arb,
			vf_point->gpc_mhz = gpc2clk_target;
 		}
 	} while ((table == NULL) ||
-		(NV_ACCESS_ONCE(arb->current_vf_table) != table));
+		(NV_READ_ONCE(arb->current_vf_table) != table));
 
 	return status;
 

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -165,7 +165,8 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 		pmu->pg->initialized = true;
 		nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_STARTED,
			true);
-		WRITE_ONCE(pmu->pg->mscg_stat, PMU_MSCG_DISABLED);
+		NV_WRITE_ONCE(pmu->pg->mscg_stat,
+			PMU_MSCG_DISABLED);
 		/* make status visible */
 		nvgpu_smp_mb();
 	} else {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -44,7 +44,8 @@
 #define nvgpu_smp_rmb()	nvgpu_smp_rmb_impl()
 #define nvgpu_smp_wmb()	nvgpu_smp_wmb_impl()
 
-#define NV_ACCESS_ONCE(x)	NV_ACCESS((x))
+#define NV_READ_ONCE(x)	NV_READ_ONCE_IMPL((x))
+#define NV_WRITE_ONCE(x, y)	NV_WRITE_ONCE_IMPL((x), (y))
 
 /*
  * Sometimes we want to prevent speculation.

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,

@@ -27,7 +27,8 @@
 #define nvgpu_smp_rmb_impl()	smp_rmb()
 #define nvgpu_smp_wmb_impl()	smp_wmb()
 
-#define NV_ACCESS(x)	ACCESS_ONCE(x)
+#define NV_READ_ONCE_IMPL(x)	READ_ONCE(x)
+#define NV_WRITE_ONCE_IMPL(x, y)	WRITE_ONCE(x, y)
 
 #define nvgpu_speculation_barrier_impl()	speculation_barrier()
 
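
Seen together, the two hunks above preserve nvgpu's two-level macro
indirection: common code calls NV_READ_ONCE()/NV_WRITE_ONCE(), and each OS
backend supplies the _IMPL layer. Condensed from the hunks (Linux side
only; the POSIX backend follows below):

/* common barrier header */
#define NV_READ_ONCE(x)		NV_READ_ONCE_IMPL((x))
#define NV_WRITE_ONCE(x, y)	NV_WRITE_ONCE_IMPL((x), (y))

/* Linux backend: forward to the kernel's own helpers */
#define NV_READ_ONCE_IMPL(x)	READ_ONCE(x)
#define NV_WRITE_ONCE_IMPL(x, y)	WRITE_ONCE(x, y)

Because callers never spell READ_ONCE() directly, the POSIX unit-test
build can substitute its own definitions without touching common code.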
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -23,7 +23,9 @@
 #ifndef NVGPU_POSIX_BARRIER_H
 #define NVGPU_POSIX_BARRIER_H
 
-#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&x)
+#include <nvgpu/posix/utils.h>
+
+#define READ_ONCE(x)	(*(volatile __typeof__(x) *)&x)
 
 /*
  * TODO: implement all these!

@@ -36,6 +38,7 @@
 #define nvgpu_smp_rmb_impl()
 #define nvgpu_smp_wmb_impl()
 
-#define NV_ACCESS(x)	ACCESS_ONCE(x)
+#define NV_READ_ONCE_IMPL(x)	READ_ONCE(x)
+#define NV_WRITE_ONCE_IMPL(x, y)	WRITE_ONCE(x, y)
 
 #endif /* NVGPU_POSIX_BARRIER_H */

@@ -516,7 +516,7 @@ static int nvgpu_clk_arb_stats_show(struct seq_file *s, void *unused)
 	u64 num;
 	s64 tmp, avg, std, max, min;
 
-	debug = NV_ACCESS_ONCE(arb->debug);
+	debug = READ_ONCE(arb->debug);
 	/* Make copy of structure and ensure no reordering */
 	nvgpu_smp_rmb();
 	if (!debug)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,

@@ -553,7 +553,7 @@ static ssize_t mscg_enable_store(struct device *dev,
 		g->mscg_enabled = true;
 		if (nvgpu_pmu_is_lpwr_feature_supported(g,
				PMU_PG_LPWR_FEATURE_MSCG)) {
-			if (!ACCESS_ONCE(pmu->pg->mscg_stat)) {
+			if (!READ_ONCE(pmu->pg->mscg_stat)) {
				WRITE_ONCE(pmu->pg->mscg_stat,
					PMU_MSCG_ENABLED);
				/* make status visible */