gpu: nvgpu: use READ_ONCE/WRITE_ONCE

In the upstream kernel, ACCESS_ONCE() is now deprecated, for the reason
given in the following commit:

    commit 381f20fceba8e ("security: use READ_ONCE instead of deprecated
    ACCESS_ONCE")

    ACCESS_ONCE() does not work reliably on non-scalar types. For
    example gcc 4.6 and 4.7 might remove the volatile tag for such
    accesses during the SRA (scalar replacement of aggregates) step.

Replace usages of ACCESS_ONCE with READ_ONCE and WRITE_ONCE in nvgpu.
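
To illustrate the failure mode, here is a standalone sketch (not code
from this change; pte_t is a stand-in aggregate type used in the
upstream discussion):

/* ACCESS_ONCE() as historically defined in <linux/compiler.h>. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* A wrapper struct, i.e. a non-scalar type. */
typedef struct { unsigned long val; } pte_t;

static pte_t load_pte(pte_t *ptep)
{
	/*
	 * The volatile qualifier covers the whole struct.  During SRA,
	 * gcc 4.6/4.7 could split the aggregate into its scalar members
	 * and lose the qualifier, so this may not remain a single,
	 * un-cached access.  READ_ONCE(*ptep) avoids this by going
	 * through a size-switched helper the compiler cannot take apart.
	 */
	return ACCESS_ONCE(*ptep);
}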

Bug 2834141

Change-Id: I9904c49e1a4d7b17ed2fe54360051d08595a2982
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2294096
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: Jonathan Hunter <jonathanh@nvidia.com>
Reviewed-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
11 changed files with 32 additions and 26 deletions

@@ -120,7 +120,7 @@ int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb)
 	struct clk_set_info *p0_info;
 
-	table = NV_ACCESS_ONCE(arb->current_vf_table);
+	table = NV_READ_ONCE(arb->current_vf_table);
 	/* make flag visible when all data has resolved in the tables */
 	nvgpu_smp_rmb();
 
 	table = (table == &arb->vf_table_pool[0]) ? &arb->vf_table_pool[1] :
@@ -279,7 +279,7 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 		l_notification = &arb->notification_queue.
 				clk_q_notifications[((u64)index + 1ULL) % size];
-		alarm_detected = NV_ACCESS_ONCE(
+		alarm_detected = NV_READ_ONCE(
 				l_notification->clk_notification);
 
 		if ((enabled_mask & alarm_detected) == 0U) {
@@ -289,7 +289,7 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 			queue_index++;
 			dev->queue.clk_q_notifications[
 				queue_index % dev->queue.size].timestamp =
-					NV_ACCESS_ONCE(l_notification->timestamp);
+					NV_READ_ONCE(l_notification->timestamp);
 
 			dev->queue.clk_q_notifications[queue_index %
 				dev->queue.size].clk_notification =
@@ -628,7 +628,7 @@ void nvgpu_clk_arb_schedule_vf_table_update(struct gk20a *g)
  */
 u32 nvgpu_clk_arb_get_current_pstate(struct gk20a *g)
 {
-	return NV_ACCESS_ONCE(g->clk_arb->actual->pstate);
+	return NV_READ_ONCE(g->clk_arb->actual->pstate);
 }
 
 void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock)
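
The clk_arb hunks above all follow the same pointer-publication
pattern: the writer fills one of two pooled tables and then publishes
the pointer; the reader snapshots the pointer with NV_READ_ONCE() and
issues nvgpu_smp_rmb() before touching the data.  A minimal sketch of
the two sides, using generic kernel primitives and assumed type/field
names (not nvgpu's actual code):

/* Assumes kernel READ_ONCE/WRITE_ONCE and smp_wmb/smp_rmb. */
struct vf_table;

struct arb_state {
	struct vf_table *current_vf_table;
};

/* Writer: fill the spare table first, then publish the pointer. */
static void publish_table(struct arb_state *arb, struct vf_table *t)
{
	smp_wmb();	/* table contents visible before the pointer */
	WRITE_ONCE(arb->current_vf_table, t);
}

/* Reader: snapshot the pointer once, then order the data reads. */
static struct vf_table *consume_table(struct arb_state *arb)
{
	struct vf_table *t = READ_ONCE(arb->current_vf_table);

	smp_rmb();	/* pairs with the writer's smp_wmb() */
	return t;
}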

@@ -344,7 +344,7 @@ void gp10b_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
 		goto exit_arb;
 	}
 
-	actual = ((NV_ACCESS_ONCE(arb->actual)) == &arb->actual_pool[0] ?
+	actual = ((NV_READ_ONCE(arb->actual)) == &arb->actual_pool[0] ?
 			&arb->actual_pool[1] : &arb->actual_pool[0]);
 
 	/* do not reorder this pointer */
@@ -429,7 +429,7 @@ void gv100_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
 		goto exit_arb;
 	}
 
-	actual = NV_ACCESS_ONCE(arb->actual) == &arb->actual_pool[0] ?
+	actual = NV_READ_ONCE(arb->actual) == &arb->actual_pool[0] ?
 		&arb->actual_pool[1] : &arb->actual_pool[0];
 
 	/* do not reorder this pointer */

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -68,9 +68,9 @@ static u64 nvgpu_lockless_alloc(struct nvgpu_allocator *a, u64 len)
 		return 0;
 	}
 
-	head = NV_ACCESS_ONCE(pa->head);
+	head = NV_READ_ONCE(pa->head);
 	while (head >= 0) {
-		new_head = NV_ACCESS_ONCE(pa->next[head]);
+		new_head = NV_READ_ONCE(pa->next[head]);
 		ret = cmpxchg(&pa->head, head, new_head);
 		if (ret == head) {
 			addr = pa->base + U64(head) * pa->blk_size;
@@ -79,7 +79,7 @@ static u64 nvgpu_lockless_alloc(struct nvgpu_allocator *a, u64 len)
 					addr);
 			break;
 		}
-		head = NV_ACCESS_ONCE(pa->head);
+		head = NV_READ_ONCE(pa->head);
 	}
 
 	if (addr != 0ULL) {
@@ -102,8 +102,8 @@ static void nvgpu_lockless_free(struct nvgpu_allocator *a, u64 addr)
 	alloc_dbg(a, "Free node # %llu @ addr 0x%llx", cur_idx, addr);
 
 	while (true) {
-		head = NV_ACCESS_ONCE(pa->head);
-		NV_ACCESS_ONCE(pa->next[cur_idx]) = head;
+		head = NV_READ_ONCE(pa->head);
+		NV_WRITE_ONCE(pa->next[cur_idx], head);
 		nvgpu_assert(cur_idx <= U64(INT_MAX));
 		ret = cmpxchg(&pa->head, head, (int)cur_idx);
 		if (ret == head) {
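
For context, nvgpu_lockless_alloc()/nvgpu_lockless_free() above
implement a Treiber-style lock-free free list over block indices,
which is why every shared load and store goes through
NV_READ_ONCE()/NV_WRITE_ONCE().  A condensed sketch of the pattern
with assumed struct and function names (kernel cmpxchg() semantics:
returns the value that was previously stored):

/* Sketch only; 'block_pool' and its fields are illustrative. */
struct block_pool {
	int head;	/* index of the first free block, -1 if empty */
	int *next;	/* next[i]: index of the free block after i */
};

static int pool_pop(struct block_pool *p)
{
	int head, new_head, ret;

	head = READ_ONCE(p->head);
	while (head >= 0) {
		new_head = READ_ONCE(p->next[head]);
		/* claim 'head' only if nobody raced us */
		ret = cmpxchg(&p->head, head, new_head);
		if (ret == head)
			return head;
		head = READ_ONCE(p->head);	/* lost the race; retry */
	}
	return -1;	/* pool exhausted */
}

static void pool_push(struct block_pool *p, int idx)
{
	int head, ret;

	do {
		head = READ_ONCE(p->head);
		/* link idx in before exposing it as the new head */
		WRITE_ONCE(p->next[idx], head);
		ret = cmpxchg(&p->head, head, idx);
	} while (ret != head);
}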

@@ -399,7 +399,7 @@ int nvgpu_clk_arb_find_slave_points(struct nvgpu_clk_arb *arb,
 	do {
 		gpc2clk_target = vf_point->gpc_mhz;
 
-		table = NV_ACCESS_ONCE(arb->current_vf_table);
+		table = NV_READ_ONCE(arb->current_vf_table);
 		/* pointer to table can be updated by callback */
 		nvgpu_smp_rmb();
@@ -436,7 +436,7 @@ int nvgpu_clk_arb_find_slave_points(struct nvgpu_clk_arb *arb,
 			vf_point->gpc_mhz = gpc2clk_target;
 		}
 	} while ((table == NULL) ||
-		(NV_ACCESS_ONCE(arb->current_vf_table) != table));
+		(NV_READ_ONCE(arb->current_vf_table) != table));
 
 	return status;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -165,7 +165,8 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 			pmu->pg->initialized = true;
 			nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_STARTED,
 				true);
-			WRITE_ONCE(pmu->pg->mscg_stat, PMU_MSCG_DISABLED);
+			NV_WRITE_ONCE(pmu->pg->mscg_stat,
+				PMU_MSCG_DISABLED);
 			/* make status visible */
 			nvgpu_smp_mb();
 		} else {
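
The pmu hunk keeps the existing store-then-fence shape: publish the
status with a single store, then force visibility with a full barrier.
A sketch of that pairing (the polling reader is an assumption for
illustration, not part of this commit):

/* Assumes kernel WRITE_ONCE/READ_ONCE, smp_mb() and cpu_relax(). */
struct pg_state {
	u32 mscg_stat;
};

static void set_mscg_stat(struct pg_state *pg, u32 stat)
{
	WRITE_ONCE(pg->mscg_stat, stat);
	smp_mb();	/* make status visible before any follow-up work */
}

static void wait_for_mscg_stat(struct pg_state *pg, u32 want)
{
	while (READ_ONCE(pg->mscg_stat) != want)
		cpu_relax();	/* reader pairs via its own ordered load */
}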