gpu: nvgpu: vgpu: add vgpu_css_init

Add vgpu_css_init() to set up the IVM mempool at init time.

Background:
vgpu_css_reserve_mempool() was called at runtime from multiple places
without any locking. To avoid races, this patch moves the reservation
to init time and renames the function to vgpu_css_init().
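
A minimal sketch of the racy pattern being removed, condensed from the
hunks below (illustrative, not literal code):

    /* Runtime path 1: probe the buffer size. */
    u32 vgpu_css_get_buffer_size(struct gk20a *g)
    {
            vgpu_css_reserve_mempool(g, &cookie);   /* no lock held */
            size = vgpu_ivm_get_size(cookie);
            vgpu_ivm_mempool_unreserve(cookie);     /* transient use */
            return size;
    }

    /* Runtime path 2: map the snapshot buffer. */
    static int vgpu_css_init_snapshot_buffer(struct gk20a *g)
    {
            vgpu_css_reserve_mempool(g, &css_cookie); /* no lock held */
            buf = vgpu_ivm_mempool_map(css_cookie);
            ...
    }

The two paths could interleave on the shared file-local css_cookie.
After this patch the cookie is reserved exactly once in vgpu_css_init()
and cached in vgpu_priv_data, so the runtime paths only read it.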

Bug 200598546

Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Change-Id: I4c7794ee7151fc604643f94700d5b986472b2e71
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2336905
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Aparna Das <aparnad@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Richard Zhao
Date:      2020-04-29 17:44:02 -07:00
Committer: Alex Waterman
Parent:    9d7bf6c902
Commit:    f73d035983

4 changed files with 20 additions and 45 deletions


@@ -1262,15 +1262,11 @@ void vgpu_gr_init_cyclestats(struct gk20a *g)
 {
 #if defined(CONFIG_NVGPU_CYCLESTATS)
 	bool snapshots_supported = true;
-	u32 max_css_buffer_size;
 
 	/* cyclestats not supported on vgpu */
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS, false);
 
-	max_css_buffer_size = vgpu_css_get_buffer_size(g);
-
-	/* snapshots not supported if the buffer size is 0 */
-	if (max_css_buffer_size == 0) {
+	if (vgpu_css_init(g) != 0) {
 		snapshots_supported = false;
 	}
 


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -32,19 +32,13 @@
 #include "cyclestats_snapshot_vgpu.h"
 #include "common/vgpu/ivc/comm_vgpu.h"
 
-static struct tegra_hv_ivm_cookie *css_cookie;
-
-static int vgpu_css_reserve_mempool(struct gk20a *g,
-		struct tegra_hv_ivm_cookie **cookie_p)
+int vgpu_css_init(struct gk20a *g)
 {
+	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 	struct tegra_hv_ivm_cookie *cookie;
 	u32 mempool;
 	int err;
 
-	if (cookie_p == NULL) {
-		return -EINVAL;
-	}
-
 	err = nvgpu_dt_read_u32_index(g, "mempool-css", 1, &mempool);
 	if (err) {
 		nvgpu_err(g, "dt missing mempool-css");
@@ -58,40 +52,28 @@ static int vgpu_css_reserve_mempool(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	*cookie_p = cookie;
+	priv->css_cookie = cookie;
 	return 0;
 }
 
 u32 vgpu_css_get_buffer_size(struct gk20a *g)
 {
-	struct tegra_hv_ivm_cookie *cookie;
-	u32 size;
-	int err;
+	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
 	nvgpu_log_fn(g, " ");
 
-	if (css_cookie) {
-		size = (u32)vgpu_ivm_get_size(css_cookie);
-		nvgpu_log_info(g, "buffer size = 0x%08x", size);
-		return size;
+	if (NULL == priv->css_cookie) {
+		return 0U;
 	}
 
-	err = vgpu_css_reserve_mempool(g, &cookie);
-	if (0 != err) {
-		return 0;
-	}
-
-	size = vgpu_ivm_get_size(cookie);
-	vgpu_ivm_mempool_unreserve(cookie);
-	nvgpu_log_info(g, "buffer size = 0x%08x", size);
-
-	return size;
+	return vgpu_ivm_get_size(priv->css_cookie);
 }
 
 static int vgpu_css_init_snapshot_buffer(struct gk20a *g)
 {
 	struct gk20a_cs_snapshot *data = g->cs_data;
+	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 	void *buf = NULL;
 	int err;
 	u64 size;
@@ -102,12 +84,11 @@ static int vgpu_css_init_snapshot_buffer(struct gk20a *g)
 		return 0;
 	}
 
-	err = vgpu_css_reserve_mempool(g, &css_cookie);
-	if (0 != err) {
-		return err;
+	if (NULL == priv->css_cookie) {
+		return -EINVAL;
 	}
 
-	size = vgpu_ivm_get_size(css_cookie);
+	size = vgpu_ivm_get_size(priv->css_cookie);
 	/* Make sure buffer size is large enough */
 	if (size < CSS_MIN_HW_SNAPSHOT_SIZE) {
 		nvgpu_info(g, "mempool size 0x%llx too small", size);
@@ -115,7 +96,7 @@ static int vgpu_css_init_snapshot_buffer(struct gk20a *g)
 		goto fail;
 	}
 
-	buf = vgpu_ivm_mempool_map(css_cookie);
+	buf = vgpu_ivm_mempool_map(priv->css_cookie);
 	if (!buf) {
 		nvgpu_info(g, "vgpu_ivm_mempool_map failed");
 		err = -EINVAL;
@@ -129,25 +110,21 @@ static int vgpu_css_init_snapshot_buffer(struct gk20a *g)
 	(void) memset(data->hw_snapshot, 0xff, size);
 	return 0;
 
 fail:
-	vgpu_ivm_mempool_unreserve(css_cookie);
-	css_cookie = NULL;
 	return err;
 }
 
 void vgpu_css_release_snapshot_buffer(struct gk20a *g)
 {
 	struct gk20a_cs_snapshot *data = g->cs_data;
+	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
 	if (!data->hw_snapshot) {
 		return;
 	}
 
-	vgpu_ivm_mempool_unmap(css_cookie, data->hw_snapshot);
+	vgpu_ivm_mempool_unmap(priv->css_cookie, data->hw_snapshot);
 	data->hw_snapshot = NULL;
 
-	vgpu_ivm_mempool_unreserve(css_cookie);
-	css_cookie = NULL;
-
 	nvgpu_log_info(g, "cyclestats(vgpu): buffer for snapshots released\n");
 }
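
The resulting cookie lifecycle, condensed from the hunks above (the
ordering is illustrative; only vgpu_css_init() is new in this patch):

    /* Init time, single-threaded: reserve once, cache the cookie. */
    if (vgpu_css_init(g) != 0)
            snapshots_supported = false;

    /* Runtime: read-only users of priv->css_cookie. */
    size = vgpu_css_get_buffer_size(g);      /* 0 if init failed       */
    err = vgpu_css_init_snapshot_buffer(g);  /* -EINVAL if no cookie   */
    ...
    vgpu_css_release_snapshot_buffer(g);     /* unmaps only; the cookie
                                                stays reserved */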


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,6 +29,7 @@ struct gk20a;
 struct nvgpu_channel;
 struct gk20a_cs_snapshot_client;
 
+int vgpu_css_init(struct gk20a *g);
 void vgpu_css_release_snapshot_buffer(struct gk20a *g);
 int vgpu_css_flush_snapshots(struct nvgpu_channel *ch,
 		u32 *pending, bool *hw_overflow);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -42,6 +42,7 @@ struct vgpu_priv_data {
 	u32 num_freqs;
 	unsigned long *freqs;
 	struct nvgpu_mutex vgpu_clk_get_freq_lock;
+	struct tegra_hv_ivm_cookie *css_cookie;
 };
 
 struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g);
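
With the cookie stored in the per-GPU vgpu_priv_data instead of a
file-local static, each GPU instance carries its own reservation. The
accessor pattern used by the runtime paths above:

    struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

    if (priv->css_cookie != NULL)
            size = vgpu_ivm_get_size(priv->css_cookie);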