gpu: nvgpu: rename nvhost_dev to nvhost

A couple of structure member variables were named "nvhost_dev". This
causes a name conflict with a structure name. MISRA frowns upon name
conflicts. Therefore, rename the member variables to "nvhost".

JIRA NVGPU-3873

Change-Id: I4d35eb2d121b3c17499055d8781a61641594811e
Signed-off-by: Adeel Raza <araza@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2262190
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Adeel Raza
2019-12-13 15:10:01 -08:00
committed by Alex Waterman
parent 26af1c2270
commit fd870b300e
10 changed files with 63 additions and 63 deletions

View File

@@ -42,7 +42,7 @@
struct nvgpu_channel_sync_syncpt { struct nvgpu_channel_sync_syncpt {
struct nvgpu_channel_sync ops; struct nvgpu_channel_sync ops;
struct nvgpu_channel *c; struct nvgpu_channel *c;
struct nvgpu_nvhost_dev *nvhost_dev; struct nvgpu_nvhost_dev *nvhost;
u32 id; u32 id;
struct nvgpu_mem syncpt_buf; struct nvgpu_mem syncpt_buf;
}; };
@@ -62,7 +62,7 @@ static int channel_sync_syncpt_gen_wait_cmd(struct nvgpu_channel *c,
{ {
int err = 0; int err = 0;
bool is_expired = nvgpu_nvhost_syncpt_is_expired_ext( bool is_expired = nvgpu_nvhost_syncpt_is_expired_ext(
c->g->nvhost_dev, id, thresh); c->g->nvhost, id, thresh);
if (is_expired) { if (is_expired) {
if (preallocated) { if (preallocated) {
@@ -97,7 +97,7 @@ static int channel_sync_syncpt_wait_raw(struct nvgpu_channel_sync_syncpt *s,
int err = 0; int err = 0;
u32 wait_cmd_size = c->g->ops.sync.syncpt.get_wait_cmd_size(); u32 wait_cmd_size = c->g->ops.sync.syncpt.get_wait_cmd_size();
if (!nvgpu_nvhost_syncpt_is_valid_pt_ext(s->nvhost_dev, id)) { if (!nvgpu_nvhost_syncpt_is_valid_pt_ext(s->nvhost, id)) {
return -EINVAL; return -EINVAL;
} }
@@ -145,7 +145,7 @@ static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
nvgpu_os_fence_syncpt_extract_nth_syncpt( nvgpu_os_fence_syncpt_extract_nth_syncpt(
&os_fence_syncpt, i, &syncpt_id, &syncpt_thresh); &os_fence_syncpt, i, &syncpt_id, &syncpt_thresh);
if ((syncpt_id == 0U) || !nvgpu_nvhost_syncpt_is_valid_pt_ext( if ((syncpt_id == 0U) || !nvgpu_nvhost_syncpt_is_valid_pt_ext(
c->g->nvhost_dev, syncpt_id)) { c->g->nvhost, syncpt_id)) {
err = -EINVAL; err = -EINVAL;
goto cleanup; goto cleanup;
} }
@@ -208,7 +208,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
c->g->ops.sync.syncpt.add_incr_cmd(c->g, wfi_cmd, c->g->ops.sync.syncpt.add_incr_cmd(c->g, wfi_cmd,
incr_cmd, sp->id, sp->syncpt_buf.gpu_va); incr_cmd, sp->id, sp->syncpt_buf.gpu_va);
thresh = nvgpu_nvhost_syncpt_incr_max_ext(sp->nvhost_dev, sp->id, thresh = nvgpu_nvhost_syncpt_incr_max_ext(sp->nvhost, sp->id,
c->g->ops.sync.syncpt.get_incr_per_release()); c->g->ops.sync.syncpt.get_incr_per_release());
if (register_irq) { if (register_irq) {
@@ -221,7 +221,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
* channel_sync_syncpt_update() */ * channel_sync_syncpt_update() */
err = nvgpu_nvhost_intr_register_notifier( err = nvgpu_nvhost_intr_register_notifier(
sp->nvhost_dev, sp->nvhost,
sp->id, thresh, sp->id, thresh,
channel_sync_syncpt_update, c); channel_sync_syncpt_update, c);
if (err != 0) { if (err != 0) {
@@ -239,7 +239,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
} }
if (need_sync_fence) { if (need_sync_fence) {
err = nvgpu_os_fence_syncpt_create(&os_fence, c, sp->nvhost_dev, err = nvgpu_os_fence_syncpt_create(&os_fence, c, sp->nvhost,
sp->id, thresh); sp->id, thresh);
if (err != 0) { if (err != 0) {
@@ -247,7 +247,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
} }
} }
err = nvgpu_fence_from_syncpt(fence, sp->nvhost_dev, err = nvgpu_fence_from_syncpt(fence, sp->nvhost,
sp->id, thresh, os_fence); sp->id, thresh, os_fence);
if (err != 0) { if (err != 0) {
@@ -304,7 +304,7 @@ static void channel_sync_syncpt_set_min_eq_max(struct nvgpu_channel_sync *s)
{ {
struct nvgpu_channel_sync_syncpt *sp = struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_ops(s); nvgpu_channel_sync_syncpt_from_ops(s);
nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id); nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost, sp->id);
} }
#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT */ #endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT */
@@ -313,7 +313,7 @@ static void channel_sync_syncpt_set_safe_state(struct nvgpu_channel_sync *s)
{ {
struct nvgpu_channel_sync_syncpt *sp = struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_ops(s); nvgpu_channel_sync_syncpt_from_ops(s);
nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost_dev, sp->id); nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost, sp->id);
} }
static u32 channel_sync_syncpt_get_id(struct nvgpu_channel_sync_syncpt *sp) static u32 channel_sync_syncpt_get_id(struct nvgpu_channel_sync_syncpt *sp)
@@ -334,8 +334,8 @@ static void channel_sync_syncpt_destroy(struct nvgpu_channel_sync *s)
sp->c->g->ops.sync.syncpt.free_buf(sp->c, &sp->syncpt_buf); sp->c->g->ops.sync.syncpt.free_buf(sp->c, &sp->syncpt_buf);
nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id); nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost, sp->id);
nvgpu_nvhost_syncpt_put_ref_ext(sp->nvhost_dev, sp->id); nvgpu_nvhost_syncpt_put_ref_ext(sp->nvhost, sp->id);
nvgpu_kfree(sp->c->g, sp); nvgpu_kfree(sp->c->g, sp);
} }
@@ -374,7 +374,7 @@ nvgpu_channel_sync_syncpt_create(struct nvgpu_channel *c, bool user_managed)
} }
sp->c = c; sp->c = c;
sp->nvhost_dev = c->g->nvhost_dev; sp->nvhost = c->g->nvhost;
if (user_managed) { if (user_managed) {
(void)strncpy(syncpt_name, c->g->name, sizeof(syncpt_name)); (void)strncpy(syncpt_name, c->g->name, sizeof(syncpt_name));
@@ -389,7 +389,7 @@ nvgpu_channel_sync_syncpt_create(struct nvgpu_channel *c, bool user_managed)
} }
(void)strcat(syncpt_name, "_user"); (void)strcat(syncpt_name, "_user");
sp->id = nvgpu_nvhost_get_syncpt_client_managed(sp->nvhost_dev, sp->id = nvgpu_nvhost_get_syncpt_client_managed(sp->nvhost,
syncpt_name); syncpt_name);
} }
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT #ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
@@ -397,7 +397,7 @@ nvgpu_channel_sync_syncpt_create(struct nvgpu_channel *c, bool user_managed)
snprintf(syncpt_name, sizeof(syncpt_name), snprintf(syncpt_name, sizeof(syncpt_name),
"%s_%d", c->g->name, c->chid); "%s_%d", c->g->name, c->chid);
sp->id = nvgpu_nvhost_get_syncpt_host_managed(sp->nvhost_dev, sp->id = nvgpu_nvhost_get_syncpt_host_managed(sp->nvhost,
c->chid, syncpt_name); c->chid, syncpt_name);
} }
#endif #endif
@@ -411,13 +411,13 @@ nvgpu_channel_sync_syncpt_create(struct nvgpu_channel *c, bool user_managed)
&sp->syncpt_buf); &sp->syncpt_buf);
if (err != 0) { if (err != 0) {
nvgpu_nvhost_syncpt_put_ref_ext(sp->nvhost_dev, sp->id); nvgpu_nvhost_syncpt_put_ref_ext(sp->nvhost, sp->id);
nvgpu_kfree(c->g, sp); nvgpu_kfree(c->g, sp);
nvgpu_err(c->g, "failed to allocate syncpoint buffer"); nvgpu_err(c->g, "failed to allocate syncpoint buffer");
return NULL; return NULL;
} }
nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id); nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost, sp->id);
nvgpu_atomic_set(&sp->ops.refcount, 0); nvgpu_atomic_set(&sp->ops.refcount, 0);
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT #ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT

View File

@@ -135,7 +135,7 @@ void gm20b_pbdma_syncpoint_debug_dump(struct gk20a *g,
gk20a_debug_output(o, "%s on syncpt %u (%s) val %u", gk20a_debug_output(o, "%s on syncpt %u (%s) val %u",
info->hw_state.pending_acquire ? "Waiting" : "Waited", info->hw_state.pending_acquire ? "Waiting" : "Waited",
pbdma_syncpointb_syncpt_index_v(syncpointb), pbdma_syncpointb_syncpt_index_v(syncpointb),
nvgpu_nvhost_syncpt_get_name(g->nvhost_dev, nvgpu_nvhost_syncpt_get_name(g->nvhost,
(int) pbdma_syncpointb_syncpt_index_v(syncpointb)), (int) pbdma_syncpointb_syncpt_index_v(syncpointb)),
pbdma_syncpointa_payload_v(syncpointa)); pbdma_syncpointa_payload_v(syncpointa));
} }

View File

@@ -664,7 +664,7 @@ struct gk20a {
* have had the opportunity to free their private data. * have had the opportunity to free their private data.
*/ */
void (*gfree)(struct gk20a *g); void (*gfree)(struct gk20a *g);
struct nvgpu_nvhost_dev *nvhost_dev; struct nvgpu_nvhost_dev *nvhost;
/** /**
* Used by <nvgpu/enabled.h>. Do not access directly! * Used by <nvgpu/enabled.h>. Do not access directly!

View File

@@ -1070,7 +1070,7 @@ static int nvgpu_ioctl_channel_get_user_syncpoint(struct nvgpu_channel *ch,
} }
args->syncpoint_id = nvgpu_channel_sync_get_syncpt_id(user_sync_syncpt); args->syncpoint_id = nvgpu_channel_sync_get_syncpt_id(user_sync_syncpt);
args->syncpoint_max = nvgpu_nvhost_syncpt_read_maxval(g->nvhost_dev, args->syncpoint_max = nvgpu_nvhost_syncpt_read_maxval(g->nvhost,
args->syncpoint_id); args->syncpoint_id);
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS)) { if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS)) {
args->gpu_va = args->gpu_va =

View File

@@ -53,18 +53,18 @@ int nvgpu_get_nvhost_dev(struct gk20a *g)
return 0; return 0;
} }
g->nvhost_dev = nvgpu_kzalloc(g, sizeof(struct nvgpu_nvhost_dev)); g->nvhost = nvgpu_kzalloc(g, sizeof(struct nvgpu_nvhost_dev));
if (!g->nvhost_dev) if (!g->nvhost)
return -ENOMEM; return -ENOMEM;
g->nvhost_dev->host1x_pdev = host1x_pdev; g->nvhost->host1x_pdev = host1x_pdev;
return 0; return 0;
} }
void nvgpu_free_nvhost_dev(struct gk20a *g) void nvgpu_free_nvhost_dev(struct gk20a *g)
{ {
nvgpu_kfree(g, g->nvhost_dev); nvgpu_kfree(g, g->nvhost);
} }
int nvgpu_nvhost_module_busy_ext( int nvgpu_nvhost_module_busy_ext(
@@ -191,9 +191,9 @@ int nvgpu_nvhost_create_symlink(struct gk20a *g)
struct device *dev = dev_from_gk20a(g); struct device *dev = dev_from_gk20a(g);
int err = 0; int err = 0;
if (g->nvhost_dev && if (g->nvhost &&
(dev->parent != &g->nvhost_dev->host1x_pdev->dev)) { (dev->parent != &g->nvhost->host1x_pdev->dev)) {
err = sysfs_create_link(&g->nvhost_dev->host1x_pdev->dev.kobj, err = sysfs_create_link(&g->nvhost->host1x_pdev->dev.kobj,
&dev->kobj, &dev->kobj,
dev_name(dev)); dev_name(dev));
} }
@@ -205,9 +205,9 @@ void nvgpu_nvhost_remove_symlink(struct gk20a *g)
{ {
struct device *dev = dev_from_gk20a(g); struct device *dev = dev_from_gk20a(g);
if (g->nvhost_dev && if (g->nvhost &&
(dev->parent != &g->nvhost_dev->host1x_pdev->dev)) { (dev->parent != &g->nvhost->host1x_pdev->dev)) {
sysfs_remove_link(&g->nvhost_dev->host1x_pdev->dev.kobj, sysfs_remove_link(&g->nvhost->host1x_pdev->dev.kobj,
dev_name(dev)); dev_name(dev));
} }
} }
@@ -275,7 +275,7 @@ int nvgpu_nvhost_syncpt_init(struct gk20a *g)
} }
err = nvgpu_nvhost_syncpt_unit_interface_get_aperture( err = nvgpu_nvhost_syncpt_unit_interface_get_aperture(
g->nvhost_dev, g->nvhost,
&g->syncpt_unit_base, &g->syncpt_unit_base,
&g->syncpt_unit_size); &g->syncpt_unit_size);
if (err) { if (err) {

View File

@@ -588,8 +588,8 @@ void gk20a_tegra_debug_dump(struct device *dev)
struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g; struct gk20a *g = platform->g;
if (g->nvhost_dev) if (g->nvhost)
nvgpu_nvhost_debug_dump_device(g->nvhost_dev); nvgpu_nvhost_debug_dump_device(g->nvhost);
#endif #endif
} }
@@ -599,8 +599,8 @@ int gk20a_tegra_busy(struct device *dev)
struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g; struct gk20a *g = platform->g;
if (g->nvhost_dev) if (g->nvhost)
return nvgpu_nvhost_module_busy_ext(g->nvhost_dev); return nvgpu_nvhost_module_busy_ext(g->nvhost);
#endif #endif
return 0; return 0;
} }
@@ -611,8 +611,8 @@ void gk20a_tegra_idle(struct device *dev)
struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a *g = platform->g; struct gk20a *g = platform->g;
if (g->nvhost_dev) if (g->nvhost)
nvgpu_nvhost_module_idle_ext(g->nvhost_dev); nvgpu_nvhost_module_idle_ext(g->nvhost);
#endif #endif
} }

View File

@@ -58,7 +58,7 @@ static int gv11b_vgpu_probe(struct device *dev)
return ret; return ret;
} }
ret = nvgpu_nvhost_syncpt_unit_interface_get_aperture(g->nvhost_dev, ret = nvgpu_nvhost_syncpt_unit_interface_get_aperture(g->nvhost,
&g->syncpt_unit_base, &g->syncpt_unit_base,
&g->syncpt_unit_size); &g->syncpt_unit_size);
if (ret) { if (ret) {

View File

@@ -32,9 +32,9 @@ static inline u32 nvgpu_nvhost_syncpt_nb_hw_pts(
void nvgpu_free_nvhost_dev(struct gk20a *g) { void nvgpu_free_nvhost_dev(struct gk20a *g) {
if (g->nvhost_dev != NULL) { if (g->nvhost != NULL) {
nvgpu_kfree(g, g->nvhost_dev); nvgpu_kfree(g, g->nvhost);
g->nvhost_dev = NULL; g->nvhost = NULL;
} }
} }
@@ -56,16 +56,16 @@ static void allocate_new_syncpt(struct nvgpu_nvhost_dev *nvgpu_syncpt_dev)
int nvgpu_get_nvhost_dev(struct gk20a *g) int nvgpu_get_nvhost_dev(struct gk20a *g)
{ {
int ret = 0; int ret = 0;
g->nvhost_dev = nvgpu_kzalloc(g, sizeof(struct nvgpu_nvhost_dev)); g->nvhost = nvgpu_kzalloc(g, sizeof(struct nvgpu_nvhost_dev));
if (g->nvhost_dev == NULL) { if (g->nvhost == NULL) {
return -ENOMEM; return -ENOMEM;
} }
g->nvhost_dev->host1x_sp_base = 0x60000000; g->nvhost->host1x_sp_base = 0x60000000;
g->nvhost_dev->host1x_sp_size = 0x4000; g->nvhost->host1x_sp_size = 0x4000;
g->nvhost_dev->nb_hw_pts = 704U; g->nvhost->nb_hw_pts = 704U;
ret = nvgpu_nvhost_syncpt_unit_interface_get_aperture( ret = nvgpu_nvhost_syncpt_unit_interface_get_aperture(
g->nvhost_dev, &g->syncpt_unit_base, g->nvhost, &g->syncpt_unit_base,
&g->syncpt_unit_size); &g->syncpt_unit_size);
if (ret != 0) { if (ret != 0) {
nvgpu_err(g, "Failed to get syncpt interface"); nvgpu_err(g, "Failed to get syncpt interface");

View File

@@ -135,7 +135,7 @@ int test_sync_init(struct unit_module *m, struct gk20a *g, void *args)
} }
/* /*
* Init g->nvhost_dev containing sync metadata * Init g->nvhost containing sync metadata
*/ */
ret = nvgpu_get_nvhost_dev(g); ret = nvgpu_get_nvhost_dev(g);
if (ret != 0) { if (ret != 0) {
@@ -201,13 +201,13 @@ int test_sync_create_destroy_sync(struct unit_module *m, struct gk20a *g, void *
unit_return_fail(m, "unexpected failure in creating sync points"); unit_return_fail(m, "unexpected failure in creating sync points");
} }
syncpt_value = g->nvhost_dev->syncpt_value; syncpt_value = g->nvhost->syncpt_value;
unit_info(m, "Syncpt ID: %u, Syncpt Value: %u\n", unit_info(m, "Syncpt ID: %u, Syncpt Value: %u\n",
g->nvhost_dev->syncpt_id, syncpt_value); g->nvhost->syncpt_id, syncpt_value);
assert((g->nvhost_dev->syncpt_id > 0U) && assert((g->nvhost->syncpt_id > 0U) &&
(g->nvhost_dev->syncpt_id <= NUM_HW_PTS)); (g->nvhost->syncpt_id <= NUM_HW_PTS));
assert(syncpt_value < (UINT_MAX - SYNCPT_SAFE_STATE_INCR)); assert(syncpt_value < (UINT_MAX - SYNCPT_SAFE_STATE_INCR));
@@ -252,8 +252,8 @@ int test_sync_set_safe_state(struct unit_module *m, struct gk20a *g, void *args)
unit_return_fail(m, "unexpected failure in creating sync points"); unit_return_fail(m, "unexpected failure in creating sync points");
} }
syncpt_id = g->nvhost_dev->syncpt_id; syncpt_id = g->nvhost->syncpt_id;
syncpt_value = g->nvhost_dev->syncpt_value; syncpt_value = g->nvhost->syncpt_value;
unit_info(m, "Syncpt ID: %u, Syncpt Value: %u\n", unit_info(m, "Syncpt ID: %u, Syncpt Value: %u\n",
syncpt_id, syncpt_value); syncpt_id, syncpt_value);
@@ -264,7 +264,7 @@ int test_sync_set_safe_state(struct unit_module *m, struct gk20a *g, void *args)
nvgpu_channel_sync_set_safe_state(sync); nvgpu_channel_sync_set_safe_state(sync);
syncpt_safe_state_val = g->nvhost_dev->syncpt_value; syncpt_safe_state_val = g->nvhost->syncpt_value;
if ((syncpt_safe_state_val - syncpt_value) != SYNCPT_SAFE_STATE_INCR) { if ((syncpt_safe_state_val - syncpt_value) != SYNCPT_SAFE_STATE_INCR) {
unit_return_fail(m, "unexpected increment value for safe state"); unit_return_fail(m, "unexpected increment value for safe state");
@@ -366,7 +366,7 @@ static void clear_test_params(struct gk20a *g, bool *user_managed,
} }
if (branch == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) { if (branch == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
g->nvhost_dev->syncpt_id = 1U; g->nvhost->syncpt_id = 1U;
} }
if (ch->vm->syncpt_ro_map_gpu_va) { if (ch->vm->syncpt_ro_map_gpu_va) {
@@ -395,7 +395,7 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
* This is normally not cleared when a syncpt's last ref * This is normally not cleared when a syncpt's last ref
* is removed. Hence, explicitly zero it after every failure * is removed. Hence, explicitly zero it after every failure
*/ */
g->nvhost_dev->syncpt_id = 0U; g->nvhost->syncpt_id = 0U;
if (branches == F_SYNC_GLOBAL_DISABLE_SYNCPT) { if (branches == F_SYNC_GLOBAL_DISABLE_SYNCPT) {
g->disable_syncpoints = true; g->disable_syncpoints = true;
@@ -406,7 +406,7 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
} else if (branches == F_SYNC_USER_MANAGED) { } else if (branches == F_SYNC_USER_MANAGED) {
user_managed = false; user_managed = false;
} else if (branches == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) { } else if (branches == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
g->nvhost_dev->syncpt_id = 20U; /* arbitrary id */ g->nvhost->syncpt_id = 20U; /* arbitrary id */
} else if (branches == F_SYNC_RO_MAP_GPU_VA_MAP_FAIL) { } else if (branches == F_SYNC_RO_MAP_GPU_VA_MAP_FAIL) {
/* fail Read-Only nvgpu_gmmu_map of g->syncpt_mem */ /* fail Read-Only nvgpu_gmmu_map of g->syncpt_mem */
ch->vm->guest_managed = true; ch->vm->guest_managed = true;
@@ -439,8 +439,8 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
unit_return_fail(m, "expected failure in creating sync points"); unit_return_fail(m, "expected failure in creating sync points");
} }
syncpt_id = g->nvhost_dev->syncpt_id; syncpt_id = g->nvhost->syncpt_id;
syncpt_value = g->nvhost_dev->syncpt_value; syncpt_value = g->nvhost->syncpt_value;
assert(syncpt_id == 0U); assert(syncpt_id == 0U);
assert(syncpt_value == 0U); assert(syncpt_value == 0U);
@@ -470,7 +470,7 @@ int test_sync_deinit(struct unit_module *m, struct gk20a *g, void *args)
de_init_syncpt_mem(m, g); de_init_syncpt_mem(m, g);
if (g->nvhost_dev == NULL) { if (g->nvhost == NULL) {
unit_return_fail(m ,"no valid nvhost device exists\n"); unit_return_fail(m ,"no valid nvhost device exists\n");
} }

View File

@@ -46,7 +46,7 @@ struct unit_module;
* - init FIFO register space. * - init FIFO register space.
* - init HAL parameters for gv11b. * - init HAL parameters for gv11b.
* - init required for getting the sync ops initialized. * - init required for getting the sync ops initialized.
* - init g->nvhost_dev containing sync metadata. * - init g->nvhost containing sync metadata.
* - alloc memory for g->syncpt_mem. * - alloc memory for g->syncpt_mem.
* - alloc memory for channel. * - alloc memory for channel.
* - alloc and init a VM for the channel. * - alloc and init a VM for the channel.
@@ -68,7 +68,7 @@ int test_sync_init(struct unit_module *m, struct gk20a *g, void *args);
* - put reference to VM put. * - put reference to VM put.
* - free channel memory. * - free channel memory.
* - free memory for g->syncpt_mem. * - free memory for g->syncpt_mem.
* - free g->nvhost_dev. * - free g->nvhost.
* - clear FIFO register space. * - clear FIFO register space.
* *
* Output: Returns PASS if all the above steps are successful. FAIL otherwise. * Output: Returns PASS if all the above steps are successful. FAIL otherwise.