drivers: Remove CONFIG_TEGRA_VIRTUALIZATION

Remove CONFIG_TEGRA_VIRTUALIZATION so that virtualization support is
always compiled into the drivers (hypervisor mode is still detected at
runtime via is_tegra_hypervisor_mode()). This permits the same driver
binaries to work on bare-metal Linux machines as well as virtualized
Linux machines.

Bug 5157858

Change-Id: I29d13dae957ed2273b65f95fff0bee1e06f45a27
Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3314409
Reviewed-by: Sumit Gupta <sumitg@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Ketan Patil <ketanp@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Jon Hunter
2025-02-24 17:26:01 +05:30
parent 23e3912ef7
commit eea9f99397
6 changed files with 13 additions and 196 deletions

View File

@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: GPL-2.0-only
# SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
this_makefile_path := $(abspath $(shell dirname $(lastword $(MAKEFILE_LIST))))
@@ -29,10 +29,6 @@ LINUXINCLUDE += -I$(srctree.nvidia-oot)/include
subdir-ccflags-y += -Werror
subdir-ccflags-y += -Wmissing-prototypes
ifeq ($(CONFIG_TEGRA_VIRTUALIZATION),y)
subdir-ccflags-y += -DCONFIG_TEGRA_VIRTUALIZATION
endif
ifeq ($(CONFIG_TEGRA_SYSTEM_TYPE_ACK),y)
subdir-ccflags-y += -DCONFIG_TEGRA_SYSTEM_TYPE_ACK
subdir-ccflags-y += -Wno-sometimes-uninitialized

View File

@@ -27,11 +27,9 @@
#include <linux/error-injection.h>
#endif /* CONFIG_FUNCTION_ERROR_INJECTION && CONFIG_BPF_KPROBE_OVERRIDE */
#ifdef CONFIG_TEGRA_VIRTUALIZATION
#include <soc/tegra/virt/syscalls.h>
#include <soc/tegra/virt/hv-ivc.h>
#include <uapi/linux/tegra-ivc-dev.h>
#endif /* CONFIG_TEGRA_VIRTUALIZATION */
#include "nvsciipc.h"
@@ -48,9 +46,7 @@ DEFINE_MUTEX(nvsciipc_mutex);
static struct platform_device *nvsciipc_pdev;
static struct nvsciipc *ctx;
#ifdef CONFIG_TEGRA_VIRTUALIZATION
static int32_t s_guestid = -1;
#endif /* CONFIG_TEGRA_VIRTUALIZATION */
long nvsciipc_dev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@@ -580,7 +576,6 @@ static int nvsciipc_ioctl_set_db(struct nvsciipc *ctx, unsigned int cmd,
}
}
#ifdef CONFIG_TEGRA_VIRTUALIZATION
if (s_guestid != -1) {
struct nvsciipc_config_entry *entry;
union nvsciipc_vuid_64 vuid64;
@@ -605,7 +600,6 @@ static int nvsciipc_ioctl_set_db(struct nvsciipc *ctx, unsigned int cmd,
}
}
}
#endif /* CONFIG_TEGRA_VIRTUALIZATION */
kfree(entry_ptr);
@@ -713,14 +707,10 @@ long nvsciipc_dev_ioctl(struct file *filp, unsigned int cmd,
ret = nvsciipc_ioctl_map_vuid(ctx, cmd, arg);
break;
case NVSCIIPC_IOCTL_GET_VMID:
#ifdef CONFIG_TEGRA_VIRTUALIZATION
if (copy_to_user((void __user *) arg, &s_guestid,
sizeof(s_guestid))) {
ret = -EFAULT;
}
#else
ret = -EFAULT;
#endif /* CONFIG_TEGRA_VIRTUALIZATION */
break;
default:
ERR("unrecognised ioctl cmd: 0x%x\n", cmd);
@@ -846,18 +836,14 @@ static int nvsciipc_probe(struct platform_device *pdev)
}
dev_set_drvdata(ctx->device, ctx);
#ifdef CONFIG_TEGRA_VIRTUALIZATION
{
if (is_tegra_hypervisor_mode()) {
ret = hyp_read_gid(&s_guestid);
if (ret != 0) {
ERR("Failed to read guest id\n");
goto error;
}
INFO("guestid: %d\n", s_guestid);
if (is_tegra_hypervisor_mode()) {
ret = hyp_read_gid(&s_guestid);
if (ret != 0) {
ERR("Failed to read guest id\n");
goto error;
}
INFO("guestid: %d\n", s_guestid);
}
#endif /* CONFIG_TEGRA_VIRTUALIZATION */
INFO("loaded module\n");

View File

@@ -522,7 +522,6 @@ static void *nvmap_dma_alloc_attrs(struct device *dev, size_t size,
EXPORT_SYMBOL(nvmap_dma_alloc_attrs);
#endif /* !NV_CONFIG_NVMAP_IN_EMBEDDED_LINUX */
#ifdef CONFIG_TEGRA_VIRTUALIZATION
static void *nvmap_dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size)
{
@@ -590,14 +589,12 @@ static void nvmap_dma_mark_declared_memory_unoccupied(struct device *dev,
bitmap_clear(mem->bitmap, pos, alloc_size);
spin_unlock_irqrestore(&mem->spinlock, flags);
}
#endif /* CONFIG_TEGRA_VIRTUALIZATION */
static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
phys_addr_t *start)
{
phys_addr_t pa = DMA_MAPPING_ERROR;
struct device *dev = h->dma_dev;
#ifdef CONFIG_TEGRA_VIRTUALIZATION
phys_addr_t sum;
if (start && h->is_ivm) {
@@ -612,9 +609,7 @@ static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
&pa, len);
return DMA_ERROR_CODE;
}
} else
#endif
{
} else {
(void)nvmap_dma_alloc_attrs(dev, len, &pa,
GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE);
if (!dma_mapping_error(dev, pa)) {
@@ -699,12 +694,9 @@ static void nvmap_free_mem(struct nvmap_heap *h, phys_addr_t base,
dev_dbg(dev, "Free base (%pa) size (%zu)\n", &base, len);
#ifdef CONFIG_TEGRA_VIRTUALIZATION
if (h->is_ivm && !h->can_alloc) {
nvmap_dma_mark_declared_memory_unoccupied(dev, base, len);
} else
#endif
{
} else {
nvmap_dma_free_attrs(dev, len,
(void *)(uintptr_t)base,
(dma_addr_t)base,

View File

@@ -26,10 +26,8 @@
#include "nvmap_handle.h"
#include "nvmap_dev_int.h"
#ifdef CONFIG_TEGRA_VIRTUALIZATION
#include <soc/tegra/virt/hv-ivc.h>
#include <soc/tegra/virt/syscalls.h>
#endif
static struct device tegra_generic_dev;
@@ -141,7 +139,6 @@ found:
return NULL;
}
#ifdef CONFIG_TEGRA_VIRTUALIZATION
static int __init nvmap_populate_ivm_carveout(struct device *dev)
{
char *name;
@@ -243,7 +240,6 @@ err:
return ret;
}
#endif /* CONFIG_TEGRA_VIRTUALIZATION */
/*
* This requires proper kernel arguments to have been passed.
@@ -426,9 +422,7 @@ int __init nvmap_init(struct platform_device *pdev)
goto end;
}
#ifdef CONFIG_TEGRA_VIRTUALIZATION
err = nvmap_populate_ivm_carveout(&pdev->dev);
#endif /* CONFIG_TEGRA_VIRTUALIZATION */
end:
return err;

View File

@@ -1,17 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: GPL-2.0-only
# SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Makefile for Hypervisor interface
#
ifdef CONFIG_TEGRA_VIRTUALIZATION
ifneq ($(NV_OOT_TEGRA_HV_SKIP_BUILD),y)
obj-m += tegra_hv.o
else
obj-m += tegra_hv.o
tegra_hv-objs := tegra_hv_dummy.o
endif
endif
obj-m += tegra_hv_pm_ctl.o
obj-m += hvc_sysfs.o
obj-m += ivc-cdev.o

View File

@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
/* SPDX-FileCopyrightText: Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __TEGRA_HV_IVC_H
#define __TEGRA_HV_IVC_H
@@ -32,7 +30,6 @@ struct tegra_hv_ivm_cookie {
void *reserved;
};
#if defined(CONFIG_TEGRA_VIRTUALIZATION)
bool is_tegra_hypervisor_mode(void);
/**
@@ -306,151 +303,5 @@ int tegra_hv_ivc_get_info(struct tegra_hv_ivc_cookie *ivck, uint64_t *pa,
void tegra_hv_ivc_notify(struct tegra_hv_ivc_cookie *ivck);
struct tegra_ivc *tegra_hv_ivc_convert_cookie(struct tegra_hv_ivc_cookie *ivck);
#else
static inline bool is_tegra_hypervisor_mode(void)
{
return false;
}
static inline struct tegra_hv_ivc_cookie *tegra_hv_ivc_reserve(
struct device_node *dn, int id,
const struct tegra_hv_ivc_ops *ops)
{
return ERR_PTR(-ENOTSUPP);
};
static inline int tegra_hv_ivc_unreserve(struct tegra_hv_ivc_cookie *ivck)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_write(struct tegra_hv_ivc_cookie *ivck,
const void *buf, int size)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_write_user(struct tegra_hv_ivc_cookie *ivck,
const void __user *buf, int size)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_read(struct tegra_hv_ivc_cookie *ivck, void *buf,
int size)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_read_user(struct tegra_hv_ivc_cookie *ivck,
void __user *buf, int size)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_can_read(struct tegra_hv_ivc_cookie *ivck)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_can_write(struct tegra_hv_ivc_cookie *ivck)
{
return -ENOTSUPP;
};
static inline uint32_t tegra_hv_ivc_tx_frames_available(
struct tegra_hv_ivc_cookie *ivck)
{
return 0;
};
static inline int tegra_hv_ivc_tx_empty(struct tegra_hv_ivc_cookie *ivck)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_set_loopback(struct tegra_hv_ivc_cookie *ivck,
int mode)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_dump(struct tegra_hv_ivc_cookie *ivck)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_read_peek(struct tegra_hv_ivc_cookie *ivck,
void *buf, int off, int count)
{
return -ENOTSUPP;
};
static inline void *tegra_hv_ivc_read_get_next_frame(
struct tegra_hv_ivc_cookie *ivck)
{
return ERR_PTR(-ENOTSUPP);
};
static inline int tegra_hv_ivc_read_advance(struct tegra_hv_ivc_cookie *ivck)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_write_poke(struct tegra_hv_ivc_cookie *ivck,
const void *buf, int off, int count)
{
return -ENOTSUPP;
};
static inline void *tegra_hv_ivc_write_get_next_frame(
struct tegra_hv_ivc_cookie *ivck)
{
return ERR_PTR(-ENOTSUPP);
};
static inline int tegra_hv_ivc_write_advance(struct tegra_hv_ivc_cookie *ivck)
{
return -ENOTSUPP;
};
static inline struct tegra_hv_ivm_cookie *tegra_hv_mempool_reserve(unsigned id)
{
return ERR_PTR(-ENOTSUPP);
};
static inline int tegra_hv_mempool_unreserve(struct tegra_hv_ivm_cookie *ck)
{
return -ENOTSUPP;
};
static inline int tegra_hv_ivc_channel_notified(
struct tegra_hv_ivc_cookie *ivck)
{
return -ENOTSUPP;
};
static inline void tegra_hv_ivc_channel_reset(struct tegra_hv_ivc_cookie *ivck)
{
return;
};
static inline int tegra_hv_ivc_get_info(struct tegra_hv_ivc_cookie *ivck,
uint64_t *pa, uint64_t *size)
{
return -ENOTSUPP;
};
static inline void tegra_hv_ivc_notify(struct tegra_hv_ivc_cookie *ivck)
{
return;
};
static inline struct tegra_ivc *tegra_hv_ivc_convert_cookie(
struct tegra_hv_ivc_cookie *ivck)
{
return ERR_PTR(-ENOTSUPP);
};
#endif /* defined(CONFIG_TEGRA_VIRTUALIZATION) */
#endif /* __TEGRA_HV_IVC_H */