video: tegra: nvmap: Add new heap for VI mempool

- The VI is currently allowed to access the whole of the Guest VM, since
it is a stage-2 SMMU device, and can therefore read and write any
memory. This may lead to a breach of confidentiality and integrity. To
restrict the VI from accessing the whole VM, the HV creates a mempool
that limits the VI's access to the mempool memory alone.
- In nvmap, IVM carveouts follow special rules: if the user specifies an
IVM carveout to allocate from and the first IVM carveout does not have
sufficient memory, nvmap tries to allocate from the next IVM carveout.
We do not want these rules to apply to the VI mempool heap, so only the
carveout initialization remains similar to the IVM carveouts; all other
operations are performed normally, as for any other carveout (see the
sketch after this list).
- Add DT binding doc for VI-carveout.
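
For clarity, the difference between the two allocation policies can be
sketched as follows. This is a minimal, hypothetical illustration:
"struct carveout", the heap bits, and both helpers are stand-ins for the
real nvmap types and functions, not the actual implementation.

    /* Hypothetical sketch of the two allocation policies described above. */
    #include <stddef.h>

    #define HEAP_IVM (1ul << 1)
    #define HEAP_VI  (1ul << 4)

    struct carveout {
        unsigned long usage_mask;
        size_t free_bytes;
    };

    /* IVM rule: if one IVM carveout is too full, fall through to the next. */
    static struct carveout *alloc_ivm(struct carveout *co, size_t n, size_t len)
    {
        for (size_t i = 0; i < n; i++)
            if ((co[i].usage_mask & HEAP_IVM) && co[i].free_bytes >= len)
                return &co[i];
        return NULL;
    }

    /* VI rule: the VI mempool is one ordinary carveout; no fallback occurs. */
    static struct carveout *alloc_vi(struct carveout *co, size_t n, size_t len)
    {
        for (size_t i = 0; i < n; i++)
            if (co[i].usage_mask & HEAP_VI)
                return co[i].free_bytes >= len ? &co[i] : NULL;
        return NULL;
    }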

Bug 4648721

Change-Id: Ib40415a4c80da908654c86162c1cd4b50b33ef31
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3196238
Reviewed-by: Pritesh Raithatha <praithatha@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Author:    Ketan Patil
Date:      2024-08-19 09:16:56 +00:00
Committed: Jon Hunter
Parent:    a59a10dfeb
Commit:    6e5aec9ce4
3 changed files with 123 additions and 34 deletions


@@ -0,0 +1,67 @@
# Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
%YAML 1.2
---
$id: http://devicetree.org/schemas/vi-carveout/nvidia,vi-carveout.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Nvidia VI Carveout

maintainers:
  - Ketan Patil

description: |
  This schema defines the properties for the NVIDIA VI carveout (mempool)
  memory node. A reference to this DT node should be added to the
  memory-region property of the tegra carveout node in order to create
  the VI carveout.

select:
  properties:
    compatible:
      minItems: 1
      maxItems: 1
      items:
        enum:
          - nvidia,vi_carveout
  required:
    - compatible

properties:
  ivm:
    description: |
      A phandle to the tegra-hv node followed by the mempool id.
    items:
      minItems: 2
      maxItems: 2
      items:
        - $ref: "/schemas/types.yaml#/definitions/phandle"
        - $ref: "/schemas/types.yaml#/definitions/uint32"

  status:
    description: |
      The status of the carveout node.
    enum:
      - okay
      - disabled

required:
  - compatible
  - ivm

examples:
  - |
    vi-carveout {
        compatible = "nvidia,vi_carveout";
        ivm = <&tegra_hv 3>;
        status = "okay";
    };
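
For context, a sketch of how the binding above might be wired up in a
board DTS. The label name and the consumer node below are assumptions
for illustration; only the vi-carveout node itself follows the example
in the schema.

    / {
        vi_co: vi-carveout {
            compatible = "nvidia,vi_carveout";
            ivm = <&tegra_hv 3>;    /* tegra-hv phandle + mempool id */
            status = "okay";
        };

        /* Hypothetical consumer node: per the description above, the
         * carveout node's memory-region list must reference the
         * vi-carveout so that nvmap picks it up during init.
         */
        tegra-carveouts {
            memory-region = <&vi_co>;
        };
    };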


@@ -6,15 +6,16 @@
 #include <linux/miscdevice.h>
 
-#define NVMAP_HEAP_IOVMM            (1ul<<30)
+#define NVMAP_HEAP_IOVMM            (1ul << 30)
 
 /* common carveout heaps */
-#define NVMAP_HEAP_CARVEOUT_VPR     (1ul<<28)
-#define NVMAP_HEAP_CARVEOUT_TSEC    (1ul<<27)
-#define NVMAP_HEAP_CARVEOUT_VIDMEM  (1ul<<26)
-#define NVMAP_HEAP_CARVEOUT_GPU     (1ul << 3)
-#define NVMAP_HEAP_CARVEOUT_FSI     (1ul<<2)
-#define NVMAP_HEAP_CARVEOUT_IVM     (1ul<<1)
-#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
+#define NVMAP_HEAP_CARVEOUT_VPR     (1ul << 28)
+#define NVMAP_HEAP_CARVEOUT_TSEC    (1ul << 27)
+#define NVMAP_HEAP_CARVEOUT_VIDMEM  (1ul << 26)
+#define NVMAP_HEAP_CARVEOUT_VI      (1ul << 4)
+#define NVMAP_HEAP_CARVEOUT_GPU     (1ul << 3)
+#define NVMAP_HEAP_CARVEOUT_FSI     (1ul << 2)
+#define NVMAP_HEAP_CARVEOUT_IVM     (1ul << 1)
+#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul << 0)
 
 #define NVMAP_HEAP_CARVEOUT_MASK    (NVMAP_HEAP_IOVMM - 1)
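
As a usage note, these heap defines form a bitmask that a client passes
when requesting memory, so a buffer can be steered to the new VI carveout
exactly like any other heap. A minimal sketch follows;
nvmap_alloc_from_heaps() is an assumed placeholder for the real
allocation entry point, not part of the nvmap API shown in this commit.

    #include <stddef.h>

    #define NVMAP_HEAP_CARVEOUT_VI  (1ul << 4)

    /* Assumed placeholder for the real nvmap allocation entry point. */
    extern int nvmap_alloc_from_heaps(unsigned long heap_mask, size_t len);

    static int alloc_vi_buffer(size_t len)
    {
        /* Only the VI bit is set, so the request cannot be satisfied
         * from the IVM carveouts or any other heap.
         */
        return nvmap_alloc_from_heaps(NVMAP_HEAP_CARVEOUT_VI, len);
    }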


@@ -91,12 +91,14 @@ static struct nvmap_platform_carveout nvmap_carveouts[] = {
         .size = 0,
         .numa_node_id = 0,
     },
-    /* Need uninitialized entries for IVM carveouts */
     [5] = {
-        .name = NULL,
-        .usage_mask = NVMAP_HEAP_CARVEOUT_IVM,
+        .name = "vimem",
+        .usage_mask = NVMAP_HEAP_CARVEOUT_VI,
+        .base = 0,
+        .size = 0,
         .numa_node_id = 0,
     },
+    /* Need uninitialized entries for IVM carveouts */
     [6] = {
         .name = NULL,
         .usage_mask = NVMAP_HEAP_CARVEOUT_IVM,
@@ -112,11 +114,16 @@ static struct nvmap_platform_carveout nvmap_carveouts[] = {
         .usage_mask = NVMAP_HEAP_CARVEOUT_IVM,
         .numa_node_id = 0,
     },
+    [9] = {
+        .name = NULL,
+        .usage_mask = NVMAP_HEAP_CARVEOUT_IVM,
+        .numa_node_id = 0,
+    },
 };
 
 static struct nvmap_platform_data nvmap_data = {
     .carveouts = nvmap_carveouts,
-    .nr_carveouts = 5,
+    .nr_carveouts = 6,
 };
 
 static struct nvmap_platform_carveout *nvmap_get_carveout_pdata(const char *name)
@@ -149,12 +156,21 @@ static int __init nvmap_populate_ivm_carveout(struct device *dev)
     struct tegra_hv_ivm_cookie *ivm;
     unsigned long long id;
     unsigned int guestid, result;
+    bool is_vi_heap;
 
     if (!of_phandle_iterator_init(&it, dev->of_node, "memory-region", NULL, 0)) {
         while (!of_phandle_iterator_next(&it) && it.node) {
-            if (of_device_is_available(it.node) &&
-                of_device_is_compatible(it.node, "nvidia,ivm_carveout") > 0) {
-                co = nvmap_get_carveout_pdata("nvidia,ivm_carveout");
+            is_vi_heap = false;
+            if (of_device_is_available(it.node)) {
+                if (of_device_is_compatible(it.node, "nvidia,ivm_carveout") > 0) {
+                    co = nvmap_get_carveout_pdata("nvidia,ivm_carveout");
+                } else if (of_device_is_compatible(it.node,
+                           "nvidia,vi_carveout") > 0) {
+                    co = nvmap_get_carveout_pdata("vimem");
+                    is_vi_heap = true;
+                } else
+                    continue;
+
                 if (!co) {
                     ret = -ENOMEM;
                     goto err;
@@ -198,25 +214,29 @@ static int __init nvmap_populate_ivm_carveout(struct device *dev)
                 /* See if this VM can allocate (or just create handle from ID)
                  * generated by peer partition
                  */
-                prop = of_get_property(it.node, "alloc", NULL);
-                if (!prop) {
-                    pr_err("failed to read alloc property\n");
-                    ret = -EINVAL;
-                    goto fail;
-                }
+                if (!is_vi_heap) {
+                    prop = of_get_property(it.node, "alloc", NULL);
+                    if (!prop) {
+                        pr_err("failed to read alloc property\n");
+                        ret = -EINVAL;
+                        goto fail;
+                    }
 
-                name = kzalloc(32, GFP_KERNEL);
-                if (!name) {
-                    ret = -ENOMEM;
-                    goto fail;
-                }
+                    name = kzalloc(32, GFP_KERNEL);
+                    if (!name) {
+                        ret = -ENOMEM;
+                        goto fail;
+                    }
 
-                co->can_alloc = of_read_number(prop, 1);
-                co->is_ivm = true;
-                sprintf(name, "ivm%02u%02u%02d", co->vmid, co->peer, co->can_alloc);
+                    co->can_alloc = of_read_number(prop, 1);
+                    co->is_ivm = true;
+                    sprintf(name, "ivm%02u%02u%02d", co->vmid, co->peer,
+                        co->can_alloc);
 
-                pr_info("IVM carveout IPA:%p, size=%zu, peer vmid=%u, name=%s\n",
-                    (void *)(uintptr_t)co->base, co->size, co->peer, name);
-                co->name = name;
+                    pr_info("IVM carveout IPA:%p, size=%zu, peer vmid=%u,"
+                        "name=%s\n", (void *)(uintptr_t)co->base, co->size,
+                        co->peer, name);
+                    co->name = name;
+                }
 
                 if (check_add_overflow(nvmap_data.nr_carveouts, 1U, &result)) {
                     co->name = NULL;
@@ -226,12 +246,12 @@ static int __init nvmap_populate_ivm_carveout(struct device *dev)
                 }
                 nvmap_data.nr_carveouts = result;
             }
         }
     }
 
     return 0;
 
 fail:
+    (void)tegra_hv_mempool_unreserve(ivm);
     co->base = 0;
     co->peer = 0;
     co->size = 0;
@@ -381,7 +401,8 @@ int __init nvmap_init(struct platform_device *pdev)
     if (!of_phandle_iterator_init(&it, np, "memory-region", NULL, 0)) {
         while (!of_phandle_iterator_next(&it) && it.node) {
             if (of_device_is_available(it.node) &&
-                !of_device_is_compatible(it.node, "nvidia,ivm_carveout")) {
+                !of_device_is_compatible(it.node, "nvidia,ivm_carveout") &&
+                !of_device_is_compatible(it.node, "nvidia,vi_carveout")) {
                 rmem2 = of_reserved_mem_lookup(it.node);
                 if (!rmem2) {
                     if (!of_property_read_string(it.node, "compatible", &compp))