Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add tegra_raw support
* This change adds the NVGPU_AS_MAP_BUFFER_FLAGS_TEGRA_RAW flag to control the buffer format.
* Add the NVGPU_SUPPORT_TEGRA_RAW enabled flag to indicate whether the feature is enabled for a given chip.
* Update the gv11b_gpu_phys_addr function to set the TEGRA_RAW bit.

Jira NVGPU-6640
Bug 3489827

Change-Id: I959c22bef906bb9c6dcdc8d5f5e9951ad9937a60
Signed-off-by: Sagar Kadamati <skadamati@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2545128
Reviewed-by: Martin Radev <mradev@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: Seema Khowala <seemaj@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
committed by mobile promotions
parent 03b1a81ab1
commit a3ed73a57c
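
For orientation before the diffs: from userspace, the new format is requested by setting NVGPU_AS_MAP_BUFFER_FLAGS_TEGRA_RAW in the map-buffer flags. The sketch below is illustrative only; it assumes the pre-existing nvgpu address-space UAPI (NVGPU_AS_IOCTL_MAP_BUFFER_EX and struct nvgpu_as_map_buffer_ex_args) and a dmabuf fd obtained elsewhere, so field names outside this change are assumptions, not part of the commit.

/*
 * Illustrative userspace sketch, not part of this commit. Assumes the
 * pre-existing nvgpu address-space UAPI (NVGPU_AS_IOCTL_MAP_BUFFER_EX and
 * struct nvgpu_as_map_buffer_ex_args); kind selection is omitted for brevity.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/nvgpu.h>

static int map_buffer_tegra_raw(int as_fd, int dmabuf_fd, __u64 *gpu_va)
{
        struct nvgpu_as_map_buffer_ex_args args;

        memset(&args, 0, sizeof(args));
        args.dmabuf_fd = dmabuf_fd;
        /* Request the TEGRA_RAW buffer format added by this change. */
        args.flags = NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE |
                     NVGPU_AS_MAP_BUFFER_FLAGS_TEGRA_RAW;

        if (ioctl(as_fd, NVGPU_AS_IOCTL_MAP_BUFFER_EX, &args) != 0)
                return -1;

        *gpu_va = args.offset;  /* GPU VA chosen by the kernel */
        return 0;
}
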
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -1060,7 +1060,15 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
 #endif
 #endif
 
         attrs.l3_alloc = ((flags & NVGPU_VM_MAP_L3_ALLOC) != 0U);
+
+        if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TEGRA_RAW)) {
+#ifdef CONFIG_NVGPU_TRACE
+                nvgpu_gmmu_dbg_v(g, &attrs,
+                                "TEGRA_RAW format is requested");
+#endif /* CONFIG_NVGPU_TRACE */
+                attrs.tegra_raw = ((flags & NVGPU_VM_MAP_TEGRA_RAW) != 0U);
+        }
 #if defined(CONFIG_NVGPU_NON_FUSA)
         if (nvgpu_is_errata_present(g, NVGPU_ERRATA_3288192) &&
                         (attrs.l3_alloc)) {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,17 +25,27 @@
 
 #include "gmmu_gv11b.h"
 
-/*
- * On Volta the GPU determines whether to do L3 allocation for a mapping by
- * checking the l3 alloc bit (bit number depends on soc) of the physical address.
- * So if a mapping should allocate lines in the L3 this bit must be set.
+/**
+ * The GPU determines whether to do specific action by checking
+ * the specific bit (bit number depends on soc) of the physical address.
+ *
+ * L3 alloc bit is used to allocate lines in L3.
+ * TEGRA_RAW bit is used to read buffers in TEGRA_RAW format.
  */
 u64 gv11b_gpu_phys_addr(struct gk20a *g,
                         struct nvgpu_gmmu_attrs *attrs, u64 phys)
 {
-        if ((attrs != NULL) && attrs->l3_alloc &&
-                        (g->ops.mm.gmmu.get_iommu_bit != NULL)) {
-                return phys | BIT64(g->ops.mm.gmmu.get_iommu_bit(g));
+        if (attrs == NULL) {
+                return phys;
+        }
+
+        if (attrs->l3_alloc && (g->ops.mm.gmmu.get_iommu_bit != NULL)) {
+                phys |= BIT64(g->ops.mm.gmmu.get_iommu_bit(g));
+        }
+
+        if (attrs->tegra_raw &&
+                        (g->ops.mm.gmmu.get_gpu_phys_tegra_raw_bit != NULL)) {
+                phys |= BIT64(g->ops.mm.gmmu.get_gpu_phys_tegra_raw_bit(g));
         }
 
         return phys;

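The chip-specific provider of the new get_gpu_phys_tegra_raw_bit HAL is not shown in this view. Below is a minimal sketch of what a per-chip implementation and its hook-up could look like; only the HAL pointer's signature comes from this commit, while the bit position 61, the function names, and the init hook are hypothetical.

/*
 * Hypothetical per-chip sketch, not part of this commit. Only the HAL
 * pointer's signature (u32 (*get_gpu_phys_tegra_raw_bit)(struct gk20a *g))
 * comes from the change; the bit position and all names below are illustrative.
 */
#include <nvgpu/types.h>
#include <nvgpu/gk20a.h>

#define HYPOTHETICAL_TEGRA_RAW_PHYS_BIT 61U

static u32 chip_gmmu_get_gpu_phys_tegra_raw_bit(struct gk20a *g)
{
        (void)g;        /* bit position is fixed for this hypothetical SoC */
        return HYPOTHETICAL_TEGRA_RAW_PHYS_BIT;
}

/* Hook-up during HAL init (function name illustrative): */
void chip_init_gmmu_hal(struct gk20a *g)
{
        g->ops.mm.gmmu.get_gpu_phys_tegra_raw_bit =
                chip_gmmu_get_gpu_phys_tegra_raw_bit;
}
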
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -217,6 +217,8 @@ struct gk20a;
         DEFINE_FLAG(NVGPU_SUPPORT_ROP_IN_GPC, "ROP is part of GPC"), \
         DEFINE_FLAG(NVGPU_SUPPORT_BUFFER_METADATA, "Buffer metadata support"), \
         DEFINE_FLAG(NVGPU_SUPPORT_NVS, "Domain scheduler support"), \
+        DEFINE_FLAG(NVGPU_SUPPORT_TEGRA_RAW, \
+                "TEGRA_RAW format support"), \
         DEFINE_FLAG(NVGPU_MAX_ENABLED_BITS, "Marks max number of flags"),
 
 /**

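The enabled flag declared above is what the nvgpu_is_enabled() check in the mapping path keys off, so some chip or platform init path has to set it. A minimal sketch under that assumption follows; the init function name and the gating condition are hypothetical, and nvgpu_set_enabled() is the existing setter that pairs with nvgpu_is_enabled().

/*
 * Sketch only: the surrounding init function and its gating condition are
 * hypothetical. It shows how a chip/platform init path could advertise the
 * feature so that nvgpu_is_enabled(g, NVGPU_SUPPORT_TEGRA_RAW) passes.
 */
#include <nvgpu/enabled.h>
#include <nvgpu/gk20a.h>

static void chip_enable_tegra_raw(struct gk20a *g)
{
        /* One plausible gate: only enable when the GMMU exposes the bit. */
        if (g->ops.mm.gmmu.get_gpu_phys_tegra_raw_bit != NULL) {
                nvgpu_set_enabled(g, NVGPU_SUPPORT_TEGRA_RAW, true);
        }
}
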
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -175,6 +175,10 @@ struct nvgpu_gmmu_attrs {
          * True if l3_alloc flag is valid.
          */
         bool l3_alloc;
+        /**
+         * True if tegra_raw flag is valid.
+         */
+        bool tegra_raw;
         /**
          * True if platform_atomic flag is valid.
          */

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -352,6 +352,17 @@ struct gops_mm_gmmu {
          */
         u32 (*get_iommu_bit)(struct gk20a *g);
 
+        /**
+         * @brief HAL to get the tegra_raw physical bit position.
+         *
+         * @param g [in] The GPU.
+         *
+         * This HAL is used to get the tegra_raw physical bit position.
+         *
+         * @return tegra_raw physical bit position.
+         */
+        u32 (*get_gpu_phys_tegra_raw_bit)(struct gk20a *g);
+
         /**
          * @brief HAL to convert from tegra_phys to gpu_phys.
          *

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -350,6 +350,7 @@ struct vm_gk20a {
 #define NVGPU_VM_MAP_DIRECT_KIND_CTRL   BIT32(4)
 #define NVGPU_VM_MAP_L3_ALLOC           BIT32(5)
 #define NVGPU_VM_MAP_PLATFORM_ATOMIC    BIT32(6)
+#define NVGPU_VM_MAP_TEGRA_RAW          BIT32(7)
 
 #define NVGPU_VM_MAP_ACCESS_DEFAULT     0U
 #define NVGPU_VM_MAP_ACCESS_READ_ONLY   1U

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -62,6 +62,8 @@ static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
                 core_flags |= NVGPU_VM_MAP_DIRECT_KIND_CTRL;
         if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC)
                 core_flags |= NVGPU_VM_MAP_PLATFORM_ATOMIC;
+        if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_TEGRA_RAW)
+                core_flags |= NVGPU_VM_MAP_TEGRA_RAW;
 
         /* copy the map access bitfield from flags */
         core_flags |= (flags & map_access_bitmask);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -102,14 +102,15 @@ struct nvgpu_as_bind_channel_args {
  * chosen will be returned back to the caller in the 'page_size' parameter in
  * that case.
  */
 #define NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET      (1 << 0)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE         (1 << 2)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT       (1 << 4)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE      (1 << 5)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS (1 << 6)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC          (1 << 7)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL  (1 << 8)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC   (1 << 9)
+#define NVGPU_AS_MAP_BUFFER_FLAGS_TEGRA_RAW         (1 << 12)
 
 #define NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_OFFSET 10U
 #define NVGPU_AS_MAP_BUFFER_FLAGS_ACCESS_BITMASK_SIZE   2U